<div dir="ltr"><div>SET_SH_REG won't work with CP register shadowing. You need to use WRITE_DATA or WREG32.</div><div><br></div><div>Marek<br></div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Mon, Aug 24, 2020 at 7:57 AM Samuel Pitoiset <<a href="mailto:samuel.pitoiset@gmail.com">samuel.pitoiset@gmail.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">A trap handler can be used by userspace to catch shader exceptions<br>
like divide by zero, memory violations etc.<br>
<br>
On GFX6-GFX8, the registers used to configure TBA/TMA aren't<br>
privileged while on GFX9+ they are per VMID and privileged,<br>
so that only the KMD can configure them.<br>
<br>
This introduces a new CS chunk that can be used to set the<br>
TBA/TMA virtual address at submit time.<br>
<br>
TODO:<br>
- add GFX 6,7 and 10 support<br>
- rebase on top of amd-staging-drm-next (this branch currently<br>
hangs my GPU at boot)<br>
<br>
Signed-off-by: Samuel Pitoiset <<a href="mailto:samuel.pitoiset@gmail.com" target="_blank">samuel.pitoiset@gmail.com</a>><br>
---<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c   | 31 +++++++++++++++++<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  |  3 +-<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h  |  4 +++<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h  |  4 +++<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h |  4 +++<br>
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 15 ++++++++-<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c    | 42 ++++++++++++++++++++++--<br>
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    | 19 +++++++++++<br>
 include/uapi/drm/amdgpu_drm.h            |  8 +++++<br>
 9 files changed, 126 insertions(+), 4 deletions(-)<br>
<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c<br>
index a512ccbc4dea..6ca5c4912e3a 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c<br>
@@ -104,6 +104,19 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,<br>
        return r;<br>
 }<br>
<br>
+static int amdgpu_cs_user_trap_chunk(struct amdgpu_cs_parser *p,<br>
+                                    struct drm_amdgpu_cs_chunk_trap *data,<br>
+                                    uint64_t *tba_addr, uint64_t *tma_addr)<br>
+{<br>
+       if (!data->tba_addr || !data->tma_addr)<br>
+               return -EINVAL;<br>
+<br>
+       *tba_addr = data->tba_addr;<br>
+       *tma_addr = data->tma_addr;<br>
+<br>
+       return 0;<br>
+}<br>
+<br>
 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)<br>
 {<br>
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;<br>
@@ -112,6 +125,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs<br>
        uint64_t *chunk_array;<br>
        unsigned size, num_ibs = 0;<br>
        uint32_t uf_offset = 0;<br>
+       uint64_t tba_addr = 0, tma_addr = 0;<br>
        int i;<br>
        int ret;<br>
<br>
@@ -214,6 +228,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs<br>
<br>
                        break;<br>
<br>
+               case AMDGPU_CHUNK_ID_TRAP:<br>
+                       size = sizeof(struct drm_amdgpu_cs_chunk_trap);<br>
+                       if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {<br>
+                               ret = -EINVAL;<br>
+                               goto free_partial_kdata;<br>
+                       }<br>
+<br>
+                       ret = amdgpu_cs_user_trap_chunk(p, p->chunks[i].kdata,<br>
+                                                       &tba_addr, &tma_addr);<br>
+                       if (ret)<br>
+                               goto free_partial_kdata;<br>
+                       break;<br>
+<br>
                case AMDGPU_CHUNK_ID_DEPENDENCIES:<br>
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:<br>
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:<br>
@@ -239,6 +266,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs<br>
<br>
        if (p->uf_entry.tv.bo)<br>
                p->job->uf_addr = uf_offset;<br>
+<br>
+       p->job->tba_addr = tba_addr;<br>
+       p->job->tma_addr = tma_addr;<br>
+<br>
        kfree(chunk_array);<br>
<br>
        /* Use this opportunity to fill in task info for the vm */<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
index 26127c7d2f32..1e703119e4c2 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c<br>
@@ -88,9 +88,10 @@<br>
  * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness<br>
  * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC<br>
  * - 3.39.0 - DMABUF implicit sync does a full pipeline sync<br>
+ * - 3.40.0 - Add AMDGPU_CHUNK_ID_TRAP<br>
  */<br>
 #define KMS_DRIVER_MAJOR       3<br>
-#define KMS_DRIVER_MINOR       39<br>
+#define KMS_DRIVER_MINOR       40<br>
 #define KMS_DRIVER_PATCHLEVEL  0<br>
<br>
 int amdgpu_vram_limit = 0;<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h<br>
index 8e58325bbca2..fd0d56724b4d 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h<br>
@@ -58,6 +58,10 @@ struct amdgpu_vmid {<br>
        uint32_t                oa_base;<br>
        uint32_t                oa_size;<br>
<br>
+       /* user trap */<br>
+       uint64_t                tba_addr;<br>
+       uint64_t                tma_addr;<br>
+<br>
        unsigned                pasid;<br>
        struct dma_fence        *pasid_mapping;<br>
 };<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h<br>
index 81caac9b958a..b8ed5b13ea44 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h<br>
@@ -62,6 +62,10 @@ struct amdgpu_job {<br>
        /* user fence handling */<br>
        uint64_t                uf_addr;<br>
        uint64_t                uf_sequence;<br>
+<br>
+       /* user trap handling */<br>
+       uint64_t                tba_addr;<br>
+       uint64_t                tma_addr;<br>
 };<br>
<br>
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h<br>
index da871d84b742..1f165a6295d9 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h<br>
@@ -197,6 +197,9 @@ struct amdgpu_ring_funcs {<br>
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);<br>
        int (*preempt_ib)(struct amdgpu_ring *ring);<br>
        void (*emit_mem_sync)(struct amdgpu_ring *ring);<br>
+       void (*emit_trap_handler)(struct amdgpu_ring *ring,<br>
+                                 uint32_t vmid,<br>
+                                 uint64_t tba_addr, uint64_t tma_addr);<br>
 };<br>
<br>
 struct amdgpu_ring {<br>
@@ -265,6 +268,7 @@ struct amdgpu_ring {<br>
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))<br>
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))<br>
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))<br>
+#define amdgpu_ring_emit_trap_handler(r, v, tba, tma) (r)->funcs->emit_trap_handler((r), (v), (tba), (tma))<br>
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))<br>
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))<br>
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
index 71e005cf2952..24916082de0b 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c<br>
@@ -1076,6 +1076,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,<br>
                id->gws_size != job->gws_size ||<br>
                id->oa_base != job->oa_base ||<br>
                id->oa_size != job->oa_size);<br>
+       bool trap_handler_needed = ring->funcs->emit_trap_handler && (<br>
+               id->tba_addr != job->tba_addr ||<br>
+               id->tma_addr != job->tma_addr);<br>
        bool vm_flush_needed = job->vm_needs_flush;<br>
        struct dma_fence *fence = NULL;<br>
        bool pasid_mapping_needed = false;<br>
@@ -1088,6 +1091,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,<br>
<br>
        if (amdgpu_vmid_had_gpu_reset(adev, id)) {<br>
                gds_switch_needed = true;<br>
+               trap_handler_needed = true;<br>
                vm_flush_needed = true;<br>
                pasid_mapping_needed = true;<br>
        }<br>
@@ -1099,12 +1103,14 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,<br>
        mutex_unlock(&id_mgr->lock);<br>
<br>
        gds_switch_needed &= !!ring->funcs->emit_gds_switch;<br>
+       trap_handler_needed &= !!ring->funcs->emit_trap_handler;<br>
        vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&<br>
                        job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;<br>
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&<br>
                ring->funcs->emit_wreg;<br>
<br>
-       if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)<br>
+       if (!vm_flush_needed && !gds_switch_needed &&<br>
+           !trap_handler_needed && !need_pipe_sync)<br>
                return 0;<br>
<br>
        if (ring->funcs->init_cond_exec)<br>
@@ -1158,6 +1164,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,<br>
                                            job->oa_size);<br>
        }<br>
<br>
+       if (ring->funcs->emit_trap_handler && trap_handler_needed) {<br>
+               id->tba_addr = job->tba_addr;<br>
+               id->tma_addr = job->tma_addr;<br>
+               amdgpu_ring_emit_trap_handler(ring, job->vmid, job->tba_addr,<br>
+                                             job->tma_addr);<br>
+       }<br>
+<br>
        if (ring->funcs->patch_cond_exec)<br>
                amdgpu_ring_patch_cond_exec(ring, patch_offset);<br>
<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c<br>
index 33f1c4a46ebe..59db577e8c8f 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c<br>
@@ -5222,6 +5222,40 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,<br>
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));<br>
 }<br>
<br>
+static void gfx_v8_0_ring_emit_trap_handler(struct amdgpu_ring *ring,<br>
+                                           uint32_t vmid,<br>
+                                           uint64_t tba_addr,<br>
+                                           uint64_t tma_addr)<br>
+{<br>
+       if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {<br>
+               static const u32 regs[] = {<br>
+                       mmSPI_SHADER_TBA_LO_PS,<br>
+                       mmSPI_SHADER_TBA_LO_VS,<br>
+                       mmSPI_SHADER_TBA_LO_GS,<br>
+                       mmSPI_SHADER_TBA_LO_ES,<br>
+                       mmSPI_SHADER_TBA_LO_HS,<br>
+                       mmSPI_SHADER_TBA_LO_LS,<br>
+               };<br>
+               int i;<br>
+<br>
+               for (i = 0; i < ARRAY_SIZE(regs); i++) {<br>
+                       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_SH_REG, 4));<br>
+                       amdgpu_ring_write(ring, regs[i] - PACKET3_SET_SH_REG_START);<br>
+                       amdgpu_ring_write(ring, lower_32_bits(tba_addr >> 8));<br>
+                       amdgpu_ring_write(ring, upper_32_bits(tba_addr >> 8));<br>
+                       amdgpu_ring_write(ring, lower_32_bits(tma_addr >> 8));<br>
+                       amdgpu_ring_write(ring, upper_32_bits(tma_addr >> 8));<br>
+               }<br>
+       } else {<br>
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SET_SH_REG, 4));<br>
+               amdgpu_ring_write(ring, mmCOMPUTE_TBA_LO - PACKET3_SET_SH_REG_START);<br>
+               amdgpu_ring_write(ring, lower_32_bits(tba_addr >> 8));<br>
+               amdgpu_ring_write(ring, upper_32_bits(tba_addr >> 8));<br>
+               amdgpu_ring_write(ring, lower_32_bits(tma_addr >> 8));<br>
+               amdgpu_ring_write(ring, upper_32_bits(tma_addr >> 8));<br>
+       }<br>
+}<br>
+<br>
 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)<br>
 {<br>
        WREG32(mmSQ_IND_INDEX,<br>
@@ -6890,7 +6924,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {<br>
                5 + /* HDP_INVL */<br>
                12 + 12 + /* FENCE x2 */<br>
                2 + /* SWITCH_BUFFER */<br>
-               5, /* SURFACE_SYNC */<br>
+               5 + /* SURFACE_SYNC */<br>
+               36, /* gfx_v8_0_ring_emit_trap_handler */<br>
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */<br>
        .emit_ib = gfx_v8_0_ring_emit_ib_gfx,<br>
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,<br>
@@ -6909,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {<br>
        .emit_wreg = gfx_v8_0_ring_emit_wreg,<br>
        .soft_recovery = gfx_v8_0_ring_soft_recovery,<br>
        .emit_mem_sync = gfx_v8_0_emit_mem_sync,<br>
+       .emit_trap_handler = gfx_v8_0_ring_emit_trap_handler,<br>
 };<br>
<br>
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {<br>
@@ -6926,7 +6962,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {<br>
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */<br>
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */<br>
                7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */<br>
-               7, /* gfx_v8_0_emit_mem_sync_compute */<br>
+               7 + /* gfx_v8_0_emit_mem_sync_compute */<br>
+               6, /* gfx_v8_0_emit_trap_handler */<br>
        .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */<br>
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,<br>
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,<br>
@@ -6940,6 +6977,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {<br>
        .pad_ib = amdgpu_ring_generic_pad_ib,<br>
        .emit_wreg = gfx_v8_0_ring_emit_wreg,<br>
        .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,<br>
+       .emit_trap_handler = gfx_v8_0_ring_emit_trap_handler,<br>
 };<br>
<br>
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
index cb9d60a4e05e..4fc00f196085 100644<br>
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c<br>
@@ -4162,6 +4162,23 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,<br>
                                   (1 << (oa_size + oa_base)) - (1 << oa_base));<br>
 }<br>
<br>
+static void gfx_v9_0_ring_emit_trap_handler(struct amdgpu_ring *ring,<br>
+                                           uint32_t vmid,<br>
+                                           uint64_t tba_addr,<br>
+                                           uint64_t tma_addr)<br>
+{<br>
+       struct amdgpu_device *adev = ring->adev;<br>
+<br>
+       mutex_lock(&adev->srbm_mutex);<br>
+       soc15_grbm_select(adev, 0, 0, 0, vmid);<br>
+       WREG32_SOC15_RLC(GC, 0, mmSQ_SHADER_TBA_LO, lower_32_bits(tba_addr >> 8));<br>
+       WREG32_SOC15_RLC(GC, 0, mmSQ_SHADER_TBA_HI, upper_32_bits(tba_addr >> 8));<br>
+       WREG32_SOC15_RLC(GC, 0, mmSQ_SHADER_TMA_LO, lower_32_bits(tma_addr >> 8));<br>
+       WREG32_SOC15_RLC(GC, 0, mmSQ_SHADER_TMA_HI, upper_32_bits(tma_addr >> 8));<br>
+       soc15_grbm_select(adev, 0, 0, 0, 0);<br>
+       mutex_unlock(&adev->srbm_mutex);<br>
+}<br>
+<br>
 static const u32 vgpr_init_compute_shader[] =<br>
 {<br>
        0xb07c0000, 0xbe8000ff,<br>
@@ -6720,6 +6737,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {<br>
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,<br>
        .soft_recovery = gfx_v9_0_ring_soft_recovery,<br>
        .emit_mem_sync = gfx_v9_0_emit_mem_sync,<br>
+       .emit_trap_handler = gfx_v9_0_ring_emit_trap_handler,<br>
 };<br>
<br>
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {<br>
@@ -6756,6 +6774,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {<br>
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,<br>
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,<br>
        .emit_mem_sync = gfx_v9_0_emit_mem_sync,<br>
+       .emit_trap_handler = gfx_v9_0_ring_emit_trap_handler,<br>
 };<br>
<br>
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {<br>
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h<br>
index 3218576e109d..7eae264adb5d 100644<br>
--- a/include/uapi/drm/amdgpu_drm.h<br>
+++ b/include/uapi/drm/amdgpu_drm.h<br>
@@ -551,6 +551,7 @@ struct drm_amdgpu_gem_va {<br>
 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07<br>
 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT    0x08<br>
 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL  0x09<br>
+#define AMDGPU_CHUNK_ID_TRAP            0x0a<br>
<br>
 struct drm_amdgpu_cs_chunk {<br>
        __u32           chunk_id;<br>
@@ -645,6 +646,13 @@ struct drm_amdgpu_cs_chunk_syncobj {<br>
        __u64 point;<br>
 };<br>
<br>
+struct drm_amdgpu_cs_chunk_trap {<br>
+       /** Trap Base Address */<br>
+       __u64 tba_addr;<br>
+       /** Trap Memory Address */<br>
+       __u64 tma_addr;<br>
+};<br>
+<br>
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ     0<br>
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD  1<br>
 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD        2<br>
-- <br>
2.28.0<br>
<br>
_______________________________________________<br>
amd-gfx mailing list<br>
<a href="mailto:amd-gfx@lists.freedesktop.org" target="_blank">amd-gfx@lists.freedesktop.org</a><br>
<a href="https://lists.freedesktop.org/mailman/listinfo/amd-gfx" rel="noreferrer" target="_blank">https://lists.freedesktop.org/mailman/listinfo/amd-gfx</a><br>
</blockquote></div>