At least from coding style, backward compatibility etc., this looks sane
to me, so feel free to add an Acked-by.

But I absolutely can't judge whether this is correct from the hardware
point of view or not.

And I think that having somebody else look at this is mandatory before
it can be committed.

Christian.

On 28.01.19 at 18:25, Marek Olšák wrote:
Ping

On Tue, Jan 22, 2019 at 3:05 PM Marek Olšák <maraeo@gmail.com> wrote:

From: Marek Olšák <marek.olsak@amd.com>

I'm not increasing the DRM version because GDS isn't totally without
bugs yet.

v2: update emit_ib_size

Signed-off-by: Marek Olšák <marek.olsak@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 19 +++++++++++-
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 21 +++++++++++--
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 40 +++++++++++++++++++++++--
 include/uapi/drm/amdgpu_drm.h           |  5 ++++
 5 files changed, 82 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index ecbcefe49a98..f89f5734d985 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -30,20 +30,22 @@ struct amdgpu_bo;
 struct amdgpu_gds_asic_info {
        uint32_t        total_size;
        uint32_t        gfx_partition_size;
        uint32_t        cs_partition_size;
 };
 
 struct amdgpu_gds {
        struct amdgpu_gds_asic_info     mem;
        struct amdgpu_gds_asic_info     gws;
        struct amdgpu_gds_asic_info     oa;
+       uint32_t                        gds_compute_max_wave_id;
+
        /* At present, GDS, GWS and OA resources for gfx (graphics)
         * is always pre-allocated and available for graphics operation.
         * Such resource is shared between all gfx clients.
         * TODO: move this operation to user space
         * */
        struct amdgpu_bo*               gds_gfx_bo;
        struct amdgpu_bo*               gws_gfx_bo;
        struct amdgpu_bo*               oa_gfx_bo;
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 7984292f9282..a59e0fdf5a97 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2257,20 +2257,36 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
                                          uint32_t flags)
 {
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
+       /* Currently, there is a high chance of getting a wave ID mismatch
+        * between ME and GDS, leading to a hw deadlock, because ME generates
+        * different wave IDs than the GDS expects. This situation happens
+        * randomly when at least 5 compute pipes use GDS ordered append.
+        * The wave IDs generated by ME are also wrong after suspend/resume.
+        * Those are probably bugs somewhere else in the kernel driver.
+        *
+        * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in ME
+        * and GDS to 0 for this ring (me/pipe).
+        */
+       if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
+               amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+       }
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                          (2 << 0) |
 #endif
                                          (ib->gpu_addr & 0xFFFFFFFC));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
        amdgpu_ring_write(ring, control);
 }
 
@@ -4993,21 +5009,21 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v7_0_ring_emit_gds_switch */
                7 + /* gfx_v7_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
                7 + /* gfx_v7_0_ring_emit_pipeline_sync */
                CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
-       .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
+       .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
        .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
        .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
        .test_ring = gfx_v7_0_ring_test_ring,
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
@@ -5050,20 +5066,21 @@ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gfx.priv_inst_irq.num_types = 1;
        adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
 }
 
 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
 {
        /* init asci gds info */
        adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
        adev->gds.gws.total_size = 64;
        adev->gds.oa.total_size = 16;
+       adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 
        if (adev->gds.mem.total_size == 64 * 1024) {
                adev->gds.mem.gfx_partition_size = 4096;
                adev->gds.mem.cs_partition_size = 4096;
 
                adev->gds.gws.gfx_partition_size = 4;
                adev->gds.gws.cs_partition_size = 4;
 
                adev->gds.oa.gfx_partition_size = 4;
                adev->gds.oa.cs_partition_size = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a26747681ed6..b8e50a34bdb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6077,20 +6077,36 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
                                          uint32_t flags)
 {
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
+       /* Currently, there is a high chance of getting a wave ID mismatch
+        * between ME and GDS, leading to a hw deadlock, because ME generates
+        * different wave IDs than the GDS expects. This situation happens
+        * randomly when at least 5 compute pipes use GDS ordered append.
+        * The wave IDs generated by ME are also wrong after suspend/resume.
+        * Those are probably bugs somewhere else in the kernel driver.
+        *
+        * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in ME
+        * and GDS to 0 for this ring (me/pipe).
+        */
+       if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
+               amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+       }
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                (2 << 0) |
 #endif
                                (ib->gpu_addr & 0xFFFFFFFC));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
        amdgpu_ring_write(ring, control);
 }
 
@@ -6883,21 +6899,21 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v8_0_ring_emit_gds_switch */
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-       .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
+       .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
        .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
        .test_ring = gfx_v8_0_ring_test_ring,
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
@@ -6913,21 +6929,21 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v8_0_ring_emit_gds_switch */
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                17 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
-       .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
+       .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
        .test_ring = gfx_v8_0_ring_test_ring,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v8_0_ring_emit_rreg,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
 {
@@ -6989,20 +7005,21 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
 {
        adev->gfx.rlc.funcs = &iceland_rlc_funcs;
 }
 
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
 {
        /* init asci gds info */
        adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
        adev->gds.gws.total_size = 64;
        adev->gds.oa.total_size = 16;
+       adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 
        if (adev->gds.mem.total_size == 64 * 1024) {
                adev->gds.mem.gfx_partition_size = 4096;
                adev->gds.mem.cs_partition_size = 4096;
 
                adev->gds.gws.gfx_partition_size = 4;
                adev->gds.gws.cs_partition_size = 4;
 
                adev->gds.oa.gfx_partition_size = 4;
                adev->gds.oa.cs_partition_size = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 262ee3cf6f1c..5533f6e4f4a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4003,20 +4003,36 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
                                          uint32_t flags)
 {
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
+       /* Currently, there is a high chance of getting a wave ID mismatch
+        * between ME and GDS, leading to a hw deadlock, because ME generates
+        * different wave IDs than the GDS expects. This situation happens
+        * randomly when at least 5 compute pipes use GDS ordered append.
+        * The wave IDs generated by ME are also wrong after suspend/resume.
+        * Those are probably bugs somewhere else in the kernel driver.
+        *
+        * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in ME
+        * and GDS to 0 for this ring (me/pipe).
+        */
+       if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
+               amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+       }
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                (2 << 0) |
 #endif
                                lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, control);
 }
@@ -4722,21 +4738,21 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v9_0_ring_emit_gds_switch */
                7 + /* gfx_v9_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
                7 + /* gfx_v9_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v9_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
-       .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
+       .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_ib = gfx_v9_0_ring_emit_ib_compute,
        .emit_fence = gfx_v9_0_ring_emit_fence,
        .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
        .test_ring = gfx_v9_0_ring_test_ring,
        .test_ib = gfx_v9_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
@@ -4757,21 +4773,21 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v9_0_ring_emit_gds_switch */
                7 + /* gfx_v9_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
                7 + /* gfx_v9_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* gfx_v9_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
-       .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
+       .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
        .test_ring = gfx_v9_0_ring_test_ring,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v9_0_ring_emit_rreg,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 };
 
@@ -4839,20 +4855,40 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
                adev->gds.mem.total_size = 0x10000;
                break;
        case CHIP_RAVEN:
                adev->gds.mem.total_size = 0x1000;
                break;
        default:
                adev->gds.mem.total_size = 0x10000;
                break;
        }
 
+       switch (adev->asic_type) {
+       case CHIP_VEGA10:
+       case CHIP_VEGA20:
+               adev->gds.gds_compute_max_wave_id = 0x7ff;
+               break;
+       case CHIP_VEGA12:
+               adev->gds.gds_compute_max_wave_id = 0x27f;
+               break;
+       case CHIP_RAVEN:
+               if (adev->rev_id >= 0x8)
+                       adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
+               else
+                       adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
+               break;
+       default:
+               /* this really depends on the chip */
+               adev->gds.gds_compute_max_wave_id = 0x7ff;
+               break;
+       }
+
        adev->gds.gws.total_size = 64;
        adev->gds.oa.total_size = 16;
 
        if (adev->gds.mem.total_size == 64 * 1024) {
                adev->gds.mem.gfx_partition_size = 4096;
                adev->gds.mem.cs_partition_size = 4096;
 
                adev->gds.gws.gfx_partition_size = 4;
                adev->gds.gws.cs_partition_size = 4;
 
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index faaad04814e4..662d379ea624 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -561,20 +561,25 @@ union drm_amdgpu_cs {
 /* Preamble flag, which means the IB could be dropped if no context switch */
 #define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
 
 /* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
 #define AMDGPU_IB_FLAG_PREEMPT (1<<2)
 
 /* The IB fence should do the L2 writeback but not invalidate any shader
  * caches (L2/vL1/sL1/I$). */
 #define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
 
+/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
+ * This will reset wave ID counters for the IB.
+ */
+#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+
 struct drm_amdgpu_cs_chunk_ib {
        __u32 _pad;
        /** AMDGPU_IB_FLAG_* */
        __u32 flags;
        /** Virtual address to begin IB execution */
        __u64 va_start;
        /** Size of submission */
        __u32 ib_bytes;
        /** HW IP to submit to */
        __u32 ip_type;
-- 
2.17.1
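
A quick sanity check of the .emit_ib_size bump from 4 to 7 above, based
only on counting the amdgpu_ring_write() calls in the new emit path:

  SET_CONFIG_REG write of GDS_COMPUTE_MAX_WAVE_ID: 3 dwords (header, reg offset, value)
  INDIRECT_BUFFER packet:                          4 dwords (header, VA lo, VA hi, control)
  worst case per emit_ib_compute:                  7 dwords

Note the gfx v7/v8 paths subtract PACKET3_SET_CONFIG_REG_START from the
register offset while the v9 path writes mmGDS_COMPUTE_MAX_WAVE_ID
directly; presumably that matches each generation's SET_CONFIG_REG
addressing, but someone with the register docs should confirm.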
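
For anyone wanting to exercise this from user space: the flag is per-IB,
so a user-mode driver would set it in the IB chunk of its CS submission.
A minimal sketch in C, where ib_gpu_va and ib_size_dw are hypothetical
placeholders and the surrounding DRM_AMDGPU_CS ioctl plumbing (context,
BO list, chunk array) is omitted:

  #include <string.h>              /* memset */
  #include <drm/amdgpu_drm.h>      /* struct drm_amdgpu_cs_chunk_ib */

  struct drm_amdgpu_cs_chunk_ib ib_info;

  memset(&ib_info, 0, sizeof(ib_info));
  /* ask the kernel to reset the ME/GDS wave ID counters before this IB */
  ib_info.flags = AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
  ib_info.va_start = ib_gpu_va;       /* hypothetical: GPU VA of the IB */
  ib_info.ib_bytes = ib_size_dw * 4;  /* hypothetical: IB size in bytes */
  ib_info.ip_type = AMDGPU_HW_IP_COMPUTE;

The patch only checks the flag in the compute emit_ib paths, so setting
it on other IP types should simply have no effect.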
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx