[PATCH 2/2] drm/amdgpu/sdma6: implement ring reset callback for sdma6

Alex Deucher alexdeucher at gmail.com
Mon Sep 9 17:09:09 UTC 2024


On Mon, Sep 9, 2024 at 5:54 AM <jiadong.zhu at amd.com> wrote:
>
> From: Jiadong Zhu <Jiadong.Zhu at amd.com>
>
> Implement the sdma queue reset callback using mes_reset_queue_mmio.
> Extract the per-instance resume sequence from sdma_v6_0_gfx_resume so
> that a single queue can be restarted after a reset.

Might make more sense to split this patch in two: one patch to split
out the per-instance resume function, and one to implement the reset.

More comments below.

>
> Signed-off-by: Jiadong Zhu <Jiadong.Zhu at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 266 +++++++++++++++----------
>  1 file changed, 157 insertions(+), 109 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> index 208a1fa9d4e7..d8865a60b788 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
> @@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
>  }
>
>  /**
> - * sdma_v6_0_gfx_resume - setup and start the async dma engines
> + * sdma_v6_0_gfx_resume_instance - start/restart a specific sdma instance
>   *
>   * @adev: amdgpu_device pointer
> + * @i: instance index
> + * @restore: whether to restore the saved wptr on restart
>   *
> - * Set up the gfx DMA ring buffers and enable them.
> - * Returns 0 for success, error for failure.
> + * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
> + * Returns 0 for success, error for failure.
>   */
> -static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
> +static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
>  {
>         struct amdgpu_ring *ring;
>         u32 rb_cntl, ib_cntl;
> @@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
>         u32 doorbell_offset;
>         u32 temp;
>         u64 wptr_gpu_addr;
> -       int i, r;
>
> -       for (i = 0; i < adev->sdma.num_instances; i++) {
> -               ring = &adev->sdma.instance[i].ring;
> +       ring = &adev->sdma.instance[i].ring;
> +       if (!amdgpu_sriov_vf(adev))
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
>
> -               if (!amdgpu_sriov_vf(adev))
> -                       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
> -
> -               /* Set ring buffer size in dwords */
> -               rb_bufsz = order_base_2(ring->ring_size / 4);
> -               rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
> +       /* Set ring buffer size in dwords */
> +       rb_bufsz = order_base_2(ring->ring_size / 4);
> +       rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
>  #ifdef __BIG_ENDIAN
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
> -                                       RPTR_WRITEBACK_SWAP_ENABLE, 1);
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
> +                               RPTR_WRITEBACK_SWAP_ENABLE, 1);
>  #endif
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
> -
> -               /* Initialize the ring buffer's read and write pointers */
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
> +
> +       /* Initialize the ring buffer's read and write pointers */
> +       if (restore) {
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
> +       } else {
>                 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
>                 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
>                 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
>                 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
> +       }
> +       /* setup the wptr shadow polling */
> +       wptr_gpu_addr = ring->wptr_gpu_addr;
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
> +              lower_32_bits(wptr_gpu_addr));
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
> +              upper_32_bits(wptr_gpu_addr));
> +
> +       /* set the wb address whether it's enabled or not */
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
> +              upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
> +              lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
> +
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
> +
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
> +
> +       if (!restore)
> +               ring->wptr = 0;
>
> -               /* setup the wptr shadow polling */
> -               wptr_gpu_addr = ring->wptr_gpu_addr;
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
> -                      lower_32_bits(wptr_gpu_addr));
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
> -                      upper_32_bits(wptr_gpu_addr));
> -
> -               /* set the wb address whether it's enabled or not */
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
> -                      upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
> -                      lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
> +       /* before programming wptr to a smaller value, set minor_ptr_update first */
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
>
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
> +       if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
> +       }
>
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
> +       doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
> +       doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
>
> -               ring->wptr = 0;
> -
> -               /* before programing wptr to a less value, need set minor_ptr_update first */
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
> +       if (ring->use_doorbell) {
> +               doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
> +               doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
> +                               OFFSET, ring->doorbell_index);
> +       } else {
> +               doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
> +       }
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
>
> -               if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
> -                       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
> -                       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
> -               }
> +       if (i == 0)
> +               adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> +                                             ring->doorbell_index,
> +                                             adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
>
> -               doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
> -               doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
> +       if (amdgpu_sriov_vf(adev))
> +               sdma_v6_0_ring_set_wptr(ring);
> +
> +       /* set minor_ptr_update to 0 after wptr is programmed */
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
> +
> +       /* Set up sdma hang watchdog */
> +       temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
> +       /* 100ms per unit */
> +       temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
> +                            max(adev->usec_timeout/100000, 1));
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
> +
> +       /* Set up RESP_MODE to non-copy addresses */
> +       temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
> +       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
> +       temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
> +
> +       /* program default cache read and write policy */
> +       temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
> +       /* clean read policy and write policy bits */
> +       temp &= 0xFF0FFF;
> +       temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
> +                (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
> +                SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
>
> -               if (ring->use_doorbell) {
> -                       doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
> -                       doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
> -                                       OFFSET, ring->doorbell_index);
> -               } else {
> -                       doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
> -               }
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
> -
> -               if (i == 0)
> -                       adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
> -                                                     ring->doorbell_index,
> -                                                     adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
> -
> -               if (amdgpu_sriov_vf(adev))
> -                       sdma_v6_0_ring_set_wptr(ring);
> -
> -               /* set minor_ptr_update to 0 after wptr programed */
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
> -
> -               /* Set up sdma hang watchdog */
> -               temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
> -               /* 100ms per unit */
> -               temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
> -                                    max(adev->usec_timeout/100000, 1));
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
> -
> -               /* Set up RESP_MODE to non-copy addresses */
> -               temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
> -               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
> -               temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
> -
> -               /* program default cache read and write policy */
> -               temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
> -               /* clean read policy and write policy bits */
> -               temp &= 0xFF0FFF;
> -               temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
> -                        (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
> -                        SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
> -
> -               if (!amdgpu_sriov_vf(adev)) {
> -                       /* unhalt engine */
> -                       temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
> -                       temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
> -                       temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
> -                       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
> -               }
> +       if (!amdgpu_sriov_vf(adev)) {
> +               /* unhalt engine */
> +               temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
> +               temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
> +               temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
> +               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
> +       }
>
> -               /* enable DMA RB */
> -               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
> +       /* enable DMA RB */
> +       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
>
> -               ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
> -               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
> +       ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
> +       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
>  #ifdef __BIG_ENDIAN
> -               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
> +       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
>  #endif
> -               /* enable DMA IBs */
> -               WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
> +       /* enable DMA IBs */
> +       WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
> +
> +       if (amdgpu_sriov_vf(adev))
> +               sdma_v6_0_enable(adev, true);
>
> -               if (amdgpu_sriov_vf(adev))
> -                       sdma_v6_0_enable(adev, true);
> +       return amdgpu_ring_test_helper(ring);
> +}
>
> -               r = amdgpu_ring_test_helper(ring);
> +/**
> + * sdma_v6_0_gfx_resume - setup and start the async dma engines
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Set up the gfx DMA ring buffers and enable them.
> + * Returns 0 for success, error for failure.
> + */
> +static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
> +{
> +       int i, r;
> +
> +       for (i = 0; i < adev->sdma.num_instances; i++) {
> +               r = sdma_v6_0_gfx_resume_instance(adev, i, false);
>                 if (r)
>                         return r;
>         }
> @@ -1469,6 +1491,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
>         return r;
>  }
>
> +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
> +{
> +       struct amdgpu_device *adev = ring->adev;
> +       int i, r;
> +
> +       if (amdgpu_sriov_vf(adev))
> +               return -EINVAL;
> +
> +       r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
> +       if (r)
> +               return r;
> +
> +       for (i = 0; i < adev->sdma.num_instances; i++) {
> +               if (ring == &adev->sdma.instance[i].ring)
> +                       break;
> +       }
> +
> +       if (i == adev->sdma.num_instances) {
> +               DRM_ERROR("sdma instance not found\n");
> +               return -EINVAL;
> +       }

I'd suggest moving the instance lookup and check to the top of the
function; if the ring doesn't match any SDMA instance, it doesn't make
sense to call amdgpu_mes_reset_legacy_queue() either.
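
Something like this, untested, just rearranging the code you already
have to illustrate the ordering:

    static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
    {
            struct amdgpu_device *adev = ring->adev;
            int i, r;

            if (amdgpu_sriov_vf(adev))
                    return -EINVAL;

            /* Find the instance first so we bail out before touching
             * MES when the ring isn't an SDMA gfx queue.
             */
            for (i = 0; i < adev->sdma.num_instances; i++) {
                    if (ring == &adev->sdma.instance[i].ring)
                            break;
            }

            if (i == adev->sdma.num_instances) {
                    DRM_ERROR("sdma instance not found\n");
                    return -EINVAL;
            }

            r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
            if (r)
                    return r;

            return sdma_v6_0_gfx_resume_instance(adev, i, true);
    }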

Alex

> +
> +       return sdma_v6_0_gfx_resume_instance(adev, i, true);
> +}
> +
>  static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
>                                         struct amdgpu_irq_src *source,
>                                         unsigned type,
> @@ -1652,6 +1699,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
>         .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
>         .init_cond_exec = sdma_v6_0_ring_init_cond_exec,
>         .preempt_ib = sdma_v6_0_ring_preempt_ib,
> +       .reset = sdma_v6_0_reset_queue,
>  };
>
>  static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
> --
> 2.25.1
>

