[v5 4/6] drm/amdgpu: optimize queue reset and stop logic
Alex Deucher
alexdeucher at gmail.com
Fri Apr 11 15:52:44 UTC 2025
On Fri, Apr 11, 2025 at 4:42 AM Jesse.zhang at amd.com <jesse.zhang at amd.com> wrote:
>
> From: "Jesse.zhang at amd.com" <Jesse.zhang at amd.com>
>
> This patch refactors the SDMA v5.x queue reset and stop logic to improve
> code readability, maintainability, and performance. The key changes include:
>
> 1. **Generalized `sdma_v5_x_gfx_stop` Function**:
> - Added an `inst_mask` parameter to allow stopping specific SDMA instances
> instead of all instances. This is useful for resetting individual queues.
>
> 2. **Simplified `sdma_v5_x_reset_queue` Function**:
> - Removed redundant loops and checks by directly using the `ring->me` field
> to identify the SDMA instance.
> - Reused the `sdma_v5_x_gfx_stop` function to stop the queue, reducing code
> duplication.
>
> v1: The general coding style is to declare variables like "i" or "r" last, i.e. longest lines first and shortest last. (Christian)
>
> Signed-off-by: Jesse Zhang <Jesse.Zhang at amd.com>
> Acked-by: Alex Deucher <alexander.deucher at amd.com>
Might want to split this per IP? Either way:
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 31 ++++++++------------------
> drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 27 ++++++++--------------
> 2 files changed, 18 insertions(+), 40 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> index bef80b318f8e..11dfa0b7544d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> @@ -557,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
> * sdma_v5_0_gfx_stop - stop the gfx async dma engines
> *
> * @adev: amdgpu_device pointer
> - *
> + * @inst_mask: mask of dma engine instances to be disabled
> * Stop the gfx async dma ring buffers (NAVI10).
> */
> -static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
> +static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
> {
> u32 rb_cntl, ib_cntl;
> int i;
>
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> + for_each_inst(i, inst_mask) {
> rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
> WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
> @@ -657,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
> {
> u32 f32_cntl;
> int i;
> + uint32_t inst_mask;
>
> + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
> if (!enable) {
> - sdma_v5_0_gfx_stop(adev);
> + sdma_v5_0_gfx_stop(adev, 1 << inst_mask);
> sdma_v5_0_rlc_stop(adev);
> }
>
> @@ -1546,33 +1548,18 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
>
> static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
> {
> - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg;
> + u32 f32_cntl, freeze, cntl, stat1_reg;
> struct amdgpu_device *adev = ring->adev;
> int i, j, r;
>
> if (amdgpu_sriov_vf(adev))
> return -EINVAL;
>
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - if (ring == &adev->sdma.instance[i].ring)
> - break;
> - }
> -
> - if (i == adev->sdma.num_instances) {
> - DRM_ERROR("sdma instance not found\n");
> - return -EINVAL;
> - }
> -
> + i = ring->me;
> amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
>
> /* stop queue */
> - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
> - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
> - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
> -
> - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
> - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
> + sdma_v5_0_gfx_stop(adev, 1 << i);
>
> /* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
> freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> index 4cd7010ad0d7..db6630c3f30a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> @@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
> * sdma_v5_2_gfx_stop - stop the gfx async dma engines
> *
> * @adev: amdgpu_device pointer
> - *
> + * @inst_mask: mask of dma engine instances to be disabled
> * Stop the gfx async dma ring buffers.
> */
> -static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
> +static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
> {
> u32 rb_cntl, ib_cntl;
> int i;
>
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> + for_each_inst(i, inst_mask) {
> rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
> WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
> @@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
> {
> u32 f32_cntl;
> int i;
> + uint32_t inst_mask;
>
> + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
> if (!enable) {
> - sdma_v5_2_gfx_stop(adev);
> + sdma_v5_2_gfx_stop(adev, inst_mask);
> sdma_v5_2_rlc_stop(adev);
> }
>
> @@ -1459,29 +1461,18 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
>
> static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
> {
> + u32 rb_cntl, f32_cntl, freeze, cntl, stat1_reg;
> struct amdgpu_device *adev = ring->adev;
> int i, j, r;
> - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg;
>
> if (amdgpu_sriov_vf(adev))
> return -EINVAL;
>
> - for (i = 0; i < adev->sdma.num_instances; i++) {
> - if (ring == &adev->sdma.instance[i].ring)
> - break;
> - }
> -
> - if (i == adev->sdma.num_instances) {
> - DRM_ERROR("sdma instance not found\n");
> - return -EINVAL;
> - }
> -
> + i = ring->me;
> amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
>
> /* stop queue */
> - ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
> - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
> - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
> + sdma_v5_2_gfx_stop(adev, 1 << i);
>
> rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
> rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
> --
> 2.25.1
>
More information about the amd-gfx
mailing list