[PATCH v2 3/3] drm/amdgpu/sdma4.4.2: implement ring reset callback for sdma4.4.2

Lazar, Lijo lijo.lazar at amd.com
Wed Oct 9 06:02:32 UTC 2024



On 10/9/2024 9:15 AM, jiadong.zhu at amd.com wrote:
> From: Jiadong Zhu <Jiadong.Zhu at amd.com>
> 
> Implement sdma queue reset callback via SMU interface.
> 
> v2: Leverage inst_stop/start functions in reset sequence.
>     Use GET_INST for physical SDMA instance.
>     Disable apu for sdma reset.
> 
> Signed-off-by: Jiadong Zhu <Jiadong.Zhu at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 97 +++++++++++++++++++-----
>  1 file changed, 79 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> index c77889040760..3cb16af9b78e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
> @@ -667,11 +667,12 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
>   *
>   * @adev: amdgpu_device pointer
>   * @i: instance to resume
> + * @restore: whether to restore the saved wptr on restart
>   *
>   * Set up the gfx DMA ring buffers and enable them.
>   * Returns 0 for success, error for failure.
>   */
> -static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
> +static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
>  {
>  	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
>  	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
> @@ -698,16 +699,24 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>  	WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
>  	WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
>  
> -	ring->wptr = 0;
> +	if (!restore)
> +		ring->wptr = 0;
>  
>  	/* before programing wptr to a less value, need set minor_ptr_update first */
>  	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
>  
>  	/* Initialize the ring buffer's read and write pointers */
> -	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
> -	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
> -	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
> -	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
> +	if (restore) {
> +		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
> +	} else {
> +		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
> +		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
> +		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
> +		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
> +	}
>  
>  	doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
>  	doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
> @@ -759,7 +768,7 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
>   * Set up the page DMA ring buffers and enable them.
>   * Returns 0 for success, error for failure.
>   */
> -static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
> +static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
>  {
>  	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
>  	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
> @@ -775,10 +784,17 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
>  	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
>  
>  	/* Initialize the ring buffer's read and write pointers */
> -	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
> -	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
> -	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
> -	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
> +	if (restore) {
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(ring->wptr << 2));
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
> +	} else {
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
> +		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
> +	}
>  
>  	/* set the wb address whether it's enabled or not */
>  	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
> @@ -792,7 +808,8 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
>  	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
>  	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
>  
> -	ring->wptr = 0;
> +	if (!restore)
> +		ring->wptr = 0;
>  
>  	/* before programing wptr to a less value, need set minor_ptr_update first */
>  	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
> @@ -916,7 +933,7 @@ static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
>   * Returns 0 for success, error for failure.
>   */
>  static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
> -				  uint32_t inst_mask)
> +				  uint32_t inst_mask, bool restore)
>  {
>  	struct amdgpu_ring *ring;
>  	uint32_t tmp_mask;
> @@ -927,7 +944,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
>  		sdma_v4_4_2_inst_enable(adev, false, inst_mask);
>  	} else {
>  		/* bypass sdma microcode loading on Gopher */
> -		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
> +		if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
>  		    adev->sdma.instance[0].fw) {
>  			r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
>  			if (r)
> @@ -946,9 +963,9 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
>  		uint32_t temp;
>  
>  		WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
> -		sdma_v4_4_2_gfx_resume(adev, i);
> +		sdma_v4_4_2_gfx_resume(adev, i, restore);
>  		if (adev->sdma.has_page_queue)
> -			sdma_v4_4_2_page_resume(adev, i);
> +			sdma_v4_4_2_page_resume(adev, i, restore);
>  
>  		/* set utc l1 enable flag always to 1 */
>  		temp = RREG32_SDMA(i, regSDMA_CNTL);
> @@ -1390,6 +1407,7 @@ static int sdma_v4_4_2_sw_init(void *handle)
>  		ring = &adev->sdma.instance[i].ring;
>  		ring->ring_obj = NULL;
>  		ring->use_doorbell = true;
> +		ring->me = i;

This appears to be an unrelated change; it is already done in
sdma_v4_4_2_set_ring_funcs().
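For reference, that function already assigns the instance index, roughly
along these lines (paraphrased sketch, not the exact upstream code):

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}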

>  		aid_id = adev->sdma.instance[i].aid_id;
>  
>  		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
> @@ -1477,7 +1495,7 @@ static int sdma_v4_4_2_hw_init(void *handle)
>  	if (!amdgpu_sriov_vf(adev))
>  		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
>  
> -	r = sdma_v4_4_2_inst_start(adev, inst_mask);
> +	r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
>  
>  	return r;
>  }
> @@ -1566,6 +1584,48 @@ static int sdma_v4_4_2_soft_reset(void *handle)
>  	return 0;
>  }
>  
> +static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	int i, r;
> +	u32 preempt, inst_mask;
> +
> +	if ((adev->flags & AMD_IS_APU) || amdgpu_sriov_vf(adev))
> +		return -EINVAL;
> +
> +	/* stop queue */
> +	inst_mask = 1 << ring->me;
> +	sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
> +	if (adev->sdma.has_page_queue)
> +		sdma_v4_4_2_inst_page_stop(adev, inst_mask);
> +
> +	/* soft reset: clear SDMA_GFX_PREEMPT.IB_PREEMPT */
> +	preempt = RREG32_SDMA(ring->me, regSDMA_GFX_PREEMPT);
> +	preempt = REG_SET_FIELD(preempt, SDMA_GFX_PREEMPT, IB_PREEMPT, 0);
> +	WREG32_SDMA(ring->me, regSDMA_GFX_PREEMPT, preempt);
> +
> +	r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
> +	if (r) {
> +		dev_err(adev->dev, "amdgpu_dpm_reset_sdma failed(%d).\n", r);

If the error is printed here, it is better to avoid another print in the
SMU callback. Also, consider rephrasing it as "SDMA %d reset failed".
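For example (untested, just to illustrate the suggested wording):

	if (r) {
		dev_err(adev->dev, "SDMA %d reset failed (%d)\n", ring->me, r);
		return r;
	}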

> +		return r;
> +	}
> +
> +	udelay(50);
> +
> +	for (i = 0; i < adev->usec_timeout; i++) {
> +		if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
> +			break;
> +		udelay(1);
> +	}
> +
> +	if (i == adev->usec_timeout) {
> +		dev_err(adev->dev, "waiting for unhalt failed.\n");

Better to add more context here - e.g. "timed out waiting for SDMA %d
unhalt after reset".
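For example (untested):

	if (i == adev->usec_timeout) {
		dev_err(adev->dev,
			"timed out waiting for SDMA %d unhalt after reset\n",
			ring->me);
		return -ETIMEDOUT;
	}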

Thanks,
Lijo

> +		return -ETIMEDOUT;
> +	}
> +
> +	return sdma_v4_4_2_inst_start(adev, inst_mask, true);
> +}
> +
>  static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
>  					struct amdgpu_irq_src *source,
>  					unsigned type,
> @@ -1948,6 +2008,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
>  	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
>  	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
>  	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
> +	.reset = sdma_v4_4_2_reset_queue,
>  };
>  
>  static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
> @@ -2160,7 +2221,7 @@ static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
>  	if (!amdgpu_sriov_vf(adev))
>  		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
>  
> -	r = sdma_v4_4_2_inst_start(adev, inst_mask);
> +	r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
>  
>  	return r;
>  }

