[PATCH v3] drm/amdgpu: Ensure the DMA engine is deactivated during set ups

Lang Yu Lang.Yu at amd.com
Thu Apr 28 08:34:19 UTC 2022


On 04/28/2022, Christian König wrote:
> Adding a few more people to review this.
> 
> On 28.04.22 at 04:11, ricetons at gmail.com wrote:
> > From: Haohui Mai <haohui at alt-chain.io>
> > 
> > The patch fully deactivates the DMA engine before setting up the ring
> > buffer to avoid potential data races and crashes.
> > 
> > Signed-off-by: Haohui Mai <haohui at alt-chain.io>
> 
> From my side this is Acked-by: Christian König <christian.koenig at amd.com>,
> but I'm not deep enough into the hardware programming sequence to fully judge it.
> 
> Enabling the engine first and then setting it up is certainly incorrect, but
> it could be that I'm missing something else. So please take a look as well,
> guys.
> 
> Thanks,
> Christian.
> 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 110 +++++++++++++------------
> >   1 file changed, 59 insertions(+), 51 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> > index 013d2dec81d0..a22aafd2d7f6 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> > @@ -459,7 +459,6 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
> >   	}
> >   }
> > -
> >   /**
> >    * sdma_v5_2_gfx_stop - stop the gfx async dma engines
> >    *
> > @@ -505,17 +504,21 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
> >   }
> >   /**
> > - * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
> > + * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
> > + * context switch for an instance
> >    *
> >    * @adev: amdgpu_device pointer
> > - * @enable: enable/disable the DMA MEs context switch.
> > + * @instance_idx: the index of the SDMA instance
> >    *
> > - * Halt or unhalt the async dma engines context switch.
> > + * Unhalt the async dma engines context switch.
> >    */
> > -static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
> > +static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
> >   {
> >   	u32 f32_cntl, phase_quantum = 0;
> > -	int i;
> > +
> > +	if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
> > +		return;
> > +	}
> >   	if (amdgpu_sdma_phase_quantum) {
> >   		unsigned value = amdgpu_sdma_phase_quantum;
> > @@ -539,61 +542,71 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
> >   		phase_quantum =
> >   			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
> >   			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
> > -	}
> > -
> > -	for (i = 0; i < adev->sdma.num_instances; i++) {
> > -		if (enable && amdgpu_sdma_phase_quantum) {
> > -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
> > -			       phase_quantum);
> > -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
> > -			       phase_quantum);
> > -			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
> > -			       phase_quantum);
> > -		}
> > -		if (!amdgpu_sriov_vf(adev)) {
> > -			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> > -			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> > -					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
> > -			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
> > -		}
> > +		WREG32_SOC15_IP(GC,
> > +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
> > +			phase_quantum);
> > +		WREG32_SOC15_IP(GC,
> > +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
> > +		    phase_quantum);
> > +		WREG32_SOC15_IP(GC,
> > +			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
> > +		    phase_quantum);
> >   	}
> > +	if (!amdgpu_sriov_vf(adev)) {
> > +		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
> > +		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> > +				AUTO_CTXSW_ENABLE, 1);
> > +		WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
> > +	}
> >   }
> >   /**
> > - * sdma_v5_2_enable - stop the async dma engines
> > + * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
> >    *
> >    * @adev: amdgpu_device pointer
> > - * @enable: enable/disable the DMA MEs.
> >    *
> > - * Halt or unhalt the async dma engines.
> > + * Halt the async dma engines context switch.
> >    */
> > -static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
> > +static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
> >   {
> >   	u32 f32_cntl;
> >   	int i;
> > -	if (!enable) {
> > -		sdma_v5_2_gfx_stop(adev);
> > -		sdma_v5_2_rlc_stop(adev);
> > -	}
> > +	if (amdgpu_sriov_vf(adev))
> > +		return;
> > -	if (!amdgpu_sriov_vf(adev)) {
> > -		for (i = 0; i < adev->sdma.num_instances; i++) {
> > -			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
> > -			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
> > -			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
> > -		}
> > +	for (i = 0; i < adev->sdma.num_instances; i++) {
> > +		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> > +		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> > +				AUTO_CTXSW_ENABLE, 0);
> > +		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
> >   	}
> >   }
> > +/**
> > + * sdma_v5_2_halt - stop the async dma engines
> > + *
> > + * @adev: amdgpu_device pointer
> > + *
> > + * Halt the async dma engines.
> > + */
> > +static void sdma_v5_2_halt(struct amdgpu_device *adev)
> > +{
> > +	sdma_v5_2_gfx_stop(adev);
> > +	sdma_v5_2_rlc_stop(adev);
> > +}

The following code is missing from halt/unhalt:

f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);

The direct firmware load path, AMDGPU_FW_LOAD_DIRECT (sdma_v5_2_load_microcode), probably still needs this code.
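
For the halt side, a minimal sketch (just reusing the F32_CNTL programming
quoted above; the !amdgpu_sriov_vf() guard mirrors the old sdma_v5_2_enable())
could look like this:

static void sdma_v5_2_halt(struct amdgpu_device *adev)
{
	u32 f32_cntl;
	int i;

	sdma_v5_2_gfx_stop(adev);
	sdma_v5_2_rlc_stop(adev);

	/* Actually set the HALT bit on each instance, as the removed
	 * sdma_v5_2_enable(adev, false) used to do.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
		}
	}
}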

Regards,
Lang

> > +
> >   /**
> >    * sdma_v5_2_gfx_resume - setup and start the async dma engines
> >    *
> >    * @adev: amdgpu_device pointer
> >    *
> >    * Set up the gfx DMA ring buffers and enable them.
> > + * It assumes that the dma engine is stopped for each instance.
> > + * The function enables the engine and preemption sequentially for each instance.
> > + *
> >    * Returns 0 for success, error for failure.
> >    */
> >   static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
> > @@ -737,10 +750,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
> >   		ring->sched.ready = true;
> > -		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
> > -			sdma_v5_2_ctx_switch_enable(adev, true);
> > -			sdma_v5_2_enable(adev, true);
> > -		}
> > +		sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
> >   		r = amdgpu_ring_test_ring(ring);
> >   		if (r) {
> > @@ -784,7 +794,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
> >   	int i, j;
> >   	/* halt the MEs */
> > -	sdma_v5_2_enable(adev, false);
> > +	sdma_v5_2_halt(adev);
> >   	for (i = 0; i < adev->sdma.num_instances; i++) {
> >   		if (!adev->sdma.instance[i].fw)
> > @@ -856,8 +866,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
> >   	int r = 0;
> >   	if (amdgpu_sriov_vf(adev)) {
> > -		sdma_v5_2_ctx_switch_enable(adev, false);
> > -		sdma_v5_2_enable(adev, false);
> > +		sdma_v5_2_ctx_switch_disable_all(adev);
> > +		sdma_v5_2_halt(adev);
> >   		/* set RB registers */
> >   		r = sdma_v5_2_gfx_resume(adev);
> > @@ -881,12 +891,10 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
> >   		amdgpu_gfx_off_ctrl(adev, false);
> >   	sdma_v5_2_soft_reset(adev);
> > -	/* unhalt the MEs */
> > -	sdma_v5_2_enable(adev, true);
> > -	/* enable sdma ring preemption */
> > -	sdma_v5_2_ctx_switch_enable(adev, true);
> > -	/* start the gfx rings and rlc compute queues */
> > +	/* Soft reset is supposed to disable the dma engine and preemption.
> > +	 * Now start the gfx rings and rlc compute queues.
> > +	 */
> >   	r = sdma_v5_2_gfx_resume(adev);
> >   	if (adev->in_s0ix)
> >   		amdgpu_gfx_off_ctrl(adev, true);
> > @@ -1340,8 +1348,8 @@ static int sdma_v5_2_hw_fini(void *handle)
> >   	if (amdgpu_sriov_vf(adev))
> >   		return 0;
> > -	sdma_v5_2_ctx_switch_enable(adev, false);
> > -	sdma_v5_2_enable(adev, false);
> > +	sdma_v5_2_ctx_switch_disable_all(adev);
> > +	sdma_v5_2_halt(adev);
> >   	return 0;
> >   }
> 