[PATCH v6] drm/amdgpu: Ensure the DMA engine is deactivated during set ups

Alex Deucher alexdeucher at gmail.com
Thu May 5 16:33:25 UTC 2022


On Sat, Apr 30, 2022 at 3:34 AM <ricetons at gmail.com> wrote:
>
> From: Haohui Mai <ricetons at gmail.com>
>
> The patch fully deactivates the DMA engine before setting up the ring
> buffer to avoid potential data races and crashes.

Does this actually fix an issue you are seeing?  I don't think it will
hurt anything, but I also don't think it's strictly necessary.  AFAIK,
only the HALT bit really matters.  So the commit message might be
somewhat misleading.
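
To be concrete, the part that actually gates the engine is the HALT bit
in SDMA0_F32_CNTL.  An untested sketch of a minimal stop, using only
the helpers the driver already has:

	int i;
	u32 f32_cntl;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		/* HALT = 1 is what really stops the microengine */
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}

Everything else in the stop path is defensive rather than required.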

Alex

>
> Signed-off-by: Haohui Mai <ricetons at gmail.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 109 +++++++++++++++----------
>  1 file changed, 64 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> index 013d2dec81d0..1fac9d8e99de 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
> @@ -459,7 +459,6 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
>         }
>  }
>
> -
>  /**
>   * sdma_v5_2_gfx_stop - stop the gfx async dma engines
>   *
> @@ -505,17 +504,21 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
>  }
>
>  /**
> - * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
> + * sdma_v5_2_ctx_switch_enable_for_instance - enable the async dma engine
> + * context switch for an instance
>   *
>   * @adev: amdgpu_device pointer
> - * @enable: enable/disable the DMA MEs context switch.
> + * @instance_idx: the index of the SDMA instance
>   *
> - * Halt or unhalt the async dma engines context switch.
> + * Unhalt the async dma engine context switch for the given instance.
>   */
> -static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
> +static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
>  {
>         u32 f32_cntl, phase_quantum = 0;
> -       int i;
> +
> +       if (WARN_ON(instance_idx >= adev->sdma.num_instances))
> +               return;
>
>         if (amdgpu_sdma_phase_quantum) {
>                 unsigned value = amdgpu_sdma_phase_quantum;
> @@ -539,50 +542,68 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
>                 phase_quantum =
>                         value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
>                         unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
> -       }
> -
> -       for (i = 0; i < adev->sdma.num_instances; i++) {
> -               if (enable && amdgpu_sdma_phase_quantum) {
> -                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
> -                              phase_quantum);
> -                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
> -                              phase_quantum);
> -                       WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
> -                              phase_quantum);
> -               }
>
> -               if (!amdgpu_sriov_vf(adev)) {
> -                       f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> -                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> -                                       AUTO_CTXSW_ENABLE, enable ? 1 : 0);
> -                       WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
> -               }
> +               WREG32_SOC15_IP(GC,
> +                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
> +                       phase_quantum);
> +               WREG32_SOC15_IP(GC,
> +                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
> +                       phase_quantum);
> +               WREG32_SOC15_IP(GC,
> +                       sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
> +                       phase_quantum);
>         }
>
> +       if (!amdgpu_sriov_vf(adev)) {
> +               f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
> +               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> +                               AUTO_CTXSW_ENABLE, 1);
> +               WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
> +       }
>  }
>
>  /**
> - * sdma_v5_2_enable - stop the async dma engines
> + * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
>   *
>   * @adev: amdgpu_device pointer
> - * @enable: enable/disable the DMA MEs.
>   *
> - * Halt or unhalt the async dma engines.
> + * Halt the async dma engine context switch on all instances.
>   */
> -static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
> +static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
>  {
>         u32 f32_cntl;
>         int i;
>
> -       if (!enable) {
> -               sdma_v5_2_gfx_stop(adev);
> -               sdma_v5_2_rlc_stop(adev);
> +       if (amdgpu_sriov_vf(adev))
> +               return;
> +
> +       for (i = 0; i < adev->sdma.num_instances; i++) {
> +               f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
> +               f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
> +                               AUTO_CTXSW_ENABLE, 0);
> +               WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
>         }
> +}
> +
> +/**
> + * sdma_v5_2_halt - stop the async dma engines
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Halt the async dma engines.
> + */
> +static void sdma_v5_2_halt(struct amdgpu_device *adev)
> +{
> +       int i;
> +       u32 f32_cntl;
> +
> +       sdma_v5_2_gfx_stop(adev);
> +       sdma_v5_2_rlc_stop(adev);
>
>         if (!amdgpu_sriov_vf(adev)) {
>                 for (i = 0; i < adev->sdma.num_instances; i++) {
>                         f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
> -                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
> +                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
>                         WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
>                 }
>         }
> @@ -594,6 +615,9 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
>   * @adev: amdgpu_device pointer
>   *
>   * Set up the gfx DMA ring buffers and enable them.
> + * This function assumes that the DMA engine is halted on every instance.
> + * It then enables the engine and preemption sequentially for each instance.
> + *
>   * Returns 0 for success, error for failure.
>   */
>  static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
> @@ -737,10 +761,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
>
>                 ring->sched.ready = true;
>
> -               if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
> -                       sdma_v5_2_ctx_switch_enable(adev, true);
> -                       sdma_v5_2_enable(adev, true);
> -               }
> +               sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
>
>                 r = amdgpu_ring_test_ring(ring);
>                 if (r) {
> @@ -784,7 +805,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
>         int i, j;
>
>         /* halt the MEs */
> -       sdma_v5_2_enable(adev, false);
> +       sdma_v5_2_halt(adev);
>
>         for (i = 0; i < adev->sdma.num_instances; i++) {
>                 if (!adev->sdma.instance[i].fw)
> @@ -856,8 +877,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
>         int r = 0;
>
>         if (amdgpu_sriov_vf(adev)) {
> -               sdma_v5_2_ctx_switch_enable(adev, false);
> -               sdma_v5_2_enable(adev, false);
> +               sdma_v5_2_ctx_switch_disable_all(adev);
> +               sdma_v5_2_halt(adev);
>
>                 /* set RB registers */
>                 r = sdma_v5_2_gfx_resume(adev);
> @@ -881,12 +902,10 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
>                 amdgpu_gfx_off_ctrl(adev, false);
>
>         sdma_v5_2_soft_reset(adev);
> -       /* unhalt the MEs */
> -       sdma_v5_2_enable(adev, true);
> -       /* enable sdma ring preemption */
> -       sdma_v5_2_ctx_switch_enable(adev, true);
>
> -       /* start the gfx rings and rlc compute queues */
> +       /* Soft reset is supposed to disable the dma engine and preemption.
> +        * Now start the gfx rings and rlc compute queues.
> +        */
>         r = sdma_v5_2_gfx_resume(adev);
>         if (adev->in_s0ix)
>                 amdgpu_gfx_off_ctrl(adev, true);
> @@ -1340,8 +1359,8 @@ static int sdma_v5_2_hw_fini(void *handle)
>         if (amdgpu_sriov_vf(adev))
>                 return 0;
>
> -       sdma_v5_2_ctx_switch_enable(adev, false);
> -       sdma_v5_2_enable(adev, false);
> +       sdma_v5_2_ctx_switch_disable_all(adev);
> +       sdma_v5_2_halt(adev);
>
>         return 0;
>  }
> --
> 2.25.1
>
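
To summarize the flow this patch leaves in sdma_v5_2_start(), here is a
condensed sketch of the patched function, eliding the microcode load and
the gfx-off handling, so not the verbatim code:

	static int sdma_v5_2_start(struct amdgpu_device *adev)
	{
		if (amdgpu_sriov_vf(adev)) {
			/* quiesce the engine, then set up the RB registers */
			sdma_v5_2_ctx_switch_disable_all(adev);
			sdma_v5_2_halt(adev);
			return sdma_v5_2_gfx_resume(adev);
		}

		/* Bare metal: soft reset leaves every instance halted with
		 * preemption disabled; gfx_resume then unhalts the engine and
		 * re-enables the context switch per instance.
		 */
		sdma_v5_2_soft_reset(adev);
		return sdma_v5_2_gfx_resume(adev);
	}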

