[PATCH 18/29] drm/amdgpu/vcn: separate idle work by instance

Alex Deucher alexdeucher at gmail.com
Mon Oct 28 19:30:45 UTC 2024


On Thu, Oct 24, 2024 at 11:23 PM <boyuan.zhang at amd.com> wrote:
>
> From: Boyuan Zhang <boyuan.zhang at amd.com>
>
> Previously, idle work handling was shared by all VCN instances. As a
> result, when one instance finishes its job, the idle work can't be
> triggered if another instance is still busy.
>
> Now, move the idle_work from amdgpu_vcn to amdgpu_vcn_inst in order to
> track the work per VCN instance. Add work_inst to record the instance
> number that the work belongs to. As a result, the idle work can be
> triggered as soon as the job is done on one VCN instance, without
> having to consider the work on the other instances.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang at amd.com>
> Reviewed-by: Christian König <christian.koenig at amd.com>

Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
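
In case it helps other readers of the thread, this is roughly the
pattern the patch moves to: the delayed work is embedded in each
instance, and the handler recovers its owning instance with
container_of(). A minimal sketch with hypothetical my_dev/my_inst
types standing in for amdgpu_device/amdgpu_vcn_inst (the real handler
also checks fences, DPG state and the submission count):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical stand-ins for amdgpu_vcn_inst / amdgpu_device. */
struct my_inst {
	struct my_dev		*dev;
	unsigned int		work_inst;	/* which instance this work belongs to */
	struct delayed_work	idle_work;	/* per-instance, no longer per-device */
};

struct my_dev {
	unsigned int	num_inst;
	struct my_inst	inst[4];
};

static void my_idle_work_handler(struct work_struct *work)
{
	/* Recover the owning instance from the embedded delayed_work. */
	struct my_inst *inst =
		container_of(work, struct my_inst, idle_work.work);

	/*
	 * Only this instance's fences/state would be inspected here, so a
	 * busy sibling instance no longer keeps an idle one from gating.
	 */
}

static void my_init(struct my_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_inst; i++) {
		dev->inst[i].dev = dev;
		dev->inst[i].work_inst = i;
		INIT_DELAYED_WORK(&dev->inst[i].idle_work,
				  my_idle_work_handler);
	}
}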

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  | 66 ++++++++++++------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  |  4 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c |  2 +-
>  drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c    | 14 ++---
>  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c  |  2 +-
>  17 files changed, 58 insertions(+), 54 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 49802e66a358..3d2d2a0d98c8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -118,7 +118,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
>         unsigned int fw_shared_size, log_offset;
>         int i, r;
>
> -       INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
> +       for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> +               adev->vcn.inst[i].adev = adev;
> +               adev->vcn.inst[i].work_inst = i;
> +               INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
> +       }
>         mutex_init(&adev->vcn.vcn_pg_lock);
>         mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
>         atomic_set(&adev->vcn.total_submission_cnt, 0);
> @@ -326,7 +330,8 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
>  {
>         bool in_ras_intr = amdgpu_ras_intr_triggered();
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
> +               cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
>
>         /* err_event_athub will corrupt VCPU buffer, so we need to
>          * restore fw data and clear buffer in amdgpu_vcn_resume() */
> @@ -382,46 +387,43 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
>
>  static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
>  {
> -       struct amdgpu_device *adev =
> -               container_of(work, struct amdgpu_device, vcn.idle_work.work);
> -       unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
> -       unsigned int i, j;
> +       struct amdgpu_vcn_inst *vcn_inst =
> +               container_of(work, struct amdgpu_vcn_inst, idle_work.work);
> +       struct amdgpu_device *adev = vcn_inst->adev;
> +       unsigned int inst = vcn_inst->work_inst;
> +       unsigned int fence = 0;
> +       unsigned int i;
>         int r = 0;
>
> -       for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
> -               if (adev->vcn.harvest_config & (1 << j))
> -                       continue;
> -
> -               for (i = 0; i < adev->vcn.num_enc_rings; ++i)
> -                       fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
> -
> -               /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
> -               if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
> -                   !adev->vcn.using_unified_queue) {
> -                       struct dpg_pause_state new_state;
> -
> -                       if (fence[j] ||
> -                               unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
> -                               new_state.fw_based = VCN_DPG_STATE__PAUSE;
> -                       else
> -                               new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
> +       if (adev->vcn.harvest_config & (1 << inst))
> +               return;
>
> -                       adev->vcn.pause_dpg_mode(adev, j, &new_state);
> -               }
> +       for (i = 0; i < adev->vcn.num_enc_rings; ++i)
> +               fence += amdgpu_fence_count_emitted(&adev->vcn.inst[inst].ring_enc[i]);
>
> -               fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
> -               fences += fence[j];
> +       /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
> +       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
> +                       !adev->vcn.using_unified_queue) {
> +               struct dpg_pause_state new_state;
> +               if (fence ||
> +                               unlikely(atomic_read(&adev->vcn.inst[inst].dpg_enc_submission_cnt)))
> +                       new_state.fw_based = VCN_DPG_STATE__PAUSE;
> +               else
> +                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
> +               adev->vcn.pause_dpg_mode(adev, inst, &new_state);
>         }
>
> -       if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
> +       fence += amdgpu_fence_count_emitted(&adev->vcn.inst[inst].ring_dec);
> +
> +       if (!fence && !atomic_read(&adev->vcn.total_submission_cnt)) {
>                 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
>                        AMD_PG_STATE_GATE);
>                 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
> -                               false);
> +                                                                                       false);
>                 if (r)
>                         dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
>         } else {
> -               schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
> +               schedule_delayed_work(&adev->vcn.inst[inst].idle_work, VCN_IDLE_TIMEOUT);
>         }
>  }
>
> @@ -432,7 +434,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
>
>         atomic_inc(&adev->vcn.total_submission_cnt);
>
> -       if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
> +       if (!cancel_delayed_work_sync(&adev->vcn.inst[ring->me].idle_work)) {
>                 r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
>                                 true);
>                 if (r)
> @@ -481,7 +483,7 @@ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
>
>         atomic_dec(&ring->adev->vcn.total_submission_cnt);
>
> -       schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
> +       schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work, VCN_IDLE_TIMEOUT);
>  }
>
>  int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 2b8c9b8d4494..2282c4d14ae7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -279,6 +279,7 @@ struct amdgpu_vcn_fw_shared {
>  };
>
>  struct amdgpu_vcn_inst {
> +       struct amdgpu_device *adev;
>         struct amdgpu_bo        *vcpu_bo;
>         void                    *cpu_addr;
>         uint64_t                gpu_addr;
> @@ -301,6 +302,8 @@ struct amdgpu_vcn_inst {
>         enum amd_powergating_state cur_state;
>         uint8_t                 vcn_config;
>         uint32_t                vcn_codec_disable_mask;
> +       struct delayed_work     idle_work;
> +       uint8_t                 work_inst;
>  };
>
>  struct amdgpu_vcn_ras {
> @@ -309,7 +312,6 @@ struct amdgpu_vcn_ras {
>
>  struct amdgpu_vcn {
>         unsigned                fw_version;
> -       struct delayed_work     idle_work;
>         unsigned                num_enc_rings;
>         bool                    indirect_sram;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> index 03b8b7cd5229..8031406e20ff 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> @@ -604,7 +604,7 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
>  static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
>  {
>         struct  amdgpu_device *adev = ring->adev;
> -       bool    set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       bool    set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>         int             cnt = 0;
>
>         mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
> index 1100d832abfc..aed61615299d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
> @@ -150,7 +150,7 @@ static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
>               RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
> index 3d72e383b7df..28a1e8ce417f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
> @@ -211,7 +211,7 @@ static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int i;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>                 if (adev->jpeg.harvest_config & (1 << i))
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
> index 200403a07d34..f83c7a58b91a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
> @@ -164,7 +164,7 @@ static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
>               RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
> index 0a4939895b6a..568ff06b3b6a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
> @@ -195,7 +195,7 @@ static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>         if (!amdgpu_sriov_vf(adev)) {
>                 if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
>                         RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
> index d89863213ae7..3d57607bb3f7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
> @@ -219,7 +219,7 @@ static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int i;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>                 if (adev->jpeg.harvest_config & (1 << i))
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
> index 09eaf7f07710..124cb15e3980 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
> @@ -161,7 +161,7 @@ static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
>               RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index c2eb187b0a27..f07a5a8393c0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -150,7 +150,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
>                 return r;
>
>         /* Override the work func */
> -       adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
> +       adev->vcn.inst[0].idle_work.work.func = vcn_v1_0_idle_work_handler;
>
>         amdgpu_vcn_setup_ucode(adev);
>
> @@ -277,7 +277,7 @@ static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
>                 (adev->vcn.inst[0].cur_state != AMD_PG_STATE_GATE &&
> @@ -301,7 +301,7 @@ static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         bool idle_work_unexecuted;
>
> -       idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>         if (idle_work_unexecuted) {
>                 if (adev->pm.dpm_enabled)
>                         amdgpu_dpm_enable_vcn(adev, false, 0);
> @@ -1830,7 +1830,7 @@ static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
>  static void vcn_v1_0_idle_work_handler(struct work_struct *work)
>  {
>         struct amdgpu_device *adev =
> -               container_of(work, struct amdgpu_device, vcn.idle_work.work);
> +               container_of(work, struct amdgpu_device, vcn.inst[0].idle_work.work);
>         unsigned int fences = 0, i;
>
>         for (i = 0; i < adev->vcn.num_enc_rings; ++i)
> @@ -1863,14 +1863,14 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
>                         amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
>                                AMD_PG_STATE_GATE);
>         } else {
> -               schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
> +               schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
>         }
>  }
>
>  static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
>  {
>         struct  amdgpu_device *adev = ring->adev;
> -       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
>
> @@ -1922,7 +1922,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
>
>  void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
>  {
> -       schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
> +       schedule_delayed_work(&ring->adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
>         mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
>  }
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> index 04edbb368903..419ecba12c9b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> @@ -313,7 +313,7 @@ static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
>  {
>         struct amdgpu_device *adev = ip_block->adev;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[0].idle_work);
>
>         if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
>             (adev->vcn.inst[0].cur_state != AMD_PG_STATE_GATE &&
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> index 010970faa5fd..7e7ce00806cc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
> @@ -387,7 +387,7 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.harvest_config & (1 << inst))
>                 return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> index 690224a5e783..ca4ee368db02 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> @@ -420,7 +420,7 @@ static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.harvest_config & (1 << inst))
>                 return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> index 0cc0eb52b54f..ee6c08707312 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
> @@ -349,7 +349,7 @@ static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.harvest_config & (1 << inst))
>                 return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> index db6f8d424777..2c66a7a4ff25 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
> @@ -315,7 +315,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.inst[inst].cur_state != AMD_PG_STATE_GATE)
>                 vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> index 0f3b25d3b9d8..d725c12ffdaf 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
> @@ -298,7 +298,7 @@ static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.harvest_config & (1 << inst))
>                 return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> index 15620e111d04..3856388179b8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
> @@ -262,7 +262,7 @@ static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
>         struct amdgpu_device *adev = ip_block->adev;
>         int inst = ip_block->instance;
>
> -       cancel_delayed_work_sync(&adev->vcn.idle_work);
> +       cancel_delayed_work_sync(&adev->vcn.inst[inst].idle_work);
>
>         if (adev->vcn.harvest_config & (1 << inst))
>                 return 0;
> --
> 2.34.1
>
