[PATCH] drm/amdgpu: All UVD instances share one idle_work handle
Stefan Agner
stefan at agner.ch
Tue Jun 19 09:04:53 UTC 2018
On 18.06.2018 20:00, James Zhu wrote:
> All UVD instances have only one DPM control, so it is better
> to share one idle_work handle.
Compiles fine with clang here.
Tested-by: Stefan Agner <stefan at agner.ch>
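
For reference, the pattern the patch applies, reduced to a compilable sketch
against the in-kernel workqueue API. The struct names below are simplified
stand-ins, not the real amdgpu definitions: a single delayed_work embedded at
the device level is initialized once, cancelled once on suspend, and rearmed
from the handler while any instance is still busy.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MAX_INST 2

struct uvd_inst {
	void *vcpu_bo;			/* per-instance state lives here */
};

struct uvd {
	struct uvd_inst inst[MAX_INST];
	struct delayed_work idle_work;	/* one timer: there is only one DPM control */
};

static void idle_handler(struct work_struct *work)
{
	struct uvd *uvd = container_of(work, struct uvd, idle_work.work);

	/* sum fences across all instances; while any are busy, rearm the
	 * single shared timer instead of one timer per instance */
	schedule_delayed_work(&uvd->idle_work, HZ);
}

static void uvd_sw_init(struct uvd *uvd)
{
	INIT_DELAYED_WORK(&uvd->idle_work, idle_handler);
}

static void uvd_suspend(struct uvd *uvd)
{
	/* cancel once, before walking the instances */
	cancel_delayed_work_sync(&uvd->idle_work);
}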
>
> Signed-off-by: James Zhu <James.Zhu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++++++-------
> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2 +-
> 2 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 04d77f1..cc15d32 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
> unsigned family_id;
> int i, j, r;
>
> - INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
> + INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
>
> switch (adev->asic_type) {
> #ifdef CONFIG_DRM_AMDGPU_CIK
> @@ -331,12 +331,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
> void *ptr;
> int i, j;
>
> + cancel_delayed_work_sync(&adev->uvd.idle_work);
> +
> for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> if (adev->uvd.inst[j].vcpu_bo == NULL)
> continue;
>
> - cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
> -
> /* only valid for physical mode */
> if (adev->asic_type < CHIP_POLARIS10) {
> for (i = 0; i < adev->uvd.max_handles; ++i)
> @@ -1162,7 +1162,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
> static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
> {
> struct amdgpu_device *adev =
> - container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
> + container_of(work, struct amdgpu_device, uvd.idle_work.work);
> unsigned fences = 0, i, j;
>
> for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> @@ -1184,7 +1184,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
> AMD_CG_STATE_GATE);
> }
> } else {
> - schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
> + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
> }
> }
>
> @@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
> if (amdgpu_sriov_vf(adev))
> return;
>
> - set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
> + set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
> if (set_clocks) {
> if (adev->pm.dpm_enabled) {
> amdgpu_dpm_enable_uvd(adev, true);
> @@ -1213,7 +1213,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
> void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
> {
> if (!amdgpu_sriov_vf(ring->adev))
> - schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
> + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
> }
>
> /**
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> index b1579fb..8b23a1b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> @@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
> void *saved_bo;
> atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
> struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
> - struct delayed_work idle_work;
> struct amdgpu_ring ring;
> struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
> struct amdgpu_irq_src irq;
> @@ -62,6 +61,7 @@ struct amdgpu_uvd {
> bool address_64_bit;
> bool use_ctx_buf;
> struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
> + struct delayed_work idle_work;
> };
>
> int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
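
A note on the container_of() change in the handler: it recovers the device
pointer purely from the compile-time offset of the embedded member, so the
member expression has to resolve to a constant offset; hoisting idle_work out
of the instance array into struct amdgpu_uvd makes that unambiguous. A tiny
self-contained demonstration of the mechanism (the demo_* names are made up
for illustration, not amdgpu code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work {
	int pending;
};

struct demo_device {
	int id;
	struct demo_work idle_work;	/* embedded, like uvd.idle_work */
};

static void demo_handler(struct demo_work *work)
{
	/* recover the containing device from the embedded member */
	struct demo_device *dev = container_of(work, struct demo_device, idle_work);

	printf("idle handler runs for device %d\n", dev->id);
}

int main(void)
{
	struct demo_device dev = { .id = 7 };

	demo_handler(&dev.idle_work);	/* the workqueue would pass this pointer */
	return 0;
}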