[PATCH 1/2] drm/amdgpu/vcn1.0: use its own idle handler and begin use funcs
James Zhu
jamesz at amd.com
Thu Dec 12 17:59:37 UTC 2019
Reviewed-by: James Zhu <James.Zhu at amd.com> for the series.
On 2019-12-12 11:06 a.m., Leo Liu wrote:
> VCN 1.0 power management and DPG mode are managed together with
> JPEG 1.0 by both HW and FW, so separate them from the general VCN code.
> Also remove the multiple-instance handling, since VCN 1.0 HW has only
> a single instance.
>
> Signed-off-by: Leo Liu <leo.liu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3 +
> drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 3 +-
> drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 88 ++++++++++++++++++++++++-
> drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h | 2 +
> 5 files changed, 96 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 428cfd58b37d..e962c87d04cf 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -39,9 +39,6 @@
> #include "vcn/vcn_1_0_offset.h"
> #include "vcn/vcn_1_0_sh_mask.h"
>
> -/* 1 second timeout */
> -#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
> -
> /* Firmware Names */
> #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
> #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
> @@ -71,7 +68,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
> unsigned char fw_check;
> int i, r;
>
> - INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
> + /* For VCN2.0 and above */
> + if (adev->asic_type >= CHIP_ARCTURUS)
> + INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
>
> switch (adev->asic_type) {
> case CHIP_RAVEN:
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 402a5046b985..3484ead62046 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -56,6 +56,9 @@
> #define VCN_VID_IP_ADDRESS_2_0 0x0
> #define VCN_AON_IP_ADDRESS_2_0 0x30000
>
> +/* 1 second timeout */
> +#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
> +
> #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
> ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
> WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
> diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> index a141408dfb23..0debfd9f428c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
> @@ -25,6 +25,7 @@
> #include "amdgpu_jpeg.h"
> #include "soc15.h"
> #include "soc15d.h"
> +#include "vcn_v1_0.h"
>
> #include "vcn/vcn_1_0_offset.h"
> #include "vcn/vcn_1_0_sh_mask.h"
> @@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
> .insert_start = jpeg_v1_0_decode_ring_insert_start,
> .insert_end = jpeg_v1_0_decode_ring_insert_end,
> .pad_ib = amdgpu_ring_generic_pad_ib,
> - .begin_use = amdgpu_vcn_ring_begin_use,
> + .begin_use = vcn_v1_0_ring_begin_use,
> .end_use = amdgpu_vcn_ring_end_use,
> .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
> .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index 652cecc030b3..7395286540e1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -25,6 +25,7 @@
>
> #include "amdgpu.h"
> #include "amdgpu_vcn.h"
> +#include "amdgpu_pm.h"
> #include "soc15.h"
> #include "soc15d.h"
> #include "soc15_common.h"
> @@ -51,6 +52,8 @@ static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_sta
> static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
> struct dpg_pause_state *new_state);
>
> +static void vcn_v1_0_idle_work_handler(struct work_struct *work);
> +
> /**
> * vcn_v1_0_early_init - set function pointers
> *
> @@ -101,6 +104,7 @@ static int vcn_v1_0_sw_init(void *handle)
> return r;
> }
>
> + INIT_DELAYED_WORK(&adev->vcn.idle_work, vcn_v1_0_idle_work_handler);
> r = amdgpu_vcn_sw_init(adev);
> if (r)
> return r;
> @@ -1758,6 +1762,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
> return ret;
> }
>
> +static void vcn_v1_0_idle_work_handler(struct work_struct *work)
> +{
> + struct amdgpu_device *adev =
> + container_of(work, struct amdgpu_device, vcn.idle_work.work);
> + unsigned int fences = 0, i;
> +
> + for (i = 0; i < adev->vcn.num_enc_rings; ++i)
> + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
> +
> + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
> + struct dpg_pause_state new_state;
> +
> + if (fences)
> + new_state.fw_based = VCN_DPG_STATE__PAUSE;
> + else
> + new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
> +
> + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
> + new_state.jpeg = VCN_DPG_STATE__PAUSE;
> + else
> + new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
> +
> + adev->vcn.pause_dpg_mode(adev, &new_state);
> + }
> +
> + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
> + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
> +
> + if (fences == 0) {
> + amdgpu_gfx_off_ctrl(adev, true);
> + if (adev->pm.dpm_enabled)
> + amdgpu_dpm_enable_uvd(adev, false);
> + else
> + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
> + AMD_PG_STATE_GATE);
> + } else {
> + schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
> + }
> +}
> +
> +void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
> +{
> + struct amdgpu_device *adev = ring->adev;
> + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
> +
> + if (set_clocks) {
> + amdgpu_gfx_off_ctrl(adev, false);
> + if (adev->pm.dpm_enabled)
> + amdgpu_dpm_enable_uvd(adev, true);
> + else
> + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
> + AMD_PG_STATE_UNGATE);
> + }
> +
> + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
> + struct dpg_pause_state new_state;
> + unsigned int fences = 0, i;
> +
> + for (i = 0; i < adev->vcn.num_enc_rings; ++i)
> + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
> +
> + if (fences)
> + new_state.fw_based = VCN_DPG_STATE__PAUSE;
> + else
> + new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
> +
> + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
> + new_state.jpeg = VCN_DPG_STATE__PAUSE;
> + else
> + new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
> +
> + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
> + new_state.fw_based = VCN_DPG_STATE__PAUSE;
> + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
> + new_state.jpeg = VCN_DPG_STATE__PAUSE;
> +
> + adev->vcn.pause_dpg_mode(adev, &new_state);
> + }
> +}
> +
> static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
> .name = "vcn_v1_0",
> .early_init = vcn_v1_0_early_init,
> @@ -1804,7 +1888,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
> .insert_start = vcn_v1_0_dec_ring_insert_start,
> .insert_end = vcn_v1_0_dec_ring_insert_end,
> .pad_ib = amdgpu_ring_generic_pad_ib,
> - .begin_use = amdgpu_vcn_ring_begin_use,
> + .begin_use = vcn_v1_0_ring_begin_use,
> .end_use = amdgpu_vcn_ring_end_use,
> .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
> .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
> @@ -1836,7 +1920,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
> .insert_nop = amdgpu_ring_insert_nop,
> .insert_end = vcn_v1_0_enc_ring_insert_end,
> .pad_ib = amdgpu_ring_generic_pad_ib,
> - .begin_use = amdgpu_vcn_ring_begin_use,
> + .begin_use = vcn_v1_0_ring_begin_use,
> .end_use = amdgpu_vcn_ring_end_use,
> .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
> .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
> index 2a497a7a4840..f67d7391fc21 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
> @@ -24,6 +24,8 @@
> #ifndef __VCN_V1_0_H__
> #define __VCN_V1_0_H__
>
> +void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
> +
> extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
>
> #endif
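
For readers unfamiliar with the pattern being moved into vcn_v1_0.c here: the
ring begin_use/end_use hooks and the delayed idle work cooperate. begin_use
cancels any pending idle work and, if nothing was pending (the block may have
already power-gated), powers the block back up; end_use re-arms the idle work,
whose handler power-gates the block once no fences remain outstanding. The
following is a minimal, illustrative sketch of that handshake using the kernel
workqueue API only; the struct and helper names (my_block, my_block_power,
my_block_busy) are made up for illustration and are not the amdgpu functions
in the patch above.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    /* Toy block with one delayed idle work item (illustrative only). */
    struct my_block {
            struct delayed_work idle_work;
            bool powered;
    };

    /* Same 1 second policy as VCN_IDLE_TIMEOUT in the patch. */
    #define MY_IDLE_TIMEOUT msecs_to_jiffies(1000)

    /* Stand-in for dpm_enable/set_powergating_state in the real driver. */
    static void my_block_power(struct my_block *blk, bool on)
    {
            blk->powered = on;
    }

    /* Stand-in for summing amdgpu_fence_count_emitted() over the rings. */
    static bool my_block_busy(struct my_block *blk)
    {
            return false;
    }

    static void my_idle_work_handler(struct work_struct *work)
    {
            struct my_block *blk =
                    container_of(work, struct my_block, idle_work.work);

            /* Power down only when idle; otherwise check again later. */
            if (!my_block_busy(blk))
                    my_block_power(blk, false);
            else
                    schedule_delayed_work(&blk->idle_work, MY_IDLE_TIMEOUT);
    }

    static void my_block_init(struct my_block *blk)
    {
            INIT_DELAYED_WORK(&blk->idle_work, my_idle_work_handler);
            blk->powered = true;
    }

    static void my_begin_use(struct my_block *blk)
    {
            /* No pending idle work means the handler may already have run. */
            bool power_up = !cancel_delayed_work_sync(&blk->idle_work);

            if (power_up)
                    my_block_power(blk, true);
    }

    static void my_end_use(struct my_block *blk)
    {
            /* Re-arm the idle timer after submitting work to the ring. */
            schedule_delayed_work(&blk->idle_work, MY_IDLE_TIMEOUT);
    }

The return value of cancel_delayed_work_sync() doubles as the "do we need to
power up" decision: if the cancel found pending work, the block never idled
long enough to be gated, so no ungate is needed. The VCN 1.0 version in the
patch layers the DPG pause-state bookkeeping for the shared JPEG 1.0 block on
top of this same skeleton.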