[PATCH 02/29] drm/amd/pm: power up or down vcn by instance
Alex Deucher
alexdeucher at gmail.com
Mon Oct 28 19:07:36 UTC 2024
On Thu, Oct 24, 2024 at 10:48 PM <boyuan.zhang at amd.com> wrote:
>
> From: Boyuan Zhang <boyuan.zhang at amd.com>
>
> For smu ip with multiple vcn instances (smu 11/13/14), remove all the
> for loops in the dpm_set_vcn_enable() functions. And use the instance
> argument to power up/down vcn for the given instance only, instead
> of powering up/down for all vcn instances.
>
> v2: remove all duplicated functions in v1.
>
> remove for-loop from each ip, and temporarily move to dpm_set_vcn_enable,
> in order to keep the exact same logic as before, until further separation
> in the next patch.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang at amd.com>
> Acked-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 9 +++--
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 20 +++++------
> .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 16 ++++-----
> .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 35 ++++++++-----------
> 4 files changed, 35 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index fe2a740766a2..ccacba56159e 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -238,6 +238,7 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
> {
> struct smu_power_context *smu_power = &smu->smu_power;
> struct smu_power_gate *power_gate = &smu_power->power_gate;
> + struct amdgpu_device *adev = smu->adev;
> int ret = 0;
>
> /*
> @@ -252,9 +253,11 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
> if (atomic_read(&power_gate->vcn_gated) ^ enable)
> return 0;
>
> - ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
> - if (!ret)
> - atomic_set(&power_gate->vcn_gated, !enable);
> + for (int i = 0; i < adev->vcn.num_vcn_inst; i++) {
> + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
> + if (ret)
> + return ret;
> + }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index a9cb28ce2133..24cf17e172f4 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -1157,19 +1157,15 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> - /* vcn dpm on is a prerequisite for vcn power gate messages */
> - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - 0x10000 * i, NULL);
> - if (ret)
> - return ret;
> - }
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
> + /* vcn dpm on is a prerequisite for vcn power gate messages */
> + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + 0x10000 * inst, NULL);
> }
>
> return ret;
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> index 2bfea740dace..bb506d15d787 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
> @@ -2108,18 +2108,14 @@ int smu_v13_0_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
>
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - i << 16U, NULL);
> - if (ret)
> - return ret;
> - }
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + inst << 16U, NULL);
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> index ecb0164d533e..5460f8e62264 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
> @@ -1511,29 +1511,24 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu,
> int inst)
> {
> struct amdgpu_device *adev = smu->adev;
> - int i, ret = 0;
> + int ret = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->vcn.harvest_config & (1 << inst))
> + return ret;
>
> - if (smu->is_apu) {
> - if (i == 0)
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
> - i << 16U, NULL);
> - else if (i == 1)
> - ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
> - i << 16U, NULL);
> - } else {
> + if (smu->is_apu) {
> + if (inst == 0)
> ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> - i << 16U, NULL);
> - }
> -
> - if (ret)
> - return ret;
> + SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
> + inst << 16U, NULL);
> + else if (inst == 1)
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
> + inst << 16U, NULL);
> + } else {
> + ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
> + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
> + inst << 16U, NULL);
> }
>
> return ret;
> --
> 2.34.1
>
More information about the amd-gfx
mailing list