[PATCH V3 16/17] drm/amd/pm: revise the performance level setting APIs
Lazar, Lijo
lijo.lazar at amd.com
Thu Dec 2 15:37:38 UTC 2021
On 12/2/2021 8:39 AM, Evan Quan wrote:
> Avoid cross calls, which make it impossible to enforce lock protection
> on amdgpu_dpm_force_performance_level().
>
> Signed-off-by: Evan Quan <evan.quan at amd.com>
> Change-Id: Ie658140f40ab906ce2ec47576a086062b61076a6
> ---
> drivers/gpu/drm/amd/pm/amdgpu_pm.c | 29 ++++++++++++++++---
> .../gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 17 ++++++-----
> .../gpu/drm/amd/pm/powerplay/amd_powerplay.c | 12 --------
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 12 --------
> 4 files changed, 34 insertions(+), 36 deletions(-)
>
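For context on the locking goal mentioned in the commit message: once the
legacy-dpm and powerplay code no longer call back into the top-level API, the
wrapper itself can take the pm lock for the whole operation. A minimal sketch
of what that could look like, assuming adev->pm.mutex is the lock used by the
follow-up patch in this series (details there may differ):

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_performance_level)
		return 0;

	/* Safe now: no internal caller can re-enter this path and deadlock. */
	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->force_performance_level(adev->powerplay.pp_handle,
						level);
	if (!ret)
		adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
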
> diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> index f5c0ae032954..5e5006af6b75 100644
> --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
> @@ -305,6 +305,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
> enum amd_dpm_forced_level level;
> enum amd_dpm_forced_level current_level;
> int ret = 0;
> + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
> + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
> + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
> + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
>
> if (amdgpu_in_reset(adev))
> return -EPERM;
> @@ -358,10 +362,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
> }
>
> /* profile_exit setting is valid only when current mode is in profile mode */
> - if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
> - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
> - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
> - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
> + if (!(current_level & profile_mode_mask) &&
> (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
> pr_err("Currently not in any profile mode!\n");
> pm_runtime_mark_last_busy(ddev->dev);
> @@ -369,6 +370,26 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
> return -EINVAL;
> }
>
> + if (!(current_level & profile_mode_mask) &&
> + (level & profile_mode_mask)) {
> + /* enter UMD Pstate */
> + amdgpu_device_ip_set_powergating_state(adev,
> + AMD_IP_BLOCK_TYPE_GFX,
> + AMD_PG_STATE_UNGATE);
> + amdgpu_device_ip_set_clockgating_state(adev,
> + AMD_IP_BLOCK_TYPE_GFX,
> + AMD_CG_STATE_UNGATE);
> + } else if ((current_level & profile_mode_mask) &&
> + !(level & profile_mode_mask)) {
> + /* exit UMD Pstate */
> + amdgpu_device_ip_set_clockgating_state(adev,
> + AMD_IP_BLOCK_TYPE_GFX,
> + AMD_CG_STATE_GATE);
> + amdgpu_device_ip_set_powergating_state(adev,
> + AMD_IP_BLOCK_TYPE_GFX,
> + AMD_PG_STATE_GATE);
> + }
> +
> if (amdgpu_dpm_force_performance_level(adev, level)) {
> pm_runtime_mark_last_busy(ddev->dev);
> pm_runtime_put_autosuspend(ddev->dev);
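The enter/exit branches added above are symmetric; purely as an illustration,
they could be folded into a small local helper (hypothetical name) that keeps
the gate/ungate ordering shown in the hunk:

/*
 * Hypothetical helper: ungate powergating then clockgating when entering a
 * profiling (UMD pstate) level, and gate them in the reverse order on exit.
 */
static void amdgpu_pm_toggle_umd_pstate_gating(struct amdgpu_device *adev,
					       bool enter)
{
	if (enter) {
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else {
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}
}
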
> diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> index 3c6ee493e410..9613c6181c17 100644
> --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
> @@ -953,6 +953,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
>
> static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
> {
> + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> struct amdgpu_ps *ps;
> enum amd_pm_state_type dpm_state;
> int ret;
> @@ -976,7 +977,7 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
> else
> return -EINVAL;
>
> - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
> + if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
> printk("switching from power state:\n");
> amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
> printk("switching to power state:\n");
> @@ -985,14 +986,14 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
>
> /* update whether vce is active */
> ps->vce_active = adev->pm.dpm.vce_active;
> - if (adev->powerplay.pp_funcs->display_configuration_changed)
> + if (pp_funcs->display_configuration_changed)
> amdgpu_dpm_display_configuration_changed(adev);
>
> ret = amdgpu_dpm_pre_set_power_state(adev);
> if (ret)
> return ret;
>
> - if (adev->powerplay.pp_funcs->check_state_equal) {
> + if (pp_funcs->check_state_equal) {
> if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
> equal = false;
> }
> @@ -1000,24 +1001,24 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
> if (equal)
> return 0;
>
> - if (adev->powerplay.pp_funcs->set_power_state)
> - adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle);
> + if (pp_funcs->set_power_state)
> + pp_funcs->set_power_state(adev->powerplay.pp_handle);
>
> amdgpu_dpm_post_set_power_state(adev);
>
> adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
> adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
>
> - if (adev->powerplay.pp_funcs->force_performance_level) {
> + if (pp_funcs->force_performance_level) {
> if (adev->pm.dpm.thermal_active) {
> enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
> /* force low perf level for thermal */
> - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
> + pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
> /* save the user's level */
> adev->pm.dpm.forced_level = level;
> } else {
> /* otherwise, user selected level */
> - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
> + pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
> }
> }
>
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> index d57d5c28c013..5a14ddd3ef05 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> @@ -323,12 +323,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
> if (*level & profile_mode_mask) {
> hwmgr->saved_dpm_level = hwmgr->dpm_level;
> hwmgr->en_umd_pstate = true;
> - amdgpu_device_ip_set_powergating_state(hwmgr->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_PG_STATE_UNGATE);
> - amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_CG_STATE_UNGATE);
> }
> } else {
> /* exit umd pstate, restore level, enable gfx cg*/
> @@ -336,12 +330,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
> if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
> *level = hwmgr->saved_dpm_level;
> hwmgr->en_umd_pstate = false;
> - amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_CG_STATE_GATE);
> - amdgpu_device_ip_set_powergating_state(hwmgr->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_PG_STATE_GATE);
> }
> }
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 1edc71dde3e4..e25b3b6fd22d 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -1681,12 +1681,6 @@ static int smu_enable_umd_pstate(void *handle,
> smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
> smu_dpm_ctx->enable_umd_pstate = true;
> smu_gpo_control(smu, false);
> - amdgpu_device_ip_set_powergating_state(smu->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_PG_STATE_UNGATE);
> - amdgpu_device_ip_set_clockgating_state(smu->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_CG_STATE_UNGATE);
Now that this entry point is no longer expected to be used standalone, you
can also remove the enable_umd_pstate callback from the IP block.
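Roughly something along these lines on top of this patch, assuming the
callback is only wired up through the .enable_umd_pstate member of struct
amd_ip_funcs and the swsmu IP funcs table (the exact hookup should be
confirmed against the tree):

--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ static const struct amd_ip_funcs smu_ip_funcs = {
-	.enable_umd_pstate = smu_enable_umd_pstate,

and, once no IP block implements it any more, the member itself:

--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ struct amd_ip_funcs {
-	int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);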
Thanks,
Lijo
> smu_gfx_ulv_control(smu, false);
> smu_deep_sleep_control(smu, false);
> amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
> @@ -1700,12 +1694,6 @@ static int smu_enable_umd_pstate(void *handle,
> amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
> smu_deep_sleep_control(smu, true);
> smu_gfx_ulv_control(smu, true);
> - amdgpu_device_ip_set_clockgating_state(smu->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_CG_STATE_GATE);
> - amdgpu_device_ip_set_powergating_state(smu->adev,
> - AMD_IP_BLOCK_TYPE_GFX,
> - AMD_PG_STATE_GATE);
> smu_gpo_control(smu, true);
> }
> }
>