[PATCH] drm/amdgpu/smu: make the set_performance_level logic easier to follow
Quan, Evan
Evan.Quan at amd.com
Tue Dec 24 02:04:07 UTC 2019
Reviewed-by: Evan Quan <evan.quan at amd.com>
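
For anyone skimming the change: the common code no longer falls back to a generic helper when the ASIC call fails; it simply dispatches through each ASIC's .set_performance_level entry in its pptable_funcs table. Arcturus and vega20 point that entry at the shared smu_v11_0_set_performance_level, while navi10 and renoir keep their device-specific peak-clock handling and reuse the generic logic for the other levels. Below is a minimal standalone C sketch of that dispatch pattern; the types and names are simplified stand-ins for illustration, not the real amdgpu structures.

/* Toy illustration of the callback-table dispatch this patch moves to.
 * Names and types are simplified stand-ins, not the actual kernel ones.
 */
#include <stdio.h>
#include <errno.h>

enum forced_level { LEVEL_AUTO, LEVEL_HIGH, LEVEL_LOW, LEVEL_PROFILE_PEAK };

struct smu_ctx;                 /* opaque per-device context */

struct ppt_funcs {
	/* every ASIC now provides this; there is no common fallback */
	int (*set_performance_level)(struct smu_ctx *smu, enum forced_level level);
};

struct smu_ctx {
	const struct ppt_funcs *funcs;
	const char *name;
};

/* shared "generic" handler, in the spirit of smu_v11_0_set_performance_level() */
static int generic_set_performance_level(struct smu_ctx *smu, enum forced_level level)
{
	printf("%s: generic handling of level %d\n", smu->name, (int)level);
	return 0;
}

/* ASIC with extra peak handling, in the spirit of navi10/renoir */
static int asic_b_set_performance_level(struct smu_ctx *smu, enum forced_level level)
{
	if (level == LEVEL_PROFILE_PEAK) {
		printf("%s: peak clocks set by device-specific path\n", smu->name);
		return 0;
	}
	return generic_set_performance_level(smu, level);
}

static const struct ppt_funcs asic_a_funcs = {
	.set_performance_level = generic_set_performance_level,
};

static const struct ppt_funcs asic_b_funcs = {
	.set_performance_level = asic_b_set_performance_level,
};

/* common code: just dispatch and propagate the error, no fallback */
static int adjust_power_state(struct smu_ctx *smu, enum forced_level level)
{
	int ret;

	if (!smu->funcs->set_performance_level)
		return -EINVAL;

	ret = smu->funcs->set_performance_level(smu, level);
	if (ret)
		fprintf(stderr, "Failed to set performance level!\n");
	return ret;
}

int main(void)
{
	struct smu_ctx a = { .funcs = &asic_a_funcs, .name = "asic_a" };
	struct smu_ctx b = { .funcs = &asic_b_funcs, .name = "asic_b" };

	adjust_power_state(&a, LEVEL_HIGH);
	adjust_power_state(&b, LEVEL_PROFILE_PEAK);
	return 0;
}
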
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> On Behalf Of Alex
> Deucher
> Sent: Tuesday, December 24, 2019 3:54 AM
> To: amd-gfx at lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
> Subject: [PATCH] drm/amdgpu/smu: make the set_performance_level logic easier to follow
>
> Have every ASIC provide a callback for this rather than a mix
> of generic and ASIC-specific code.
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 44 +------------------
> drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1 +
> drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 3 ++
> drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 30 +++++++++++--
> drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 30 +++++++++++--
> drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 39 ++++++++++++++++
> drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 1 +
> 7 files changed, 100 insertions(+), 48 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 936c68298786..d07c4f2ccee7 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1607,43 +1607,6 @@ static int smu_enable_umd_pstate(void *handle,
> return 0;
> }
>
> -static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
> -{
> - int ret = 0;
> - uint32_t sclk_mask, mclk_mask, soc_mask;
> -
> - switch (level) {
> - case AMD_DPM_FORCED_LEVEL_HIGH:
> - ret = smu_force_dpm_limit_value(smu, true);
> - break;
> - case AMD_DPM_FORCED_LEVEL_LOW:
> - ret = smu_force_dpm_limit_value(smu, false);
> - break;
> - case AMD_DPM_FORCED_LEVEL_AUTO:
> - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> - ret = smu_unforce_dpm_levels(smu);
> - break;
> - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> - ret = smu_get_profiling_clk_mask(smu, level,
> - &sclk_mask,
> - &mclk_mask,
> - &soc_mask);
> - if (ret)
> - return ret;
> - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
> - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
> - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
> - break;
> - case AMD_DPM_FORCED_LEVEL_MANUAL:
> - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> - default:
> - break;
> - }
> - return ret;
> -}
> -
> int smu_adjust_power_state_dynamic(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> bool skip_display_settings)
> @@ -1681,11 +1644,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
> if (smu_dpm_ctx->dpm_level != level) {
> ret = smu_asic_set_performance_level(smu, level);
> if (ret) {
> - ret = smu_default_set_performance_level(smu, level);
> - if (ret) {
> - pr_err("Failed to set performance level!");
> - return ret;
> - }
> + pr_err("Failed to set performance level!");
> + return ret;
> }
>
> /* update the saved copy */
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index be4ae0aea9a0..dd575a1baeda 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2117,6 +2117,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
> .get_power_profile_mode = arcturus_get_power_profile_mode,
> .set_power_profile_mode = arcturus_set_power_profile_mode,
> + .set_performance_level = smu_v11_0_set_performance_level,
> /* debug (internal used) */
> .dump_pptable = arcturus_dump_pptable,
> .get_power_limit = arcturus_get_power_limit,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 786de7741990..db3f78676aeb 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -262,4 +262,7 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
>
> uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
>
> +int smu_v11_0_set_performance_level(struct smu_context *smu,
> + enum amd_dpm_forced_level level);
> +
> #endif
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index bf87e93b26fc..a8ae5cd969a4 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -1649,19 +1649,43 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
> return ret;
> }
>
> -static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
> +static int navi10_set_performance_level(struct smu_context *smu,
> + enum amd_dpm_forced_level level)
> {
> int ret = 0;
> + uint32_t sclk_mask, mclk_mask, soc_mask;
>
> switch (level) {
> + case AMD_DPM_FORCED_LEVEL_HIGH:
> + ret = smu_force_dpm_limit_value(smu, true);
> + break;
> + case AMD_DPM_FORCED_LEVEL_LOW:
> + ret = smu_force_dpm_limit_value(smu, false);
> + break;
> + case AMD_DPM_FORCED_LEVEL_AUTO:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> + ret = smu_unforce_dpm_levels(smu);
> + break;
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> + ret = smu_get_profiling_clk_mask(smu, level,
> + &sclk_mask,
> + &mclk_mask,
> + &soc_mask);
> + if (ret)
> + return ret;
> + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
> + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
> + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
> + break;
> case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> ret = navi10_set_peak_clock_by_device(smu);
> break;
> + case AMD_DPM_FORCED_LEVEL_MANUAL:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> default:
> - ret = -EINVAL;
> break;
> }
> -
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index 979772dbe6a9..e73644beffd9 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -708,19 +708,43 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
> return ret;
> }
>
> -static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
> +static int renoir_set_performance_level(struct smu_context *smu,
> + enum amd_dpm_forced_level level)
> {
> int ret = 0;
> + uint32_t sclk_mask, mclk_mask, soc_mask;
>
> switch (level) {
> + case AMD_DPM_FORCED_LEVEL_HIGH:
> + ret = smu_force_dpm_limit_value(smu, true);
> + break;
> + case AMD_DPM_FORCED_LEVEL_LOW:
> + ret = smu_force_dpm_limit_value(smu, false);
> + break;
> + case AMD_DPM_FORCED_LEVEL_AUTO:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> + ret = smu_unforce_dpm_levels(smu);
> + break;
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> + ret = smu_get_profiling_clk_mask(smu, level,
> + &sclk_mask,
> + &mclk_mask,
> + &soc_mask);
> + if (ret)
> + return ret;
> + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
> + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
> + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
> + break;
> case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> ret = renoir_set_peak_clock_by_device(smu);
> break;
> + case AMD_DPM_FORCED_LEVEL_MANUAL:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> default:
> - ret = -EINVAL;
> break;
> }
> -
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index 7781d245f8ef..73935cf7ff39 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -1860,3 +1860,42 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize,
> }
> return ret;
> }
> +
> +int smu_v11_0_set_performance_level(struct smu_context *smu,
> + enum amd_dpm_forced_level level)
> +{
> + int ret = 0;
> + uint32_t sclk_mask, mclk_mask, soc_mask;
> +
> + switch (level) {
> + case AMD_DPM_FORCED_LEVEL_HIGH:
> + ret = smu_force_dpm_limit_value(smu, true);
> + break;
> + case AMD_DPM_FORCED_LEVEL_LOW:
> + ret = smu_force_dpm_limit_value(smu, false);
> + break;
> + case AMD_DPM_FORCED_LEVEL_AUTO:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> + ret = smu_unforce_dpm_levels(smu);
> + break;
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> + ret = smu_get_profiling_clk_mask(smu, level,
> + &sclk_mask,
> + &mclk_mask,
> + &soc_mask);
> + if (ret)
> + return ret;
> + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
> + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
> + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
> + break;
> + case AMD_DPM_FORCED_LEVEL_MANUAL:
> + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> + default:
> + break;
> + }
> + return ret;
> +}
> +
> diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> index 250ff5aa1305..534c46bc0146 100644
> --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
> @@ -3194,6 +3194,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
> .get_od_percentage = vega20_get_od_percentage,
> .get_power_profile_mode = vega20_get_power_profile_mode,
> .set_power_profile_mode = vega20_set_power_profile_mode,
> + .set_performance_level = smu_v11_0_set_performance_level,
> .set_od_percentage = vega20_set_od_percentage,
> .set_default_od_settings = vega20_set_default_od_settings,
> .od_edit_dpm_table = vega20_odn_edit_dpm_table,
> --
> 2.24.1
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx