[PATCH v3] drm/amd/swsmu: enable more Pstates profile levels for SMU v14.0.0 and v14.0.1
Huang, Tim
Tim.Huang at amd.com
Tue Jul 2 06:28:37 UTC 2024
[AMD Official Use Only - AMD Internal Distribution Only]
This patch is,
Reviewed-by: Tim Huang <tim.huang at amd.com>
> -----Original Message-----
> From: Ma, Li <Li.Ma at amd.com>
> Sent: Tuesday, July 2, 2024 2:20 PM
> To: amd-gfx at lists.freedesktop.org
> Cc: Huang, Tim <Tim.Huang at amd.com>; Deucher, Alexander
> <Alexander.Deucher at amd.com>; Zhang, Yifan <Yifan1.Zhang at amd.com>; Ma,
> Li <Li.Ma at amd.com>
> Subject: [PATCH v3] drm/amd/swsmu: enable more Pstates profile levels for
> SMU v14.0.0 and v14.0.1
>
> V1: This patch enables the following UMD stable Pstates profile
> levels for the power_dpm_force_performance_level interface.
>
> - profile_peak
> - profile_min_mclk
> - profile_min_sclk
> - profile_standard
>
> V2: Fix conflict with commit "drm/amd/pm: smu v14.0.4 reuse smu v14.0.0
> dpmtable"
>
> V3: Add VCLK1 and DCLK1 support for SMU v14.0.1
> and avoid setting VCLK1 and DCLK1 for SMU v14.0.0
>
> Signed-off-by: Li Ma <li.ma at amd.com>
> ---
> .../drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 152
> ++++++++++++++++--
> 1 file changed, 142 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
> index 3a9d58c036ea..5d47d58944f6 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
> @@ -65,6 +65,10 @@
>
> #define SMU_MALL_PG_CONFIG_DEFAULT
> SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
>
> +#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700
> +#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678
> +#define SMU_14_0_0_UMD_PSTATE_FCLK 1800
> +
> #define FEATURE_MASK(feature) (1ULL << feature) #define
> SMC_DPM_FEATURE ( \
> FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ @@ -725,7 +729,7 @@
> static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context
> *smu, {
> if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0,
> 1))
> smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level,
> freq);
> - else
> + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1)
> smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level,
> freq);
>
> return 0;
> @@ -818,9 +822,11 @@ static int
> smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
> break;
> case SMU_MCLK:
> case SMU_UCLK:
> - case SMU_FCLK:
> max_dpm_level = 0;
> break;
> + case SMU_FCLK:
> + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
> + break;
> case SMU_SOCCLK:
> max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
> break;
> @@ -855,7 +861,7 @@ static int
> smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
> min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
> break;
> case SMU_FCLK:
> - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
> + min_dpm_level = 0;
> break;
> case SMU_SOCCLK:
> min_dpm_level = 0;
> @@ -936,9 +942,11 @@ static int
> smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
> break;
> case SMU_MCLK:
> case SMU_UCLK:
> - case SMU_FCLK:
> max_dpm_level = 0;
> break;
> + case SMU_FCLK:
> + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
> + break;
> case SMU_SOCCLK:
> max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
> break;
> @@ -969,7 +977,7 @@ static int
> smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
> min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
> break;
> case SMU_FCLK:
> - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
> + min_dpm_level = 0;
> break;
> case SMU_SOCCLK:
> min_dpm_level = 0;
> @@ -1001,7 +1009,7 @@ static int
> smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, {
> if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0,
> 1))
> smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
> - else
> + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1)
> smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
>
> return 0;
> @@ -1020,9 +1028,15 @@ static int
> smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
> case SMU_VCLK:
> member_type = METRICS_AVERAGE_VCLK;
> break;
> + case SMU_VCLK1:
> + member_type = METRICS_AVERAGE_VCLK1;
> + break;
> case SMU_DCLK:
> member_type = METRICS_AVERAGE_DCLK;
> break;
> + case SMU_DCLK1:
> + member_type = METRICS_AVERAGE_DCLK1;
> + break;
> case SMU_MCLK:
> member_type = METRICS_AVERAGE_UCLK;
> break;
> @@ -1106,7 +1120,7 @@ static int
> smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, {
> if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0,
> 1))
> smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
> - else
> + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1)
> smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
>
> return 0;
> @@ -1250,6 +1264,8 @@ static int smu_v14_0_0_force_clk_levels(struct
> smu_context *smu,
> case SMU_FCLK:
> case SMU_VCLK:
> case SMU_DCLK:
> + case SMU_VCLK1:
> + case SMU_DCLK1:
> ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type,
> soft_min_level, &min_freq);
> if (ret)
> break;
> @@ -1268,13 +1284,67 @@ static int smu_v14_0_0_force_clk_levels(struct
> smu_context *smu,
> return ret;
> }
>
> -static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
> +static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context
> *smu,
> + enum amd_dpm_forced_level level,
> + enum smu_clk_type clk_type,
> + uint32_t *min_clk,
> + uint32_t *max_clk)
> +{
> + uint32_t clk_limit = 0;
> + int ret = 0;
> +
> + switch (clk_type) {
> + case SMU_GFXCLK:
> + case SMU_SCLK:
> + clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK;
> + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK,
> NULL, &clk_limit);
> + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK,
> &clk_limit, NULL);
> + break;
> + case SMU_SOCCLK:
> + clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK;
> + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> + smu_v14_0_common_get_dpm_ultimate_freq(smu,
> SMU_SOCCLK, NULL, &clk_limit);
> + break;
> + case SMU_FCLK:
> + clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK;
> + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK,
> NULL, &clk_limit);
> + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK,
> &clk_limit, NULL);
> + break;
> + case SMU_VCLK:
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK,
> NULL, &clk_limit);
> + break;
> + case SMU_VCLK1:
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1,
> NULL, &clk_limit);
> + break;
> + case SMU_DCLK:
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK,
> NULL, &clk_limit);
> + break;
> + case SMU_DCLK1:
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1,
> NULL, &clk_limit);
> + break;
> + default:
> + ret = -EINVAL;
> + break;
> + }
> + *min_clk = *max_clk = clk_limit;
> + return ret;
> +}
> +
> +static int smu_v14_0_common_set_performance_level(struct smu_context
> +*smu,
> enum amd_dpm_forced_level level) {
> struct amdgpu_device *adev = smu->adev;
> uint32_t sclk_min = 0, sclk_max = 0;
> uint32_t fclk_min = 0, fclk_max = 0;
> uint32_t socclk_min = 0, socclk_max = 0;
> + uint32_t vclk_min = 0, vclk_max = 0;
> + uint32_t dclk_min = 0, dclk_max = 0;
> + uint32_t vclk1_min = 0, vclk1_max = 0;
> + uint32_t dclk1_min = 0, dclk1_max = 0;
> int ret = 0;
>
> switch (level) {
> @@ -1282,28 +1352,54 @@ static int
> smu_v14_0_0_set_performance_level(struct smu_context *smu,
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK,
> NULL, &sclk_max);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK,
> NULL, &fclk_max);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK,
> NULL, &socclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK,
> NULL, &vclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK,
> NULL, &dclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1,
> NULL, &vclk1_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1,
> NULL,
> +&dclk1_max);
> sclk_min = sclk_max;
> fclk_min = fclk_max;
> socclk_min = socclk_max;
> + vclk_min = vclk_max;
> + dclk_min = dclk_max;
> + vclk1_min = vclk1_max;
> + dclk1_min = dclk1_max;
> break;
> case AMD_DPM_FORCED_LEVEL_LOW:
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK,
> &sclk_min, NULL);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK,
> &fclk_min, NULL);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK,
> &socclk_min, NULL);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK,
> &vclk_min, NULL);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK,
> &dclk_min, NULL);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1,
> &vclk1_min, NULL);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1,
> &dclk1_min,
> +NULL);
> sclk_max = sclk_min;
> fclk_max = fclk_min;
> socclk_max = socclk_min;
> + vclk_max = vclk_min;
> + dclk_max = dclk_min;
> + vclk1_max = vclk1_min;
> + dclk1_max = dclk1_min;
> break;
> case AMD_DPM_FORCED_LEVEL_AUTO:
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK,
> &sclk_min, &sclk_max);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK,
> &fclk_min, &fclk_max);
> smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK,
> &socclk_min, &socclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK,
> &vclk_min, &vclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK,
> &dclk_min, &dclk_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1,
> &vclk1_min, &vclk1_max);
> + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1,
> &dclk1_min,
> +&dclk1_max);
> break;
> case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
> case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
> case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
> case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
> - /* Temporarily do nothing since the optimal clocks haven't been
> provided yet */
> + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SCLK,
> &sclk_min, &sclk_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_FCLK,
> &fclk_min, &fclk_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level,
> SMU_SOCCLK, &socclk_min, &socclk_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK,
> &vclk_min, &vclk_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK,
> &dclk_min, &dclk_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK1,
> &vclk1_min, &vclk1_max);
> + smu_v14_0_common_get_dpm_profile_freq(smu, level,
> SMU_DCLK1,
> +&dclk1_min, &dclk1_max);
> break;
> case AMD_DPM_FORCED_LEVEL_MANUAL:
> case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
> @@ -1343,6 +1439,42 @@ static int
> smu_v14_0_0_set_performance_level(struct smu_context *smu,
> return ret;
> }
>
> + if (vclk_min && vclk_max) {
> + ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
> + SMU_VCLK,
> + vclk_min,
> + vclk_max);
> + if (ret)
> + return ret;
> + }
> +
> + if (vclk1_min && vclk1_max) {
> + ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
> + SMU_VCLK1,
> + vclk1_min,
> + vclk1_max);
> + if (ret)
> + return ret;
> + }
> +
> + if (dclk_min && dclk_max) {
> + ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
> + SMU_DCLK,
> + dclk_min,
> + dclk_max);
> + if (ret)
> + return ret;
> + }
> +
> + if (dclk1_min && dclk1_max) {
> + ret = smu_v14_0_0_set_soft_freq_limited_range(smu,
> + SMU_DCLK1,
> + dclk1_min,
> + dclk1_max);
> + if (ret)
> + return ret;
> + }
> +
> return ret;
> }
>
> @@ -1520,7 +1652,7 @@ static const struct pptable_funcs
> smu_v14_0_0_ppt_funcs = {
> .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
> .print_clk_levels = smu_v14_0_0_print_clk_levels,
> .force_clk_levels = smu_v14_0_0_force_clk_levels,
> - .set_performance_level = smu_v14_0_0_set_performance_level,
> + .set_performance_level = smu_v14_0_common_set_performance_level,
> .set_fine_grain_gfx_freq_parameters =
> smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
> .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
> .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
> --
> 2.25.1
More information about the amd-gfx
mailing list