[PATCH 2/3] drm/amd/powerplay: force sclk limit for peak profile
Chengming Gui
Jack.Gui at amd.com
Thu Jul 18 10:02:17 UTC 2019
Force different peak GFX clock limits for the different navi10 SKUs,
keyed off adev->rev_id (see the sketch after the sign-off):
XL  (any other rev_id): 1625 MHz
XT  (rev_id F1/C1):     1755 MHz
XTX (rev_id F0/C0):     1830 MHz
Signed-off-by: Chengming Gui <Jack.Gui@amd.com>
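
Note for reviewers (not part of the change itself): the rev_id to peak
sclk mapping this patch forces can be summarized by the standalone
sketch below. The helper name nv10_peak_sclk_for_rev() is illustrative
only; the actual logic lives in navi10_apply_clocks_adjust_rules() in
the diff, and the values mirror the NV_NV10_* defines added to
navi10_ppt.c.

#include <stdint.h>

/* Same values as the NV_NV10_* defines added in navi10_ppt.c below. */
#define NV_NV10_F0 0xF0
#define NV_NV10_C0 0xC0
#define NV_NV10_F1 0xF1
#define NV_NV10_C1 0xC1

#define NV_NV10_PEAK_SCLK_XTX 1830 /* MHz */
#define NV_NV10_PEAK_SCLK_XT  1755 /* MHz */
#define NV_NV10_PEAK_SCLK_XL  1625 /* MHz */

/* Illustrative helper: map a navi10 rev_id to its forced peak sclk (MHz). */
uint32_t nv10_peak_sclk_for_rev(uint32_t rev_id)
{
	if (rev_id == NV_NV10_F0 || rev_id == NV_NV10_C0)
		return NV_NV10_PEAK_SCLK_XTX;	/* navi10 XTX */
	if (rev_id == NV_NV10_F1 || rev_id == NV_NV10_C1)
		return NV_NV10_PEAK_SCLK_XT;	/* navi10 XT */
	return NV_NV10_PEAK_SCLK_XL;		/* navi10 XL (all other rev_ids) */
}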
---
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 +
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 2 +
drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 66 +++++++++++++++++++++++++-
3 files changed, 68 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 122985c..693414f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -664,6 +664,8 @@ static int smu_sw_init(void *handle)
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->smu_dpm.default_sclk_limit = 0;
+ smu->smu_dpm.peak_sclk_limit = 0;
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 135a323..acb522b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -441,6 +441,8 @@ struct smu_dpm_context {
void *dpm_context;
void *golden_dpm_context;
bool enable_umd_pstate;
+ uint32_t default_sclk_limit;
+ uint32_t peak_sclk_limit;
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level requested_dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 895a4e5..b4deb9e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -37,6 +37,15 @@
#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#define NV_NV10_F0 0xF0
+#define NV_NV10_C0 0xC0
+#define NV_NV10_F1 0xF1
+#define NV_NV10_C1 0xC1
+
+#define NV_NV10_PEAK_SCLK_XTX 1830
+#define NV_NV10_PEAK_SCLK_XT 1755
+#define NV_NV10_PEAK_SCLK_XL 1625
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -675,6 +684,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
int ret = 0, size = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -682,6 +692,23 @@ static int navi10_force_clk_levels(struct smu_context *smu,
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
+ if (smu_dpm_ctx->peak_sclk_limit) {
+ max_freq = smu_dpm_ctx->peak_sclk_limit;
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return size;
+ } else {
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ if (ret)
+ return size;
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ if (ret)
+ return size;
+ }
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return size;
+ break;
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
@@ -690,11 +717,9 @@ static int navi10_force_clk_levels(struct smu_context *smu,
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
-
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
return size;
-
ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
if (ret)
return size;
@@ -838,6 +863,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
int ret = 0, i = 0;
uint32_t min_freq, max_freq;
enum smu_clk_type clk_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum smu_clk_type clks[] = {
SMU_GFXCLK,
@@ -851,10 +877,18 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
+ if (clk_type == SMU_GFXCLK && smu_dpm_ctx->default_sclk_limit != 0) {
+ max_freq = smu_dpm_ctx->default_sclk_limit;
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
+ &min_freq, NULL);
+ if (ret)
+ return ret;
+ }
ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
if (ret)
return ret;
}
+ smu_dpm_ctx->peak_sclk_limit = 0;
return ret;
}
@@ -1531,6 +1565,33 @@ static int navi10_set_ppfeature_status(struct smu_context *smu,
return 0;
}
+static int navi10_apply_clocks_adjust_rules(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_dpm_ctx->default_sclk_limit == 0) {
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL,
+ &smu_dpm_ctx->default_sclk_limit);
+ return ret;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
+ smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+ if (adev->rev_id == NV_NV10_F0 || adev->rev_id == NV_NV10_C0)
+ smu_dpm_ctx->peak_sclk_limit = NV_NV10_PEAK_SCLK_XTX;
+ else if (adev->rev_id == NV_NV10_F1 || adev->rev_id == NV_NV10_C1)
+ smu_dpm_ctx->peak_sclk_limit = NV_NV10_PEAK_SCLK_XT;
+ else
+ smu_dpm_ctx->peak_sclk_limit = NV_NV10_PEAK_SCLK_XL;
+ } else if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && smu_dpm_ctx->peak_sclk_limit != 0) {
+ smu_dpm_ctx->peak_sclk_limit = 0;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1566,6 +1627,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
.get_ppfeature_status = navi10_get_ppfeature_status,
.set_ppfeature_status = navi10_set_ppfeature_status,
+ .apply_clocks_adjust_rules = navi10_apply_clocks_adjust_rules,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
--
2.7.4