[PATCH] drm/amd/pp: need to notify umd the pstate clock.
Rex Zhu
Rex.Zhu at amd.com
Wed Dec 13 09:52:30 UTC 2017
Cache the stable pstate sclk/mclk in struct pp_hwmgr when the profiling
clock masks are calculated, and flag the matching level with a "P" in the
pp_dpm_sclk/pp_dpm_mclk output so user mode drivers can tell which DPM
level the stable pstate clocks correspond to.

Change-Id: I344731cc6398c40976e08a125808bbfa85cb59a3
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 43 +++++++++++++---------
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 36 ++++++++++++++----
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +
3 files changed, 56 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8edb0c4..ecf9449 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2590,8 +2590,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
+ tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+ }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -2603,6 +2605,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+
+ hwmgr->pstate_sclk = tmp_sclk;
+ hwmgr->pstate_mclk = tmp_mclk;
+
return 0;
}
@@ -2614,6 +2620,10 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
+ ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+ if (ret)
+ return ret;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -2628,9 +2638,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
- if (ret)
- return ret;
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
@@ -4292,7 +4299,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
+ int i, now = -1, p = -1, size = 0;
uint32_t clock, pcie_speed;
switch (type) {
@@ -4301,32 +4308,34 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
+ if (hwmgr->pstate_sclk == sclk_table->dpm_levels[i].value)
+ p = i;
+ if (now < 0 && clock <= sclk_table->dpm_levels[i].value)
+ now = i;
}
- now = i;
for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
+ (i == now) ? "*" : "",
+ (i == p) ? "P" : "");
break;
case PP_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
+ if (hwmgr->pstate_mclk == mclk_table->dpm_levels[i].value)
+ p = i;
+ if (now < 0 && clock <= mclk_table->dpm_levels[i].value)
+ now = i;
}
- now = i;
for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
+ (i == now) ? "*" : "",
+ (i == p) ? "P" : "");
break;
case PP_PCIE:
pcie_speed = smu7_get_current_pcie_speed(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 07d256d..82c56f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4207,6 +4207,8 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4218,6 +4220,7 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
}
+
return 0;
}
@@ -4248,6 +4251,10 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
+ ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
@@ -4262,9 +4269,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
- if (ret)
- return ret;
vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
break;
@@ -4594,13 +4598,20 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
- int i, now, size = 0;
+ int i, p = -1, now, size = 0;
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
break;
+ for (i = 0; i < sclk_table->count; i++) {
+ if (hwmgr->pstate_sclk == sclk_table->dpm_levels[i].value) {
+ p = i;
+ break;
+ }
+ }
+
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentGfxclkIndex),
"Attempt to get current sclk index Failed!",
@@ -4611,14 +4622,22 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
+ (i == now) ? "*" : "",
+ (i == p) ? "P" : "");
break;
case PP_MCLK:
if (data->registry_data.mclk_dpm_key_disabled)
break;
+ for (i = 0; i < mclk_table->count; i++) {
+ if (hwmgr->pstate_mclk == mclk_table->dpm_levels[i].value) {
+ p = i;
+ break;
+ }
+ }
+
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentUclkIndex),
"Attempt to get current mclk index Failed!",
@@ -4629,9 +4648,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s %s\n",
i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
+ (i == now) ? "*" : "",
+ (i == p) ? "P" : "");
break;
case PP_PCIE:
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 004a40e..568f56c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -780,6 +780,8 @@ struct pp_hwmgr {
struct amd_pp_profile default_compute_power_profile;
enum amd_pp_profile_type current_power_profile;
bool en_umd_pstate;
+ uint32_t pstate_sclk;
+ uint32_t pstate_mclk;
};
struct cgs_irq_src_funcs {
--
1.9.1
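
With this applied, the stable pstate level becomes visible to userspace as a
"P" suffix in the existing pp_dpm_sclk/pp_dpm_mclk sysfs files, next to the
"*" that already marks the current level (a line such as "2: 900Mhz  P"; the
values are illustrative). A minimal userspace sketch for reading it, not part
of the patch itself and assuming the GPU is card0, could look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }

        while (fgets(line, sizeof(line), f)) {
                size_t len = strcspn(line, "\n");

                line[len] = '\0';
                /* this patch appends "P" to the level matching pstate_sclk */
                if (len && line[len - 1] == 'P')
                        printf("stable pstate level: %s\n", line);
                /* "*" still marks the level currently in use */
                if (strchr(line, '*'))
                        printf("current level:       %s\n", line);
        }

        fclose(f);
        return 0;
}

The same parsing applies to pp_dpm_mclk for the memory clock.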