[PATCH 2/2] drm/amd/powerplay: input check for unsupported message/clock index
Evan Quan
evan.quan at amd.com
Fri Jul 12 07:35:05 UTC 2019
Not all SMU messages/clocks are supported on every SMU11 ASIC. Validate
the index returned by the mapping helpers so that unsupported ones are
not silently handled in the wrong way.
Change-Id: I440b80833c81066cd36613beae555f2fa068199f
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 18 ++++++++----
drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 31 +++++++++++++++-----
drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 33 ++++++++++++++++++----
drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 31 +++++++++++++++-----
4 files changed, 88 insertions(+), 25 deletions(-)
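For reviewers, the series boils down to the pattern sketched below. This is an
illustrative sketch only, not part of the patch: example_send_clk_msg() is a
made-up wrapper, while smu_clk_get_index(), smu_send_smc_msg_with_param() and
SMU_MSG_SetHardMinByFreq are the helpers/messages actually touched by the diff.
The idea is to resolve the ASIC-specific index first and treat a negative
return as "unsupported", instead of shifting the raw return value into the SMC
message parameter.

    /* Sketch only (hypothetical wrapper, not in the patch): bail out on an
     * unsupported clock before the index is shifted into the message parameter.
     */
    static int example_send_clk_msg(struct smu_context *smu,
                                    enum smu_clk_type clk_type,
                                    uint32_t freq)
    {
            int clk_id = smu_clk_get_index(smu, clk_type);

            if (clk_id < 0)         /* clock not supported on this ASIC */
                    return -EINVAL;

            return smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
                                               (clk_id << 16) | freq);
    }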
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 69a3078181ef..7015fe1011e8 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -307,7 +307,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count)
+ if (!table_data || table_id >= smu_table->table_count || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -427,10 +427,12 @@ int smu_feature_init_dpm(struct smu_context *smu)
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -445,10 +447,12 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
@@ -471,10 +475,12 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return 0;
WARN_ON(feature_id > feature->feature_num);
@@ -490,10 +496,12 @@ int smu_feature_set_supported(struct smu_context *smu,
bool enable)
{
struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_id;
+ int feature_id;
int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
+ if (feature_id < 0)
+ return -EINVAL;
WARN_ON(feature_id > feature->feature_num);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index e5fa82e10535..4cb0c18b12ce 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -216,8 +216,10 @@ static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = navi10_message_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU message: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -230,8 +232,10 @@ static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = navi10_clk_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU clock: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -244,8 +248,10 @@ static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = navi10_feature_mask_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU feature: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -258,8 +264,10 @@ static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = navi10_table_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU table: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -272,8 +280,10 @@ static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = navi10_pwr_src_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported power source: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -287,8 +297,10 @@ static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P
return -EINVAL;
mapping = navi10_workload_map[profile];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported workload: %d\n", (int)profile);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -971,7 +983,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1004,6 +1016,9 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
(void *)(&activity_monitor), false);
@@ -1132,6 +1147,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c60899de88bb..12b7f763dddd 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -922,11 +922,17 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
enum smu_clk_type clock_select)
{
int ret = 0;
+ int clk_id;
if (!smu->pm_enabled)
return ret;
+
+ clk_id = smu_clk_get_index(smu, clock_select);
+ if (clk_id < 0)
+ return -EINVAL;
+
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
@@ -941,7 +947,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- smu_clk_get_index(smu, clock_select) << 16);
+ clk_id << 16);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
@@ -1037,6 +1043,11 @@ static int smu_v11_0_get_power_limit(struct smu_context *smu,
bool get_default)
{
int ret = 0;
+ int power_src;
+
+ power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
+ if (power_src < 0)
+ return -EINVAL;
if (get_default) {
mutex_lock(&smu->mutex);
@@ -1048,7 +1059,7 @@ static int smu_v11_0_get_power_limit(struct smu_context *smu,
mutex_unlock(&smu->mutex);
} else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16);
+ power_src << 16);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
@@ -1091,16 +1102,21 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
{
int ret = 0;
uint32_t freq = 0;
+ int asic_clk_id;
if (clk_id >= SMU_CLK_COUNT || !value)
return -EINVAL;
+ asic_clk_id = smu_clk_get_index(smu, clk_id);
+ if (asic_clk_id < 0)
+ return -EINVAL;
+
/* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */
- if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0)
+ if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (smu_clk_get_index(smu, clk_id) << 16));
+ (asic_clk_id << 16));
if (ret)
return ret;
@@ -1280,6 +1296,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
int ret = 0;
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+ int clk_id;
if (!smu->pm_enabled)
return -EINVAL;
@@ -1311,9 +1328,13 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (ret)
goto failed;
+	clk_id = smu_clk_get_index(smu, clk_select);
+	if (clk_id < 0) {
+		ret = -EINVAL;
+		goto failed;
+	}
+
mutex_lock(&smu->mutex);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- (smu_clk_get_index(smu, clk_select) << 16) | clk_freq);
+ (clk_id << 16) | clk_freq);
mutex_unlock(&smu->mutex);
}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4685791a65da..dbc17920b879 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -231,8 +231,10 @@ static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = vega20_table_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU table: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -245,8 +247,10 @@ static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = vega20_pwr_src_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported power source: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -259,8 +263,10 @@ static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = vega20_feature_mask_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU feature: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -273,8 +279,10 @@ static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = vega20_clk_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU clock: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -287,8 +295,10 @@ static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
return -EINVAL;
mapping = vega20_message_map[index];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU message: %d\n", index);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -301,8 +311,10 @@ static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P
return -EINVAL;
mapping = vega20_workload_map[profile];
- if (!(mapping.valid_mapping))
+ if (!(mapping.valid_mapping)) {
+ pr_warn("Unsupported SMU workload: %d\n", (int)profile);
return -EINVAL;
+ }
return mapping.map_to;
}
@@ -1778,7 +1790,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
uint32_t i, size = 0;
- uint16_t workload_type = 0;
+ int16_t workload_type = 0;
static const char *profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
@@ -1811,6 +1823,9 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ return -EINVAL;
+
result = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
(void *)(&activity_monitor), false);
@@ -1963,6 +1978,8 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+ if (workload_type < 0)
+ return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type);
--
2.21.0