<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<style type="text/css" style="display:none;"> P {margin-top:0;margin-bottom:0;} </style>
</head>
<body dir="ltr">
<p style="font-family:Arial;font-size:10pt;color:#0078D7;margin:15pt;" align="Left">
[AMD Official Use Only - Internal Distribution Only]<br>
</p>
<br>
<div>
<div style="font-family: Calibri, Arial, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">
I have an improvement suggestion inline. Whether or not it is accepted, the patch is:</div>
<div style="font-family: Calibri, Arial, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">
<br>
</div>
<div style="font-family: Calibri, Arial, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">
Reviewed-by: Yong Zhao <Yong.Zhao@amd.com></div>
<div style="font-family: Calibri, Arial, Helvetica, sans-serif; font-size: 12pt; color: rgb(0, 0, 0);">
<span style="font-family: Calibri, Arial, Helvetica, sans-serif; background-color: rgb(255, 255, 255); display: inline !important">Tested-by: Yong Zhao <Yong.Zhao@amd.com></span><br>
</div>
<div id="appendonsend"></div>
<div style="font-family:Calibri,Arial,Helvetica,sans-serif; font-size:12pt; color:rgb(0,0,0)">
<br>
</div>
<hr tabindex="-1" style="display:inline-block; width:98%">
<div id="divRplyFwdMsg" dir="ltr"><font face="Calibri, sans-serif" color="#000000" style="font-size:11pt"><b>From:</b> Quan, Evan <Evan.Quan@amd.com><br>
<b>Sent:</b> Monday, May 25, 2020 2:37 AM<br>
<b>To:</b> amd-gfx@lists.freedesktop.org <amd-gfx@lists.freedesktop.org><br>
<b>Cc:</b> Deucher, Alexander <Alexander.Deucher@amd.com>; Zhao, Yong <Yong.Zhao@amd.com>; Quan, Evan <Evan.Quan@amd.com><br>
<b>Subject:</b> [PATCH] drm/amd/powerplay: check whether SMU IP is enabled before access</font>
<div> </div>
</div>
<div class="BodyFragment"><font size="2"><span style="font-size:11pt">
<div class="PlainText">Since on early phase of bringup, the SMU IP may be not enabled or<br>
supported. Without this, we may hit null pointer dereference on<br>
accessing smu->adev.<br>
<br>
Change-Id: I644175e926cd4fef8259f89002d6f8eda04fe42c<br>
Signed-off-by: Evan Quan <evan.quan@amd.com><br>
---<br>
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 266 +++++++++------------<br>
1 file changed, 113 insertions(+), 153 deletions(-)<br>
<br>
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c<br>
index a78a1f542ea9..f7428996cc74 100644<br>
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c<br>
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c<br>
@@ -61,7 +61,6 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask<br>
<br>
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
size_t size = 0;<br>
int ret = 0, i = 0;<br>
uint32_t feature_mask[2] = { 0 };<br>
@@ -70,8 +69,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)<br>
uint32_t sort_feature[SMU_FEATURE_COUNT];<br>
uint64_t hw_feature_count = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -155,10 +154,9 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)<br>
uint64_t feature_2_enabled = 0;<br>
uint64_t feature_2_disabled = 0;<br>
uint64_t feature_enables = 0;<br>
- struct amdgpu_device *adev = smu->adev;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -436,11 +434,10 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)<br>
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,<br>
bool gate)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
switch (block_type) {<br>
case AMD_IP_BLOCK_TYPE_UVD:<br>
@@ -577,11 +574,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)<br>
int smu_sys_get_pp_table(struct smu_context *smu, void **table)<br>
{<br>
struct smu_table_context *smu_table = &smu->smu_table;<br>
- struct amdgpu_device *adev = smu->adev;<br>
uint32_t powerplay_table_size;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)<br>
return -EINVAL;<br>
@@ -603,12 +599,11 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)<br>
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)<br>
{<br>
struct smu_table_context *smu_table = &smu->smu_table;<br>
- struct amdgpu_device *adev = smu->adev;<br>
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (header->usStructureSize != size) {<br>
pr_err("pp table size not matched !\n");<br>
@@ -1622,15 +1617,11 @@ static int smu_resume(void *handle)<br>
int smu_display_configuration_change(struct smu_context *smu,<br>
const struct amd_pp_display_configuration *display_config)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int index = 0;<br>
int num_of_active_display = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
-<br>
- if (!is_support_sw_smu(smu->adev))<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!display_config)<br>
return -EINVAL;<br>
@@ -1691,15 +1682,11 @@ int smu_get_current_clocks(struct smu_context *smu,<br>
struct amd_pp_clock_info *clocks)<br>
{<br>
struct amd_pp_simple_clock_info simple_clocks = {0};<br>
- struct amdgpu_device *adev = smu->adev;<br>
struct smu_clock_info hw_clocks;<br>
int ret = 0;<br>
<br>
- if (!is_support_sw_smu(smu->adev))<br>
- return -EINVAL;<br>
-<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -1855,11 +1842,10 @@ int smu_handle_task(struct smu_context *smu,<br>
enum amd_pp_task task_id,<br>
bool lock_needed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (lock_needed)<br>
mutex_lock(&smu->mutex);<br>
@@ -1894,12 +1880,11 @@ int smu_switch_power_profile(struct smu_context *smu,<br>
bool en)<br>
{<br>
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);<br>
- struct amdgpu_device *adev = smu->adev;<br>
long workload;<br>
uint32_t index;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))<br>
return -EINVAL;<br>
@@ -1929,11 +1914,10 @@ int smu_switch_power_profile(struct smu_context *smu,<br>
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)<br>
{<br>
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);<br>
- struct amdgpu_device *adev = smu->adev;<br>
enum amd_dpm_forced_level level;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)<br>
return -EINVAL;<br>
@@ -1948,11 +1932,10 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)<br>
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)<br>
{<br>
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)<br>
return -EINVAL;<br>
@@ -1976,11 +1959,10 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev<br>
<br>
int smu_set_display_count(struct smu_context *smu, uint32_t count)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
ret = smu_init_display_count(smu, count);<br>
@@ -1995,11 +1977,10 @@ int smu_force_clk_levels(struct smu_context *smu,<br>
bool lock_needed)<br>
{<br>
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {<br>
pr_debug("force clock level is for dpm manual mode only.\n");<br>
@@ -2031,6 +2012,9 @@ int smu_set_mp1_state(struct smu_context *smu,<br>
uint16_t msg;<br>
int ret;<br>
<br>
+ if (!smu->pm_enabled)<br>
+ return -EOPNOTSUPP;</div>
<div class="PlainText">[yz] Is this needed? We can just check the condition at the entry functions rather than all functions. This also applies to other similar places like this. <br>
+<br>
mutex_lock(&smu->mutex);<br>
<br>
switch (mp1_state) {<br>
@@ -2067,11 +2051,10 @@ int smu_set_mp1_state(struct smu_context *smu,<br>
int smu_set_df_cstate(struct smu_context *smu,<br>
enum pp_df_cstate state)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)<br>
return 0;<br>
@@ -2089,11 +2072,10 @@ int smu_set_df_cstate(struct smu_context *smu,<br>
<br>
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)<br>
return 0;<br>
@@ -2127,10 +2109,9 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,<br>
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)<br>
{<br>
void *table = smu->smu_table.watermarks_table;<br>
- struct amdgpu_device *adev = smu->adev;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (!table)<br>
return -EINVAL;<br>
@@ -2155,11 +2136,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,<br>
<br>
int smu_set_ac_dc(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
/* controlled by firmware */<br>
if (smu->dc_controlled_by_gpio)<br>
@@ -2219,11 +2199,10 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =<br>
<br>
int smu_load_microcode(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2237,11 +2216,10 @@ int smu_load_microcode(struct smu_context *smu)<br>
<br>
int smu_check_fw_status(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2269,11 +2247,10 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)<br>
<br>
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2290,12 +2267,11 @@ int smu_get_power_limit(struct smu_context *smu,<br>
bool def,<br>
bool lock_needed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
if (lock_needed) {<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
}<br>
@@ -2311,11 +2287,10 @@ int smu_get_power_limit(struct smu_context *smu,<br>
<br>
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2329,11 +2304,10 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)<br>
<br>
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2347,11 +2321,10 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch<br>
<br>
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2365,11 +2338,10 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)<br>
<br>
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2385,11 +2357,10 @@ int smu_od_edit_dpm_table(struct smu_context *smu,<br>
enum PP_OD_DPM_TABLE_COMMAND type,<br>
long *input, uint32_t size)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2405,11 +2376,10 @@ int smu_read_sensor(struct smu_context *smu,<br>
enum amd_pp_sensors sensor,<br>
void *data, uint32_t *size)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2423,11 +2393,10 @@ int smu_read_sensor(struct smu_context *smu,<br>
<br>
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2444,11 +2413,10 @@ int smu_set_power_profile_mode(struct smu_context *smu,<br>
uint32_t param_size,<br>
bool lock_needed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (lock_needed)<br>
mutex_lock(&smu->mutex);<br>
@@ -2465,11 +2433,10 @@ int smu_set_power_profile_mode(struct smu_context *smu,<br>
<br>
int smu_get_fan_control_mode(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2483,11 +2450,10 @@ int smu_get_fan_control_mode(struct smu_context *smu)<br>
<br>
int smu_set_fan_control_mode(struct smu_context *smu, int value)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2501,11 +2467,10 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)<br>
<br>
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2519,11 +2484,10 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)<br>
<br>
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2537,11 +2501,10 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)<br>
<br>
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2555,11 +2518,10 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)<br>
<br>
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2573,11 +2535,10 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)<br>
<br>
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
if (smu->ppt_funcs->set_active_display_count)<br>
ret = smu->ppt_funcs->set_active_display_count(smu, count);<br>
@@ -2589,11 +2550,10 @@ int smu_get_clock_by_type(struct smu_context *smu,<br>
enum amd_pp_clock_type type,<br>
struct amd_pp_clocks *clocks)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2608,11 +2568,10 @@ int smu_get_clock_by_type(struct smu_context *smu,<br>
int smu_get_max_high_clocks(struct smu_context *smu,<br>
struct amd_pp_simple_clock_info *clocks)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2628,11 +2587,10 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,<br>
enum smu_clk_type clk_type,<br>
struct pp_clock_levels_with_latency *clocks)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2648,11 +2606,10 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,<br>
enum amd_pp_clock_type type,<br>
struct pp_clock_levels_with_voltage *clocks)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2668,11 +2625,10 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,<br>
int smu_display_clock_voltage_request(struct smu_context *smu,<br>
struct pp_display_clock_request *clock_req)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2687,11 +2643,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,<br>
<br>
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = -EINVAL;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2705,11 +2660,10 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl<br>
<br>
int smu_notify_smu_enable_pwe(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2724,11 +2678,10 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)<br>
int smu_set_xgmi_pstate(struct smu_context *smu,<br>
uint32_t pstate)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2742,11 +2695,10 @@ int smu_set_xgmi_pstate(struct smu_context *smu,<br>
<br>
int smu_set_azalia_d3_pme(struct smu_context *smu)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2770,6 +2722,9 @@ bool smu_baco_is_support(struct smu_context *smu)<br>
{<br>
bool ret = false;<br>
<br>
+ if (!smu->pm_enabled)<br>
+ return false;<br>
+<br>
mutex_lock(&smu->mutex);<br>
<br>
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)<br>
@@ -2796,6 +2751,9 @@ int smu_baco_enter(struct smu_context *smu)<br>
{<br>
int ret = 0;<br>
<br>
+ if (!smu->pm_enabled)<br>
+ return -EOPNOTSUPP;<br>
+<br>
mutex_lock(&smu->mutex);<br>
<br>
if (smu->ppt_funcs->baco_enter)<br>
@@ -2810,6 +2768,9 @@ int smu_baco_exit(struct smu_context *smu)<br>
{<br>
int ret = 0;<br>
<br>
+ if (!smu->pm_enabled)<br>
+ return -EOPNOTSUPP;<br>
+<br>
mutex_lock(&smu->mutex);<br>
<br>
if (smu->ppt_funcs->baco_exit)<br>
@@ -2824,6 +2785,9 @@ int smu_mode2_reset(struct smu_context *smu)<br>
{<br>
int ret = 0;<br>
<br>
+ if (!smu->pm_enabled)<br>
+ return -EOPNOTSUPP;<br>
+<br>
mutex_lock(&smu->mutex);<br>
<br>
if (smu->ppt_funcs->mode2_reset)<br>
@@ -2837,11 +2801,10 @@ int smu_mode2_reset(struct smu_context *smu)<br>
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,<br>
struct pp_smu_nv_clock_table *max_clocks)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2857,11 +2820,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,<br>
unsigned int *clock_values_in_khz,<br>
unsigned int *num_states)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2876,10 +2838,9 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,<br>
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)<br>
{<br>
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;<br>
- struct amdgpu_device *adev = smu->adev;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
@@ -2894,11 +2855,10 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)<br>
int smu_get_dpm_clock_table(struct smu_context *smu,<br>
struct dpm_clocks *clock_table)<br>
{<br>
- struct amdgpu_device *adev = smu->adev;<br>
int ret = 0;<br>
<br>
- if (!adev->pm.dpm_enabled)<br>
- return -EINVAL;<br>
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)<br>
+ return -EOPNOTSUPP;<br>
<br>
mutex_lock(&smu->mutex);<br>
<br>
-- <br>
2.26.2<br>
<br>
</div>
</span></font></div>
</div>
</body>
</html>