[PATCH] drm/amd/powerplay: check whether SMU IP is enabled before access
Quan, Evan
Evan.Quan at amd.com
Tue May 26 04:14:53 UTC 2020
Thanks, Yong.
I get your point. But some of these APIs are called by other IPs (for example, smu_get_clock_by_type_with_latency is called by DAL in amdgpu_dm_pp_smu.c).
Without the check inside the API, the caller would need to know whether the SMU IP was added, or even whether the interface is supported (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency). I believe that is not good practice: the caller should not care about the implementation details.
The API itself should handle these situations (SMU IP not added, DPM disabled, interface not supported). So I would like to keep the (!smu->pm_enabled) check in these APIs' implementations.
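To illustrate the pattern (a minimal sketch based on the hunks in the patch below; the ppt_funcs dispatch is paraphrased and the DAL-side wrapper is hypothetical):

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	/* SMU IP not added or DPM disabled: reject before doing anything else. */
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

/* Hypothetical DAL-side caller: no SMU implementation details needed. */
static int dm_get_clock_levels(struct amdgpu_device *adev,
			       enum smu_clk_type clk_type,
			       struct pp_clock_levels_with_latency *clocks)
{
	return smu_get_clock_by_type_with_latency(&adev->smu, clk_type, clocks);
}

With this shape, DAL only needs to handle the -EOPNOTSUPP return code rather than probing adev->smu internals.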
Regards,
Evan
From: Zhao, Yong <Yong.Zhao at amd.com>
Sent: Tuesday, May 26, 2020 2:15 AM
To: Quan, Evan <Evan.Quan at amd.com>; amd-gfx at lists.freedesktop.org
Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
Subject: Re: [PATCH] drm/amd/powerplay: check whether SMU IP is enabled before access
I have an improvement suggestion inline. Whether or not it is accepted, the patch is
Reviewed-by: Yong Zhao <Yong.Zhao at amd.com>
Tested-by: Yong Zhao <Yong.Zhao at amd.com>
________________________________
From: Quan, Evan <Evan.Quan at amd.com>
Sent: Monday, May 25, 2020 2:37 AM
To: amd-gfx at lists.freedesktop.org
Cc: Deucher, Alexander <Alexander.Deucher at amd.com>; Zhao, Yong <Yong.Zhao at amd.com>; Quan, Evan <Evan.Quan at amd.com>
Subject: [PATCH] drm/amd/powerplay: check whether SMU IP is enabled before access
During the early bring-up phase, the SMU IP may not be enabled or
supported. Without this check, we may hit a NULL pointer dereference
when accessing smu->adev.
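For illustration only (a sketch, not part of the patch): the new guard relies on C's short-circuit evaluation, so smu->adev is only dereferenced once smu->pm_enabled confirms the SMU IP block was actually brought up.

	/*
	 * Illustrative sketch: if the SMU IP block was never added,
	 * smu->pm_enabled is still false and smu->adev may still be NULL,
	 * so the first test bails out before smu->adev is dereferenced.
	 */
	if (!smu->pm_enabled ||		/* false when the SMU IP was not added */
	    !smu->adev->pm.dpm_enabled)	/* evaluated only when SMU was set up  */
		return -EOPNOTSUPP;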
Change-Id: I644175e926cd4fef8259f89002d6f8eda04fe42c
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 266 +++++++++------------
1 file changed, 113 insertions(+), 153 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index a78a1f542ea9..f7428996cc74 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -61,7 +61,6 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
- struct amdgpu_device *adev = smu->adev;
size_t size = 0;
int ret = 0, i = 0;
uint32_t feature_mask[2] = { 0 };
@@ -70,8 +69,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -155,10 +154,9 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
- struct amdgpu_device *adev = smu->adev;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -436,11 +434,10 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
@@ -577,11 +574,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct amdgpu_device *adev = smu->adev;
uint32_t powerplay_table_size;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
@@ -603,12 +599,11 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct amdgpu_device *adev = smu->adev;
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
@@ -1622,15 +1617,11 @@ static int smu_resume(void *handle)
int smu_display_configuration_change(struct smu_context *smu,
const struct amd_pp_display_configuration *display_config)
{
- struct amdgpu_device *adev = smu->adev;
int index = 0;
int num_of_active_display = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
-
- if (!is_support_sw_smu(smu->adev))
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!display_config)
return -EINVAL;
@@ -1691,15 +1682,11 @@ int smu_get_current_clocks(struct smu_context *smu,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks = {0};
- struct amdgpu_device *adev = smu->adev;
struct smu_clock_info hw_clocks;
int ret = 0;
- if (!is_support_sw_smu(smu->adev))
- return -EINVAL;
-
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -1855,11 +1842,10 @@ int smu_handle_task(struct smu_context *smu,
enum amd_pp_task task_id,
bool lock_needed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -1894,12 +1880,11 @@ int smu_switch_power_profile(struct smu_context *smu,
bool en)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- struct amdgpu_device *adev = smu->adev;
long workload;
uint32_t index;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
@@ -1929,11 +1914,10 @@ int smu_switch_power_profile(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- struct amdgpu_device *adev = smu->adev;
enum amd_dpm_forced_level level;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1948,11 +1932,10 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1976,11 +1959,10 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
ret = smu_init_display_count(smu, count);
@@ -1995,11 +1977,10 @@ int smu_force_clk_levels(struct smu_context *smu,
bool lock_needed)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("force clock level is for dpm manual mode only.\n");
@@ -2031,6 +2012,9 @@ int smu_set_mp1_state(struct smu_context *smu,
uint16_t msg;
int ret;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
[yz] Is this needed? We can just check the condition in the entry functions rather than in all of them. The same applies to other similar places.
+
mutex_lock(&smu->mutex);
switch (mp1_state) {
@@ -2067,11 +2051,10 @@ int smu_set_mp1_state(struct smu_context *smu,
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
@@ -2089,11 +2072,10 @@ int smu_set_df_cstate(struct smu_context *smu,
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
return 0;
@@ -2127,10 +2109,9 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
void *table = smu->smu_table.watermarks_table;
- struct amdgpu_device *adev = smu->adev;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (!table)
return -EINVAL;
@@ -2155,11 +2136,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
int smu_set_ac_dc(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
/* controlled by firmware */
if (smu->dc_controlled_by_gpio)
@@ -2219,11 +2199,10 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
int smu_load_microcode(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2237,11 +2216,10 @@ int smu_load_microcode(struct smu_context *smu)
int smu_check_fw_status(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2269,11 +2247,10 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2290,12 +2267,11 @@ int smu_get_power_limit(struct smu_context *smu,
bool def,
bool lock_needed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (lock_needed) {
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
}
@@ -2311,11 +2287,10 @@ int smu_get_power_limit(struct smu_context *smu,
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2329,11 +2304,10 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2347,11 +2321,10 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2365,11 +2338,10 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2385,11 +2357,10 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2405,11 +2376,10 @@ int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2423,11 +2393,10 @@ int smu_read_sensor(struct smu_context *smu,
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2444,11 +2413,10 @@ int smu_set_power_profile_mode(struct smu_context *smu,
uint32_t param_size,
bool lock_needed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -2465,11 +2433,10 @@ int smu_set_power_profile_mode(struct smu_context *smu,
int smu_get_fan_control_mode(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2483,11 +2450,10 @@ int smu_get_fan_control_mode(struct smu_context *smu)
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2501,11 +2467,10 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2519,11 +2484,10 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2537,11 +2501,10 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2555,11 +2518,10 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2573,11 +2535,10 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
@@ -2589,11 +2550,10 @@ int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2608,11 +2568,10 @@ int smu_get_clock_by_type(struct smu_context *smu,
int smu_get_max_high_clocks(struct smu_context *smu,
struct amd_pp_simple_clock_info *clocks)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2628,11 +2587,10 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
enum smu_clk_type clk_type,
struct pp_clock_levels_with_latency *clocks)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2648,11 +2606,10 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2668,11 +2625,10 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2687,11 +2643,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
- struct amdgpu_device *adev = smu->adev;
int ret = -EINVAL;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2705,11 +2660,10 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2724,11 +2678,10 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2742,11 +2695,10 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2770,6 +2722,9 @@ bool smu_baco_is_support(struct smu_context *smu)
{
bool ret = false;
+ if (!smu->pm_enabled)
+ return false;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
@@ -2796,6 +2751,9 @@ int smu_baco_enter(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->baco_enter)
@@ -2810,6 +2768,9 @@ int smu_baco_exit(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->baco_exit)
@@ -2824,6 +2785,9 @@ int smu_mode2_reset(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->mode2_reset)
@@ -2837,11 +2801,10 @@ int smu_mode2_reset(struct smu_context *smu)
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2857,11 +2820,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
unsigned int *clock_values_in_khz,
unsigned int *num_states)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2876,10 +2838,9 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
- struct amdgpu_device *adev = smu->adev;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
@@ -2894,11 +2855,10 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!adev->pm.dpm_enabled)
- return -EINVAL;
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
--
2.26.2