[PATCH] drm/amd/powerplay: swSMU code cleanup

Quan, Evan <Evan.Quan at amd.com>
Fri Oct 25 06:24:40 UTC 2019


Put common code in smu_v11_0.c/h, smu_v12_0.c/h and
smu_internal.c/h. Also drop redundant APIs. The exported smu_*()
wrappers now take smu->mutex themselves, so the extra lock_needed
parameters of smu_get_dpm_freq_range(), smu_force_clk_levels(),
smu_handle_task(), smu_get_power_limit() and
smu_set_power_profile_mode() are dropped.

Change-Id: I9c51effadf58dce9d891b9b83bb05f7388a0d7d5
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
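For reviewers: caller-side, the lock_needed removal looks like the
illustrative before/after below (based on the amdgpu_pm.c hunks in
this patch):

	/* before: caller decides whether the wrapper takes smu->mutex */
	ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);

	/* after: the wrapper always takes smu->mutex itself */
	ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);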
 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c       |    6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c        |   21 +-
 drivers/gpu/drm/amd/powerplay/Makefile        |    3 +-
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 2019 ++++++-----------
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |   70 +-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    |   82 +-
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |   18 +-
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |    4 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  152 +-
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |   33 +-
 drivers/gpu/drm/amd/powerplay/smu_internal.c  |  490 ++++
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |   74 +-
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c     |  207 +-
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c     |   20 +-
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |   80 +-
 15 files changed, 1635 insertions(+), 1644 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/powerplay/smu_internal.c
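 Every wrapper kept in amdgpu_smu.c converges on one shape: take
 smu->mutex, dispatch through ppt_funcs (or a lock-free smu_cmn_*()
 internal variant), unlock. A minimal sketch of that shape, mirroring
 the smu_get_power_limit() hunk below:

	int smu_get_power_limit(struct smu_context *smu, uint32_t *limit,
				bool def)
	{
		int ret = 0;

		mutex_lock(&smu->mutex);
		/* lock-free internal variant, provided by smu_internal.c */
		ret = smu_cmn_get_power_limit(smu, limit, def);
		mutex_unlock(&smu->mutex);

		return ret;
	}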

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 28d32725285b..263265245e19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -912,8 +912,7 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
 					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL,
-					     true);
+					     !low ? &clk_freq : NULL);
 		if (ret)
 			return 0;
 		return clk_freq * 100;
@@ -931,8 +930,7 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
 					     low ? &clk_freq : NULL,
-					     !low ? &clk_freq : NULL,
-					     true);
+					     !low ? &clk_freq : NULL);
 		if (ret)
 			return 0;
 		return clk_freq * 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 660efe009749..5c36cda5548e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -957,7 +957,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
@@ -1004,7 +1004,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
@@ -1044,7 +1044,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
 
@@ -1084,7 +1084,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
 
@@ -1124,7 +1124,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
 
@@ -1164,7 +1164,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	if (is_support_sw_smu(adev))
-		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
 	else if (adev->powerplay.pp_funcs->force_clock_level)
 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
@@ -1356,7 +1356,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	}
 	parameter[parameter_size] = profile_mode;
 	if (is_support_sw_smu(adev))
-		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
+		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
 	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 	if (!ret)
@@ -2065,7 +2065,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, true, true);
+		smu_get_power_limit(&adev->smu, &limit, true);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2083,7 +2083,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	uint32_t limit = 0;
 
 	if (is_support_sw_smu(adev)) {
-		smu_get_power_limit(&adev->smu, &limit, false,  true);
+		smu_get_power_limit(&adev->smu, &limit, false);
 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -3077,8 +3077,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
 		smu_handle_task(&adev->smu,
 				smu_dpm->dpm_level,
-				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
-				true);
+				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
 	} else {
 		if (adev->powerplay.pp_funcs->dispatch_tasks) {
 			if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 390345f2d601..6a8143aee557 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,8 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
 
 include $(AMD_POWERPLAY)
 
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o vega20_ppt.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o vega20_ppt.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o \
+            smu_internal.o
 
 AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 3ce01e1994fc..dd5f22743982 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -21,14 +21,9 @@
  */
 
 #include "pp_debug.h"
-#include <linux/firmware.h>
-#include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
-#include "smu_v11_0.h"
-#include "smu_v12_0.h"
 #include "atom.h"
 #include "amd_pcie.h"
 #include "vega20_ppt.h"
@@ -36,671 +31,6 @@
 #include "navi10_ppt.h"
 #include "renoir_ppt.h"
 
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(type)	#type
-static const char* __smu_message_names[] = {
-	SMU_MESSAGE_TYPES
-};
-
-const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
-{
-	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
-		return "unknown smu message";
-	return __smu_message_names[type];
-}
-
-#undef __SMU_DUMMY_MAP
-#define __SMU_DUMMY_MAP(fea)	#fea
-static const char* __smu_feature_names[] = {
-	SMU_FEATURE_MASKS
-};
-
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
-{
-	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
-		return "unknown smu feature";
-	return __smu_feature_names[feature];
-}
-
-size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
-{
-	size_t size = 0;
-	int ret = 0, i = 0;
-	uint32_t feature_mask[2] = { 0 };
-	int32_t feature_index = 0;
-	uint32_t count = 0;
-	uint32_t sort_feature[SMU_FEATURE_COUNT];
-	uint64_t hw_feature_count = 0;
-
-	mutex_lock(&smu->mutex);
-
-	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
-	if (ret)
-		goto failed;
-
-	size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
-			feature_mask[1], feature_mask[0]);
-
-	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
-		feature_index = smu_feature_get_index(smu, i);
-		if (feature_index < 0)
-			continue;
-		sort_feature[feature_index] = i;
-		hw_feature_count++;
-	}
-
-	for (i = 0; i < hw_feature_count; i++) {
-		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
-			       count++,
-			       smu_get_feature_name(smu, sort_feature[i]),
-			       i,
-			       !!smu_feature_is_enabled(smu, sort_feature[i]) ?
-			       "enabled" : "disabled");
-	}
-
-failed:
-	mutex_unlock(&smu->mutex);
-
-	return size;
-}
-
-static int smu_feature_update_enable_state(struct smu_context *smu,
-					   uint64_t feature_mask,
-					   bool enabled)
-{
-	struct smu_feature *feature = &smu->smu_feature;
-	uint32_t feature_low = 0, feature_high = 0;
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return ret;
-
-	feature_low = (feature_mask >> 0 ) & 0xffffffff;
-	feature_high = (feature_mask >> 32) & 0xffffffff;
-
-	if (enabled) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
-						  feature_low);
-		if (ret)
-			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
-						  feature_high);
-		if (ret)
-			return ret;
-	} else {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
-						  feature_low);
-		if (ret)
-			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
-						  feature_high);
-		if (ret)
-			return ret;
-	}
-
-	mutex_lock(&feature->mutex);
-	if (enabled)
-		bitmap_or(feature->enabled, feature->enabled,
-				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-	else
-		bitmap_andnot(feature->enabled, feature->enabled,
-				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-	mutex_unlock(&feature->mutex);
-
-	return ret;
-}
-
-int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
-{
-	int ret = 0;
-	uint32_t feature_mask[2] = { 0 };
-	uint64_t feature_2_enabled = 0;
-	uint64_t feature_2_disabled = 0;
-	uint64_t feature_enables = 0;
-
-	mutex_lock(&smu->mutex);
-
-	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
-	if (ret)
-		goto out;
-
-	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
-
-	feature_2_enabled  = ~feature_enables & new_mask;
-	feature_2_disabled = feature_enables & ~new_mask;
-
-	if (feature_2_enabled) {
-		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
-		if (ret)
-			goto out;
-	}
-	if (feature_2_disabled) {
-		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
-		if (ret)
-			goto out;
-	}
-
-out:
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
-{
-	int ret = 0;
-
-	if (!if_version && !smu_version)
-		return -EINVAL;
-
-	if (if_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
-		if (ret)
-			return ret;
-
-		ret = smu_read_smc_arg(smu, if_version);
-		if (ret)
-			return ret;
-	}
-
-	if (smu_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
-		if (ret)
-			return ret;
-
-		ret = smu_read_smc_arg(smu, smu_version);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max)
-{
-	int ret = 0;
-
-	if (min <= 0 && max <= 0)
-		return -EINVAL;
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type))
-		return 0;
-
-	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
-	return ret;
-}
-
-int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max)
-{
-	int ret = 0, clk_id = 0;
-	uint32_t param;
-
-	if (min <= 0 && max <= 0)
-		return -EINVAL;
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type))
-		return 0;
-
-	clk_id = smu_clk_get_index(smu, clk_type);
-	if (clk_id < 0)
-		return clk_id;
-
-	if (max > 0) {
-		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
-						  param);
-		if (ret)
-			return ret;
-	}
-
-	if (min > 0) {
-		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
-						  param);
-		if (ret)
-			return ret;
-	}
-
-
-	return ret;
-}
-
-int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			   uint32_t *min, uint32_t *max, bool lock_needed)
-{
-	uint32_t clock_limit;
-	int ret = 0;
-
-	if (!min && !max)
-		return -EINVAL;
-
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
-		switch (clk_type) {
-		case SMU_MCLK:
-		case SMU_UCLK:
-			clock_limit = smu->smu_table.boot_values.uclk;
-			break;
-		case SMU_GFXCLK:
-		case SMU_SCLK:
-			clock_limit = smu->smu_table.boot_values.gfxclk;
-			break;
-		case SMU_SOCCLK:
-			clock_limit = smu->smu_table.boot_values.socclk;
-			break;
-		default:
-			clock_limit = 0;
-			break;
-		}
-
-		/* clock in Mhz unit */
-		if (min)
-			*min = clock_limit / 100;
-		if (max)
-			*max = clock_limit / 100;
-	} else {
-		/*
-		 * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
-		 * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
-		 */
-		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
-	}
-
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
-			      uint16_t level, uint32_t *value)
-{
-	int ret = 0, clk_id = 0;
-	uint32_t param;
-
-	if (!value)
-		return -EINVAL;
-
-	if (!smu_clk_dpm_is_enabled(smu, clk_type))
-		return 0;
-
-	clk_id = smu_clk_get_index(smu, clk_type);
-	if (clk_id < 0)
-		return clk_id;
-
-	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
-
-	ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
-					  param);
-	if (ret)
-		return ret;
-
-	ret = smu_read_smc_arg(smu, &param);
-	if (ret)
-		return ret;
-
-	/* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
-	 * now, we un-support it */
-	*value = param & 0x7fffffff;
-
-	return ret;
-}
-
-int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *value)
-{
-	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
-}
-
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
-{
-	enum smu_feature_mask feature_id = 0;
-
-	switch (clk_type) {
-	case SMU_MCLK:
-	case SMU_UCLK:
-		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
-		break;
-	case SMU_GFXCLK:
-	case SMU_SCLK:
-		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
-		break;
-	case SMU_SOCCLK:
-		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
-		break;
-	default:
-		return true;
-	}
-
-	if(!smu_feature_is_enabled(smu, feature_id)) {
-		return false;
-	}
-
-	return true;
-}
-
-
-int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
-			   bool gate)
-{
-	int ret = 0;
-
-	mutex_lock(&smu->mutex);
-
-	switch (block_type) {
-	case AMD_IP_BLOCK_TYPE_UVD:
-		ret = smu_dpm_set_uvd_enable(smu, gate);
-		break;
-	case AMD_IP_BLOCK_TYPE_VCE:
-		ret = smu_dpm_set_vce_enable(smu, gate);
-		break;
-	case AMD_IP_BLOCK_TYPE_GFX:
-		ret = smu_gfx_off_control(smu, gate);
-		break;
-	case AMD_IP_BLOCK_TYPE_SDMA:
-		ret = smu_powergate_sdma(smu, gate);
-		break;
-	default:
-		break;
-	}
-
-	mutex_unlock(&smu->mutex);
-
-	return ret;
-}
-
-int smu_get_power_num_states(struct smu_context *smu,
-			     struct pp_states_info *state_info)
-{
-	if (!state_info)
-		return -EINVAL;
-
-	/* not support power state */
-	memset(state_info, 0, sizeof(struct pp_states_info));
-	state_info->nums = 1;
-	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
-
-	return 0;
-}
-
-int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
-			   void *data, uint32_t *size)
-{
-	struct smu_power_context *smu_power = &smu->smu_power;
-	struct smu_power_gate *power_gate = &smu_power->power_gate;
-	int ret = 0;
-
-	if(!data || !size)
-		return -EINVAL;
-
-	switch (sensor) {
-	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
-		*((uint32_t *)data) = smu->pstate_sclk;
-		*size = 4;
-		break;
-	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
-		*((uint32_t *)data) = smu->pstate_mclk;
-		*size = 4;
-		break;
-	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
-		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
-		*size = 8;
-		break;
-	case AMDGPU_PP_SENSOR_UVD_POWER:
-		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
-		*size = 4;
-		break;
-	case AMDGPU_PP_SENSOR_VCE_POWER:
-		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
-		*size = 4;
-		break;
-	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
-		*size = 4;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (ret)
-		*size = 0;
-
-	return ret;
-}
-
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
-		     void *table_data, bool drv2smu)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct amdgpu_device *adev = smu->adev;
-	struct smu_table *table = NULL;
-	int ret = 0;
-	int table_id = smu_table_get_index(smu, table_index);
-
-	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
-		return -EINVAL;
-
-	table = &smu_table->tables[table_index];
-
-	if (drv2smu)
-		memcpy(table->cpu_addr, table_data, table->size);
-
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
-					  upper_32_bits(table->mc_address));
-	if (ret)
-		return ret;
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
-					  lower_32_bits(table->mc_address));
-	if (ret)
-		return ret;
-	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
-					  SMU_MSG_TransferTableDram2Smu :
-					  SMU_MSG_TransferTableSmu2Dram,
-					  table_id | ((argument & 0xFFFF) << 16));
-	if (ret)
-		return ret;
-
-	/* flush hdp cache */
-	adev->nbio.funcs->hdp_flush(adev, NULL);
-
-	if (!drv2smu)
-		memcpy(table_data, table->cpu_addr, table->size);
-
-	return ret;
-}
-
-bool is_support_sw_smu(struct amdgpu_device *adev)
-{
-	if (adev->asic_type == CHIP_VEGA20)
-		return (amdgpu_dpm == 2) ? true: false;
-	else if (adev->asic_type >= CHIP_ARCTURUS)
-		return true;
-	else
-		return false;
-}
-
-bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
-{
-	if (amdgpu_dpm != 1)
-		return false;
-
-	if (adev->asic_type == CHIP_VEGA20)
-		return true;
-
-	return false;
-}
-
-int smu_sys_get_pp_table(struct smu_context *smu, void **table)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	uint32_t powerplay_table_size;
-
-	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu_table->hardcode_pptable)
-		*table = smu_table->hardcode_pptable;
-	else
-		*table = smu_table->power_play_table;
-
-	powerplay_table_size = smu_table->power_play_table_size;
-
-	mutex_unlock(&smu->mutex);
-
-	return powerplay_table_size;
-}
-
-int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return -EINVAL;
-	if (header->usStructureSize != size) {
-		pr_err("pp table size not matched !\n");
-		return -EIO;
-	}
-
-	mutex_lock(&smu->mutex);
-	if (!smu_table->hardcode_pptable)
-		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
-	if (!smu_table->hardcode_pptable) {
-		ret = -ENOMEM;
-		goto failed;
-	}
-
-	memcpy(smu_table->hardcode_pptable, buf, size);
-	smu_table->power_play_table = smu_table->hardcode_pptable;
-	smu_table->power_play_table_size = size;
-
-	ret = smu_reset(smu);
-	if (ret)
-		pr_info("smu reset failed, ret = %d\n", ret);
-
-failed:
-	mutex_unlock(&smu->mutex);
-	return ret;
-}
-
-int smu_feature_init_dpm(struct smu_context *smu)
-{
-	struct smu_feature *feature = &smu->smu_feature;
-	int ret = 0;
-	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
-
-	if (!smu->pm_enabled)
-		return ret;
-	mutex_lock(&feature->mutex);
-	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
-	mutex_unlock(&feature->mutex);
-
-	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
-					     SMU_FEATURE_MAX/32);
-	if (ret)
-		return ret;
-
-	mutex_lock(&feature->mutex);
-	bitmap_or(feature->allowed, feature->allowed,
-		      (unsigned long *)allowed_feature_mask,
-		      feature->feature_num);
-	mutex_unlock(&feature->mutex);
-
-	return ret;
-}
-
-
-int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
-{
-	struct amdgpu_device *adev = smu->adev;
-	struct smu_feature *feature = &smu->smu_feature;
-	int feature_id;
-	int ret = 0;
-
-	if (adev->flags & AMD_IS_APU)
-		return 1;
-
-	feature_id = smu_feature_get_index(smu, mask);
-	if (feature_id < 0)
-		return 0;
-
-	WARN_ON(feature_id > feature->feature_num);
-
-	mutex_lock(&feature->mutex);
-	ret = test_bit(feature_id, feature->enabled);
-	mutex_unlock(&feature->mutex);
-
-	return ret;
-}
-
-int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
-			    bool enable)
-{
-	struct smu_feature *feature = &smu->smu_feature;
-	int feature_id;
-
-	feature_id = smu_feature_get_index(smu, mask);
-	if (feature_id < 0)
-		return -EINVAL;
-
-	WARN_ON(feature_id > feature->feature_num);
-
-	return smu_feature_update_enable_state(smu,
-					       1ULL << feature_id,
-					       enable);
-}
-
-int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
-{
-	struct smu_feature *feature = &smu->smu_feature;
-	int feature_id;
-	int ret = 0;
-
-	feature_id = smu_feature_get_index(smu, mask);
-	if (feature_id < 0)
-		return 0;
-
-	WARN_ON(feature_id > feature->feature_num);
-
-	mutex_lock(&feature->mutex);
-	ret = test_bit(feature_id, feature->supported);
-	mutex_unlock(&feature->mutex);
-
-	return ret;
-}
-
-int smu_feature_set_supported(struct smu_context *smu,
-			      enum smu_feature_mask mask,
-			      bool enable)
-{
-	struct smu_feature *feature = &smu->smu_feature;
-	int feature_id;
-	int ret = 0;
-
-	feature_id = smu_feature_get_index(smu, mask);
-	if (feature_id < 0)
-		return -EINVAL;
-
-	WARN_ON(feature_id > feature->feature_num);
-
-	mutex_lock(&feature->mutex);
-	if (enable)
-		test_and_set_bit(feature_id, feature->supported);
-	else
-		test_and_clear_bit(feature_id, feature->supported);
-	mutex_unlock(&feature->mutex);
-
-	return ret;
-}
-
 static int smu_set_funcs(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = &adev->smu;
@@ -751,26 +81,9 @@ static int smu_late_init(void *handle)
 	if (!smu->pm_enabled)
 		return 0;
 
-	smu_handle_task(&adev->smu,
+	smu_cmn_handle_task(&adev->smu,
 			smu->smu_dpm.dpm_level,
-			AMD_PP_TASK_COMPLETE_INIT,
-			false);
-
-	return 0;
-}
-
-int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
-			    uint16_t *size, uint8_t *frev, uint8_t *crev,
-			    uint8_t **addr)
-{
-	struct amdgpu_device *adev = smu->adev;
-	uint16_t data_start;
-
-	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
-					   size, frev, crev, &data_start))
-		return -EINVAL;
-
-	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+			AMD_PP_TASK_COMPLETE_INIT);
 
 	return 0;
 }
@@ -969,6 +282,46 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
 	return 0;
 }
 
+static int smu_override_pcie_parameters(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t pcie_gen = 0, pcie_width = 0;
+	int ret;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
+	 */
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 6;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 5;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 3;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+
+	if (ret)
+		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+	return ret;
+}
+
 static int smu_smc_table_hw_init(struct smu_context *smu,
 				 bool initialize)
 {
@@ -1069,7 +422,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		 * Set min deep sleep dce fclk with bootup value from vbios via
 		 * SetMinDeepSleepDcefclk MSG.
 		 */
-		ret = smu_set_min_dcef_deep_sleep(smu);
+		ret = smu_set_min_dcef_deep_sleep(smu,
+				smu->smu_table.boot_values.dcefclk / 100);
 		if (ret)
 			return ret;
 	}
@@ -1098,7 +452,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		if (ret)
 			return ret;
 
-		ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
+		ret = smu_cmn_get_power_limit(smu, &smu->default_power_limit, true);
 		if (ret)
 			return ret;
 	}
@@ -1201,6 +555,32 @@ static int smu_start_smc_engine(struct smu_context *smu)
 	return ret;
 }
 
+static int smu_feature_init_dpm(struct smu_context *smu)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	int ret = 0;
+	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
+
+	if (!smu->pm_enabled)
+		return ret;
+	mutex_lock(&feature->mutex);
+	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
+	mutex_unlock(&feature->mutex);
+
+	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
+					     SMU_FEATURE_MAX/32);
+	if (ret)
+		return ret;
+
+	mutex_lock(&feature->mutex);
+	bitmap_or(feature->allowed, feature->allowed,
+		      (unsigned long *)allowed_feature_mask,
+		      feature->feature_num);
+	mutex_unlock(&feature->mutex);
+
+	return ret;
+}
+
 static int smu_hw_init(void *handle)
 {
 	int ret;
@@ -1308,22 +688,6 @@ static int smu_hw_fini(void *handle)
 	return 0;
 }
 
-int smu_reset(struct smu_context *smu)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret = 0;
-
-	ret = smu_hw_fini(adev);
-	if (ret)
-		return ret;
-
-	ret = smu_hw_init(adev);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
 static int smu_suspend(void *handle)
 {
 	int ret;
@@ -1348,158 +712,45 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	if (adev->asic_type >= CHIP_NAVI10 &&
-	    adev->gfx.rlc.funcs->stop)
-		adev->gfx.rlc.funcs->stop(adev);
-
-	return 0;
-}
-
-static int smu_resume(void *handle)
-{
-	int ret;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct smu_context *smu = &adev->smu;
-
-	pr_info("SMU is resuming...\n");
-
-	ret = smu_start_smc_engine(smu);
-	if (ret) {
-		pr_err("SMU is not ready yet!\n");
-		goto failed;
-	}
-
-	ret = smu_smc_table_hw_init(smu, false);
-	if (ret)
-		goto failed;
-
-	ret = smu_start_thermal_control(smu);
-	if (ret)
-		goto failed;
-
-	if (smu->is_apu)
-		smu_set_gfx_cgpg(&adev->smu, true);
-
-	smu->disable_uclk_switch = 0;
-
-	pr_info("SMU is resumed successfully!\n");
-
-	return 0;
-
-failed:
-	return ret;
-}
-
-int smu_display_configuration_change(struct smu_context *smu,
-				     const struct amd_pp_display_configuration *display_config)
-{
-	int index = 0;
-	int num_of_active_display = 0;
-
-	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
-		return -EINVAL;
-
-	if (!display_config)
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
-		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
-				display_config->min_dcef_deep_sleep_set_clk / 100);
-
-	for (index = 0; index < display_config->num_path_including_non_display; index++) {
-		if (display_config->displays[index].controller_id != 0)
-			num_of_active_display++;
-	}
-
-	smu_set_active_display_count(smu, num_of_active_display);
-
-	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
-			   display_config->cpu_cc6_disable,
-			   display_config->cpu_pstate_disable,
-			   display_config->nb_pstate_switch_disable);
-
-	mutex_unlock(&smu->mutex);
-
-	return 0;
-}
-
-static int smu_get_clock_info(struct smu_context *smu,
-			      struct smu_clock_info *clk_info,
-			      enum smu_perf_level_designation designation)
-{
-	int ret;
-	struct smu_performance_level level = {0};
-
-	if (!clk_info)
-		return -EINVAL;
-
-	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
-	if (ret)
-		return -EINVAL;
-
-	clk_info->min_mem_clk = level.memory_clock;
-	clk_info->min_eng_clk = level.core_clock;
-	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
-
-	ret = smu_get_perf_level(smu, designation, &level);
-	if (ret)
-		return -EINVAL;
-
-	clk_info->min_mem_clk = level.memory_clock;
-	clk_info->min_eng_clk = level.core_clock;
-	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
-
+	if (adev->asic_type >= CHIP_NAVI10 &&
+	    adev->gfx.rlc.funcs->stop)
+		adev->gfx.rlc.funcs->stop(adev);
+
 	return 0;
 }
 
-int smu_get_current_clocks(struct smu_context *smu,
-			   struct amd_pp_clock_info *clocks)
+static int smu_resume(void *handle)
 {
-	struct amd_pp_simple_clock_info simple_clocks = {0};
-	struct smu_clock_info hw_clocks;
-	int ret = 0;
-
-	if (!is_support_sw_smu(smu->adev))
-		return -EINVAL;
-
-	mutex_lock(&smu->mutex);
-
-	smu_get_dal_power_level(smu, &simple_clocks);
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct smu_context *smu = &adev->smu;
 
-	if (smu->support_power_containment)
-		ret = smu_get_clock_info(smu, &hw_clocks,
-					 PERF_LEVEL_POWER_CONTAINMENT);
-	else
-		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
+	pr_info("SMU is resuming...\n");
 
+	ret = smu_start_smc_engine(smu);
 	if (ret) {
-		pr_err("Error in smu_get_clock_info\n");
+		pr_err("SMU is not ready yet!\n");
 		goto failed;
 	}
 
-	clocks->min_engine_clock = hw_clocks.min_eng_clk;
-	clocks->max_engine_clock = hw_clocks.max_eng_clk;
-	clocks->min_memory_clock = hw_clocks.min_mem_clk;
-	clocks->max_memory_clock = hw_clocks.max_mem_clk;
-	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
-	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
-	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
-	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+	ret = smu_smc_table_hw_init(smu, false);
+	if (ret)
+		goto failed;
 
-        if (simple_clocks.level == 0)
-                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
-        else
-                clocks->max_clocks_state = simple_clocks.level;
+	ret = smu_start_thermal_control(smu);
+	if (ret)
+		goto failed;
 
-        if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
-                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
-                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
-        }
+	if (smu->is_apu)
+		smu_set_gfx_cgpg(&adev->smu, true);
+
+	smu->disable_uclk_switch = 0;
+
+	pr_info("SMU is resumed successfully!\n");
+
+	return 0;
 
 failed:
-	mutex_unlock(&smu->mutex);
 	return ret;
 }
 
@@ -1559,941 +810,1099 @@ static int smu_enable_umd_pstate(void *handle,
 	return 0;
 }
 
-static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+const struct amd_ip_funcs smu_ip_funcs = {
+	.name = "smu",
+	.early_init = smu_early_init,
+	.late_init = smu_late_init,
+	.sw_init = smu_sw_init,
+	.sw_fini = smu_sw_fini,
+	.hw_init = smu_hw_init,
+	.hw_fini = smu_hw_fini,
+	.suspend = smu_suspend,
+	.resume = smu_resume,
+	.is_idle = NULL,
+	.check_soft_reset = NULL,
+	.wait_for_idle = NULL,
+	.soft_reset = NULL,
+	.set_clockgating_state = smu_set_clockgating_state,
+	.set_powergating_state = smu_set_powergating_state,
+	.enable_umd_pstate = smu_enable_umd_pstate,
+};
+
+const struct amdgpu_ip_block_version smu_v11_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 11,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &smu_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version smu_v12_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_SMC,
+	.major = 12,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &smu_ip_funcs,
+};
+
+int smu_load_microcode(struct smu_context *smu)
 {
 	int ret = 0;
-	uint32_t sclk_mask, mclk_mask, soc_mask;
 
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = smu_force_dpm_limit_value(smu, true);
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = smu_force_dpm_limit_value(smu, false);
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-		ret = smu_unforce_dpm_levels(smu);
-		break;
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-		ret = smu_get_profiling_clk_mask(smu, level,
-						 &sclk_mask,
-						 &mclk_mask,
-						 &soc_mask);
-		if (ret)
-			return ret;
-		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
-		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
-		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
-		break;
-	case AMD_DPM_FORCED_LEVEL_MANUAL:
-	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-	default:
-		break;
-	}
+	mutex_lock(&smu->mutex);
+
+	if (smu->ppt_funcs->load_microcode)
+		ret = smu->ppt_funcs->load_microcode(smu);
+
+	mutex_unlock(&smu->mutex);
+
 	return ret;
 }
 
-int smu_adjust_power_state_dynamic(struct smu_context *smu,
-				   enum amd_dpm_forced_level level,
-				   bool skip_display_settings)
+int smu_check_fw_status(struct smu_context *smu)
 {
 	int ret = 0;
-	int index = 0;
-	long workload;
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-	if (!smu->pm_enabled)
-		return -EINVAL;
+	mutex_lock(&smu->mutex);
 
-	if (!skip_display_settings) {
-		ret = smu_display_config_changed(smu);
-		if (ret) {
-			pr_err("Failed to change display config!");
-			return ret;
-		}
-	}
+	if (smu->ppt_funcs->check_fw_status)
+		ret = smu->ppt_funcs->check_fw_status(smu);
 
-	ret = smu_apply_clocks_adjust_rules(smu);
-	if (ret) {
-		pr_err("Failed to apply clocks adjust rules!");
-		return ret;
-	}
+	mutex_unlock(&smu->mutex);
 
-	if (!skip_display_settings) {
-		ret = smu_notify_smc_dispaly_config(smu);
-		if (ret) {
-			pr_err("Failed to notify smc display config!");
-			return ret;
-		}
-	}
+	return ret;
+}
 
-	if (smu_dpm_ctx->dpm_level != level) {
-		ret = smu_asic_set_performance_level(smu, level);
-		if (ret) {
-			ret = smu_default_set_performance_level(smu, level);
-			if (ret) {
-				pr_err("Failed to set performance level!");
-				return ret;
-			}
-		}
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+	int ret = 0;
 
-		/* update the saved copy */
-		smu_dpm_ctx->dpm_level = level;
-	}
+	mutex_lock(&smu->mutex);
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-		index = fls(smu->workload_mask);
-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
+	if (smu->ppt_funcs->set_fan_speed_rpm)
+		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
 
-		if (smu->power_profile_mode != workload)
-			smu_set_power_profile_mode(smu, &workload, 0, false);
-	}
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_handle_task(struct smu_context *smu,
-		    enum amd_dpm_forced_level level,
-		    enum amd_pp_task task_id,
-		    bool lock_needed)
+int smu_get_power_limit(struct smu_context *smu,
+			uint32_t *limit,
+			bool def)
 {
 	int ret = 0;
 
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
+	ret = smu_cmn_get_power_limit(smu, limit, def);
+	mutex_unlock(&smu->mutex);
 
-	switch (task_id) {
-	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
-		ret = smu_pre_display_config_changed(smu);
-		if (ret)
-			goto out;
-		ret = smu_set_cpu_power_state(smu);
-		if (ret)
-			goto out;
-		ret = smu_adjust_power_state_dynamic(smu, level, false);
-		break;
-	case AMD_PP_TASK_COMPLETE_INIT:
-	case AMD_PP_TASK_READJUST_POWER_STATE:
-		ret = smu_adjust_power_state_dynamic(smu, level, true);
-		break;
-	default:
-		break;
-	}
+	return ret;
+}
 
-out:
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->mutex);
+
+	if (smu->ppt_funcs->set_power_limit)
+		ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_switch_power_profile(struct smu_context *smu,
-			     enum PP_SMC_POWER_PROFILE type,
-			     bool en)
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	long workload;
-	uint32_t index;
+	int ret = 0;
 
-	if (!smu->pm_enabled)
-		return -EINVAL;
+	mutex_lock(&smu->mutex);
 
-	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
-		return -EINVAL;
+	if (smu->ppt_funcs->print_clk_levels)
+		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (!en) {
-		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
-		index = fls(smu->workload_mask);
-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
-	} else {
-		smu->workload_mask |= (1 << smu->workload_prority[type]);
-		index = fls(smu->workload_mask);
-		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload = smu->workload_setting[index];
-	}
+	if (smu->ppt_funcs->get_od_percentage)
+		ret = smu->ppt_funcs->get_od_percentage(smu, type);
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
-		smu_set_power_profile_mode(smu, &workload, 0, false);
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+	int ret = 0;
+
+	mutex_lock(&smu->mutex);
+
+	if (smu->ppt_funcs->set_od_percentage)
+		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
 
 	mutex_unlock(&smu->mutex);
 
-	return 0;
+	return ret;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
+int smu_od_edit_dpm_table(struct smu_context *smu,
+			  enum PP_OD_DPM_TABLE_COMMAND type,
+			  long *input, uint32_t size)
 {
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	enum amd_dpm_forced_level level;
+	int ret = 0;
+
+	mutex_lock(&smu->mutex);
+
+	if (smu->ppt_funcs->od_edit_dpm_table)
+		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+		    enum amd_pp_sensors sensor,
+		    void *data, uint32_t *size)
+{
+	int ret = 0;
 
-	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
-		return -EINVAL;
+	mutex_lock(&smu->mutex);
 
-	mutex_lock(&(smu->mutex));
-	level = smu_dpm_ctx->dpm_level;
-	mutex_unlock(&(smu->mutex));
+	if (smu->ppt_funcs->read_sensor)
+		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
 
-	return level;
+	mutex_unlock(&smu->mutex);
+
+	return ret;
 }
 
-int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
-		return -EINVAL;
-
 	mutex_lock(&smu->mutex);
 
-	ret = smu_enable_umd_pstate(smu, &level);
-	if (ret) {
-		mutex_unlock(&smu->mutex);
-		return ret;
-	}
-
-	ret = smu_handle_task(smu, level,
-			      AMD_PP_TASK_READJUST_POWER_STATE,
-			      false);
+	if (smu->ppt_funcs->get_power_profile_mode)
+		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+int smu_set_power_profile_mode(struct smu_context *smu,
+			       long *param,
+			       uint32_t param_size)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
-	ret = smu_init_display_count(smu, count);
+	ret = smu_cmn_set_power_profile_mode(smu, param, param_size);
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_force_clk_levels(struct smu_context *smu,
-			 enum smu_clk_type clk_type,
-			 uint32_t mask,
-			 bool lock_needed)
+int smu_get_fan_control_mode(struct smu_context *smu)
 {
-	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-		pr_debug("force clock level is for dpm manual mode only.\n");
-		return -EINVAL;
-	}
-
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
-		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+	if (smu->ppt_funcs->get_fan_control_mode)
+		ret = smu->ppt_funcs->get_fan_control_mode(smu);
 
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_mp1_state(struct smu_context *smu,
-		      enum pp_mp1_state mp1_state)
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
-	uint16_t msg;
-	int ret;
-
-	/*
-	 * The SMC is not fully ready. That may be
-	 * expected as the IP may be masked.
-	 * So, just return without error.
-	 */
-	if (!smu->pm_enabled)
-		return 0;
+	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	switch (mp1_state) {
-	case PP_MP1_STATE_SHUTDOWN:
-		msg = SMU_MSG_PrepareMp1ForShutdown;
-		break;
-	case PP_MP1_STATE_UNLOAD:
-		msg = SMU_MSG_PrepareMp1ForUnload;
-		break;
-	case PP_MP1_STATE_RESET:
-		msg = SMU_MSG_PrepareMp1ForReset;
-		break;
-	case PP_MP1_STATE_NONE:
-	default:
-		mutex_unlock(&smu->mutex);
-		return 0;
-	}
-
-	/* some asics may not support those messages */
-	if (smu_msg_get_index(smu, msg) < 0) {
-		mutex_unlock(&smu->mutex);
-		return 0;
-	}
-
-	ret = smu_send_smc_msg(smu, msg);
-	if (ret)
-		pr_err("[PrepareMp1] Failed!\n");
+	if (smu->ppt_funcs->set_fan_control_mode)
+		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_df_cstate(struct smu_context *smu,
-		      enum pp_df_cstate state)
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
 	int ret = 0;
 
-	/*
-	 * The SMC is not fully ready. That may be
-	 * expected as the IP may be masked.
-	 * So, just return without error.
-	 */
-	if (!smu->pm_enabled)
-		return 0;
-
-	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
-		return 0;
-
 	mutex_lock(&smu->mutex);
 
-	ret = smu->ppt_funcs->set_df_cstate(smu, state);
-	if (ret)
-		pr_err("[SetDfCstate] failed!\n");
+	if (smu->ppt_funcs->get_fan_speed_percent)
+		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_write_watermarks_table(struct smu_context *smu)
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
 	int ret = 0;
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *table = NULL;
 
-	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+	mutex_lock(&smu->mutex);
 
-	if (!table->cpu_addr)
-		return -EINVAL;
+	if (smu->ppt_funcs->set_fan_speed_percent)
+		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
 
-	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
-				true);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
 	int ret = 0;
-	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
-	void *table = watermarks->cpu_addr;
 
 	mutex_lock(&smu->mutex);
 
-	if (!smu->disable_watermark &&
-			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-		smu_set_watermarks_table(smu, table, clock_ranges);
-		smu->watermarks_bitmap |= WATERMARKS_EXIST;
-		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
-	}
+	if (smu->ppt_funcs->get_fan_speed_rpm)
+		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-const struct amd_ip_funcs smu_ip_funcs = {
-	.name = "smu",
-	.early_init = smu_early_init,
-	.late_init = smu_late_init,
-	.sw_init = smu_sw_init,
-	.sw_fini = smu_sw_fini,
-	.hw_init = smu_hw_init,
-	.hw_fini = smu_hw_fini,
-	.suspend = smu_suspend,
-	.resume = smu_resume,
-	.is_idle = NULL,
-	.check_soft_reset = NULL,
-	.wait_for_idle = NULL,
-	.soft_reset = NULL,
-	.set_clockgating_state = smu_set_clockgating_state,
-	.set_powergating_state = smu_set_powergating_state,
-	.enable_umd_pstate = smu_enable_umd_pstate,
-};
-
-const struct amdgpu_ip_block_version smu_v11_0_ip_block =
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 {
-	.type = AMD_IP_BLOCK_TYPE_SMC,
-	.major = 11,
-	.minor = 0,
-	.rev = 0,
-	.funcs = &smu_ip_funcs,
-};
+	int ret = 0;
 
-const struct amdgpu_ip_block_version smu_v12_0_ip_block =
-{
-	.type = AMD_IP_BLOCK_TYPE_SMC,
-	.major = 12,
-	.minor = 0,
-	.rev = 0,
-	.funcs = &smu_ip_funcs,
-};
+	mutex_lock(&smu->mutex);
+	ret = smu_set_min_dcef_deep_sleep(smu, clk);
+	mutex_unlock(&smu->mutex);
 
-int smu_load_microcode(struct smu_context *smu)
+	return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->load_microcode)
-		ret = smu->ppt_funcs->load_microcode(smu);
+	if (smu->ppt_funcs->set_active_display_count)
+		ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_check_fw_status(struct smu_context *smu)
+int smu_get_clock_by_type(struct smu_context *smu,
+			  enum amd_pp_clock_type type,
+			  struct amd_pp_clocks *clocks)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->check_fw_status)
-		ret = smu->ppt_funcs->check_fw_status(smu);
+	if (smu->ppt_funcs->get_clock_by_type)
+		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+int smu_get_max_high_clocks(struct smu_context *smu,
+			    struct amd_pp_simple_clock_info *clocks)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_gfx_cgpg)
-		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+	if (smu->ppt_funcs->get_max_high_clocks)
+		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+				       enum smu_clk_type clk_type,
+				       struct pp_clock_levels_with_latency *clocks)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_speed_rpm)
-		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+	if (smu->ppt_funcs->get_clock_by_type_with_latency)
+		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_power_limit(struct smu_context *smu,
-			uint32_t *limit,
-			bool def,
-			bool lock_needed)
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+				       enum amd_pp_clock_type type,
+				       struct pp_clock_levels_with_voltage *clocks)
 {
 	int ret = 0;
 
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_power_limit)
-		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
 
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+				      struct pp_display_clock_request *clock_req)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_power_limit)
-		ret = smu->ppt_funcs->set_power_limit(smu, limit);
+	if (smu->ppt_funcs->display_clock_voltage_request)
+		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->print_clk_levels)
-		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+	if (smu->ppt_funcs->display_disable_memory_clock_switch)
+		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_od_percentage)
-		ret = smu->ppt_funcs->get_od_percentage(smu, type);
+	if (smu->ppt_funcs->notify_smu_enable_pwe)
+		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+int smu_set_xgmi_pstate(struct smu_context *smu,
+			uint32_t pstate)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_od_percentage)
-		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+	if (smu->ppt_funcs->set_xgmi_pstate)
+		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_od_edit_dpm_table(struct smu_context *smu,
-			  enum PP_OD_DPM_TABLE_COMMAND type,
-			  long *input, uint32_t size)
+int smu_set_azalia_d3_pme(struct smu_context *smu)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->od_edit_dpm_table)
-		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+	if (smu->ppt_funcs->set_azalia_d3_pme)
+		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_read_sensor(struct smu_context *smu,
-		    enum amd_pp_sensors sensor,
-		    void *data, uint32_t *size)
+bool smu_baco_is_support(struct smu_context *smu)
 {
-	int ret = 0;
+	bool ret = false;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->read_sensor)
-		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+	if (smu->ppt_funcs->baco_is_support)
+		ret = smu->ppt_funcs->baco_is_support(smu);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+int smu_baco_reset(struct smu_context *smu)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_power_profile_mode)
-		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+	if (smu->ppt_funcs->baco_reset)
+		ret = smu->ppt_funcs->baco_reset(smu);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_power_profile_mode(struct smu_context *smu,
-			       long *param,
-			       uint32_t param_size,
-			       bool lock_needed)
+int smu_mode2_reset(struct smu_context *smu)
 {
 	int ret = 0;
 
-	if (lock_needed)
-		mutex_lock(&smu->mutex);
+	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_power_profile_mode)
-		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+	if (smu->ppt_funcs->mode2_reset)
+		ret = smu->ppt_funcs->mode2_reset(smu);
 
-	if (lock_needed)
-		mutex_unlock(&smu->mutex);
+	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-
-int smu_get_fan_control_mode(struct smu_context *smu)
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+					 struct pp_smu_nv_clock_table *max_clocks)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_fan_control_mode)
-		ret = smu->ppt_funcs->get_fan_control_mode(smu);
+	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+			    unsigned int *clock_values_in_khz,
+			    unsigned int *num_states)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_control_mode)
-		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+	if (smu->ppt_funcs->get_uclk_dpm_states)
+		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 {
-	int ret = 0;
+	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_fan_speed_percent)
-		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+	if (smu->ppt_funcs->get_current_power_state)
+		pm_state = smu->ppt_funcs->get_current_power_state(smu);
 
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return pm_state;
 }
 
-int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+int smu_get_dpm_clock_table(struct smu_context *smu,
+			    struct dpm_clocks *clock_table)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_fan_speed_percent)
-		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+	if (smu->ppt_funcs->get_dpm_clock_table)
+		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
-	int ret = 0;
+	size_t size = 0;
+	int ret = 0, i = 0;
+	uint32_t feature_mask[2] = { 0 };
+	int32_t feature_index = 0;
+	uint32_t count = 0;
+	uint32_t sort_feature[SMU_FEATURE_COUNT];
+	uint64_t hw_feature_count = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_fan_speed_rpm)
-		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	if (ret)
+		goto failed;
+
+	size =  sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
+			feature_mask[1], feature_mask[0]);
+
+	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+		feature_index = smu_feature_get_index(smu, i);
+		if (feature_index < 0)
+			continue;
+		sort_feature[feature_index] = i;
+		hw_feature_count++;
+	}
+
+	for (i = 0; i < hw_feature_count; i++) {
+		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
+			       count++,
+			       smu_get_feature_name(smu, sort_feature[i]),
+			       i,
+			       !!smu_feature_is_enabled(smu, sort_feature[i]) ?
+			       "enabled" : "disabled");
+	}
 
+failed:
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return size;
 }
 
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
 {
 	int ret = 0;
+	uint32_t feature_mask[2] = { 0 };
+	uint64_t feature_2_enabled = 0;
+	uint64_t feature_2_disabled = 0;
+	uint64_t feature_enables = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
-		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+	if (ret)
+		goto out;
+
+	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
+
+	feature_2_enabled  = ~feature_enables & new_mask;
+	feature_2_disabled = feature_enables & ~new_mask;
+
+	if (feature_2_enabled) {
+		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
+		if (ret)
+			goto out;
+	}
+	if (feature_2_disabled) {
+		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
+		if (ret)
+			goto out;
+	}
 
+out:
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
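
For illustration, the bitwise bookkeeping above splits the requested mask into
the features that still need enabling and those that need disabling. A minimal
worked example with hypothetical mask values (not part of the patch):

	/* currently enabled: bits 4-7; requested: bits 2, 3, 6, 7 */
	uint64_t feature_enables = 0xf0ULL;
	uint64_t new_mask        = 0xccULL;

	uint64_t feature_2_enabled  = ~feature_enables & new_mask;  /* 0x0c: turn on bits 2, 3 */
	uint64_t feature_2_disabled = feature_enables & ~new_mask;  /* 0x30: turn off bits 4, 5 */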
 
-int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+			   bool gate)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->set_active_display_count)
-		ret = smu->ppt_funcs->set_active_display_count(smu, count);
+	switch (block_type) {
+	case AMD_IP_BLOCK_TYPE_UVD:
+		ret = smu_dpm_set_uvd_enable(smu, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_VCE:
+		ret = smu_dpm_set_vce_enable(smu, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_GFX:
+		ret = smu_gfx_off_control(smu, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_SDMA:
+		ret = smu_powergate_sdma(smu, gate);
+		break;
+	default:
+		break;
+	}
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
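
As a usage sketch, a caller gating UVD off when the last decode session ends
might look like this (illustrative call site, not taken from this patch):

	ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, true);
	if (ret)
		pr_err("failed to power gate UVD\n");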
 
-int smu_get_clock_by_type(struct smu_context *smu,
-			  enum amd_pp_clock_type type,
-			  struct amd_pp_clocks *clocks)
+int smu_get_power_num_states(struct smu_context *smu,
+			     struct pp_states_info *state_info)
 {
-	int ret = 0;
+	if (!state_info)
+		return -EINVAL;
 
-	mutex_lock(&smu->mutex);
+	/* power states are not supported; report a single default state */
+	memset(state_info, 0, sizeof(struct pp_states_info));
+	state_info->nums = 1;
+	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 
-	if (smu->ppt_funcs->get_clock_by_type)
-		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+	return 0;
+}
 
-	mutex_unlock(&smu->mutex);
+bool is_support_sw_smu(struct amdgpu_device *adev)
+{
+	if (adev->asic_type == CHIP_VEGA20)
+		return (amdgpu_dpm == 2) ? true : false;
+	else if (adev->asic_type >= CHIP_ARCTURUS)
+		return true;
+	else
+		return false;
+}
 
-	return ret;
+bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
+{
+	if (amdgpu_dpm != 1)
+		return false;
+
+	if (adev->asic_type == CHIP_VEGA20)
+		return true;
+
+	return false;
 }
 
-int smu_get_max_high_clocks(struct smu_context *smu,
-			    struct amd_pp_simple_clock_info *clocks)
+int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
-	int ret = 0;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	uint32_t powerplay_table_size;
+
+	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_max_high_clocks)
-		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+	if (smu_table->hardcode_pptable)
+		*table = smu_table->hardcode_pptable;
+	else
+		*table = smu_table->power_play_table;
+
+	powerplay_table_size = smu_table->power_play_table_size;
 
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return powerplay_table_size;
 }
 
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-				       enum smu_clk_type clk_type,
-				       struct pp_clock_levels_with_latency *clocks)
+static int smu_reset(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->get_clock_by_type_with_latency)
-		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+	ret = smu_hw_fini(adev);
+	if (ret)
+		return ret;
 
-	mutex_unlock(&smu->mutex);
+	ret = smu_hw_init(adev);
+	if (ret)
+		return ret;
 
 	return ret;
 }
 
-int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
-				       enum amd_pp_clock_type type,
-				       struct pp_clock_levels_with_voltage *clocks)
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
 {
+	struct smu_table_context *smu_table = &smu->smu_table;
+	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
 	int ret = 0;
 
+	if (!smu->pm_enabled)
+		return -EINVAL;
+	if (header->usStructureSize != size) {
+		pr_err("pp table size mismatch!\n");
+		return -EIO;
+	}
+
 	mutex_lock(&smu->mutex);
+	if (!smu_table->hardcode_pptable)
+		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+	if (!smu_table->hardcode_pptable) {
+		ret = -ENOMEM;
+		goto failed;
+	}
 
-	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
-		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+	memcpy(smu_table->hardcode_pptable, buf, size);
+	smu_table->power_play_table = smu_table->hardcode_pptable;
+	smu_table->power_play_table_size = size;
 
-	mutex_unlock(&smu->mutex);
+	ret = smu_reset(smu);
+	if (ret)
+		pr_info("smu reset failed, ret = %d\n", ret);
 
+failed:
+	mutex_unlock(&smu->mutex);
 	return ret;
 }
 
-
-int smu_display_clock_voltage_request(struct smu_context *smu,
-				      struct pp_display_clock_request *clock_req)
+int smu_display_configuration_change(struct smu_context *smu,
+				     const struct amd_pp_display_configuration *display_config)
 {
-	int ret = 0;
+	int index = 0;
+	int num_of_active_display = 0;
+
+	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+		return -EINVAL;
+
+	if (!display_config)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->display_clock_voltage_request)
-		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+	smu_set_min_dcef_deep_sleep(smu,
+				display_config->min_dcef_deep_sleep_set_clk / 100);
+
+	for (index = 0; index < display_config->num_path_including_non_display; index++) {
+		if (display_config->displays[index].controller_id != 0)
+			num_of_active_display++;
+	}
+
+	smu_set_active_display_count(smu, num_of_active_display);
+
+	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
+			   display_config->cpu_cc6_disable,
+			   display_config->cpu_pstate_disable,
+			   display_config->nb_pstate_switch_disable);
 
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return 0;
 }
 
-
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_get_clock_info(struct smu_context *smu,
+			      struct smu_clock_info *clk_info,
+			      enum smu_perf_level_designation designation)
 {
-	int ret = -EINVAL;
+	int ret;
+	struct smu_performance_level level = {0};
 
-	mutex_lock(&smu->mutex);
+	if (!clk_info)
+		return -EINVAL;
 
-	if (smu->ppt_funcs->display_disable_memory_clock_switch)
-		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
+	if (ret)
+		return -EINVAL;
 
-	mutex_unlock(&smu->mutex);
+	clk_info->min_mem_clk = level.memory_clock;
+	clk_info->min_eng_clk = level.core_clock;
+	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
 
-	return ret;
+	ret = smu_get_perf_level(smu, designation, &level);
+	if (ret)
+		return -EINVAL;
+
+	clk_info->min_mem_clk = level.memory_clock;
+	clk_info->min_eng_clk = level.core_clock;
+	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+	return 0;
 }
 
-int smu_notify_smu_enable_pwe(struct smu_context *smu)
+int smu_get_current_clocks(struct smu_context *smu,
+			   struct amd_pp_clock_info *clocks)
 {
+	struct amd_pp_simple_clock_info simple_clocks = {0};
+	struct smu_clock_info hw_clocks;
 	int ret = 0;
 
+	if (!is_support_sw_smu(smu->adev))
+		return -EINVAL;
+
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->notify_smu_enable_pwe)
-		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+	smu_get_dal_power_level(smu, &simple_clocks);
 
-	mutex_unlock(&smu->mutex);
+	if (smu->support_power_containment)
+		ret = smu_get_clock_info(smu, &hw_clocks,
+					 PERF_LEVEL_POWER_CONTAINMENT);
+	else
+		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
 
-	return ret;
-}
+	if (ret) {
+		pr_err("Error in smu_get_clock_info\n");
+		goto failed;
+	}
 
-int smu_set_xgmi_pstate(struct smu_context *smu,
-			uint32_t pstate)
-{
-	int ret = 0;
+	clocks->min_engine_clock = hw_clocks.min_eng_clk;
+	clocks->max_engine_clock = hw_clocks.max_eng_clk;
+	clocks->min_memory_clock = hw_clocks.min_mem_clk;
+	clocks->max_memory_clock = hw_clocks.max_mem_clk;
+	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 
-	mutex_lock(&smu->mutex);
+	if (simple_clocks.level == 0)
+		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+	else
+		clocks->max_clocks_state = simple_clocks.level;
 
-	if (smu->ppt_funcs->set_xgmi_pstate)
-		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
+		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+	}
 
+failed:
 	mutex_unlock(&smu->mutex);
-
 	return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
+int smu_handle_task(struct smu_context *smu,
+		    enum amd_dpm_forced_level level,
+		    enum amd_pp_task task_id)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->set_azalia_d3_pme)
-		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
+	ret = smu_cmn_handle_task(smu, level, task_id);
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-bool smu_baco_is_support(struct smu_context *smu)
+int smu_switch_power_profile(struct smu_context *smu,
+			     enum PP_SMC_POWER_PROFILE type,
+			     bool en)
 {
-	bool ret = false;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+	long workload;
+	uint32_t index;
+
+	if (!smu->pm_enabled)
+		return -EINVAL;
+
+	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
+		return -EINVAL;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_is_support)
-		ret = smu->ppt_funcs->baco_is_support(smu);
+	if (!en) {
+		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+		index = fls(smu->workload_mask);
+		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+		workload = smu->workload_setting[index];
+	} else {
+		smu->workload_mask |= (1 << smu->workload_prority[type]);
+		index = fls(smu->workload_mask);
+		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+		workload = smu->workload_setting[index];
+	}
+
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		smu_cmn_set_power_profile_mode(smu, &workload, 0);
 
 	mutex_unlock(&smu->mutex);
 
-	return ret;
+	return 0;
 }
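
The fls()-based selection above keeps the highest-priority workload that is
still requested. A worked example with a hypothetical mask:

	uint32_t workload_mask = 0x9;		/* bits 0 and 3 set */
	uint32_t index = fls(workload_mask);	/* fls(0x9) == 4 */
	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
	/* index == 3: the workload with the highest set priority bit wins */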
 
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
-	if (smu->ppt_funcs->baco_get_state)
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+	enum amd_dpm_forced_level level;
+
+	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
 		return -EINVAL;
 
-	mutex_lock(&smu->mutex);
-	*state = smu->ppt_funcs->baco_get_state(smu);
-	mutex_unlock(&smu->mutex);
+	mutex_lock(&smu->mutex);
+	level = smu_dpm_ctx->dpm_level;
+	mutex_unlock(&smu->mutex);
 
-	return 0;
+	return level;
 }
 
-int smu_baco_reset(struct smu_context *smu)
+int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 	int ret = 0;
 
+	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
+		return -EINVAL;
+
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->baco_reset)
-		ret = smu->ppt_funcs->baco_reset(smu);
+	ret = smu_enable_umd_pstate(smu, &level);
+	if (ret) {
+		mutex_unlock(&smu->mutex);
+		return ret;
+	}
+
+	ret = smu_cmn_handle_task(smu, level,
+				  AMD_PP_TASK_READJUST_POWER_STATE);
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_mode2_reset(struct smu_context *smu)
+int smu_set_display_count(struct smu_context *smu, uint32_t count)
 {
 	int ret = 0;
 
 	mutex_lock(&smu->mutex);
+	ret = smu_init_display_count(smu, count);
+	mutex_unlock(&smu->mutex);
 
-	if (smu->ppt_funcs->mode2_reset)
-		ret = smu->ppt_funcs->mode2_reset(smu);
+	return ret;
+}
+
+int smu_force_clk_levels(struct smu_context *smu,
+			 enum smu_clk_type clk_type,
+			 uint32_t mask)
+{
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+	int ret = 0;
 
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		pr_debug("force clock level is for dpm manual mode only.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&smu->mutex);
+	ret = smu_cmn_force_clk_levels(smu, clk_type, mask);
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-					 struct pp_smu_nv_clock_table *max_clocks)
+int smu_set_mp1_state(struct smu_context *smu,
+		      enum pp_mp1_state mp1_state)
 {
-	int ret = 0;
+	uint16_t msg;
+	int ret;
+
+	/*
+	 * The SMC is not fully ready. That may be
+	 * expected as the IP may be masked.
+	 * So, just return without error.
+	 */
+	if (!smu->pm_enabled)
+		return 0;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
-		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+	switch (mp1_state) {
+	case PP_MP1_STATE_SHUTDOWN:
+		msg = SMU_MSG_PrepareMp1ForShutdown;
+		break;
+	case PP_MP1_STATE_UNLOAD:
+		msg = SMU_MSG_PrepareMp1ForUnload;
+		break;
+	case PP_MP1_STATE_RESET:
+		msg = SMU_MSG_PrepareMp1ForReset;
+		break;
+	case PP_MP1_STATE_NONE:
+	default:
+		mutex_unlock(&smu->mutex);
+		return 0;
+	}
+
+	/* some asics may not support those messages */
+	if (smu_msg_get_index(smu, msg) < 0) {
+		mutex_unlock(&smu->mutex);
+		return 0;
+	}
+
+	ret = smu_send_smc_msg(smu, msg);
+	if (ret)
+		pr_err("[PrepareMp1] Failed!\n");
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
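
A minimal sketch of a call site, assuming the usual teardown path
(hypothetical, not from this patch):

	/* ask MP1 to quiesce before the driver is unloaded */
	ret = smu_set_mp1_state(&adev->smu, PP_MP1_STATE_UNLOAD);
	if (ret)
		dev_warn(adev->dev, "failed to prepare MP1 for unload\n");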
 
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-			    unsigned int *clock_values_in_khz,
-			    unsigned int *num_states)
+int smu_set_df_cstate(struct smu_context *smu,
+		      enum pp_df_cstate state)
 {
 	int ret = 0;
 
+	/*
+	 * The SMC is not fully ready. That may be
+	 * expected as the IP may be masked.
+	 * So, just return without error.
+	 */
+	if (!smu->pm_enabled)
+		return 0;
+
+	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+		return 0;
+
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_uclk_dpm_states)
-		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+	ret = smu->ppt_funcs->set_df_cstate(smu, state);
+	if (ret)
+		pr_err("[SetDfCstate] failed!\n");
 
 	mutex_unlock(&smu->mutex);
 
 	return ret;
 }
 
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+	int ret = 0;
+	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+	void *table = watermarks->cpu_addr;
 
 	mutex_lock(&smu->mutex);
 
-	if (smu->ppt_funcs->get_current_power_state)
-		pm_state = smu->ppt_funcs->get_current_power_state(smu);
+	if (!smu->disable_watermark &&
+			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+		smu_set_watermarks_table(smu, table, clock_ranges);
+		smu->watermarks_bitmap |= WATERMARKS_EXIST;
+		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+	}
 
 	mutex_unlock(&smu->mutex);
 
-	return pm_state;
+	return ret;
 }
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-			    struct dpm_clocks *clock_table)
+int smu_get_dpm_freq_range(struct smu_context *smu,
+			   enum smu_clk_type clk_type,
+			   uint32_t *min,
+			   uint32_t *max)
 {
 	int ret = 0;
 
-	mutex_lock(&smu->mutex);
-
-	if (smu->ppt_funcs->get_dpm_clock_table)
-		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+	if (!min && !max)
+		return -EINVAL;
 
+	mutex_lock(&smu->mutex);
+	ret = smu_cmn_get_dpm_freq_range(smu, clk_type, min, max);
 	mutex_unlock(&smu->mutex);
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 5228ed2e28c2..7196634691d1 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -338,6 +338,33 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
 	return 0;
 }
 
+static int arcturus_init_smc_tables(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = NULL;
+	int ret = 0;
+
+	if (smu_table->tables)
+		return -EINVAL;
+
+	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+			 GFP_KERNEL);
+	if (!tables)
+		return -ENOMEM;
+
+	smu_table->tables = tables;
+
+	ret = arcturus_tables_init(smu, tables);
+	if (ret)
+		return ret;
+
+	ret = arcturus_allocate_dpm_context(smu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int
 arcturus_get_allowed_feature_mask(struct smu_context *smu,
 				  uint32_t *feature_mask, uint32_t num)
@@ -529,6 +556,30 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
+static int arcturus_parse_pptable(struct smu_context *smu)
+{
+	int ret;
+
+	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+
+	if (table_context->driver_pptable)
+		return -EINVAL;
+
+	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
+
+	if (!table_context->driver_pptable)
+		return -ENOMEM;
+
+	ret = arcturus_store_powerplay_table(smu);
+	if (ret)
+		return -EINVAL;
+
+	ret = arcturus_append_powerplay_table(smu);
+
+	return ret;
+}
+
 static int arcturus_run_btc(struct smu_context *smu)
 {
 	int ret = 0;
@@ -1851,19 +1902,11 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_smu_table_index = arcturus_get_smu_table_index,
 	.get_smu_power_index= arcturus_get_pwr_src_index,
 	.get_workload_type = arcturus_get_workload_type,
-	/* internal structurs allocations */
-	.tables_init = arcturus_tables_init,
-	.alloc_dpm_context = arcturus_allocate_dpm_context,
-	/* pptable related */
-	.check_powerplay_table = arcturus_check_powerplay_table,
-	.store_powerplay_table = arcturus_store_powerplay_table,
-	.append_powerplay_table = arcturus_append_powerplay_table,
 	/* init dpm */
 	.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
 	/* btc */
 	.run_btc = arcturus_run_btc,
 	/* dpm/clk tables */
-	.set_default_dpm_table = arcturus_set_default_dpm_table,
 	.populate_umd_state_clk = arcturus_populate_umd_state_clk,
 	.get_thermal_temperature_range = arcturus_get_thermal_temperature_range,
 	.get_current_clk_freq_by_table = arcturus_get_current_clk_freq_by_table,
@@ -1884,7 +1927,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
-	.init_smc_tables = smu_v11_0_init_smc_tables,
+	.init_smc_tables = arcturus_init_smc_tables,
 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
 	.init_power = smu_v11_0_init_power,
 	.fini_power = smu_v11_0_fini_power,
@@ -1892,9 +1935,9 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.setup_pptable = smu_v11_0_setup_pptable,
 	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
-	.check_pptable = smu_v11_0_check_pptable,
-	.parse_pptable = smu_v11_0_parse_pptable,
-	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
+	.check_pptable = arcturus_check_powerplay_table,
+	.parse_pptable = arcturus_parse_pptable,
+	.populate_smc_tables = arcturus_set_default_dpm_table,
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
@@ -1913,7 +1956,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
 	.start_thermal_control = smu_v11_0_start_thermal_control,
 	.stop_thermal_control = smu_v11_0_stop_thermal_control,
-	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
 	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
@@ -1925,12 +1967,10 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
 	.baco_is_support= smu_v11_0_baco_is_support,
-	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
 	.baco_reset = smu_v11_0_baco_reset,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
-	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 402a021f237b..650b5047ad3d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -394,10 +394,6 @@ struct smu_context
 };
 
 struct pptable_funcs {
-	int (*alloc_dpm_context)(struct smu_context *smu);
-	int (*store_powerplay_table)(struct smu_context *smu);
-	int (*check_powerplay_table)(struct smu_context *smu);
-	int (*append_powerplay_table)(struct smu_context *smu);
 	int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
 	int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index);
 	int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index);
@@ -407,7 +403,6 @@ struct pptable_funcs {
 	int (*run_btc)(struct smu_context *smu);
 	int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
 	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
-	int (*set_default_dpm_table)(struct smu_context *smu);
 	int (*set_power_state)(struct smu_context *smu);
 	int (*populate_umd_state_clk)(struct smu_context *smu);
 	int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
@@ -449,7 +444,6 @@ struct pptable_funcs {
 				      uint32_t *soc_mask);
 	int (*set_cpu_power_state)(struct smu_context *smu);
 	bool (*is_dpm_running)(struct smu_context *smu);
-	int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
 	int (*set_thermal_fan_table)(struct smu_context *smu);
 	int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
 	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
@@ -488,7 +482,7 @@ struct pptable_funcs {
 	int (*powergate_vcn)(struct smu_context *smu, bool gate);
 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
 	int (*write_pptable)(struct smu_context *smu);
-	int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
 	int (*set_tool_table_location)(struct smu_context *smu);
 	int (*notify_memory_pool_location)(struct smu_context *smu);
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
@@ -505,7 +499,6 @@ struct pptable_funcs {
 	int (*init_max_sustainable_clocks)(struct smu_context *smu);
 	int (*start_thermal_control)(struct smu_context *smu);
 	int (*stop_thermal_control)(struct smu_context *smu);
-	int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
 	int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
 	int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
 			      bool cc6_disable, bool pstate_disable,
@@ -537,27 +530,22 @@ struct pptable_funcs {
 	int (*set_azalia_d3_pme)(struct smu_context *smu);
 	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
 	bool (*baco_is_support)(struct smu_context *smu);
-	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
 	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
 	int (*baco_reset)(struct smu_context *smu);
 	int (*mode2_reset)(struct smu_context *smu);
 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
-	int (*override_pcie_parameters)(struct smu_context *smu);
 };
 
 int smu_load_microcode(struct smu_context *smu);
 
 int smu_check_fw_status(struct smu_context *smu);
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
 
 int smu_get_power_limit(struct smu_context *smu,
 			uint32_t *limit,
-			bool def,
-			bool lock_needed);
+			bool def);
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
@@ -575,8 +563,7 @@ int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
 
 int smu_set_power_profile_mode(struct smu_context *smu,
 			       long *param,
-			       uint32_t param_size,
-			       bool lock_needed);
+			       uint32_t param_size);
 int smu_get_fan_control_mode(struct smu_context *smu);
 int smu_set_fan_control_mode(struct smu_context *smu, int value);
 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
@@ -619,80 +606,38 @@ int smu_baco_reset(struct smu_context *smu);
 
 int smu_mode2_reset(struct smu_context *smu);
 
-extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
-				   uint16_t *size, uint8_t *frev, uint8_t *crev,
-				   uint8_t **addr);
-
-extern const struct amd_ip_funcs smu_ip_funcs;
-
-extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-
-extern int smu_feature_init_dpm(struct smu_context *smu);
-
-extern int smu_feature_is_enabled(struct smu_context *smu,
-				  enum smu_feature_mask mask);
-extern int smu_feature_set_enabled(struct smu_context *smu,
-				   enum smu_feature_mask mask, bool enable);
-extern int smu_feature_is_supported(struct smu_context *smu,
-				    enum smu_feature_mask mask);
-extern int smu_feature_set_supported(struct smu_context *smu,
-				     enum smu_feature_mask mask, bool enable);
-
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
-		     void *table_data, bool drv2smu);
-
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev);
-int smu_reset(struct smu_context *smu);
-int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
-			   void *data, uint32_t *size);
 int smu_sys_get_pp_table(struct smu_context *smu, void **table);
 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size);
 int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
-int smu_write_watermarks_table(struct smu_context *smu);
 int smu_set_watermarks_for_clock_ranges(
 		struct smu_context *smu,
 		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
 
 /* smu to display interface */
-extern int smu_display_configuration_change(struct smu_context *smu, const
+int smu_display_configuration_change(struct smu_context *smu, const
 					    struct amd_pp_display_configuration
 					    *display_config);
-extern int smu_get_current_clocks(struct smu_context *smu,
+int smu_get_current_clocks(struct smu_context *smu,
 				  struct amd_pp_clock_info *clocks);
-extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
-extern int smu_handle_task(struct smu_context *smu,
+int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate);
+int smu_handle_task(struct smu_context *smu,
 			   enum amd_dpm_forced_level level,
-			   enum amd_pp_task task_id,
-			   bool lock_needed);
+			   enum amd_pp_task task_id);
 int smu_switch_power_profile(struct smu_context *smu,
 			     enum PP_SMC_POWER_PROFILE type,
 			     bool en);
-int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
-int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
-			      uint16_t level, uint32_t *value);
-int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t *value);
-int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			   uint32_t *min, uint32_t *max, bool lock_needed);
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max);
-int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max);
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
-bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
-const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
 int smu_force_clk_levels(struct smu_context *smu,
 			 enum smu_clk_type clk_type,
-			 uint32_t mask,
-			 bool lock_needed);
+			 uint32_t mask);
 int smu_set_mp1_state(struct smu_context *smu,
 		      enum pp_mp1_state mp1_state);
 int smu_set_df_cstate(struct smu_context *smu,
@@ -708,4 +653,13 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 int smu_get_dpm_clock_table(struct smu_context *smu,
 			    struct dpm_clocks *clock_table);
 
+int smu_get_dpm_freq_range(struct smu_context *smu,
+			   enum smu_clk_type clk_type,
+			   uint32_t *min,
+			   uint32_t *max);
+
+extern const struct amd_ip_funcs smu_ip_funcs;
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 88ee66683271..184fb7c9d8dc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -135,8 +135,6 @@ int smu_v11_0_init_microcode(struct smu_context *smu);
 
 int smu_v11_0_load_microcode(struct smu_context *smu);
 
-int smu_v11_0_init_smc_tables(struct smu_context *smu);
-
 int smu_v11_0_fini_smc_tables(struct smu_context *smu);
 
 int smu_v11_0_init_power(struct smu_context *smu);
@@ -151,17 +149,11 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
 
 int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
 
-int smu_v11_0_check_pptable(struct smu_context *smu);
-
-int smu_v11_0_parse_pptable(struct smu_context *smu);
-
-int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
-
 int smu_v11_0_check_fw_version(struct smu_context *smu);
 
 int smu_v11_0_write_pptable(struct smu_context *smu);
 
-int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu, uint32_t clk);
 
 int smu_v11_0_set_tool_table_location(struct smu_context *smu);
 
@@ -203,8 +195,6 @@ int smu_v11_0_read_sensor(struct smu_context *smu,
 				 enum amd_pp_sensors sensor,
 				 void *data, uint32_t *size);
 
-int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
 int
 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
 					struct pp_display_clock_request
@@ -237,8 +227,6 @@ int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
 
 bool smu_v11_0_baco_is_support(struct smu_context *smu);
 
-enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
-
 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
 
 int smu_v11_0_baco_reset(struct smu_context *smu);
@@ -249,6 +237,6 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
 
-int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
-
+int smu_v11_0_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t min, uint32_t max);
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 9b9f5df0911c..68a96550e534 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -60,12 +60,8 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
 
-uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
-
 int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
 
-int smu_v12_0_init_smc_tables(struct smu_context *smu);
-
 int smu_v12_0_fini_smc_tables(struct smu_context *smu);
 
 int smu_v12_0_populate_smc_tables(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 34390656a03e..1121d064176c 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -528,6 +528,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
+static int navi10_parse_pptable(struct smu_context *smu)
+{
+	int ret;
+
+	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+
+	if (table_context->driver_pptable)
+		return -EINVAL;
+
+	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
+
+	if (!table_context->driver_pptable)
+		return -ENOMEM;
+
+	ret = navi10_store_powerplay_table(smu);
+	if (ret)
+		return -EINVAL;
+
+	ret = navi10_append_powerplay_table(smu);
+
+	return ret;
+}
+
 static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -592,6 +616,33 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
 	return 0;
 }
 
+static int navi10_init_smc_tables(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = NULL;
+	int ret = 0;
+
+	if (smu_table->tables)
+		return -EINVAL;
+
+	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+			 GFP_KERNEL);
+	if (!tables)
+		return -ENOMEM;
+
+	smu_table->tables = tables;
+
+	ret = navi10_tables_init(smu, tables);
+	if (ret)
+		return ret;
+
+	ret = navi10_allocate_dpm_context(smu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int navi10_set_default_dpm_table(struct smu_context *smu)
 {
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -690,6 +741,46 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
 	return dpm_desc->SnapToDiscrete == 0 ? true : false;
 }
 
+static int navi10_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
+			      uint16_t level, uint32_t *value)
+{
+	int ret = 0, clk_id = 0;
+	uint32_t param;
+
+	if (!value)
+		return -EINVAL;
+
+	if (!smu_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+					  param);
+	if (ret)
+		return ret;
+
+	ret = smu_read_smc_arg(smu, &param);
+	if (ret)
+		return ret;
+
+	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM.
+	 * Discrete DPM is not supported yet, so mask off BIT31. */
+	*value = param & 0x7fffffff;
+
+	return ret;
+}
+
+static int navi10_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t *value)
+{
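+	/* level 0xff asks the SMC for the number of DPM levels rather than a frequency */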
+	return navi10_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+}
+
 static int navi10_print_clk_levels(struct smu_context *smu,
 			enum smu_clk_type clk_type, char *buf)
 {
@@ -713,13 +804,13 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		/* 10KHz -> MHz */
 		cur_value = cur_value / 100;
 
-		ret = smu_get_dpm_level_count(smu, clk_type, &count);
+		ret = navi10_get_dpm_level_count(smu, clk_type, &count);
 		if (ret)
 			return size;
 
 		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
 			for (i = 0; i < count; i++) {
-				ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+				ret = navi10_get_dpm_freq_by_index(smu, clk_type, i, &value);
 				if (ret)
 					return size;
 
@@ -727,10 +818,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 						cur_value == value ? "*" : "");
 			}
 		} else {
-			ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
+			ret = navi10_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
 			if (ret)
 				return size;
-			ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
+			ret = navi10_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
 			if (ret)
 				return size;
 
@@ -772,11 +863,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
-		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+		ret = navi10_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
 			return size;
 
-		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+		ret = navi10_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
 		if (ret)
 			return size;
 
@@ -796,13 +887,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
 	int ret = 0;
 	uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
 
-	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
+	ret = smu_cmn_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
 	if (ret)
 		return ret;
 
 	smu->pstate_sclk = min_sclk_freq * 100;
 
-	ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
+	ret = smu_cmn_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
 	if (ret)
 		return ret;
 
@@ -822,7 +913,7 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
 	case SMU_GFXCLK:
 	case SMU_DCEFCLK:
 	case SMU_SOCCLK:
-		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
+		ret = navi10_get_dpm_level_count(smu, clk_type, &level_count);
 		if (ret)
 			return ret;
 
@@ -830,7 +921,7 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
 		clocks->num_levels = level_count;
 
 		for (i = 0; i < level_count; i++) {
-			ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
+			ret = navi10_get_dpm_freq_by_index(smu, clk_type, i, &freq);
 			if (ret)
 				return ret;
 
@@ -855,10 +946,10 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
 		return ret;
 
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
-		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
 		if (ret)
 			return ret;
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
+		ret = smu_v11_0_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
 		if (ret)
 			return ret;
 	}
@@ -905,7 +996,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
 
 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
 		clk_type = clks[i];
-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
 		if (ret)
 			return ret;
 
@@ -932,7 +1023,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
 
 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
 		clk_type = clks[i];
-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
 		if (ret)
 			return ret;
 
@@ -1230,21 +1321,21 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
 			*mclk_mask = 0;
 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
 		if(sclk_mask) {
-			ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
+			ret = navi10_get_dpm_level_count(smu, SMU_SCLK, &level_count);
 			if (ret)
 				return ret;
 			*sclk_mask = level_count - 1;
 		}
 
 		if(mclk_mask) {
-			ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
+			ret = navi10_get_dpm_level_count(smu, SMU_MCLK, &level_count);
 			if (ret)
 				return ret;
 			*mclk_mask = level_count - 1;
 		}
 
 		if(soc_mask) {
-			ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
+			ret = navi10_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
 			if (ret)
 				return ret;
 			*soc_mask = level_count - 1;
@@ -1285,7 +1376,7 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
 	}
 
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
+		ret = smu_v11_0_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -1509,10 +1600,10 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 		return -EINVAL;
 	}
 
-	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
+	ret = navi10_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
 	if (ret)
 		return ret;
-	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+	ret = navi10_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
 	if (ret)
 		return ret;
 
@@ -1571,9 +1662,9 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
 		return 0;
 
 	if(disable_memory_clock_switch)
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
 	else
-		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
+		ret = smu_v11_0_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
 
 	if(!ret)
 		smu->disable_uclk_switch = disable_memory_clock_switch;
@@ -1654,11 +1745,6 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 
 
 static const struct pptable_funcs navi10_ppt_funcs = {
-	.tables_init = navi10_tables_init,
-	.alloc_dpm_context = navi10_allocate_dpm_context,
-	.store_powerplay_table = navi10_store_powerplay_table,
-	.check_powerplay_table = navi10_check_powerplay_table,
-	.append_powerplay_table = navi10_append_powerplay_table,
 	.get_smu_msg_index = navi10_get_smu_msg_index,
 	.get_smu_clk_index = navi10_get_smu_clk_index,
 	.get_smu_feature_index = navi10_get_smu_feature_index,
@@ -1666,7 +1752,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_smu_power_index= navi10_get_pwr_src_index,
 	.get_workload_type = navi10_get_workload_type,
 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
-	.set_default_dpm_table = navi10_set_default_dpm_table,
 	.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
 	.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
 	.print_clk_levels = navi10_print_clk_levels,
@@ -1694,7 +1779,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.update_pcie_parameters = navi10_update_pcie_parameters,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
-	.init_smc_tables = smu_v11_0_init_smc_tables,
+	.init_smc_tables = navi10_init_smc_tables,
 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
 	.init_power = smu_v11_0_init_power,
 	.fini_power = smu_v11_0_fini_power,
@@ -1702,9 +1787,9 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.setup_pptable = smu_v11_0_setup_pptable,
 	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
-	.check_pptable = smu_v11_0_check_pptable,
-	.parse_pptable = smu_v11_0_parse_pptable,
-	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
+	.check_pptable = navi10_check_powerplay_table,
+	.parse_pptable = navi10_parse_pptable,
+	.populate_smc_tables = navi10_set_default_dpm_table,
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
@@ -1723,7 +1808,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
 	.start_thermal_control = smu_v11_0_start_thermal_control,
 	.stop_thermal_control = smu_v11_0_stop_thermal_control,
-	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
 	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
@@ -1735,12 +1819,10 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
 	.baco_is_support= smu_v11_0_baco_is_support,
-	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
 	.baco_reset = smu_v11_0_baco_reset,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
-	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 45c5f54e60d8..6392f888669c 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -157,6 +157,24 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 	return 0;
 }
 
+static int renoir_init_smc_tables(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = NULL;
+
+	if (smu_table->tables)
+		return -EINVAL;
+
+	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+			 GFP_KERNEL);
+	if (!tables)
+		return -ENOMEM;
+
+	smu_table->tables = tables;
+
+	return renoir_tables_init(smu, tables);
+}
+
 /**
 * This interface is just for getting the uclk ultimate freq and should not
 * introduce other similar functions, which would result in too many callbacks.
@@ -195,7 +213,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_SCLK:
 		/* the parameters returned by the retrieved table are in MHz */
 		cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
-		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
 		if (!ret) {
 			/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
 			if (cur_value  == max)
@@ -313,7 +331,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 
 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
 		clk_type = clks[i];
-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
 		if (ret)
 			return ret;
 
@@ -347,7 +365,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
 
 		clk_type = clk_feature_map[i].clk_type;
 
-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
 		if (ret)
 			return ret;
 
@@ -468,7 +486,7 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 			return -EINVAL;
 		}
 
-		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+		ret = smu_cmn_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
@@ -544,7 +562,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	int ret = 0;
 	uint32_t sclk_freq = 0, uclk_freq = 0;
 
-	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+	ret = smu_cmn_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq);
 	if (ret)
 		return ret;
 
@@ -552,7 +570,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+	ret = smu_cmn_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq);
 	if (ret)
 		return ret;
 
@@ -675,7 +693,6 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
 static const struct pptable_funcs renoir_ppt_funcs = {
 	.get_smu_msg_index = renoir_get_smu_msg_index,
 	.get_smu_table_index = renoir_get_smu_table_index,
-	.tables_init = renoir_tables_init,
 	.set_power_state = NULL,
 	.get_dpm_clk_limited = renoir_get_dpm_clk_limited,
 	.print_clk_levels = renoir_print_clk_levels,
@@ -700,7 +717,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.read_smc_arg = smu_v12_0_read_arg,
 	.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
 	.gfx_off_control = smu_v12_0_gfx_off_control,
-	.init_smc_tables = smu_v12_0_init_smc_tables,
+	.init_smc_tables = renoir_init_smc_tables,
 	.fini_smc_tables = smu_v12_0_fini_smc_tables,
 	.populate_smc_tables = smu_v12_0_populate_smc_tables,
 	.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.c b/drivers/gpu/drm/amd/powerplay/smu_internal.c
new file mode 100644
index 000000000000..7dd89aa8bf34
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "smu_internal.h"
+#include "atom.h"
+
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+	enum smu_feature_mask feature_id = 0;
+
+	switch (clk_type) {
+	case SMU_MCLK:
+	case SMU_UCLK:
+		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+		break;
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+		break;
+	case SMU_SOCCLK:
+		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+		break;
+	default:
+		return true;
+	}
+
+	if (!smu_feature_is_enabled(smu, feature_id))
+		return false;
+
+	return true;
+}
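
A hedged sketch of how callers use this predicate, mirroring the pattern in
smu_set_soft_freq_range() below:

	/* skip the request when DPM for this clock is not enabled */
	if (!smu_clk_dpm_is_enabled(smu, SMU_UCLK))
		return 0;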
+
+int smu_feature_update_enable_state(struct smu_context *smu,
+				    uint64_t feature_mask,
+				    bool enabled)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	uint32_t feature_low = 0, feature_high = 0;
+	int ret = 0;
+
+	if (!smu->pm_enabled)
+		return ret;
+
+	feature_low = (feature_mask >> 0) & 0xffffffff;
+	feature_high = (feature_mask >> 32) & 0xffffffff;
+
+	if (enabled) {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+						  feature_low);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+						  feature_high);
+		if (ret)
+			return ret;
+	} else {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+						  feature_low);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+						  feature_high);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&feature->mutex);
+	if (enabled)
+		bitmap_or(feature->enabled, feature->enabled,
+				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+	else
+		bitmap_andnot(feature->enabled, feature->enabled,
+				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+	mutex_unlock(&feature->mutex);
+
+	return ret;
+}
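
Feature masks are 64-bit while the SMC messages take 32-bit arguments, hence
the low/high split above. A minimal usage sketch (the feature bit chosen is
hypothetical):

	/* disable a single feature, e.g. bit 40 of the 64-bit mask */
	ret = smu_feature_update_enable_state(smu, 1ULL << 40, false);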
+
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+{
+	int ret = 0;
+
+	if (!if_version && !smu_version)
+		return -EINVAL;
+
+	if (if_version) {
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+		if (ret)
+			return ret;
+
+		ret = smu_read_smc_arg(smu, if_version);
+		if (ret)
+			return ret;
+	}
+
+	if (smu_version) {
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
+		if (ret)
+			return ret;
+
+		ret = smu_read_smc_arg(smu, smu_version);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t min, uint32_t max)
+{
+	int ret = 0;
+
+	if (min == 0 && max == 0)
+		return -EINVAL;
+
+	if (!smu_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
+
+	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+	return ret;
+}
+
+int smu_cmn_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			   uint32_t *min, uint32_t *max)
+{
+	uint32_t clock_limit;
+	int ret = 0;
+
+	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+		switch (clk_type) {
+		case SMU_MCLK:
+		case SMU_UCLK:
+			clock_limit = smu->smu_table.boot_values.uclk;
+			break;
+		case SMU_GFXCLK:
+		case SMU_SCLK:
+			clock_limit = smu->smu_table.boot_values.gfxclk;
+			break;
+		case SMU_SOCCLK:
+			clock_limit = smu->smu_table.boot_values.socclk;
+			break;
+		default:
+			clock_limit = 0;
+			break;
+		}
+
+		/* boot values are in 10 kHz units; convert to MHz */
+		if (min)
+			*min = clock_limit / 100;
+		if (max)
+			*max = clock_limit / 100;
+	} else {
+		/*
+		 * Todo: let each ASIC (ASIC_ppt funcs) control the callbacks exposed to
+		 * the core driver, and keep helpers for the common parts
+		 * (smu_v11_x | smu_v12_x funcs).
+		 */
+		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+	}
+
+	return ret;
+}
+
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
+		     void *table_data, bool drv2smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct amdgpu_device *adev = smu->adev;
+	struct smu_table *table = NULL;
+	int ret = 0;
+	int table_id = smu_table_get_index(smu, table_index);
+
+	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+		return -EINVAL;
+
+	table = &smu_table->tables[table_index];
+
+	if (drv2smu)
+		memcpy(table->cpu_addr, table_data, table->size);
+
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
+					  upper_32_bits(table->mc_address));
+	if (ret)
+		return ret;
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
+					  lower_32_bits(table->mc_address));
+	if (ret)
+		return ret;
+	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
+					  SMU_MSG_TransferTableDram2Smu :
+					  SMU_MSG_TransferTableSmu2Dram,
+					  table_id | ((argument & 0xFFFF) << 16));
+	if (ret)
+		return ret;
+
+	/* flush hdp cache */
+	adev->nbio.funcs->hdp_flush(adev, NULL);
+
+	if (!drv2smu)
+		memcpy(table_data, table->cpu_addr, table->size);
+
+	return ret;
+}
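
The transfer protocol above is: optionally copy the driver-side data into the
GPU-visible buffer, hand the SMC the buffer's DRAM address (high word, then
low word), then request the DMA in the wanted direction. A sketch of a
read-back, assuming wm_table points at a buffer of the table's size:

	/* pull the watermarks table from the SMC into wm_table */
	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, wm_table, false);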
+
+int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
+{
+	struct amdgpu_device *adev = smu->adev;
+	struct smu_feature *feature = &smu->smu_feature;
+	int feature_id;
+	int ret = 0;
+
+	if (adev->flags & AMD_IS_APU)
+		return 1;
+
+	feature_id = smu_feature_get_index(smu, mask);
+	if (feature_id < 0)
+		return 0;
+
+	WARN_ON(feature_id > feature->feature_num);
+
+	mutex_lock(&feature->mutex);
+	ret = test_bit(feature_id, feature->enabled);
+	mutex_unlock(&feature->mutex);
+
+	return ret;
+}
+
+int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
+			    bool enable)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	int feature_id;
+
+	feature_id = smu_feature_get_index(smu, mask);
+	if (feature_id < 0)
+		return -EINVAL;
+
+	WARN_ON(feature_id > feature->feature_num);
+
+	return smu_feature_update_enable_state(smu,
+					       1ULL << feature_id,
+					       enable);
+}
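+
+/*
+ * Illustrative usage (not part of this change): gating UVD DPM flips
+ * the corresponding bit in the firmware feature mask and in the cached
+ * bitmap via smu_feature_update_enable_state():
+ *
+ *	ret = smu_feature_set_enabled(smu, SMU_FEATURE_DPM_UVD_BIT, false);
+ */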
+
+int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	int feature_id;
+	int ret = 0;
+
+	feature_id = smu_feature_get_index(smu, mask);
+	if (feature_id < 0)
+		return 0;
+
+	WARN_ON(feature_id > feature->feature_num);
+
+	mutex_lock(&feature->mutex);
+	ret = test_bit(feature_id, feature->supported);
+	mutex_unlock(&feature->mutex);
+
+	return ret;
+}
+
+int smu_feature_set_supported(struct smu_context *smu,
+			      enum smu_feature_mask mask,
+			      bool enable)
+{
+	struct smu_feature *feature = &smu->smu_feature;
+	int feature_id;
+	int ret = 0;
+
+	feature_id = smu_feature_get_index(smu, mask);
+	if (feature_id < 0)
+		return -EINVAL;
+
+	WARN_ON(feature_id > feature->feature_num);
+
+	mutex_lock(&feature->mutex);
+	if (enable)
+		test_and_set_bit(feature_id, feature->supported);
+	else
+		test_and_clear_bit(feature_id, feature->supported);
+	mutex_unlock(&feature->mutex);
+
+	return ret;
+}
+
+int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+			    uint16_t *size, uint8_t *frev, uint8_t *crev,
+			    uint8_t **addr)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint16_t data_start;
+
+	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+					   size, frev, crev, &data_start))
+		return -EINVAL;
+
+	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+	return 0;
+}
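+
+/*
+ * Illustrative usage (not part of this change): a typical lookup
+ * resolves the table index from the ATOM master table first, e.g.
+ *
+ *	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ *					    firmwareinfo);
+ *	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ *				      (uint8_t **)&header);
+ */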
+
+static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+	uint32_t sclk_mask, mclk_mask, soc_mask;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = smu_force_dpm_limit_value(smu, true);
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = smu_force_dpm_limit_value(smu, false);
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		ret = smu_unforce_dpm_levels(smu);
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = smu_get_profiling_clk_mask(smu, level,
+						 &sclk_mask,
+						 &mclk_mask,
+						 &soc_mask);
+		if (ret)
+			return ret;
+		smu_cmn_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+		smu_cmn_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+		smu_cmn_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+	return ret;
+}
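+
+/*
+ * Note on the profiling cases above (illustrative, not part of this
+ * change): smu_get_profiling_clk_mask() returns a level index per clock
+ * domain, which is turned into a one-hot mask so that exactly one DPM
+ * level stays selected, e.g. sclk_mask == 2 results in
+ *
+ *	smu_cmn_force_clk_levels(smu, SMU_SCLK, 1 << 2);
+ */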
+
+static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+				   enum amd_dpm_forced_level level,
+				   bool skip_display_settings)
+{
+	int ret = 0;
+	int index = 0;
+	long workload;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+	if (!smu->pm_enabled)
+		return -EINVAL;
+
+	if (!skip_display_settings) {
+		ret = smu_display_config_changed(smu);
+		if (ret) {
+			pr_err("Failed to change display config!\n");
+			return ret;
+		}
+	}
+
+	ret = smu_apply_clocks_adjust_rules(smu);
+	if (ret) {
+		pr_err("Failed to apply clocks adjust rules!\n");
+		return ret;
+	}
+
+	if (!skip_display_settings) {
+		ret = smu_notify_smc_dispaly_config(smu);
+		if (ret) {
+			pr_err("Failed to notify smc display config!\n");
+			return ret;
+		}
+	}
+
+	if (smu_dpm_ctx->dpm_level != level) {
+		ret = smu_asic_set_performance_level(smu, level);
+		if (ret) {
+			ret = smu_default_set_performance_level(smu, level);
+			if (ret) {
+				pr_err("Failed to set performance level!\n");
+				return ret;
+			}
+		}
+
+		/* update the saved copy */
+		smu_dpm_ctx->dpm_level = level;
+	}
+
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		index = fls(smu->workload_mask);
+		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+		workload = smu->workload_setting[index];
+
+		if (smu->power_profile_mode != workload)
+			smu_cmn_set_power_profile_mode(smu, &workload, 0);
+	}
+
+	return ret;
+}
+
+int smu_cmn_handle_task(struct smu_context *smu,
+			enum amd_dpm_forced_level level,
+			enum amd_pp_task task_id)
+{
+	int ret = 0;
+
+	switch (task_id) {
+	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+		ret = smu_pre_display_config_changed(smu);
+		if (ret)
+			goto out;
+		ret = smu_set_cpu_power_state(smu);
+		if (ret)
+			goto out;
+		ret = smu_adjust_power_state_dynamic(smu, level, false);
+		break;
+	case AMD_PP_TASK_COMPLETE_INIT:
+	case AMD_PP_TASK_READJUST_POWER_STATE:
+		ret = smu_adjust_power_state_dynamic(smu, level, true);
+		break;
+	default:
+		break;
+	}
+
+out:
+	return ret;
+}
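+
+/*
+ * Illustrative usage (not part of this change): the OD commit paths in
+ * vega20_ppt.c below readjust the power state through this helper:
+ *
+ *	ret = smu_cmn_handle_task(smu, smu_dpm->dpm_level,
+ *				  AMD_PP_TASK_READJUST_POWER_STATE);
+ */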
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+	int ret = 0;
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *table = NULL;
+
+	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+	if (!table->cpu_addr)
+		return -EINVAL;
+
+	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+				true);
+
+	return ret;
+}
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(type)	#type
+static const char *__smu_message_names[] = {
+	SMU_MESSAGE_TYPES
+};
+
+const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
+{
+	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
+		return "unknown smu message";
+	return __smu_message_names[type];
+}
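+
+/*
+ * How the name table above stays in sync (illustrative):
+ * SMU_MESSAGE_TYPES is an X-macro list of the form
+ *
+ *	#define SMU_MESSAGE_TYPES		\
+ *		__SMU_DUMMY_MAP(TestMessage),	\
+ *		__SMU_DUMMY_MAP(GetSmuVersion),	\
+ *		...
+ *
+ * With __SMU_DUMMY_MAP(type) redefined to #type, every entry expands to
+ * its own name as a string literal, so the array always matches the
+ * enum built from the same list.
+ */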
+
+#undef __SMU_DUMMY_MAP
+#define __SMU_DUMMY_MAP(fea)	#fea
+static const char *__smu_feature_names[] = {
+	SMU_FEATURE_MASKS
+};
+
+const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
+{
+	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+		return "unknown smu feature";
+	return __smu_feature_names[feature];
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 8bcda7871309..21acf7aedc42 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -57,8 +57,8 @@
 	((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
 #define smu_write_pptable(smu) \
 	((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
-	((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu, clk) \
+	((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu), (clk)) : 0)
 #define smu_set_tool_table_location(smu) \
 	((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
 #define smu_notify_memory_pool_location(smu) \
@@ -81,8 +81,6 @@
 	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
 #define smu_read_smc_arg(smu, arg) \
 	((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
-	((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
 #define smu_init_display_count(smu, count) \
 	((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
 #define smu_feature_set_allowed_mask(smu) \
@@ -93,14 +91,6 @@
 	((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
 #define smu_notify_display_change(smu) \
 	((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
-	((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
-	((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
-	((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
-	((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
 #define smu_populate_umd_state_clk(smu) \
 	((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
 #define smu_set_default_od8_settings(smu) \
@@ -109,8 +99,6 @@
 #define smu_get_current_clk_freq(smu, clk_id, value) \
 	((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
 
-#define smu_tables_init(smu, tab) \
-	((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
 #define smu_set_thermal_fan_table(smu) \
 	((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
 #define smu_start_thermal_control(smu) \
@@ -195,10 +183,62 @@
 #define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
 		((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
 
-#define smu_override_pcie_parameters(smu) \
-		((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
-
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
 		((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
 
+#define smu_set_gfx_cgpg(smu, enabled) \
+		((smu)->ppt_funcs->set_gfx_cgpg ? (smu)->ppt_funcs->set_gfx_cgpg((smu), (enabled)) : 0)
+
+#define smu_cmn_force_clk_levels(smu, clk_type, mask) \
+		((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (mask)) : 0)
+
+#define smu_cmn_get_power_limit(smu, limit, def) \
+		((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
+
+#define smu_cmn_set_power_profile_mode(smu, param, param_size) \
+		((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
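+
+/*
+ * The wrappers above all follow the same pattern (shown hand-expanded
+ * here for illustration): call the ASIC callback when the ppt table
+ * implements it, otherwise fall back to a default. For example,
+ * smu_set_gfx_cgpg(smu, enabled) behaves like
+ *
+ *	if ((smu)->ppt_funcs->set_gfx_cgpg)
+ *		ret = (smu)->ppt_funcs->set_gfx_cgpg((smu), (enabled));
+ *	else
+ *		ret = 0;
+ *
+ * Callbacks that must not silently succeed, such as
+ * set_soft_freq_limited_range, default to -EINVAL instead of 0.
+ */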
+
+int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+			    uint16_t *size, uint8_t *frev, uint8_t *crev,
+			    uint8_t **addr);
+
+int smu_feature_is_enabled(struct smu_context *smu,
+				  enum smu_feature_mask mask);
+int smu_feature_set_enabled(struct smu_context *smu,
+				   enum smu_feature_mask mask, bool enable);
+int smu_feature_is_supported(struct smu_context *smu,
+				    enum smu_feature_mask mask);
+int smu_feature_set_supported(struct smu_context *smu,
+				     enum smu_feature_mask mask, bool enable);
+
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
+		     void *table_data, bool drv2smu);
+
+int smu_write_watermarks_table(struct smu_context *smu);
+
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
+
+int smu_cmn_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			   uint32_t *min, uint32_t *max);
+
+int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t min, uint32_t max);
+
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
+
+int smu_cmn_handle_task(struct smu_context *smu,
+			enum amd_dpm_forced_level level,
+			enum amd_pp_task task_id);
+
+int smu_feature_update_enable_state(struct smu_context *smu,
+				    uint64_t feature_mask,
+				    bool enabled);
+
+const char *smu_get_message_name(struct smu_context *smu,
+				 enum smu_message_type type);
+
+const char *smu_get_feature_name(struct smu_context *smu,
+				 enum smu_feature_mask feature);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 7e882999abad..bdc21cae3a1e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -402,16 +402,6 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_init_dpm_context(struct smu_context *smu)
-{
-	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-
-	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
-		return -EINVAL;
-
-	return smu_alloc_dpm_context(smu);
-}
-
 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
 {
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -432,33 +422,6 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
 	return 0;
 }
 
-int smu_v11_0_init_smc_tables(struct smu_context *smu)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *tables = NULL;
-	int ret = 0;
-
-	if (smu_table->tables)
-		return -EINVAL;
-
-	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
-			 GFP_KERNEL);
-	if (!tables)
-		return -ENOMEM;
-
-	smu_table->tables = tables;
-
-	ret = smu_tables_init(smu, tables);
-	if (ret)
-		return ret;
-
-	ret = smu_v11_0_init_dpm_context(smu);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -717,47 +680,6 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
 	return ret;
 }
 
-int smu_v11_0_check_pptable(struct smu_context *smu)
-{
-	int ret;
-
-	ret = smu_check_powerplay_table(smu);
-	return ret;
-}
-
-int smu_v11_0_parse_pptable(struct smu_context *smu)
-{
-	int ret;
-
-	struct smu_table_context *table_context = &smu->smu_table;
-	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
-
-	if (table_context->driver_pptable)
-		return -EINVAL;
-
-	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
-
-	if (!table_context->driver_pptable)
-		return -ENOMEM;
-
-	ret = smu_store_powerplay_table(smu);
-	if (ret)
-		return -EINVAL;
-
-	ret = smu_append_powerplay_table(smu);
-
-	return ret;
-}
-
-int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
-{
-	int ret;
-
-	ret = smu_set_default_dpm_table(smu);
-
-	return ret;
-}
-
 int smu_v11_0_write_pptable(struct smu_context *smu)
 {
 	struct smu_table_context *table_context = &smu->smu_table;
@@ -769,10 +691,13 @@ int smu_v11_0_write_pptable(struct smu_context *smu)
 	return ret;
 }
 
-int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu, uint32_t clk)
 {
 	int ret;
 
+	if (!smu->pm_enabled)
+		return 0;
+
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
 	if (ret)
@@ -781,18 +706,6 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 	return ret;
 }
 
-int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
-{
-	struct smu_table_context *table_context = &smu->smu_table;
-
-	if (!smu->pm_enabled)
-		return 0;
-	if (!table_context)
-		return -EINVAL;
-
-	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
-}
-
 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 {
 	int ret = 0;
@@ -1225,6 +1138,52 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
 
 }
 
+static int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+			   void *data, uint32_t *size)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
+	if (!data || !size)
+		return -EINVAL;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+		*((uint32_t *)data) = smu->pstate_sclk;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+		*((uint32_t *)data) = smu->pstate_mclk;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+		*size = 8;
+		break;
+	case AMDGPU_PP_SENSOR_UVD_POWER:
+		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_VCE_POWER:
+		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
+		*size = 4;
+		break;
+	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
+		*size = 4;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		*size = 0;
+
+	return ret;
+}
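+
+/*
+ * Illustrative usage (not part of this change): callers get the payload
+ * size reported back through *size (zeroed on error), e.g.
+ *
+ *	uint32_t sclk, size = sizeof(sclk);
+ *
+ *	ret = smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ *				     &sclk, &size);
+ */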
+
 int smu_v11_0_read_sensor(struct smu_context *smu,
 				 enum amd_pp_sensors sensor,
 				 void *data, uint32_t *size)
@@ -1305,7 +1264,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
 		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
 			return 0;
 
-		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
+		ret = smu_v11_0_set_hard_freq_range(smu, clk_select, clk_freq, 0);
 
 		if(clk_select == SMU_UCLK)
 			smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1618,7 +1577,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
 	return false;
 }
 
-enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
 {
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
 	enum smu_baco_state baco_state;
@@ -1739,43 +1698,37 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	return ret;
 }
 
-int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
+int smu_v11_0_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+			    uint32_t min, uint32_t max)
 {
-	struct amdgpu_device *adev = smu->adev;
-	uint32_t pcie_gen = 0, pcie_width = 0;
-	int ret;
+	int ret = 0, clk_id = 0;
+	uint32_t param;
 
-	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
-		pcie_gen = 3;
-	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-		pcie_gen = 2;
-	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-		pcie_gen = 1;
-	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
-		pcie_gen = 0;
-
-	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
-	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
-	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
-	 */
-	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
-		pcie_width = 6;
-	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
-		pcie_width = 5;
-	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
-		pcie_width = 4;
-	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
-		pcie_width = 3;
-	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
-		pcie_width = 2;
-	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
-		pcie_width = 1;
-
-	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+	if (min <= 0 && max <= 0)
+		return -EINVAL;
 
-	if (ret)
-		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+	if (!smu_clk_dpm_is_enabled(smu, clk_type))
+		return 0;
 
-	return ret;
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
 
+	if (max > 0) {
+		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+						  param);
+		if (ret)
+			return ret;
+	}
+
+	if (min > 0) {
+		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+						  param);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 139dd737eaa5..53c33abd8f02 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -224,7 +224,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
  * Returns 2=Not in GFXOFF.
  * Returns 3=Transition into GFXOFF.
  */
-uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
 {
 	uint32_t reg;
 	uint32_t gfxOff_Status = 0;
@@ -261,24 +261,6 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
 	return ret;
 }
 
-int smu_v12_0_init_smc_tables(struct smu_context *smu)
-{
-	struct smu_table_context *smu_table = &smu->smu_table;
-	struct smu_table *tables = NULL;
-
-	if (smu_table->tables)
-		return -EINVAL;
-
-	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
-			 GFP_KERNEL);
-	if (!tables)
-		return -ENOMEM;
-
-	smu_table->tables = tables;
-
-	return smu_tables_init(smu, tables);
-}
-
 int smu_v12_0_fini_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 7125406c6256..d6ccde80c6de 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -376,6 +376,33 @@ static int vega20_allocate_dpm_context(struct smu_context *smu)
 	return 0;
 }
 
+static int vega20_init_smc_tables(struct smu_context *smu)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	struct smu_table *tables = NULL;
+	int ret = 0;
+
+	if (smu_table->tables)
+		return -EINVAL;
+
+	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
+			 GFP_KERNEL);
+	if (!tables)
+		return -ENOMEM;
+
+	smu_table->tables = tables;
+
+	ret = vega20_tables_init(smu, tables);
+	if (ret)
+		return ret;
+
+	ret = vega20_allocate_dpm_context(smu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int vega20_setup_od8_information(struct smu_context *smu)
 {
 	ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
@@ -562,6 +589,30 @@ static int vega20_append_powerplay_table(struct smu_context *smu)
 	return 0;
 }
 
+static int vega20_parse_pptable(struct smu_context *smu)
+{
+	int ret;
+
+	struct smu_table_context *table_context = &smu->smu_table;
+	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+
+	if (table_context->driver_pptable)
+		return -EINVAL;
+
+	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
+
+	if (!table_context->driver_pptable)
+		return -ENOMEM;
+
+	ret = vega20_store_powerplay_table(smu);
+	if (ret)
+		return -EINVAL;
+
+	ret = vega20_append_powerplay_table(smu);
+
+	return ret;
+}
+
 static int vega20_check_powerplay_table(struct smu_context *smu)
 {
 	ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
@@ -2583,9 +2634,9 @@ static int vega20_set_od_percentage(struct smu_context *smu,
 		single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
 	}
 
-	ret = smu_handle_task(smu, smu_dpm->dpm_level,
-			      AMD_PP_TASK_READJUST_POWER_STATE,
-			      false);
+	ret = smu_cmn_handle_task(smu,
+				  smu_dpm->dpm_level,
+				  AMD_PP_TASK_READJUST_POWER_STATE);
 
 set_od_failed:
 	return ret;
@@ -2812,9 +2863,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
 	}
 
 	if (type == PP_OD_COMMIT_DPM_TABLE) {
-		ret = smu_handle_task(smu, smu_dpm->dpm_level,
-				      AMD_PP_TASK_READJUST_POWER_STATE,
-				      false);
+		ret = smu_cmn_handle_task(smu,
+					  smu_dpm->dpm_level,
+					  AMD_PP_TASK_READJUST_POWER_STATE);
 	}
 
 	return ret;
@@ -3169,11 +3220,6 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
 
 
 static const struct pptable_funcs vega20_ppt_funcs = {
-	.tables_init = vega20_tables_init,
-	.alloc_dpm_context = vega20_allocate_dpm_context,
-	.store_powerplay_table = vega20_store_powerplay_table,
-	.check_powerplay_table = vega20_check_powerplay_table,
-	.append_powerplay_table = vega20_append_powerplay_table,
 	.get_smu_msg_index = vega20_get_smu_msg_index,
 	.get_smu_clk_index = vega20_get_smu_clk_index,
 	.get_smu_feature_index = vega20_get_smu_feature_index,
@@ -3183,7 +3229,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.run_btc = vega20_run_btc_afll,
 	.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
 	.get_current_power_state = vega20_get_current_power_state,
-	.set_default_dpm_table = vega20_set_default_dpm_table,
 	.set_power_state = NULL,
 	.populate_umd_state_clk = vega20_populate_umd_state_clk,
 	.print_clk_levels = vega20_print_clk_levels,
@@ -3215,7 +3260,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.update_pcie_parameters = vega20_update_pcie_parameters,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
-	.init_smc_tables = smu_v11_0_init_smc_tables,
+	.init_smc_tables = vega20_init_smc_tables,
 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
 	.init_power = smu_v11_0_init_power,
 	.fini_power = smu_v11_0_fini_power,
@@ -3223,9 +3268,9 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.setup_pptable = smu_v11_0_setup_pptable,
 	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
-	.check_pptable = smu_v11_0_check_pptable,
-	.parse_pptable = smu_v11_0_parse_pptable,
-	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
+	.check_pptable = vega20_check_powerplay_table,
+	.parse_pptable = vega20_parse_pptable,
+	.populate_smc_tables = vega20_set_default_dpm_table,
 	.check_fw_version = smu_v11_0_check_fw_version,
 	.write_pptable = smu_v11_0_write_pptable,
 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
@@ -3244,7 +3289,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
 	.start_thermal_control = smu_v11_0_start_thermal_control,
 	.stop_thermal_control = smu_v11_0_stop_thermal_control,
-	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
 	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
@@ -3256,12 +3300,10 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
 	.baco_is_support= smu_v11_0_baco_is_support,
-	.baco_get_state = smu_v11_0_baco_get_state,
 	.baco_set_state = smu_v11_0_baco_set_state,
 	.baco_reset = smu_v11_0_baco_reset,
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
-	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 };
 
 void vega20_set_ppt_funcs(struct smu_context *smu)
-- 
2.23.0


