[PATCH 4/4] drm/amdgpu: extend profiling mode.

Rex Zhu Rex.Zhu at amd.com
Mon Jan 9 06:51:40 UTC 2017


In profiling mode, powerplay will keep the power state
as stable as possible, and disable the gfx CG and LBPW features.

profile_standard: as a prerequisite, keep power and thermal
sustainable; set the clock ratio as close to the highest clock
ratio as possible.
profile_min_sclk: fix mclk as in profile_standard, set the lowest sclk.
profile_min_mclk: fix sclk as in profile_standard, set the lowest mclk.
profile_peak: set the highest sclk and mclk; power and thermal are not
sustainable.
profile_exit: exit profile mode and re-enable the gfx CG/LBPW features.
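
The new levels are selected through the existing
power_dpm_force_performance_level sysfs file, e.g. (the card index
below is just an example and may differ per system):

    echo profile_standard > /sys/class/drm/card0/device/power_dpm_force_performance_level
    cat /sys/class/drm/card0/device/power_dpm_force_performance_level
    echo profile_exit > /sys/class/drm/card0/device/power_dpm_force_performance_level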

Change-Id: I217b8cfb98b6a628a11091776debe039bad997a2
Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c           |  38 ++++---
 drivers/gpu/drm/amd/amdgpu/ci_dpm.c              |   5 +-
 drivers/gpu/drm/amd/include/amd_shared.h         |   6 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c   |   3 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 127 ++++++++++++++++++++++-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h        |   1 +
 6 files changed, 154 insertions(+), 26 deletions(-)
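
Note: the forced levels are now distinct bit flags, so profile-mode
membership can be tested with a single bitwise AND, as done in the new
smu7_force_dpm_level() below. A minimal stand-alone sketch of the
enter/exit transitions (names and printf calls are illustrative only,
not kernel code):

	#include <stdio.h>

	enum level {
		LEVEL_AUTO             = 0x1,
		LEVEL_MANUAL           = 0x2,
		LEVEL_LOW              = 0x4,
		LEVEL_HIGH             = 0x8,
		LEVEL_PROFILE_STANDARD = 0x10,
		LEVEL_PROFILE_MIN_SCLK = 0x20,
		LEVEL_PROFILE_MIN_MCLK = 0x40,
		LEVEL_PROFILE_PEAK     = 0x80,
		LEVEL_PROFILE_EXIT     = 0x100,
	};

	#define PROFILE_MASK (LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | \
			      LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK)

	int main(void)
	{
		enum level cur = LEVEL_AUTO, saved = LEVEL_AUTO;
		enum level req = LEVEL_PROFILE_PEAK;	/* requested level */

		if (!(cur & PROFILE_MASK) && (req & PROFILE_MASK)) {
			/* entering profile mode: save level, ungate gfx cg */
			saved = cur;
			printf("ungate gfx cg, saved level 0x%x\n", (unsigned)saved);
		} else if ((cur & PROFILE_MASK) && !(req & PROFILE_MASK)) {
			/* leaving profile mode: restore saved level, gate gfx cg */
			if (req == LEVEL_PROFILE_EXIT)
				req = saved;
			printf("gate gfx cg\n");
		}
		cur = req;
		printf("current level 0x%x\n", (unsigned)cur);
		return 0;
	}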

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8438642..a6a4d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -120,12 +120,15 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 
 	level = amdgpu_dpm_get_performance_level(adev);
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			(level & (AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-			(level & AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
-			(level & AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-			(level & AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
-			(level & AMD_DPM_FORCED_LEVEL_PROFILING) ? "profiling" :
-			"unknown"));
+			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+			"unknown");
 }
 
 static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
@@ -154,9 +157,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		level = AMD_DPM_FORCED_LEVEL_AUTO;
 	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_MANUAL;
-	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
-		level = AMD_DPM_FORCED_LEVEL_PROFILING;
-	} else {
+	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
+	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+	} else {
 		count = -EINVAL;
 		goto fail;
 	}
@@ -164,14 +175,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return 0;
 
-	if (level == AMD_DPM_FORCED_LEVEL_PROFILING)
-		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
-						AMD_CG_STATE_UNGATE);
-	else if (level != AMD_DPM_FORCED_LEVEL_PROFILING &&
-			current_level == AMD_DPM_FORCED_LEVEL_PROFILING)
-		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
-						AMD_CG_STATE_GATE);
-
 	if (adev->pp_enabled)
 		amdgpu_dpm_force_performance_level(adev, level);
 	else {
@@ -188,6 +191,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 			adev->pm.dpm.forced_level = level;
 		mutex_unlock(&adev->pm.mutex);
 	}
+
 fail:
 	return count;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 9a544ad..ece94ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6571,8 +6571,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
 {
 	struct ci_power_info *pi = ci_get_pi(adev);
 
-	if (!(adev->pm.dpm.forced_level &
-		(AMD_DPM_FORCED_LEVEL_MANUAL | AMD_DPM_FORCED_LEVEL_PROFILING)))
+	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+				AMD_DPM_FORCED_LEVEL_LOW |
+				AMD_DPM_FORCED_LEVEL_HIGH))
 		return -EINVAL;
 
 	switch (type) {
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 92138a9..6fb5870 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,7 +85,11 @@ enum amd_dpm_forced_level {
 	AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
 	AMD_DPM_FORCED_LEVEL_LOW = 0x4,
 	AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
-	AMD_DPM_FORCED_LEVEL_PROFILING = 0x10,
+	AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
+	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
+	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
+	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
+	AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
 };
 
 enum amd_powergating_state {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 93c1384..0668b0b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1642,8 +1642,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
 static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
-	if (!(hwmgr->dpm_level &
-		(AMD_DPM_FORCED_LEVEL_MANUAL | AMD_DPM_FORCED_LEVEL_PROFILING)))
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		return -EINVAL;
 
 	switch (type) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index acd038cc..8ced2f4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -90,6 +90,8 @@ enum DPM_EVENT_SRC {
 };
 
 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask);
 
 static struct smu7_power_state *cast_phw_smu7_power_state(
 				  struct pp_hw_power_state *hw_ps)
@@ -2490,36 +2492,152 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 	}
 
 	return 0;
+}
+
+static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
+{
+	uint32_t percentage;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+	int32_t tmp_mclk;
+	int32_t tmp_sclk;
+	int32_t count;
+
+	if (golden_dpm_table->mclk_table.count < 1)
+		return -EINVAL;
+
+	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+	if (golden_dpm_table->mclk_table.count == 1) {
+		percentage = 70;
+		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
+	} else {
+		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
+	}
+
+	tmp_sclk = tmp_mclk * percentage / 100;
+
+	if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count - 1;
+			count >= 0; count--) {
+			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
+				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
+				*sclk_mask = count;
+				break;
+			}
+		}
+		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+			*sclk_mask = 0;
+
+		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count - 1;
+	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		struct phm_ppt_v1_information *table_info =
+				(struct phm_ppt_v1_information *)(hwmgr->pptable);
 
+		for (count = table_info->vdd_dep_on_sclk->count - 1; count >= 0; count--) {
+			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
+				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
+				*sclk_mask = count;
+				break;
+			}
+		}
+		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+			*sclk_mask = 0;
+
+		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+	}
+
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
+		*mclk_mask = 0;
+	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
+
+	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+	return 0;
 }
+
 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
 				enum amd_dpm_forced_level level)
 {
 	int ret = 0;
+	uint32_t sclk_mask = 0;
+	uint32_t mclk_mask = 0;
+	uint32_t pcie_mask = 0;
+	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+	if (level == hwmgr->dpm_level)
+		return ret;
+
+	if (!(hwmgr->dpm_level & profile_mode_mask)) {
+		/* enter profile mode, save current level, disable gfx cg */
+		if (level & profile_mode_mask) {
+			hwmgr->saved_dpm_level = hwmgr->dpm_level;
+			cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_GFX,
+						AMD_CG_STATE_UNGATE);
+		}
+	} else {
+		/* exit profile mode, restore level, enable gfx cg */
+		if (!(level & profile_mode_mask)) {
+			if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+				level = hwmgr->saved_dpm_level;
+			cgs_set_clockgating_state(hwmgr->device,
+					AMD_IP_BLOCK_TYPE_GFX,
+					AMD_CG_STATE_GATE);
+		}
+	}
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
 		ret = smu7_force_dpm_highest(hwmgr);
 		if (ret)
 			return ret;
+		hwmgr->dpm_level = level;
 		break;
 	case AMD_DPM_FORCED_LEVEL_LOW:
 		ret = smu7_force_dpm_lowest(hwmgr);
 		if (ret)
 			return ret;
+		hwmgr->dpm_level = level;
 		break;
 	case AMD_DPM_FORCED_LEVEL_AUTO:
 		ret = smu7_unforce_dpm_levels(hwmgr);
 		if (ret)
 			return ret;
+		hwmgr->dpm_level = level;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+		if (ret)
+			return ret;
+		hwmgr->dpm_level = level;
+		smu7_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+		smu7_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
+		smu7_force_clock_level(hwmgr, PP_PCIE, 1 << pcie_mask);
 		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
 	default:
 		break;
 	}
 
-	hwmgr->dpm_level = level;
+	if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
+		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+	else
+		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
 
-	return ret;
+	return 0;
 }
 
 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
@@ -4053,8 +4171,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-	if (!(hwmgr->dpm_level &
-		(AMD_DPM_FORCED_LEVEL_MANUAL | AMD_DPM_FORCED_LEVEL_PROFILING)))
+	if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+				AMD_DPM_FORCED_LEVEL_LOW |
+				AMD_DPM_FORCED_LEVEL_HIGH))
 		return -EINVAL;
 
 	switch (type) {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 27217a7..7275a29 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -612,6 +612,7 @@ struct pp_hwmgr {
 	uint32_t num_vce_state_tables;
 
 	enum amd_dpm_forced_level dpm_level;
+	enum amd_dpm_forced_level saved_dpm_level;
 	bool block_hw_access;
 	struct phm_gfx_arbiter gfx_arbiter;
 	struct phm_acp_arbiter acp_arbiter;
-- 
1.9.1


