[PATCH 124/159] drm/amd/pm: Enable performance determinism on aldebaran

Alex Deucher alexander.deucher at amd.com
Wed Feb 24 22:18:24 UTC 2021


From: Lijo Lazar <lijo.lazar at amd.com>

Performance determinism is a new mode on Aldebaran in which the PMFW tries
to maintain a sustained performance level. It can be enabled on a per-die
basis. To guarantee that it stays within the power cap, a max GFX frequency
must be specified in this mode. A new power_dpm_force_performance_level
value, "perf_determinism", is defined to enable this mode in amdgpu. The max
frequency (in MHz) can be set through pp_dpm_sclk. The mode is disabled as
soon as any other performance level is chosen.

Ex: To enable perf determinism with a 900 MHz max GFX clock

echo perf_determinism > /sys/bus/pci/devices/.../power_dpm_force_performance_level
echo max 900 > /sys/bus/pci/devices/.../pp_dpm_sclk
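
Ex: To disable perf determinism, select any other performance level (auto is
shown here as one possible choice)

echo auto > /sys/bus/pci/devices/.../power_dpm_force_performance_level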

Signed-off-by: Lijo Lazar <lijo.lazar at amd.com>
Reviewed-by: Kenneth Feng <kenneth.feng at amd.com>
Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
---
 .../gpu/drm/amd/include/kgd_pp_interface.h    |  1 +
 drivers/gpu/drm/amd/pm/amdgpu_pm.c            | 88 +++++++++++++++++++
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     |  6 +-
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 59 ++++++++++++-
 4 files changed, 150 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a41875ac5dfb..c6b5c789abf0 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -48,6 +48,7 @@ enum amd_dpm_forced_level {
 	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
 	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
 	AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+	AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM = 0x200,
 };
 
 enum amd_pm_state_type {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index b770dd634ab6..309ee950b073 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -298,6 +298,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
 			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+			(level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
 			"unknown");
 }
 
@@ -333,6 +334,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
 	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
 	}  else {
 		return -EINVAL;
 	}
@@ -1090,6 +1093,83 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
  */
 #define AMDGPU_MASK_BUF_MAX	(32 * 13)
 
+static int amdgpu_read_clk(const char *buf,
+		size_t count,
+		uint32_t *min,
+		uint32_t *max)
+{
+	int ret;
+	char *tmp;
+	char *token = NULL;
+	char *tag;
+	char *value;
+	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+	const char delimiter[3] = {' ', '\n', '\0'};
+	size_t bytes;
+	int i = 0;
+
+	bytes = min(count, sizeof(buf_cpy) - 1);
+	memcpy(buf_cpy, buf, bytes);
+	buf_cpy[bytes] = '\0';
+	tmp = buf_cpy;
+
+	*min = *max = 0;
+	while (i < 2) {
+		ret = -EINVAL;
+		token = strsep(&tmp, delimiter);
+		if (!token || !*token)
+			break;
+		tag = token;
+
+		token = strsep(&tmp, delimiter);
+		if (!token || !*token)
+			break;
+		value = token;
+
+		if (!strncmp(tag, "min", strlen("min")))
+			ret = kstrtou32(value, 0, min);
+		else if (!strncmp(tag, "max", strlen("max")))
+			ret = kstrtou32(value, 0, max);
+
+		if (ret)
+			break;
+		++i;
+	}
+
+	/* should get a non-zero value for min or max */
+	if (!*min && !*max)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int amdgpu_set_clk_minmax(struct amdgpu_device *adev,
+		uint32_t clk_type,
+		uint32_t min,
+		uint32_t max)
+{
+	int ret;
+
+	if (!is_support_sw_smu(adev) || amdgpu_sriov_vf(adev))
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return ret;
+	}
+
+	ret = smu_set_soft_freq_range(&adev->smu, clk_type, min, max);
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 {
 	int ret;
@@ -1128,10 +1208,18 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 	int ret;
 	uint32_t mask = 0;
+	uint32_t min;
+	uint32_t max;
 
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
+	ret = amdgpu_read_clk(buf, count, &min, &max);
+	if (!ret) {
+		ret = amdgpu_set_clk_minmax(adev, SMU_GFXCLK, min, max);
+		return ret ? ret : count;
+	}
+
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 474a5dd04c43..affe091587d3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1618,7 +1618,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload = smu->workload_setting[index];
@@ -1693,7 +1694,8 @@ int smu_switch_power_profile(struct smu_context *smu,
 		workload = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_set_power_profile_mode(smu, &workload, 0, false);
 
 	mutex_unlock(&smu->mutex);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 2427681fab8a..629523858660 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1078,7 +1078,6 @@ static int aldebaran_get_power_limit(struct smu_context *smu)
 			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
 			return -EINVAL;
 		}
-
 		power_limit = pptable->PptLimit;
 	}
 
@@ -1103,7 +1102,19 @@ static int aldebaran_system_features_control(struct  smu_context *smu, bool enab
 static int aldebaran_set_performance_level(struct smu_context *smu,
 					   enum amd_dpm_forced_level level)
 {
+	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+
+	/* Disable determinism if switching to another mode */
+	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+			&& (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+
+
 	switch (level) {
+
+	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+		return 0;
+
 	case AMD_DPM_FORCED_LEVEL_HIGH:
 	case AMD_DPM_FORCED_LEVEL_LOW:
 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1117,6 +1128,50 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
 	return smu_v13_0_set_performance_level(smu, level);
 }
 
+static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
+					  enum smu_clk_type clk_type,
+					  uint32_t min,
+					  uint32_t max)
+{
+	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t min_clk;
+	uint32_t max_clk;
+	int ret = 0;
+
+	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+		return -EINVAL;
+
+	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+		return -EINVAL;
+
+	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+			(max > dpm_context->dpm_tables.gfx_table.max)) {
+			dev_warn(adev->dev,
+					"Invalid max frequency %d MHz specified for determinism\n", max);
+			return -EINVAL;
+		}
+
+		/* Restore default min/max clocks and enable determinism */
+		min_clk = dpm_context->dpm_tables.gfx_table.min;
+		max_clk = dpm_context->dpm_tables.gfx_table.max;
+		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+		if (!ret) {
+			usleep_range(500, 1000);
+			ret = smu_cmn_send_smc_msg_with_param(smu,
+					SMU_MSG_EnableDeterminism,
+					max, NULL);
+			if (ret)
+				dev_err(adev->dev,
+						"Failed to enable determinism at GFX clock %d MHz\n", max);
+		}
+	}
+
+	return ret;
+}
+
 static bool aldebaran_is_dpm_running(struct smu_context *smu)
 {
 	int ret = 0;
@@ -1351,7 +1406,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
 	.baco_is_support= aldebaran_is_baco_supported,
 	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
-	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
+	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
 	.set_df_cstate = aldebaran_set_df_cstate,
 	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
-- 
2.29.2


