[PATCH] drm/amd/pm: support ss metrics read for smu11
From: Sathishkumar S <sathishkumar.sundararaju@amd.com>
Date: Wed May 11 07:21:50 UTC 2022
Support reading SmartShift APU and dGPU power for SMU11 based ASICs.

v2: add a new version of SmuMetrics and make the calculation more readable (Lijo)
v3: avoid calculations that can result in negative values and skip the related checks
v4: use the current power limit on the dGPU and exclude SMU 11.0.7 (Lijo)
Signed-off-by: Sathishkumar S <sathishkumar.sundararaju@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
---
.../pmfw_if/smu11_driver_if_sienna_cichlid.h | 63 +++++++
.../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 161 ++++++++++++++----
2 files changed, 187 insertions(+), 37 deletions(-)
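
For readers skimming the diff, here is a minimal standalone model of the
share calculation introduced below. The function name and the example
wattages are illustrative only; in the kernel, apu_limit/apu_avg come
from SmuMetrics_V4_t and dgpu_limit from the current dGPU power limit.

#include <stdint.h>
#include <stdio.h>

static void smartshift_shares(uint32_t apu_limit, uint32_t dgpu_limit,
			      uint32_t ss_limit,
			      uint32_t apu_avg, uint32_t dgpu_avg,
			      uint32_t *apu_pct, uint32_t *dgpu_pct)
{
	uint32_t ratio, apu_boost = 0, dgpu_boost = 0;

	if (ss_limit) {
		/* Scale both limits down proportionally when their sum
		 * exceeds the platform SmartShift (STAPM) budget. */
		ratio = (apu_limit + dgpu_limit) * 100 / ss_limit;
		if (ratio > 100) {
			apu_limit = apu_limit * 100 / ratio;
			dgpu_limit = dgpu_limit * 100 / ratio;
		}

		/* Boost: percentage above the scaled limit, capped at 100. */
		if (apu_limit && apu_avg > apu_limit) {
			apu_boost = (apu_avg - apu_limit) * 100 / apu_limit;
			if (apu_boost > 100)
				apu_boost = 100;
		}
		if (dgpu_limit && dgpu_avg > dgpu_limit) {
			dgpu_boost = (dgpu_avg - dgpu_limit) * 100 / dgpu_limit;
			if (dgpu_boost > 100)
				dgpu_boost = 100;
		}

		/* Only the side with the larger boost is reported. */
		if (dgpu_boost >= apu_boost)
			apu_boost = 0;
		else
			dgpu_boost = 0;
	}

	*apu_pct = apu_boost;
	*dgpu_pct = dgpu_boost;
}

int main(void)
{
	uint32_t apu, dgpu;

	/* Hypothetical: 25 W APU limit + 90 W dGPU limit against a
	 * 100 W budget, with the APU drawing 30 W and the dGPU 70 W.
	 * The limits scale to 21 W / 78 W; only the APU is over its
	 * limit, so this prints "apu 42% dgpu 0%". */
	smartshift_shares(25, 90, 100, 30, 70, &apu, &dgpu);
	printf("apu %u%% dgpu %u%%\n", apu, dgpu);
	return 0;
}
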
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 08f0bb2af5d2..280d42778f28 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -1540,11 +1540,74 @@ typedef struct {
} SmuMetrics_V3_t;
+typedef struct {
+ uint32_t CurrClock[PPCLK_COUNT];
+
+ uint16_t AverageGfxclkFrequencyPreDs;
+ uint16_t AverageGfxclkFrequencyPostDs;
+ uint16_t AverageFclkFrequencyPreDs;
+ uint16_t AverageFclkFrequencyPostDs;
+ uint16_t AverageUclkFrequencyPreDs;
+ uint16_t AverageUclkFrequencyPostDs;
+
+
+ uint16_t AverageGfxActivity;
+ uint16_t AverageUclkActivity;
+ uint8_t CurrSocVoltageOffset;
+ uint8_t CurrGfxVoltageOffset;
+ uint8_t CurrMemVidOffset;
+ uint8_t Padding8;
+ uint16_t AverageSocketPower;
+ uint16_t TemperatureEdge;
+ uint16_t TemperatureHotspot;
+ uint16_t TemperatureMem;
+ uint16_t TemperatureVrGfx;
+ uint16_t TemperatureVrMem0;
+ uint16_t TemperatureVrMem1;
+ uint16_t TemperatureVrSoc;
+ uint16_t TemperatureLiquid0;
+ uint16_t TemperatureLiquid1;
+ uint16_t TemperaturePlx;
+ uint16_t Padding16;
+ uint32_t AccCnt;
+ uint8_t ThrottlingPercentage[THROTTLER_COUNT];
+
+
+ uint8_t LinkDpmLevel;
+ uint8_t CurrFanPwm;
+ uint16_t CurrFanSpeed;
+
+ //BACO metrics, PMFW-1721
+ //metrics for D3hot entry/exit and driver ARM msgs
+ uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+ //PMFW-4362
+ uint32_t EnergyAccumulator;
+ uint16_t AverageVclk0Frequency;
+ uint16_t AverageDclk0Frequency;
+ uint16_t AverageVclk1Frequency;
+ uint16_t AverageDclk1Frequency;
+ uint16_t VcnUsagePercentage0;
+ uint16_t VcnUsagePercentage1;
+ uint8_t PcieRate;
+ uint8_t PcieWidth;
+ uint16_t AverageGfxclkFrequencyTarget;
+
+ uint8_t ApuSTAPMSmartShiftLimit;
+ uint8_t AverageApuSocketPower;
+ uint8_t ApuSTAPMLimit;
+ uint8_t Padding8_2;
+
+} SmuMetrics_V4_t;
+
typedef struct {
union {
SmuMetrics_t SmuMetrics;
SmuMetrics_V2_t SmuMetrics_V2;
SmuMetrics_V3_t SmuMetrics_V3;
+ SmuMetrics_V4_t SmuMetrics_V4;
};
uint32_t Spare[1];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 86ab276b6b0b..503439754f08 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -585,6 +585,102 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
return throttler_status;
}
+static int sienna_cichlid_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
+{
+ struct smu_11_0_7_powerplay_table *powerplay_table =
+ (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
+ uint32_t power_limit, od_percent;
+ uint16_t *table_member;
+
+ GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
+
+ if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
+ power_limit =
+ table_member[PPT_THROTTLER_PPT0];
+ }
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit) {
+ if (smu->od_enabled) {
+ od_percent =
+ le32_to_cpu(powerplay_table->overdrive_table.max[
+ SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+
+ dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n",
+ od_percent, power_limit);
+
+ power_limit *= (100 + od_percent);
+ power_limit /= 100;
+ }
+ *max_power_limit = power_limit;
+ }
+
+ return 0;
+}
+
+static void sienna_cichlid_get_smartshift_power_percentage(struct smu_context *smu,
+ uint32_t *apu_percent,
+ uint32_t *dgpu_percent)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_V4_t *metrics_v4 =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V4);
+ uint16_t *table_member;
+ uint16_t powerRatio = 0;
+ uint16_t apu_power_limit = 0;
+ uint16_t dgpu_power_limit = 0;
+ uint32_t apu_boost = 0;
+ uint32_t dgpu_boost = 0;
+ uint32_t cur_power_limit;
+
+ sienna_cichlid_get_power_limit(smu, &cur_power_limit, NULL, NULL);
+ if (metrics_v4->ApuSTAPMSmartShiftLimit != 0) {
+ GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
+ apu_power_limit = metrics_v4->ApuSTAPMLimit;
+ dgpu_power_limit = cur_power_limit;
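+ /* If the two device limits together exceed the platform
+ * SmartShift (STAPM) budget, scale both down so that
+ * their sum fits within it. */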
+ powerRatio = (((apu_power_limit +
+ dgpu_power_limit) * 100) /
+ metrics_v4->ApuSTAPMSmartShiftLimit);
+ if (powerRatio > 100) {
+ apu_power_limit = (apu_power_limit * 100) /
+ powerRatio;
+ dgpu_power_limit = (dgpu_power_limit * 100) /
+ powerRatio;
+ }
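+ /* Boost is the percentage by which average power exceeds
+ * the (scaled) limit, capped at 100. */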
+ if (metrics_v4->AverageApuSocketPower > apu_power_limit &&
+ apu_power_limit != 0) {
+ apu_boost = ((metrics_v4->AverageApuSocketPower -
+ apu_power_limit) * 100) /
+ apu_power_limit;
+ if (apu_boost > 100)
+ apu_boost = 100;
+ }
+
+ if (metrics_v4->AverageSocketPower > dgpu_power_limit &&
+ dgpu_power_limit != 0) {
+ dgpu_boost = ((metrics_v4->AverageSocketPower -
+ dgpu_power_limit) * 100) /
+ dgpu_power_limit;
+ if (dgpu_boost > 100)
+ dgpu_boost = 100;
+ }
+
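+ /* Power shifts in one direction at a time; report only the
+ * larger of the two boosts and zero the other. */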
+ if (dgpu_boost >= apu_boost)
+ apu_boost = 0;
+ else
+ dgpu_boost = 0;
+ }
+ *apu_percent = apu_boost;
+ *dgpu_percent = dgpu_boost;
+}
+
static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
@@ -600,6 +696,8 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
bool use_metrics_v3 = false;
uint16_t average_gfx_activity;
int ret = 0;
+ uint32_t apu_percent = 0;
+ uint32_t dgpu_percent = 0;
if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4900))
@@ -738,6 +836,15 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
/* Only supported in 0x3A5300+, metrics_v3 requires 0x3A4900+ */
*value = use_metrics_v3 ? metrics_v3->PublicSerialNumLower32 : 0;
break;
+ case METRICS_SS_APU_SHARE:
+ sienna_cichlid_get_smartshift_power_percentage(smu, &apu_percent, &dgpu_percent);
+ *value = apu_percent;
+ break;
+ case METRICS_SS_DGPU_SHARE:
+ sienna_cichlid_get_smartshift_power_percentage(smu, &apu_percent, &dgpu_percent);
+ *value = dgpu_percent;
+ break;
+
default:
*value = UINT_MAX;
break;
@@ -1728,6 +1835,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
{
int ret = 0;
uint16_t *temp;
+ struct amdgpu_device *adev = smu->adev;
if(!data || !size)
return -EINVAL;
@@ -1788,6 +1896,22 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_SS_APU_SHARE:
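+ /* SmartShift data is not reported on SMU 11.0.7 */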
+ if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_SS_APU_SHARE, (uint32_t *)data);
+ *size = 4;
+ } else
+ ret = -EOPNOTSUPP;
+ break;
+ case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
+ if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) {
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_SS_DGPU_SHARE, (uint32_t *)data);
+ *size = 4;
+ } else
+ ret = -EOPNOTSUPP;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1907,43 +2031,6 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
-static int sienna_cichlid_get_power_limit(struct smu_context *smu,
- uint32_t *current_power_limit,
- uint32_t *default_power_limit,
- uint32_t *max_power_limit)
-{
- struct smu_11_0_7_powerplay_table *powerplay_table =
- (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent;
- uint16_t *table_member;
-
- GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
-
- if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
- power_limit =
- table_member[PPT_THROTTLER_PPT0];
- }
-
- if (current_power_limit)
- *current_power_limit = power_limit;
- if (default_power_limit)
- *default_power_limit = power_limit;
-
- if (max_power_limit) {
- if (smu->od_enabled) {
- od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-
- dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
-
- power_limit *= (100 + od_percent);
- power_limit /= 100;
- }
- *max_power_limit = power_limit;
- }
-
- return 0;
-}
-
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
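
For context, the sensors added here are read through
amdgpu_dpm_read_sensor(). Below is a sketch of a sysfs-style consumer,
mirroring the pattern of the existing smartshift attributes in
amdgpu_pm.c; the handler name is illustrative and runtime-PM/error
handling is omitted for brevity.

static ssize_t ss_apu_share_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t share = 0;
	int size = sizeof(share);
	int r;

	/* Returns -EOPNOTSUPP on SMU 11.0.7, where SmartShift
	 * data is not available. */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
				   (void *)&share, &size);
	if (r)
		return r;

	return sysfs_emit(buf, "%u\n", share);
}
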
--
2.25.1