[PATCH v2 4/8] drm/amd/pm: Add navi1x throttler translation
Lazar, Lijo
Lijo.Lazar at amd.com
Mon May 31 05:12:37 UTC 2021
[AMD Official Use Only]
-----Original Message-----
From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> On Behalf Of Graham Sider
Sent: Saturday, May 29, 2021 1:28 AM
To: amd-gfx at lists.freedesktop.org
Cc: Kasiviswanathan, Harish <Harish.Kasiviswanathan at amd.com>; Sider, Graham <Graham.Sider at amd.com>; Sakhnovitch, Elena (Elen) <Elena.Sakhnovitch at amd.com>
Subject: [PATCH v2 4/8] drm/amd/pm: Add navi1x throttler translation
Perform dependent to independent throttle status translation for navi1x.
Signed-off-by: Graham Sider <Graham.Sider at amd.com>
---
.../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 56 +++++++++++++++++++
1 file changed, 56 insertions(+)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 78fe13183e8b..878ec698909c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -524,6 +524,54 @@ static int navi10_tables_init(struct smu_context *smu)
return -ENOMEM;
}
+static uint64_t navi1x_get_indep_throttler_status(
+ uint32_t dep_status)
+{
+ if (dep_status == 0)
+ return 0;
+
+ uint64_t indep_status = 0;
Extending Harish's comments - it's very unlikely that all, or even half, of these bits are set together. It may be better to walk only the bits that are actually set, something like:

	while ((bit = ffs(dep_status))) {
		indep_status |= 1ULL << mapping[bit - 1];
		dep_status &= ~(1U << (bit - 1));
	}

This will need a lookup table, but that should be fine.
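As a rough sketch of that approach (the table name, its layout and the handful of entries shown are only illustrative, not the proposed implementation - the real table would need one entry for every THROTTLER_* bit the firmware can report):

	static const uint8_t navi1x_throttler_map[] = {
		[THROTTLER_TEMP_EDGE_BIT]	= SMU_THROTTLER_TEMP_EDGE_BIT,
		[THROTTLER_TEMP_HOTSPOT_BIT]	= SMU_THROTTLER_TEMP_HOTSPOT_BIT,
		[THROTTLER_PPT0_BIT]		= SMU_THROTTLER_PPT0_BIT,
		/* ... one entry per dependent throttler bit ... */
	};

	static uint64_t navi1x_get_indep_throttler_status(uint32_t dep_status)
	{
		uint64_t indep_status = 0;
		int bit;

		/* Visit only the bits that are actually set. */
		while ((bit = ffs(dep_status))) {
			indep_status |= 1ULL << navi1x_throttler_map[bit - 1];
			dep_status &= ~(1U << (bit - 1));
		}

		return indep_status;
	}

This keeps the per-call work proportional to the number of active throttlers instead of the full list of mappings.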
Thanks,
Lijo
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_EDGE_BIT, THROTTLER_TEMP_EDGE_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_HOTSPOT_BIT, THROTTLER_TEMP_HOTSPOT_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_MEM_BIT, THROTTLER_TEMP_MEM_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_VR_GFX_BIT, THROTTLER_TEMP_VR_GFX_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_VR_MEM_BIT, THROTTLER_TEMP_VR_MEM0_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_VR_MEM_BIT, THROTTLER_TEMP_VR_MEM1_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_VR_SOC_BIT, THROTTLER_TEMP_VR_SOC_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_LIQUID_BIT, THROTTLER_TEMP_LIQUID0_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TEMP_LIQUID_BIT, THROTTLER_TEMP_LIQUID1_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TDC_GFX_BIT, THROTTLER_TDC_GFX_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_TDC_SOC_BIT, THROTTLER_TDC_SOC_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_PPT0_BIT, THROTTLER_PPT0_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_PPT1_BIT, THROTTLER_PPT1_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_PPT2_BIT, THROTTLER_PPT2_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_PPT3_BIT, THROTTLER_PPT3_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_FIT_BIT, THROTTLER_FIT_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_PPM_BIT, THROTTLER_PPM_BIT);
+ indep_status |= smu_u64_throttler_bit(dep_status,
+ SMU_THROTTLER_APCC_BIT, THROTTLER_APCC_BIT);
+
+ return indep_status;
+}
+
static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
@@ -2673,6 +2721,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ navi1x_get_indep_throttler_status(metrics.ThrottlerStatus);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2750,6 +2800,8 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ navi1x_get_indep_throttler_status(metrics.ThrottlerStatus);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2826,6 +2878,8 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ navi1x_get_indep_throttler_status(metrics.ThrottlerStatus);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
@@ -2908,6 +2962,8 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ navi1x_get_indep_throttler_status(metrics.ThrottlerStatus);
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
--
2.17.1
_______________________________________________
amd-gfx mailing list
amd-gfx at lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx