[PATCH 2/2] drm/amd/pm: Fill max mem bw & total app clk counter
From: Asad Kamal <asad.kamal at amd.com>
Date: Mon Nov 25 14:38:26 UTC 2024
Fill max memory bandwidth and total app clock counter into metrics v1_7
Signed-off-by: Asad Kamal <asad.kamal at amd.com>
---
.../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 34 +++++++++++++++----
1 file changed, 28 insertions(+), 6 deletions(-)
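Reviewer note (kept below the --- so it stays out of the commit message): the new
counters land in the gpu_metrics blob that amdgpu already exposes through sysfs, so
userspace should gate on the table revision before reading them. A minimal userspace
sketch follows; it is not part of this patch. It assumes card0 is the amdgpu device,
uses the /sys/class/drm/<card>/device/gpu_metrics file, and mirrors struct
metrics_table_header from kgd_pp_interface.h; the field names it mentions are taken
from this series.

/*
 * Illustrative userspace sketch (not part of this patch): read the
 * gpu_metrics blob and confirm it is v1.7 before trusting the fields
 * added in this series (mem_max_bandwidth, per-XCP
 * gfx_below_host_limit_acc). The card0 path is an assumption.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct metrics_table_header from kgd_pp_interface.h */
struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/gpu_metrics";
	unsigned char buf[8192];
	struct metrics_table_header hdr;
	FILE *f;
	size_t n;

	f = fopen(path, "rb");
	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < sizeof(hdr)) {
		fprintf(stderr, "short read from %s\n", path);
		return 1;
	}

	/* The header sits at the start of every gpu_metrics revision. */
	memcpy(&hdr, buf, sizeof(hdr));
	printf("gpu_metrics v%u.%u, %u bytes\n",
	       hdr.format_revision, hdr.content_revision, hdr.structure_size);

	if (hdr.format_revision == 1 && hdr.content_revision >= 7)
		printf("v1_7 fields from this series should be present\n");
	else
		printf("older table, new fields not available\n");

	return 0;
}

Parsing the full v1_7 payload would additionally need struct gpu_metrics_v1_7 from
the kernel headers; the sketch above only validates the format/content revision.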
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ab3c93ddce46..e77fa9dbf404 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -119,6 +119,18 @@ static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
}
}
+static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ return smu->smc_fw_version >= 0x557900;
+ case IP_VERSION(13, 0, 14):
+ return smu->smc_fw_version >= 0x05551000;
+ default:
+ return false;
+ }
+}
+
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -2318,7 +2330,7 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
- bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
+ bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst, xcp_stats_xcd_fill;
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_7 *gpu_metrics =
(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
@@ -2356,6 +2368,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->average_umc_activity =
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
+
gpu_metrics->curr_socket_power =
SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
/* Energy counter reported in 15.259uJ (2^-16) units */
@@ -2465,6 +2480,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
(smu->smc_fw_version >= 0x05550B00);
per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+ xcp_stats_xcd_fill = per_inst || smu_v13_0_6_is_blw_host_limit_available(smu);
for_each_xcp(adev->xcp_mgr, xcp, i) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2485,15 +2501,21 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
}
- if (per_inst) {
+ if (xcp_stats_xcd_fill) {
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
idx = 0;
for_each_inst(k, inst_mask) {
inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+ if (per_inst) {
+ gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
+ gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+ }
+ if (smu_v13_0_6_is_blw_host_limit_available(smu))
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
+ [inst]);
idx++;
}
}
--
2.46.0