[PATCH 5/6] drm/amd/pm: Use gpu_metrics_v1_8 for smu_v13_0_6
Lazar, Lijo
lijo.lazar at amd.com
Wed Mar 26 09:22:18 UTC 2025
On 3/26/2025 1:43 PM, Asad Kamal wrote:
> Use gpu_metrics_v1_8 for smu_v13_0_6 to fill metrics data
>
> v2: Move exposing caps to separate patch, move smu_v13.0.12 gpu metrics
> 1.8 usage to separate patch (Lijo)
>
> Signed-off-by: Asad Kamal <asad.kamal at amd.com>
Patches 5 and 6 are:
Reviewed-by: Lijo Lazar <lijo.lazar at amd.com>
Thanks,
Lijo
> ---
> .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 22 +++++++++++++++----
> .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 1 +
> 2 files changed, 19 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
> index f8489ebbd2ad..4f767948b172 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
> @@ -507,7 +507,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
> return -ENOMEM;
> smu_table->metrics_time = 0;
>
> - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
> + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
> smu_table->gpu_metrics_table =
> kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
> if (!smu_table->gpu_metrics_table) {
> @@ -2468,8 +2468,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
> static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
> {
> struct smu_table_context *smu_table = &smu->smu_table;
> - struct gpu_metrics_v1_7 *gpu_metrics =
> - (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
> + struct gpu_metrics_v1_8 *gpu_metrics =
> + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
> int version = smu_v13_0_6_get_metrics_version(smu);
> int ret = 0, xcc_id, inst, i, j, k, idx;
> struct amdgpu_device *adev = smu->adev;
> @@ -2495,7 +2495,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
> metrics_v1 = (MetricsTableV1_t *)metrics_v0;
> metrics_v2 = (MetricsTableV2_t *)metrics_v0;
>
> - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
> + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
>
> gpu_metrics->temperature_hotspot =
> SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
> @@ -2648,6 +2648,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
> gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
> SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
> version)[inst]);
> + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
> + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
> + SMUQ10_ROUND
> + (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
> + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
> + SMUQ10_ROUND
> + (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
> + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
> + SMUQ10_ROUND
> + (metrics_v0->GfxclkLowUtilizationAcc[inst]);
> + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
> + SMUQ10_ROUND
> + (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
> + }
> idx++;
> }
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
> index f28b1401fc76..2a8f42dbd9f5 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
> @@ -65,6 +65,7 @@ enum smu_v13_0_6_caps {
> SMU_CAP(ACA_SYND),
> SMU_CAP(SDMA_RESET),
> SMU_CAP(STATIC_METRICS),
> + SMU_CAP(HST_LIMIT_METRICS),
> SMU_CAP(ALL),
> };
>
More information about the amd-gfx
mailing list