[PATCH 1/2] drm/amd/pm: update the gpu metrics data retrieving for Sienna Cichlid
Lazar, Lijo
lijo.lazar at amd.com
Wed Jun 30 03:51:58 UTC 2021
On 6/30/2021 8:56 AM, Quan, Evan wrote:
> [AMD Official Use Only]
>
>
>
>> -----Original Message-----
>> From: Lazar, Lijo <Lijo.Lazar at amd.com>
>> Sent: Tuesday, June 29, 2021 9:38 PM
>> To: Quan, Evan <Evan.Quan at amd.com>; amd-gfx at lists.freedesktop.org
>> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
>> Subject: Re: [PATCH 1/2] drm/amd/pm: update the gpu metrics data
>> retrieving for Sienna Cichlid
>>
>>
>>
>> On 6/25/2021 1:42 PM, Evan Quan wrote:
>>> Due to the structure layout change: "uint32_t ThrottlerStatus" -> "uint8_t ThrottlingPercentage[THROTTLER_COUNT]".
>>>
>>> Change-Id: Id5c148b0584d972ae73fb9d7347a312944cec13d
>>> Signed-off-by: Evan Quan <evan.quan at amd.com>
>>> ---
>>> .../pm/inc/smu11_driver_if_sienna_cichlid.h | 63 ++++-
>>> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 234 ++++++++++++------
>>> 2 files changed, 222 insertions(+), 75 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
>>> index 61c87c39be80..0b916a1933df 100644
>>> --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
>>> +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
>>> @@ -211,6 +211,7 @@ typedef enum {
>>> #define THROTTLER_FIT_BIT 17
>>> #define THROTTLER_PPM_BIT 18
>>> #define THROTTLER_APCC_BIT 19
>>> +#define THROTTLER_COUNT 20
>>>
>>> // FW DState Features Control Bits
>>> // FW DState Features Control Bits
>>> @@ -1406,7 +1407,67 @@ typedef struct {
>>> } SmuMetrics_t;
>>>
>>> typedef struct {
>>> - SmuMetrics_t SmuMetrics;
>>> + uint32_t CurrClock[PPCLK_COUNT];
>>> +
>>> + uint16_t AverageGfxclkFrequencyPreDs;
>>> + uint16_t AverageGfxclkFrequencyPostDs;
>>> + uint16_t AverageFclkFrequencyPreDs;
>>> + uint16_t AverageFclkFrequencyPostDs;
>>> + uint16_t AverageUclkFrequencyPreDs ;
>>> + uint16_t AverageUclkFrequencyPostDs ;
>>> +
>>> +
>>> + uint16_t AverageGfxActivity ;
>>> + uint16_t AverageUclkActivity ;
>>> + uint8_t CurrSocVoltageOffset ;
>>> + uint8_t CurrGfxVoltageOffset ;
>>> + uint8_t CurrMemVidOffset ;
>>> + uint8_t Padding8 ;
>>> + uint16_t AverageSocketPower ;
>>> + uint16_t TemperatureEdge ;
>>> + uint16_t TemperatureHotspot ;
>>> + uint16_t TemperatureMem ;
>>> + uint16_t TemperatureVrGfx ;
>>> + uint16_t TemperatureVrMem0 ;
>>> + uint16_t TemperatureVrMem1 ;
>>> + uint16_t TemperatureVrSoc ;
>>> + uint16_t TemperatureLiquid0 ;
>>> + uint16_t TemperatureLiquid1 ;
>>> + uint16_t TemperaturePlx ;
>>> + uint16_t Padding16 ;
>>> + uint32_t AccCnt ;
>>> + uint8_t ThrottlingPercentage[THROTTLER_COUNT];
>>> +
>>> +
>>> + uint8_t LinkDpmLevel;
>>> + uint8_t CurrFanPwm;
>>> + uint16_t CurrFanSpeed;
>>> +
>>> + //BACO metrics, PMFW-1721
>>> + //metrics for D3hot entry/exit and driver ARM msgs
>>> + uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
>>> + uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
>>> + uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
>>> +
>>> + //PMFW-4362
>>> + uint32_t EnergyAccumulator;
>>> + uint16_t AverageVclk0Frequency ;
>>> + uint16_t AverageDclk0Frequency ;
>>> + uint16_t AverageVclk1Frequency ;
>>> + uint16_t AverageDclk1Frequency ;
>>> + uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
>>> + uint8_t PcieRate ;
>>> + uint8_t PcieWidth ;
>>> + uint16_t AverageGfxclkFrequencyTarget;
>>> + uint16_t Padding16_2;
>>> +
>>> +} SmuMetrics_V2_t;
>>> +
>>> +typedef struct {
>>> + union {
>>> + SmuMetrics_t SmuMetrics;
>>> + SmuMetrics_V2_t SmuMetrics_V2;
>>> + };
>>> uint32_t Spare[1];
>>>
>>> // Padding - ignore
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> index 0c3407025eb2..f882c6756bf0 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> @@ -80,6 +80,13 @@
>>> (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
>>> } while(0)
>>>
>>> +#define GET_METRICS_MEMBER(field, member) do { \
>>> + if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && (smu->smc_fw_version >= 0x3A4300)) \
>>> + (*member) = ((void *)(&(((SmuMetricsExternal_t *)(smu->smu_table.metrics_table))->SmuMetrics_V2)) + offsetof(SmuMetrics_V2_t, field)); \
>>> + else \
>>> + (*member) = ((void *)(&(((SmuMetricsExternal_t *)(smu->smu_table.metrics_table))->SmuMetrics)) + offsetof(SmuMetrics_t, field)); \
>>> +} while(0)
>>> +
>>> static int get_table_size(struct smu_context *smu)
>>> {
>>> if (smu->adev->asic_type == CHIP_BEIGE_GOBY)
>>> @@ -489,13 +496,33 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
>>> return -ENOMEM;
>>> }
>>>
>>> +static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
>>> +{
>>> + struct smu_table_context *smu_table= &smu->smu_table;
>>> + SmuMetricsExternal_t *metrics_ext =
>>> + (SmuMetricsExternal_t *)(smu_table->metrics_table);
>>> + uint32_t throttler_status = 0;
>>> + int i;
>>> +
>>> + if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
>>> + (smu->smc_fw_version >= 0x3A4300)) {
>>> + for (i = 0; i < THROTTLER_COUNT; i++) {
>>> + if (metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i])
>>> + throttler_status |= 1U << i;
>>> + }
>>> + } else {
>>> + throttler_status = metrics_ext->SmuMetrics.ThrottlerStatus;
>>> + }
>>> +
>>> + return throttler_status;
>>> +}
>>> +
>>> static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
>>> MetricsMember_t member,
>>> uint32_t *value)
>>> {
>>> - struct smu_table_context *smu_table= &smu->smu_table;
>>> - SmuMetrics_t *metrics =
>>> - &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
>>> + uint32_t *data_u32;
>>> + uint16_t *data_u16;
>>> int ret = 0;
>>>
>>> mutex_lock(&smu->metrics_lock);
>>> @@ -510,78 +537,100 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
>>>
>>> switch (member) {
>>> case METRICS_CURR_GFXCLK:
>>> - *value = metrics->CurrClock[PPCLK_GFXCLK];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>
>> One problem with this style is the need to track the datatype of each field.
>> Why not use the old style?
>>
>> metricsv1? metricsv1->field : metricsv2->field;
> [Quan, Evan] With that, there will be too many "if{}else{}" blocks. Also, I see there seem to be some new changes coming for the metrics table. I'm afraid the situation will get worse in the future.
> With this macro way, we can keep the code clean and tidy.
I was talking about something like this, not with if..else. v1 or v2 is
assigned to be non-null based on the FW version check:

	*value = metricsv1 ? metricsv1->CurrClock[PPCLK_GFXCLK] :
			metricsv2->CurrClock[PPCLK_GFXCLK];

The number of condition checks is the same as with the macro (the macro
only hides them, and this one could probably also be hidden behind a
macro). The problem is that if a newer data structure changes the
datatype of a field, the current macro won't be reliable. Anyway, if
more changes are expected in the metrics table, let's wait.
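
To make the idea concrete, a rough sketch (the local names v1, v2 and
ext are illustrative only, not part of the patch):

	/* Resolve the metrics version once, based on the PMFW version;
	 * exactly one of the two pointers stays non-NULL.
	 */
	SmuMetrics_t *v1 = NULL;
	SmuMetrics_V2_t *v2 = NULL;
	SmuMetricsExternal_t *ext =
		(SmuMetricsExternal_t *)(smu->smu_table.metrics_table);

	if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
	    (smu->smc_fw_version >= 0x3A4300))
		v2 = &ext->SmuMetrics_V2;
	else
		v1 = &ext->SmuMetrics;

	/* Each access then goes through a properly typed struct member,
	 * so a datatype change in a future version is caught by the
	 * compiler instead of being hidden behind a void-pointer offset.
	 */
	*value = v1 ? v1->CurrClock[PPCLK_GFXCLK] :
		      v2->CurrClock[PPCLK_GFXCLK];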
Thanks,
Lijo
>
> BR
> Evan
>>
>> Thanks,
>> Lijo
>>
>>> + *value = data_u32[PPCLK_GFXCLK];
>>> break;
>>> case METRICS_CURR_SOCCLK:
>>> - *value = metrics->CurrClock[PPCLK_SOCCLK];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_SOCCLK];
>>> break;
>>> case METRICS_CURR_UCLK:
>>> - *value = metrics->CurrClock[PPCLK_UCLK];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_UCLK];
>>> break;
>>> case METRICS_CURR_VCLK:
>>> - *value = metrics->CurrClock[PPCLK_VCLK_0];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_VCLK_0];
>>> break;
>>> case METRICS_CURR_VCLK1:
>>> - *value = metrics->CurrClock[PPCLK_VCLK_1];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_VCLK_1];
>>> break;
>>> case METRICS_CURR_DCLK:
>>> - *value = metrics->CurrClock[PPCLK_DCLK_0];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_DCLK_0];
>>> break;
>>> case METRICS_CURR_DCLK1:
>>> - *value = metrics->CurrClock[PPCLK_DCLK_1];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_DCLK_1];
>>> break;
>>> case METRICS_CURR_DCEFCLK:
>>> - *value = metrics->CurrClock[PPCLK_DCEFCLK];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_DCEFCLK];
>>> break;
>>> case METRICS_CURR_FCLK:
>>> - *value = metrics->CurrClock[PPCLK_FCLK];
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + *value = data_u32[PPCLK_FCLK];
>>> break;
>>> case METRICS_AVERAGE_GFXCLK:
>>> - if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
>>> - *value = metrics->AverageGfxclkFrequencyPostDs;
>>> + GET_METRICS_MEMBER(AverageGfxActivity, &data_u16);
>>> + if (*data_u16 <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
>>> + GET_METRICS_MEMBER(AverageGfxclkFrequencyPostDs, &data_u16);
>>> else
>>> - *value = metrics->AverageGfxclkFrequencyPreDs;
>>> + GET_METRICS_MEMBER(AverageGfxclkFrequencyPreDs, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> case METRICS_AVERAGE_FCLK:
>>> - *value = metrics->AverageFclkFrequencyPostDs;
>>> + GET_METRICS_MEMBER(AverageFclkFrequencyPostDs, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> case METRICS_AVERAGE_UCLK:
>>> - *value = metrics->AverageUclkFrequencyPostDs;
>>> + GET_METRICS_MEMBER(AverageUclkFrequencyPostDs, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> case METRICS_AVERAGE_GFXACTIVITY:
>>> - *value = metrics->AverageGfxActivity;
>>> + GET_METRICS_MEMBER(AverageGfxActivity, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> case METRICS_AVERAGE_MEMACTIVITY:
>>> - *value = metrics->AverageUclkActivity;
>>> + GET_METRICS_MEMBER(AverageUclkActivity, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> case METRICS_AVERAGE_SOCKETPOWER:
>>> - *value = metrics->AverageSocketPower << 8;
>>> + GET_METRICS_MEMBER(AverageSocketPower, &data_u16);
>>> + *value = *data_u16 << 8;
>>> break;
>>> case METRICS_TEMPERATURE_EDGE:
>>> - *value = metrics->TemperatureEdge *
>>> + GET_METRICS_MEMBER(TemperatureEdge, &data_u16);
>>> + *value = *data_u16 *
>>> SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> break;
>>> case METRICS_TEMPERATURE_HOTSPOT:
>>> - *value = metrics->TemperatureHotspot *
>>> + GET_METRICS_MEMBER(TemperatureHotspot, &data_u16);
>>> + *value = *data_u16 *
>>> SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> break;
>>> case METRICS_TEMPERATURE_MEM:
>>> - *value = metrics->TemperatureMem *
>>> + GET_METRICS_MEMBER(TemperatureMem, &data_u16);
>>> + *value = *data_u16 *
>>> SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> break;
>>> case METRICS_TEMPERATURE_VRGFX:
>>> - *value = metrics->TemperatureVrGfx *
>>> + GET_METRICS_MEMBER(TemperatureVrGfx, &data_u16);
>>> + *value = *data_u16 *
>>> SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> break;
>>> case METRICS_TEMPERATURE_VRSOC:
>>> - *value = metrics->TemperatureVrSoc *
>>> + GET_METRICS_MEMBER(TemperatureVrSoc, &data_u16);
>>> + *value = *data_u16 *
>>> SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
>>> break;
>>> case METRICS_THROTTLER_STATUS:
>>> - *value = metrics->ThrottlerStatus;
>>> + *value = sienna_cichlid_get_throttler_status_locked(smu);
>>> break;
>>> case METRICS_CURR_FANSPEED:
>>> - *value = metrics->CurrFanSpeed;
>>> + GET_METRICS_MEMBER(CurrFanSpeed, &data_u16);
>>> + *value = *data_u16;
>>> break;
>>> default:
>>> *value = UINT_MAX;
>>> @@ -3564,68 +3613,103 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
>>> struct smu_table_context *smu_table = &smu->smu_table;
>>> struct gpu_metrics_v1_3 *gpu_metrics =
>>> (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
>>> - SmuMetricsExternal_t metrics_external;
>>> - SmuMetrics_t *metrics =
>>> - &(metrics_external.SmuMetrics);
>>> - struct amdgpu_device *adev = smu->adev;
>>> - uint32_t smu_version;
>>> + uint32_t *data_u32;
>>> + uint16_t *data_u16;
>>> + uint8_t *data_u8;
>>> int ret = 0;
>>>
>>> - ret = smu_cmn_get_metrics_table(smu,
>>> - &metrics_external,
>>> - true);
>>> - if (ret)
>>> + mutex_lock(&smu->metrics_lock);
>>> +
>>> + ret = smu_cmn_get_metrics_table_locked(smu,
>>> + NULL,
>>> + true);
>>> + if (ret) {
>>> + mutex_unlock(&smu->metrics_lock);
>>> return ret;
>>> + }
>>>
>>> smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
>>>
>>> - gpu_metrics->temperature_edge = metrics->TemperatureEdge;
>>> - gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
>>> - gpu_metrics->temperature_mem = metrics->TemperatureMem;
>>> - gpu_metrics->temperature_vrgfx = metrics->TemperatureVrGfx;
>>> - gpu_metrics->temperature_vrsoc = metrics->TemperatureVrSoc;
>>> - gpu_metrics->temperature_vrmem = metrics->TemperatureVrMem0;
>>> + GET_METRICS_MEMBER(TemperatureEdge, &data_u16);
>>> + gpu_metrics->temperature_edge = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(TemperatureHotspot, &data_u16);
>>> + gpu_metrics->temperature_hotspot = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(TemperatureMem, &data_u16);
>>> + gpu_metrics->temperature_mem = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(TemperatureVrGfx, &data_u16);
>>> + gpu_metrics->temperature_vrgfx = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(TemperatureVrSoc, &data_u16);
>>> + gpu_metrics->temperature_vrsoc = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(TemperatureVrMem0, &data_u16);
>>> + gpu_metrics->temperature_vrmem = *data_u16;
>>>
>>> - gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
>>> - gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
>>> - gpu_metrics->average_mm_activity = metrics->VcnActivityPercentage;
>>> + GET_METRICS_MEMBER(AverageGfxActivity, &data_u16);
>>> + gpu_metrics->average_gfx_activity = *data_u16;
>>>
>>> - gpu_metrics->average_socket_power = metrics->AverageSocketPower;
>>> - gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
>>> + GET_METRICS_MEMBER(AverageUclkActivity, &data_u16);
>>> + gpu_metrics->average_umc_activity = *data_u16;
>>>
>>> - if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
>>> - gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
>>> + GET_METRICS_MEMBER(VcnActivityPercentage, &data_u16);
>>> + gpu_metrics->average_mm_activity = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageSocketPower, &data_u16);
>>> + gpu_metrics->average_socket_power = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(EnergyAccumulator, &data_u32);
>>> + gpu_metrics->energy_accumulator = *data_u32;
>>> +
>>> + GET_METRICS_MEMBER(AverageGfxActivity, &data_u16);
>>> + if (*data_u16 <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
>>> + GET_METRICS_MEMBER(AverageGfxclkFrequencyPostDs, &data_u16);
>>> else
>>> - gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
>>> - gpu_metrics->average_uclk_frequency = metrics->AverageUclkFrequencyPostDs;
>>> - gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
>>> - gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
>>> - gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
>>> - gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
>>> -
>>> - gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
>>> - gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
>>> - gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
>>> - gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
>>> - gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
>>> - gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
>>> - gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
>>> -
>>> - gpu_metrics->throttle_status = metrics->ThrottlerStatus;
>>> + GET_METRICS_MEMBER(AverageGfxclkFrequencyPreDs, &data_u16);
>>> + gpu_metrics->average_gfxclk_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageUclkFrequencyPostDs, &data_u16);
>>> + gpu_metrics->average_uclk_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageVclk0Frequency, &data_u16);
>>> + gpu_metrics->average_vclk0_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageDclk0Frequency, &data_u16);
>>> + gpu_metrics->average_dclk0_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageVclk1Frequency, &data_u16);
>>> + gpu_metrics->average_vclk1_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(AverageDclk1Frequency, &data_u16);
>>> + gpu_metrics->average_dclk1_frequency = *data_u16;
>>> +
>>> + GET_METRICS_MEMBER(CurrClock, &data_u32);
>>> + gpu_metrics->current_gfxclk = data_u32[PPCLK_GFXCLK];
>>> + gpu_metrics->current_socclk = data_u32[PPCLK_SOCCLK];
>>> + gpu_metrics->current_uclk = data_u32[PPCLK_UCLK];
>>> + gpu_metrics->current_vclk0 = data_u32[PPCLK_VCLK_0];
>>> + gpu_metrics->current_dclk0 = data_u32[PPCLK_DCLK_0];
>>> + gpu_metrics->current_vclk1 = data_u32[PPCLK_VCLK_1];
>>> + gpu_metrics->current_dclk1 = data_u32[PPCLK_DCLK_1];
>>> +
>>> + gpu_metrics->throttle_status =
>>> + sienna_cichlid_get_throttler_status_locked(smu);
>>> gpu_metrics->indep_throttle_status =
>>> - smu_cmn_get_indep_throttler_status(metrics->ThrottlerStatus,
>>> + smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
>>> sienna_cichlid_throttler_map);
>>>
>>> - gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
>>> + GET_METRICS_MEMBER(CurrFanSpeed, &data_u16);
>>> + gpu_metrics->current_fan_speed = *data_u16;
>>>
>>> - ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
>>> - if (ret)
>>> - return ret;
>>> + if (((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && smu->smc_fw_version > 0x003A1E00) ||
>>> + ((smu->adev->asic_type == CHIP_NAVY_FLOUNDER) && smu->smc_fw_version > 0x00410400)) {
>>> + GET_METRICS_MEMBER(PcieWidth, &data_u8);
>>> + gpu_metrics->pcie_link_width = *data_u8;
>>>
>>> - if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu_version > 0x003A1E00) ||
>>> - ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu_version > 0x00410400)) {
>>> - gpu_metrics->pcie_link_width = metrics->PcieWidth;
>>> - gpu_metrics->pcie_link_speed = link_speed[metrics->PcieRate];
>>> + GET_METRICS_MEMBER(PcieRate, &data_u8);
>>> + gpu_metrics->pcie_link_speed = link_speed[*data_u8];
>>> } else {
>>> gpu_metrics->pcie_link_width =
>>> smu_v11_0_get_current_pcie_link_width(smu);
>>> @@ -3633,6 +3717,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
>>> smu_v11_0_get_current_pcie_link_speed(smu);
>>> }
>>>
>>> + mutex_unlock(&smu->metrics_lock);
>>> +
>>> gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
>>>
>>> *table = (void *)gpu_metrics;
>>>