[PATCH] drm/amdgpu: fix sysfs_emit/sysfs_emit_at warnings
Yu, Lang
Lang.Yu at amd.com
Wed Sep 8 07:44:34 UTC 2021
>-----Original Message-----
>From: Lazar, Lijo <Lijo.Lazar at amd.com>
>Sent: Wednesday, September 8, 2021 3:36 PM
>To: Christian König <ckoenig.leichtzumerken at gmail.com>; Yu, Lang
><Lang.Yu at amd.com>; amd-gfx at lists.freedesktop.org
>Cc: Deucher, Alexander <Alexander.Deucher at amd.com>; Huang, Ray
><Ray.Huang at amd.com>; Tian Tao <tiantao6 at hisilicon.com>
>Subject: Re: [PATCH] drm/amdgpu: fix sysfs_emit/sysfs_emit_at warnings
>
>
>
>On 9/8/2021 12:07 PM, Christian König wrote:
>> On 08.09.21 at 07:56, Lang Yu wrote:
>>> sysfs_emit and sysfs_emit_at require a page-boundary-aligned buf
>>> address. Make them happy!
>>>
>>> Warning Log:
>>> [ 492.545174] invalid sysfs_emit_at: buf:00000000f19bdfde at:0
>>> [ 492.546416] WARNING: CPU: 7 PID: 1304 at fs/sysfs/file.c:765 sysfs_emit_at+0x4a/0xa0
>>> [ 492.654805] Call Trace:
>>> ? smu_cmn_get_metrics_table+0x40/0x50 [amdgpu]
>>> [ 492.656780] vangogh_print_clk_levels+0x369/0x410 [amdgpu]
>>> [ 492.658245] vangogh_common_print_clk_levels+0x77/0x80 [amdgpu]
>>> [ 492.659733] ? preempt_schedule_common+0x18/0x30
>>> [ 492.660713] smu_print_ppclk_levels+0x65/0x90 [amdgpu]
>>> [ 492.662107] amdgpu_get_pp_od_clk_voltage+0x13d/0x190 [amdgpu]
>>> [ 492.663620] dev_attr_show+0x1d/0x40
>>
>> Mhm, that at least partially doesn't look like the right approach to me.
>>
>> Why do we have string printing and sysfs code in the hardware version
>> specific backend in the first place?
>>
>
>This is a callback meant for printing ASIC-specific information to the sysfs node. The
>buffer passed in the sysfs read is handed as-is to the callback API.
>
>> That stuff needs to be implemented for each hardware generation and is
>> now cluttered with sysfs buffer offset calculations.
>>
>
>Looks like the warning happened because of this usage.
>
>	size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
>	size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
>	size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
>	size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
>	size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
>	size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
>
>
[Yu, Lang]
Yes. So it would be fine to just fix the caller amdgpu_get_pp_od_clk_voltage like the following:
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size, offset;
	int ret, i;
	char temp_buf[512];
	const enum pp_clock_type clock_type[] = {OD_SCLK, OD_MCLK, OD_VDDC_CURVE,
						 OD_VDDGFX_OFFSET, OD_RANGE, OD_CCLK};

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	offset = 0;
	if (adev->powerplay.pp_funcs->print_clock_levels) {
		for (i = 0; i < ARRAY_SIZE(clock_type); i++) {
			/* each call gets the page-aligned buf; accumulate the output in temp_buf */
			size = amdgpu_dpm_print_clock_levels(adev, clock_type[i], buf);
			/* bound by temp_buf, which is also well within PAGE_SIZE */
			if (offset + size > sizeof(temp_buf))
				break;
			memcpy(temp_buf + offset, buf, size);
			offset += size;
		}
		memcpy(buf, temp_buf, offset);
		size = offset;
	} else {
		size = sysfs_emit(buf, "\n");
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
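With this, every amdgpu_dpm_print_clock_levels() call still receives the original page-aligned buf, and the accumulated output is copied back once at the end, so the per-ASIC backends never see an offset pointer. sysfs_emit_at() WARNs whenever offset_in_page(buf) is non-zero (or "at" is out of range), which is exactly what the buf+size calls above run into. A minimal sketch of a show() callback that keeps sysfs_emit_at() happy (emit_example_show() is a made-up name, not driver code):

	/* Hypothetical show() callback: every sysfs_emit_at() call uses the
	 * page-aligned 'buf' plus an offset, never an advanced pointer. */
	static ssize_t emit_example_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		int size = 0;

		size += sysfs_emit_at(buf, size, "first line\n");
		size += sysfs_emit_at(buf, size, "second line\n");
		/* sysfs_emit_at(buf + size, 0, ...) would trip the
		 * "invalid sysfs_emit_at" warning above, because
		 * buf + size is no longer page aligned. */
		return size;
	}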
Regards,
Lang
>
>> Regards,
>> Christian.
>>
>>>
>>> Signed-off-by: Lang Yu <lang.yu at amd.com>
>>> ---
>>> drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 9 +++++++--
>>> drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 5 ++++-
>>> .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 5 ++++-
>>> drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 15 +++++++++------
>>> drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 3 +++
>>> .../gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 13 +++++++++----
>>> .../gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 7 +++++--
>>> 7 files changed, 41 insertions(+), 16 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
>>> b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
>>> index e343cc218990..53185fe96d83 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
>>> @@ -771,8 +771,13 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
>>> struct smu_11_0_dpm_context *dpm_context = NULL;
>>> uint32_t gen_speed, lane_width;
>>> - if (amdgpu_ras_intr_triggered())
>>> - return sysfs_emit(buf, "unavailable\n");
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> + if (amdgpu_ras_intr_triggered()) {
>>> + size += sysfs_emit_at(buf, size, "unavailable\n");
>>> + return size;
>>> + }
>>> dpm_context = smu_dpm->dpm_context;
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>>> index 4c81989b8162..5490e8e66e14 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>>> @@ -1279,6 +1279,9 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>>> struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
>>> uint32_t min_value, max_value;
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> switch (clk_type) {
>>> case SMU_GFXCLK:
>>> case SMU_SCLK:
>>> @@ -1392,7 +1395,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>>> case SMU_OD_RANGE:
>>> if (!smu->od_enabled || !od_table || !od_settings)
>>> break;
>>> - size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
>>> if (navi10_od_feature_is_supported(od_settings,
>>> SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
>>> navi10_od_setting_get_range(od_settings,
>>> SMU_11_0_ODSETTING_GFXCLKFMIN,
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> index 5e292c3f5050..817ad6de3854 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
>>> @@ -1058,6 +1058,9 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
>>> uint32_t min_value, max_value;
>>> uint32_t smu_version;
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> switch (clk_type) {
>>> case SMU_GFXCLK:
>>> case SMU_SCLK:
>>> @@ -1180,7 +1183,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
>>> if (!smu->od_enabled || !od_table || !od_settings)
>>> break;
>>> - size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
>>> if (sienna_cichlid_is_od_feature_supported(od_settings,
>>> SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
>>> sienna_cichlid_get_od_setting_range(od_settings,
>>> SMU_11_0_7_ODSETTING_GFXCLKFMIN,
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
>>> index 3a3421452e57..c7842c69b570 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
>>> @@ -592,7 +592,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
>>> switch (clk_type) {
>>> case SMU_OD_SCLK:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> - size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
>>> size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
>>> (smu->gfx_actual_hard_min_freq > 0) ?
>>> smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
>>> size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
>>> @@ -601,7 +601,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
>>> break;
>>> case SMU_OD_CCLK:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> -			size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
>>> +			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
>>> size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
>>> (smu->cpu_actual_soft_min_freq > 0) ?
>>> smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
>>> size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
>>> @@ -610,7 +610,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
>>> break;
>>> case SMU_OD_RANGE:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> - size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
>>> size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
>>> smu->gfx_default_hard_min_freq,
>>> smu->gfx_default_soft_max_freq);
>>> size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
>>> @@ -682,6 +682,9 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
>>> uint32_t cur_value = 0, value = 0, count = 0;
>>> bool cur_value_match_level = false;
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> memset(&metrics, 0, sizeof(metrics));
>>> ret = smu_cmn_get_metrics_table(smu, &metrics, false);
>>> @@ -691,7 +694,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
>>> switch (clk_type) {
>>> case SMU_OD_SCLK:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> - size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
>>> size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
>>> (smu->gfx_actual_hard_min_freq > 0) ?
>>> smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
>>> size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
>>> @@ -700,7 +703,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
>>> break;
>>> case SMU_OD_CCLK:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> -			size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
>>> +			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
>>> size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
>>> (smu->cpu_actual_soft_min_freq > 0) ?
>>> smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
>>> size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
>>> @@ -709,7 +712,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
>>> break;
>>> case SMU_OD_RANGE:
>>> if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
>>> - size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
>>> size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
>>> smu->gfx_default_hard_min_freq,
>>> smu->gfx_default_soft_max_freq);
>>> size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
>>> index 5aa175e12a78..86e7978b6d63 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
>>> @@ -491,6 +491,9 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>>> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>>> bool cur_value_match_level = false;
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> memset(&metrics, 0, sizeof(metrics));
>>> ret = smu_cmn_get_metrics_table(smu, &metrics, false);
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
>>> index ab652028e003..6349f27e9efc 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
>>> @@ -733,15 +733,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
>>> uint32_t freq_values[3] = {0};
>>> uint32_t min_clk, max_clk;
>>> - if (amdgpu_ras_intr_triggered())
>>> - return sysfs_emit(buf, "unavailable\n");
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> + if (amdgpu_ras_intr_triggered()) {
>>> + size += sysfs_emit_at(buf, size, "unavailable\n");
>>> + return size;
>>> + }
>>> dpm_context = smu_dpm->dpm_context;
>>> switch (type) {
>>> case SMU_OD_SCLK:
>>> - size = sysfs_emit(buf, "%s:\n", "GFXCLK");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
>>> fallthrough;
>>> case SMU_SCLK:
>>> ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
>>> @@ -795,7 +800,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
>>> break;
>>> case SMU_OD_MCLK:
>>> - size = sysfs_emit(buf, "%s:\n", "MCLK");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
>>> fallthrough;
>>> case SMU_MCLK:
>>> ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
>>> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
>>> index 627ba2eec7fd..3b21d9143b96 100644
>>> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
>>> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
>>> @@ -1052,16 +1052,19 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
>>> int i, size = 0, ret = 0;
>>> uint32_t cur_value = 0, value = 0, count = 0;
>>> + size = offset_in_page(buf);
>>> + buf = buf - size;
>>> +
>>> switch (clk_type) {
>>> case SMU_OD_SCLK:
>>> - size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
>>> size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
>>> (smu->gfx_actual_hard_min_freq > 0) ?
>>> smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
>>> size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
>>> (smu->gfx_actual_soft_max_freq > 0) ?
>>> smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
>>> break;
>>> case SMU_OD_RANGE:
>>> - size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
>>> + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
>>> size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
>>> smu->gfx_default_hard_min_freq,
>>> smu->gfx_default_soft_max_freq);
>>> break;
>>