<div dir="ltr">This is why I advocated for the sysfs output to be either standard packed or serialized. It was a hack as it is anyways.</div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Mon, Feb 22, 2021 at 4:46 PM Alex Deucher <<a href="mailto:alexdeucher@gmail.com">alexdeucher@gmail.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <<a href="mailto:evan.quan@amd.com" target="_blank">evan.quan@amd.com</a>> wrote:<br>

On Mon, Feb 22, 2021 at 4:46 PM Alex Deucher <alexdeucher@gmail.com> wrote:
> On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <evan.quan@amd.com> wrote:
> >
> > To make sure they are naturally aligned.
> >
> > Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
> > Signed-off-by: Evan Quan <evan.quan@amd.com>
>
> Won't this break existing apps that query this info? We need to make
> sure umr and rocm-smi can handle this.
>
> Alex
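
Since this change widens the header fields themselves, an old reader
cannot even trust format_revision: it would parse the new blob with the
old field widths. About the only robust heuristic a tool has is the
size of the gpu_metrics blob it read back. A sketch of what that looks
like on the consumer side (illustrative only; fd and parse_v1_0() are
hypothetical):

  uint8_t buf[4096];
  ssize_t len = read(fd, buf, sizeof(buf));  /* the gpu_metrics blob */

  /* Dispatch on blob size, since the header layout is not stable. */
  if (len == (ssize_t)sizeof(struct gpu_metrics_v1_0))
          parse_v1_0(buf);
  else
          fprintf(stderr, "unknown gpu_metrics layout: %zd bytes\n", len);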

> > ---
> >  drivers/gpu/drm/amd/include/kgd_pp_interface.h        | 11 ++++++-----
> >  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h                |  4 ++--
> >  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c |  8 ++++----
> >  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |  8 ++++----
> >  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c        |  8 ++++----
> >  5 files changed, 20 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > index 828513412e20..3a8f64e1a10c 100644
> > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > @@ -332,9 +332,9 @@ struct amd_pm_funcs {
> >  };
> >
> >  struct metrics_table_header {
> > -        uint16_t structure_size;
> > -        uint8_t format_revision;
> > -        uint8_t content_revision;
> > +        uint32_t structure_size;
> > +        uint16_t format_revision;
> > +        uint16_t content_revision;
> >  };
> >
> >  struct gpu_metrics_v1_0 {
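
The widening is the whole alignment story: the old header is 4 bytes
(2 + 1 + 1), so any 64-bit member placed right after it would sit at a
compiler-padded offset; the new header is 8 bytes (4 + 2 + 2), so what
follows starts naturally aligned with no hidden padding. A build-time
check along these lines would pin that down (sketch; metrics_v1 is a
hypothetical stand-in for gpu_metrics_v1_0):

  #include <stddef.h>
  #include <stdint.h>

  struct metrics_v1 {
          struct metrics_table_header header;  /* 8 bytes after this patch */
          uint64_t system_clock_counter;       /* any 64-bit member */
  };

  /*
   * "Naturally aligned" here means no compiler-inserted padding: the
   * member starts exactly where the header ends. With the old 4-byte
   * header the compiler pads to offset 8 and this assert fires.
   */
  _Static_assert(offsetof(struct metrics_v1, system_clock_counter)
                 == sizeof(struct metrics_table_header),
                 "hidden padding leaked into the sysfs ABI");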

> > @@ -385,8 +385,9 @@ struct gpu_metrics_v1_0 {
> >          uint16_t current_fan_speed;
> >
> >          /* Link width/speed */
> > -        uint8_t pcie_link_width;
> > -        uint8_t pcie_link_speed; // in 0.1 GT/s
> > +        uint16_t pcie_link_width;
> > +        uint16_t pcie_link_speed; // in 0.1 GT/s
> > +        uint8_t padding[2];
> >  };
> >
> >  struct gpu_metrics_v2_0 {
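
The explicit padding[2] presumably rounds the struct back to a 4-byte
multiple after the two fields grow; either way, spelling the padding
out beats letting the compiler insert it silently. A size pin in the
same spirit (sketch; 120 is a made-up number, not the real sizeof):

  /* Catch any later edit that silently changes the blob layout. */
  _Static_assert(sizeof(struct gpu_metrics_v1_0) == 120,
                 "gpu_metrics_v1_0 ABI size changed");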

> > diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > index 50dd1529b994..f4e7a330f67f 100644
> > --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > @@ -284,11 +284,11 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
> >
> >  int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
> >
> > -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
> > +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
> >
> >  int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
> >
> > -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
> > +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
> >
> >  int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
> >                                bool enablement);
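
The prototype changes just track the struct fields: the getters now
return exactly the type that gets stored. Presumably the fill path
looks something like this (sketch; the real call sites are in the
per-ASIC ppt code):

  gpu_metrics->pcie_link_width =
          smu_v11_0_get_current_pcie_link_width(smu);
  gpu_metrics->pcie_link_speed =
          smu_v11_0_get_current_pcie_link_speed(smu);

Note the side effect: an unsigned return can no longer carry a negative
error code, so these getters have to be infallible.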

> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > index c0753029a8e2..95e905d8418d 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -52,8 +52,8 @@
> >
> >  #define LINK_WIDTH_MAX 6
> >  #define LINK_SPEED_MAX 3
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
> >                  enum pp_clock_type type, uint32_t mask);
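
For reference, these tables decode the PCIe register fields: link_width
maps the LC_LINK_WIDTH_RD level to a lane count (x0..x16), and
link_speed maps the data-rate level to units of 0.1 GT/s, i.e. 2.5,
5.0, 8.0 and 16.0 GT/s for PCIe Gen1 through Gen4. The lookup amounts
to a clamped index (sketch, reusing the definitions above):

  /* speed_level comes from the PCIe LC_SPEED_CNTL register field. */
  static uint16_t decode_link_speed(uint32_t speed_level)
  {
          return link_speed[speed_level > LINK_SPEED_MAX ?
                            LINK_SPEED_MAX : speed_level];
  }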

> > @@ -2117,7 +2117,7 @@ static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
> >                  >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> >  {
> >          uint32_t width_level;
> >
> > @@ -2137,7 +2137,7 @@ static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
> >                  >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> >  {
> >          uint32_t speed_level;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > index 87811b005b85..3d462405b572 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > @@ -57,8 +57,8 @@
> >
> >  #define LINK_WIDTH_MAX 6
> >  #define LINK_SPEED_MAX 3
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
> >  {
> > @@ -3279,7 +3279,7 @@ static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
> >                  >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> >  {
> >          uint32_t width_level;
> >
> > @@ -3299,7 +3299,7 @@ static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
> >                  >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> >  {
> >          uint32_t speed_level;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > index 60ef63073ad4..86af9832ba9c 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > @@ -99,8 +99,8 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
> >  #define mmCG_THERMAL_STATUS_ARCT 0x90
> >  #define mmCG_THERMAL_STATUS_ARCT_BASE_IDX 0
> >
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  int smu_v11_0_init_microcode(struct smu_context *smu)
> >  {
> > @@ -2134,7 +2134,7 @@ int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
> >                  >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
> > +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
> >  {
> >          uint32_t width_level;
> >
> > @@ -2154,7 +2154,7 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
> >                  >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
> > +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
> >  {
> >          uint32_t speed_level;
> >
> > --
> > 2.29.0
> >