[PATCH 1/2] drm/amd/pm: correct gpu metrics related data structures

Tom St Denis tstdenis82 at gmail.com
Tue Feb 23 11:48:52 UTC 2021


This is why I advocated for the sysfs output to be either standard packed
or serialized.  It was a hack as it is anyway.

On Mon, Feb 22, 2021 at 4:46 PM Alex Deucher <alexdeucher at gmail.com> wrote:

> On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <evan.quan at amd.com> wrote:
> >
> > To make sure they are naturally aligned.
> >
> > Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
> > Signed-off-by: Evan Quan <evan.quan at amd.com>
>
> Won't this break existing apps that query this info?  We need to make
> sure umr and rocm-smi can handle this.
>
> Alex
>
>
> > ---
> >  drivers/gpu/drm/amd/include/kgd_pp_interface.h        | 11 ++++++-----
> >  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h                |  4 ++--
> >  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c |  8 ++++----
> >  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |  8 ++++----
> >  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c        |  8 ++++----
> >  5 files changed, 20 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > index 828513412e20..3a8f64e1a10c 100644
> > --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> > @@ -332,9 +332,9 @@ struct amd_pm_funcs {
> >  };
> >
> >  struct metrics_table_header {
> > -       uint16_t                        structure_size;
> > -       uint8_t                         format_revision;
> > -       uint8_t                         content_revision;
> > +       uint32_t                        structure_size;
> > +       uint16_t                        format_revision;
> > +       uint16_t                        content_revision;
> >  };
> >
> >  struct gpu_metrics_v1_0 {
> > @@ -385,8 +385,9 @@ struct gpu_metrics_v1_0 {
> >         uint16_t                        current_fan_speed;
> >
> >         /* Link width/speed */
> > -       uint8_t                         pcie_link_width;
> > -       uint8_t                         pcie_link_speed; // in 0.1 GT/s
> > +       uint16_t                        pcie_link_width;
> > +       uint16_t                        pcie_link_speed; // in 0.1 GT/s
> > +       uint8_t                         padding[2];
> >  };
> >
> >  struct gpu_metrics_v2_0 {
> > diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > index 50dd1529b994..f4e7a330f67f 100644
> > --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> > @@ -284,11 +284,11 @@ int smu_v11_0_get_dpm_level_range(struct
> smu_context *smu,
> >
> >  int smu_v11_0_get_current_pcie_link_width_level(struct smu_context
> *smu);
> >
> > -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
> > +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
> >
> >  int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context
> *smu);
> >
> > -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
> > +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
> >
> >  int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
> >                               bool enablement);
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > index c0753029a8e2..95e905d8418d 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -52,8 +52,8 @@
> >
> >  #define LINK_WIDTH_MAX                         6
> >  #define LINK_SPEED_MAX                         3
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
> >                 enum pp_clock_type type, uint32_t mask);
> > @@ -2117,7 +2117,7 @@ static int
> vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
> >                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega12_get_current_pcie_link_width(struct pp_hwmgr
> *hwmgr)
> >  {
> >         uint32_t width_level;
> >
> > @@ -2137,7 +2137,7 @@ static int
> vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
> >                 >>
> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega12_get_current_pcie_link_speed(struct pp_hwmgr
> *hwmgr)
> >  {
> >         uint32_t speed_level;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > index 87811b005b85..3d462405b572 100644
> > --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> > @@ -57,8 +57,8 @@
> >
> >  #define LINK_WIDTH_MAX                         6
> >  #define LINK_SPEED_MAX                         3
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
> >  {
> > @@ -3279,7 +3279,7 @@ static int
> vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
> >                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega20_get_current_pcie_link_width(struct pp_hwmgr
> *hwmgr)
> >  {
> >         uint32_t width_level;
> >
> > @@ -3299,7 +3299,7 @@ static int
> vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
> >                 >>
> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> > +static uint16_t vega20_get_current_pcie_link_speed(struct pp_hwmgr
> *hwmgr)
> >  {
> >         uint32_t speed_level;
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > index 60ef63073ad4..86af9832ba9c 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> > @@ -99,8 +99,8 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
> >  #define mmCG_THERMAL_STATUS_ARCT               0x90
> >  #define mmCG_THERMAL_STATUS_ARCT_BASE_IDX      0
> >
> > -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > -static int link_speed[] = {25, 50, 80, 160};
> > +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> > +static uint16_t link_speed[] = {25, 50, 80, 160};
> >
> >  int smu_v11_0_init_microcode(struct smu_context *smu)
> >  {
> > @@ -2134,7 +2134,7 @@ int
> smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
> >                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
> >  }
> >
> > -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
> > +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
> >  {
> >         uint32_t width_level;
> >
> > @@ -2154,7 +2154,7 @@ int
> smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
> >                 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
> >  }
> >
> > -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
> > +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
> >  {
> >         uint32_t speed_level;
> >
> > --
> > 2.29.0
> >
> > _______________________________________________
> > amd-gfx mailing list
> > amd-gfx at lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.freedesktop.org/archives/amd-gfx/attachments/20210223/e6f0dd4f/attachment-0001.htm>


More information about the amd-gfx mailing list