[PATCH 3/3] drm/amdkfd: report pcie bandwidth to the kfd

Kim, Jonathan Jonathan.Kim at amd.com
Mon Jul 19 15:48:16 UTC 2021


[AMD Official Use Only]

> -----Original Message-----
> From: Kuehling, Felix <Felix.Kuehling at amd.com>
> Sent: Saturday, July 17, 2021 1:37 AM
> To: Kim, Jonathan <Jonathan.Kim at amd.com>; amd-
> gfx at lists.freedesktop.org
> Subject: Re: [PATCH 3/3] drm/amdkfd: report pcie bandwidth to the kfd
>
> Am 2021-07-16 um 12:43 p.m. schrieb Jonathan Kim:
> > Similar to xGMI reporting the min/max bandwidth between direct peers,
> > PCIe will report the min/max bandwidth to the KFD.
> >
> > v2: change to bandwidth
> >
> > Signed-off-by: Jonathan Kim <jonathan.kim at amd.com>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 61 ++++++++++++++++++++++
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h |  1 +
> >  drivers/gpu/drm/amd/amdkfd/kfd_crat.c      |  4 ++
> >  3 files changed, 66 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> > index 3978578a1c49..b7db52f1a9d1 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
> > @@ -21,6 +21,7 @@
> >   */
> >
> >  #include "amdgpu_amdkfd.h"
> > +#include "amd_pcie.h"
> >  #include "amd_shared.h"
> >
> >  #include "amdgpu.h"
> > @@ -576,6 +577,66 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev
> >     return (num_links * 16 * 25000)/BITS_PER_BYTE;
> >  }
> >
> > +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
> > +{
> > +   struct amdgpu_device *adev = (struct amdgpu_device *)dev;
> > +   int num_lanes_shift = is_min ? ffs(adev->pm.pcie_mlw_mask >>
> > +                           CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT) - 1 :
> > +                   fls(adev->pm.pcie_mlw_mask >>
> > +                           CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT) - 1;
> > +   int gen_speed_shift = is_min ? ffs(adev->pm.pcie_gen_mask >>
> > +                           CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT) - 1 :
> > +                   fls(adev->pm.pcie_gen_mask >>
> > +                           CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT) - 1;
>
> The shifting is not necessary because you undo it below. I think this would
> do the trick and be more readable:
>
>       int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
>                                       fls(adev->pm.pcie_mlw_mask)) - 1;
>       int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask) :
>                                       fls(adev->pm.pcie_gen_mask)) - 1;

Ok, thanks for the review and suggestion. I've adjusted your suggestion by masking pcie_gen_mask with CAIL_PCIE_LINK_SPEED_SUPPORT_MASK before taking ffs/fls, since pcie_gen_mask also sets some non-speed-related lower bits.
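
For illustration, the adjusted gen-speed selection would look roughly like this (a sketch only, using the CAIL_* names from amd_pcie.h; the exact hunk in the respin may differ):

	/* mask off the non-speed bits before picking the lowest/highest supported gen */
	int gen_speed_shift = (is_min ?
			ffs(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
			fls(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;

The reported numbers stay the same, e.g. a Gen4 x16 link still works out to (16 * 16000)/8 = 32000 MB/s on the max side.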

Thanks,

Jon

>       uint32_t num_lanes_mask = 1 << num_lanes_shift;
>       uint32_t gen_speed_mask = 1 << gen_speed_shift;
>
> With that fixed, this patch is
>
> Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
>
>
> > +   uint32_t num_lanes_mask = (1UL << num_lanes_shift) << CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT;
> > +   uint32_t gen_speed_mask = (1UL << gen_speed_shift) << CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
> > +   int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
> > +
> > +   switch (num_lanes_mask) {
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
> > +           num_lanes_factor = 1;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
> > +           num_lanes_factor = 2;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
> > +           num_lanes_factor = 4;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
> > +           num_lanes_factor = 8;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
> > +           num_lanes_factor = 12;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
> > +           num_lanes_factor = 16;
> > +           break;
> > +   case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
> > +           num_lanes_factor = 32;
> > +           break;
> > +   }
> > +
> > +   switch (gen_speed_mask) {
> > +   case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
> > +           gen_speed_mbits_factor = 2500;
> > +           break;
> > +   case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
> > +           gen_speed_mbits_factor = 5000;
> > +           break;
> > +   case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
> > +           gen_speed_mbits_factor = 8000;
> > +           break;
> > +   case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
> > +           gen_speed_mbits_factor = 16000;
> > +           break;
> > +   case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
> > +           gen_speed_mbits_factor = 32000;
> > +           break;
> > +   }
> > +
> > +   return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
> > +}
> > +
> >  uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
> >  {
> >     struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> > index e12fccb2d2c4..5d73f3108d13 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
> > @@ -227,6 +227,7 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
> >  int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
> >  uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
> >  int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
> > +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
> >
> >  /* Read user wptr from a specified user address space with page fault
> >   * disabled. The memory must be pinned and mapped to the hardware when
> > diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> > index 40ce6239c813..eada22b9ea69 100644
> > --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> > +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> > @@ -1998,6 +1998,10 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
> >             }
> >     } else {
> >             sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
> > +           sub_type_hdr->minimum_bandwidth_mbs =
> > +                   amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true);
> > +           sub_type_hdr->maximum_bandwidth_mbs =
> > +                   amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false);
> >     }
> >
> >     sub_type_hdr->proximity_domain_from = proximity_domain;

