[PATCH] drm/amdkfd: fix no atomics settings in the kfd topology
Felix Kuehling
felix.kuehling at amd.com
Mon May 3 18:09:28 UTC 2021
On 2021-05-03 at 11:57 a.m., Jonathan Kim wrote:
> To account for various PCIe and xGMI setups, check the no atomics settings
> for a device in relation to every direct peer.
Thanks, this looks reasonable. Some more nit-picks about naming and
coding style inline.
>
> Signed-off-by: Jonathan Kim <jonathan.kim at amd.com>
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 55 ++++++++++++++---------
> 1 file changed, 34 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> index 30430aefcfc7..40ac7fe2a499 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> @@ -1192,47 +1192,60 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
> mem->mem_clk_max = local_mem_info.mem_clk_max;
> }
>
> -static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
> +static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
> + struct kfd_topology_device *target_gpu_dev,
> + struct kfd_iolink_properties *link)
> {
> - struct kfd_iolink_properties *link, *cpu_link;
> - struct kfd_topology_device *cpu_dev;
> - struct amdgpu_device *adev;
> - uint32_t cap;
> - uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
> - uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;
> -
> - if (!dev || !dev->gpu)
> + /* xgmi always supports atomics between links. */
> + if (link->iolink_type == CRAT_IOLINK_TYPE_XGMI)
> return;
>
> - adev = (struct amdgpu_device *)(dev->gpu->kgd);
> - if (!adev->gmc.xgmi.connected_to_cpu) {
> - pcie_capability_read_dword(dev->gpu->pdev,
> + /* check pcie support to set cpu (dev) flags for target_gpu_dev link. */
> + if (target_gpu_dev) {
> + uint32_t cap;
> +
> + pcie_capability_read_dword(target_gpu_dev->gpu->pdev,
> PCI_EXP_DEVCAP2, &cap);
>
> if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
> PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
> - cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
> + link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
> CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
> - }
> -
> - if (!adev->gmc.xgmi.num_physical_nodes) {
> + /* set gpu (dev) flags. */
> + } else {
> if (!dev->gpu->pci_atomic_requested ||
> dev->gpu->device_info->asic_family ==
> CHIP_HAWAII)
> - flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
> + link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
> CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
> }
> +}
> +
> +static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
> +{
> + struct kfd_iolink_properties *link, *cpu_link;
> + struct kfd_topology_device *cpu_dev;
The names cpu_link and cpu_dev are still misleading. When GPUs have XGMI
links, the peer devices may, in fact, be other GPUs, and the links may be
the XGMI links from those GPUs. I think better names would be "peer_dev"
and "inbound_link".
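For illustration (just my naming suggestion, untested), the declarations
would then read something like:

        struct kfd_iolink_properties *link, *inbound_link;
        struct kfd_topology_device *peer_dev;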
> + uint32_t flag_enable = CRAT_IOLINK_FLAGS_ENABLED;
You don't need this variable. Just use CRAT_IOLINK_FLAGS_ENABLED below.
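That is, the assignment in the loop below would simply become (sketch):

        link->flags = CRAT_IOLINK_FLAGS_ENABLED;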
> +
> + if (!dev || !dev->gpu)
> + return;
>
> /* GPU only creates direct links so apply flags setting to all */
> list_for_each_entry(link, &dev->io_link_props, list) {
> - link->flags = flag;
> + link->flags = flag_enable;
> + kfd_set_iolink_no_atomics(dev, NULL, link);
> cpu_dev = kfd_topology_device_by_proximity_domain(
> link->node_to);
> +
> if (cpu_dev) {
To minimize indentation, I'd do

        if (!peer_dev)
                continue;
> list_for_each_entry(cpu_link,
> - &cpu_dev->io_link_props, list)
> - if (cpu_link->node_to == link->node_from)
> - cpu_link->flags = cpu_flag;
> + &cpu_dev->io_link_props, list) {
> + if (cpu_link->node_to == link->node_from) {
> + cpu_link->flags = flag_enable;
To minimize indentation, and to avoid unnecessary loop iterations, I'd do

        if (inbound_link->node_to != link->node_from)
                continue;

        inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
        kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
        break;
Regards,
Felix
> + kfd_set_iolink_no_atomics(cpu_dev, dev,
> + cpu_link);
> + }
> + }
> }
> }
> }