[PATCH 16/44] drm/amdkfd: support xgmi same hive mapping
Felix Kuehling
Felix.Kuehling at amd.com
Mon Mar 22 10:58:32 UTC 2021
From: Philip Yang <Philip.Yang at amd.com>
amdgpu_gmc_get_vm_pte() uses the bo_va->is_xgmi same-hive information to
set the PTE flags when updating the GPU mapping. Add a local struct
amdgpu_bo_va variable, set bo_va.is_xgmi, and pass it via mapping->bo_va
while mapping to the GPU. This assumes the XGMI pstate is high after boot.
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
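Note for reviewers: a minimal sketch of the mechanism described above (not
part of the patch; svm_range_set_xgmi_hint is a hypothetical helper name).
A stack-local struct amdgpu_bo_va carries the same-hive result into
prange->mapping so that amdgpu_gmc_get_vm_pte(), reached via
amdgpu_vm_bo_update_mapping(), can consult mapping->bo_va->is_xgmi when
choosing PTE flags.

/*
 * Sketch only: attach XGMI same-hive information to the mapping through a
 * caller-provided, stack-local bo_va. The caller must clear
 * prange->mapping.bo_va again before bo_va goes out of scope.
 */
static void svm_range_set_xgmi_hint(struct svm_range *prange,
				    struct amdgpu_device *adev,
				    struct amdgpu_device *bo_adev,
				    struct amdgpu_bo_va *bo_va)
{
	/* Only relevant when the range is backed by a VRAM BO */
	if (!prange->svm_bo || !prange->ttm_res)
		return;

	/* True when the mapping GPU and the BO's GPU share an XGMI hive */
	bo_va->is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
	prange->mapping.bo_va = bo_va;
}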
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9b1c5aa86f4a..de5777330d23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -26,6 +26,8 @@
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
+#include "amdgpu.h"
+#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
@@ -1026,10 +1028,12 @@ static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct svm_range *prange, dma_addr_t *pages_addr,
- bool reserve_vm, struct dma_fence **fence)
+ bool reserve_vm, struct amdgpu_device *bo_adev,
+ struct dma_fence **fence)
{
struct ttm_validate_buffer tv[2];
struct ww_acquire_ctx ticket;
+ struct amdgpu_bo_va bo_va;
struct list_head list;
uint64_t pte_flags;
int r = 0;
@@ -1062,13 +1066,18 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
}
+ if (prange->svm_bo && prange->ttm_res) {
+ bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
+ prange->mapping.bo_va = &bo_va;
+ }
+
prange->mapping.start = prange->start;
prange->mapping.last = prange->last;
prange->mapping.offset = prange->offset;
pte_flags = svm_range_get_pte_flags(adev, prange);
prange->mapping.flags = pte_flags;
- r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
prange->mapping.start,
prange->mapping.last, pte_flags,
prange->mapping.offset,
@@ -1092,6 +1101,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*fence = dma_fence_get(vm->last_update);
unreserve_out:
+ prange->mapping.bo_va = NULL;
if (reserve_vm)
ttm_eu_backoff_reservation(&ticket, &list);
out:
@@ -1102,6 +1112,7 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
{
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_process_device *pdd;
+ struct amdgpu_device *bo_adev;
struct amdgpu_device *adev;
struct kfd_process *p;
struct kfd_dev *dev;
@@ -1109,6 +1120,11 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
uint32_t gpuidx;
int r = 0;
+ if (prange->svm_bo && prange->ttm_res)
+ bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ else
+ bo_adev = NULL;
+
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);
p = container_of(prange->svms, struct kfd_process, svms);
@@ -1125,6 +1141,12 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
return -EINVAL;
adev = (struct amdgpu_device *)dev->kgd;
+ if (bo_adev && adev != bo_adev &&
+ !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ pr_debug("cannot map to device idx %d\n", gpuidx);
+ continue;
+ }
+
r = svm_range_dma_map(adev->dev, &prange->dma_addr[gpuidx],
prange->pages_addr, prange->npages);
if (r)
@@ -1132,7 +1154,7 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
r = svm_range_map_to_gpu(adev, pdd->vm, prange,
prange->dma_addr[gpuidx], reserve_vm,
- &fence);
+ bo_adev, &fence);
if (r)
break;
--
2.31.0
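For completeness, a hedged sketch (not part of the patch;
svm_range_can_map_to_gpu is a hypothetical helper name) of the per-GPU
gating added in svm_range_map_to_gpus(): a range backed by VRAM on bo_adev
is only mapped on that device itself or on GPUs in the same XGMI hive, and
other GPUs are skipped.

/* Sketch only: may this GPU (adev) map a range whose VRAM lives on bo_adev? */
static bool svm_range_can_map_to_gpu(struct amdgpu_device *adev,
				     struct amdgpu_device *bo_adev)
{
	/* System-memory-backed range: every GPU can map it */
	if (!bo_adev)
		return true;

	/* VRAM is local, or reachable over XGMI within the same hive */
	return adev == bo_adev || amdgpu_xgmi_same_hive(adev, bo_adev);
}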