[PATCH 19/35] drm/amdkfd: support xgmi same hive mapping
Felix Kuehling
Felix.Kuehling at amd.com
Thu Jan 7 03:01:11 UTC 2021
From: Philip Yang <Philip.Yang at amd.com>
amdgpu_gmc_get_vm_pte() uses the bo_va->is_xgmi same-hive information to
set the PTE flags used to update the GPU mapping. Add a local bo_va
structure variable, set bo_va.is_xgmi, and pass it via mapping->bo_va
while mapping to the GPU.
Assume the xgmi pstate is high after boot.
Signed-off-by: Philip Yang <Philip.Yang at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
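Note for reviewers (not part of the commit): the sketch below only
illustrates the idea behind this change, assuming the svm_range and
amdgpu_bo_va layouts used elsewhere in this series. The helper name
svm_range_same_hive_sketch is made up for illustration and does not
exist in the patch; amdgpu_xgmi_same_hive() and amdgpu_gmc_get_vm_pte()
are the existing amdgpu helpers referenced above.

	/*
	 * Illustration only: when the range is backed by VRAM on bo_adev,
	 * record whether the mapping GPU (adev) sits in the same XGMI hive,
	 * so amdgpu_gmc_get_vm_pte() can pick the matching PTE flags.
	 */
	static void svm_range_same_hive_sketch(struct amdgpu_device *adev,
					       struct amdgpu_device *bo_adev,
					       struct svm_range *prange,
					       struct amdgpu_bo_va *bo_va)
	{
		if (prange->svm_bo && prange->mm_nodes) {
			/* true if adev and bo_adev are in the same hive */
			bo_va->is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
			/* consumed later through mapping->bo_va->is_xgmi */
			prange->mapping.bo_va = bo_va;
		}
	}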
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 7d91dc49a5a9..8a4d0a3935b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -26,6 +26,8 @@
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
+#include "amdgpu.h"
+#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
@@ -923,10 +925,11 @@ static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct svm_range *prange, bool reserve_vm,
- struct dma_fence **fence)
+ struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
struct ttm_validate_buffer tv[2];
struct ww_acquire_ctx ticket;
+ struct amdgpu_bo_va bo_va;
struct list_head list;
dma_addr_t *pages_addr;
uint64_t pte_flags;
@@ -963,6 +966,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
}
+ if (prange->svm_bo && prange->mm_nodes) {
+ bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
+ prange->mapping.bo_va = &bo_va;
+ }
+
prange->mapping.start = prange->it_node.start;
prange->mapping.last = prange->it_node.last;
prange->mapping.offset = prange->offset;
@@ -970,7 +978,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
prange->mapping.flags = pte_flags;
pages_addr = prange->pages_addr;
- r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
prange->mapping.start,
prange->mapping.last, pte_flags,
prange->mapping.offset,
@@ -994,6 +1002,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*fence = dma_fence_get(vm->last_update);
unreserve_out:
+ prange->mapping.bo_va = NULL;
if (reserve_vm)
ttm_eu_backoff_reservation(&ticket, &list);
out:
@@ -1004,6 +1013,7 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
{
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_process_device *pdd;
+ struct amdgpu_device *bo_adev;
struct amdgpu_device *adev;
struct kfd_process *p;
struct kfd_dev *dev;
@@ -1011,6 +1021,11 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
uint32_t gpuidx;
int r = 0;
+ if (prange->svm_bo && prange->mm_nodes)
+ bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ else
+ bo_adev = NULL;
+
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);
p = container_of(prange->svms, struct kfd_process, svms);
@@ -1027,8 +1042,14 @@ static int svm_range_map_to_gpus(struct svm_range *prange, bool reserve_vm)
return -EINVAL;
adev = (struct amdgpu_device *)dev->kgd;
+ if (bo_adev && adev != bo_adev &&
+ !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+ pr_debug("cannot map to device idx %d\n", gpuidx);
+ continue;
+ }
+
r = svm_range_map_to_gpu(adev, pdd->vm, prange, reserve_vm,
- &fence);
+ bo_adev, &fence);
if (r)
break;
--
2.29.2