[PATCH] drm/amdgpu: Enable XGMI mapping for peer device

Liu, Shaoyun Shaoyun.Liu at amd.com
Mon Mar 4 23:27:02 UTC 2019


Adjust the VRAM base offset for XGMI mappings when updating the PT
entries, so that the address falls into the correct XGMI aperture of the
peer device.

Change-Id: I78bdf244da699d2559481ef5afe9663b3e752236
Signed-off-by: shaoyunl <shaoyun.liu at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)
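
Not part of the patch itself: below is a minimal, self-contained user-space
sketch of the address selection this change makes in
amdgpu_vm_bo_split_mapping(). The names used here (dev_model, hive_id,
vram_base_offset, is_xgmi_peer, pte_vram_addr) are simplified stand-ins that
only mirror the kernel fields touched by the patch; they are not the real
amdgpu structures or APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the few amdgpu_device fields used here. */
struct dev_model {
	uint64_t hive_id;		/* 0 means not part of an XGMI hive */
	uint64_t vram_base_offset;	/* base of this device's VRAM aperture */
};

/* Peer check mirroring the is_xgmi test added by the patch. */
static bool is_xgmi_peer(const struct dev_model *adev,
			 const struct dev_model *bo_adev)
{
	return adev != bo_adev &&
	       adev->hive_id &&
	       adev->hive_id == bo_adev->hive_id;
}

/*
 * Pick the VRAM address written into the PTE for page @pfn of the BO.
 * The base offset must come from the device that owns the BO (bo_adev)
 * so a peer mapping lands in that device's XGMI aperture; peer VRAM that
 * is not reachable over XGMI is rejected.
 */
static int64_t pte_vram_addr(const struct dev_model *adev,
			     const struct dev_model *bo_adev,
			     uint64_t pfn, unsigned int page_shift)
{
	if (adev != bo_adev && !is_xgmi_peer(adev, bo_adev))
		return -1;

	return (int64_t)(bo_adev->vram_base_offset + (pfn << page_shift));
}

int main(void)
{
	struct dev_model gpu0 = { .hive_id = 0x1234, .vram_base_offset = 0x0 };
	struct dev_model gpu1 = { .hive_id = 0x1234, .vram_base_offset = 0x800000000ULL };

	/* Mapping gpu1's VRAM into gpu0's page tables: offset comes from gpu1. */
	printf("peer PTE addr:  0x%llx\n",
	       (unsigned long long)pte_vram_addr(&gpu0, &gpu1, 0x10, 12));
	/* Local mapping: the device keeps using its own offset. */
	printf("local PTE addr: 0x%llx\n",
	       (unsigned long long)pte_vram_addr(&gpu0, &gpu0, 0x10, 12));
	return 0;
}

The sketch is only meant to show the design choice: the aperture offset is
taken from the BO's owning device, and a remote mapping is only allowed when
both devices report the same XGMI hive.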

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 12d51d9..0bc32c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1876,6 +1876,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
  * @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer of the device where the BO was allocated
  * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
@@ -1891,11 +1892,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
 				      uint64_t flags,
+				      struct amdgpu_device *bo_adev,
 				      struct drm_mm_node *nodes,
 				      struct dma_fence **fence)
 {
 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
 	uint64_t pfn, start = mapping->start;
+	bool is_xgmi = false;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1917,6 +1920,10 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		flags |= AMDGPU_PTE_PRT;
 		flags &= ~AMDGPU_PTE_VALID;
 	}
+	if (adev != bo_adev &&
+	    adev->gmc.xgmi.hive_id &&
+	    adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id)
+		is_xgmi = true;
 
 	trace_amdgpu_vm_bo_update(mapping);
 
@@ -1965,7 +1972,11 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 			}
 
 		} else if (flags & AMDGPU_PTE_VALID) {
-			addr += adev->vm_manager.vram_base_offset;
+			if (adev != bo_adev && !is_xgmi) {
+				DRM_ERROR("Failed to map the VRAM for other device access.\n");
+				return -EINVAL;
+			}
+			addr += bo_adev->vm_manager.vram_base_offset;
 			addr += pfn << PAGE_SHIFT;
 		}
 
@@ -2012,6 +2023,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
+	struct amdgpu_device *bo_adev = adev;
 	int r;
 
 	if (clear || !bo) {
@@ -2030,9 +2042,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		exclusive = reservation_object_get_excl(bo->tbo.resv);
 	}
 
-	if (bo)
+	if (bo) {
 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
-	else
+		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	} else
 		flags = 0x0;
 
 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
@@ -2050,7 +2063,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
-					       mapping, flags, nodes,
+					       mapping, flags, bo_adev, nodes,
 					       last_update);
 		if (r)
 			return r;
-- 
2.7.4


