[PATCH] drm/amdgpu: Enable XGMI mapping for peer device
Liu, Shaoyun
Shaoyun.Liu at amd.com
Mon Mar 4 20:38:36 UTC 2019
Adjust the VRAM base offset for XGMI mapping when updating the PT entry so
the address falls into the correct XGMI aperture for the peer device.
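For illustration only (not part of the patch): a minimal sketch of the address
adjustment done in the diff below, using the same struct fields it touches. The
helper name xgmi_pte_addr() is hypothetical and the error case is reduced to the
same -EINVAL condition the patch adds.

static uint64_t xgmi_pte_addr(struct amdgpu_device *adev,    /* device building the page table */
			      struct amdgpu_device *bo_adev, /* device that owns the BO's VRAM */
			      uint64_t addr, uint64_t pfn)
{
	/* Peer VRAM may only be mapped when both GPUs sit in the same XGMI hive. */
	if (adev != bo_adev &&
	    (!adev->gmc.xgmi.hive_id ||
	     adev->gmc.xgmi.hive_id != bo_adev->gmc.xgmi.hive_id))
		return 0; /* caller treats this as -EINVAL */

	/* Use the BO owner's base offset so the resulting address lands in
	 * the peer's XGMI aperture instead of the local VRAM aperture. */
	addr += bo_adev->vm_manager.vram_base_offset;
	addr += pfn << PAGE_SHIFT;
	return addr;
}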
Change-Id: I78bdf244da699d2559481ef5afe9663b3e752236
Signed-off-by: shaoyunl <shaoyun.liu at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 12d51d9..c8f98b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1876,6 +1876,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer of the device where the BO was actually allocated
* @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
@@ -1891,6 +1892,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t flags,
+ struct amdgpu_device *bo_adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
@@ -1965,7 +1967,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
} else if (flags & AMDGPU_PTE_VALID) {
- addr += adev->vm_manager.vram_base_offset;
+ if ((adev != bo_adev) &&
+ (!adev->gmc.xgmi.hive_id ||
+ adev->gmc.xgmi.hive_id != bo_adev->gmc.xgmi.hive_id)) {
+ DRM_ERROR("Failed to map the VRAM for other device access.\n");
+ return -EINVAL;
+ }
+ addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
@@ -2012,6 +2020,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct drm_mm_node *nodes;
struct dma_fence *exclusive, **last_update;
uint64_t flags;
+ struct amdgpu_device *bo_adev = adev;
int r;
if (clear || !bo) {
@@ -2030,9 +2039,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo)
+ if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
- else
+ bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ } else
flags = 0x0;
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
@@ -2050,7 +2060,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
- mapping, flags, nodes,
+ mapping, flags, bo_adev, nodes,
last_update);
if (r)
return r;
--
2.7.4