[PATCH 08/15] drm/amdgpu: handle multi-level PD updates V2

Chunming Zhou David1.Zhou at amd.com
Mon Mar 27 05:53:18 UTC 2017


From: Christian König <christian.koenig at amd.com>

Update all levels of the page directory.

V2:
a. Fix sub-level PDEs always being written to the wrong place.
b. Update sub-levels regardless of whether the parent was updated.

Change-Id: I0ce3fc1fd88397aedf693b0b6e2efb2db704e615
Signed-off-by: Christian König <christian.koenig at amd.com> (V1)
Reviewed-by: Alex Deucher <alexander.deucher at amd.com> (V1)
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com> (V2)
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 97 ++++++++++++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  4 +-
 4 files changed, 68 insertions(+), 37 deletions(-)
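
Note: the following is a minimal standalone sketch of the recursion this
patch introduces, not the driver code itself; the struct and function
names (vm_pt_sketch, update_level_sketch) are illustrative only.
amdgpu_vm_update_directories() starts at vm->root, and each level first
writes its own PDEs and then descends into every populated sub-level:

/* Standalone sketch of the new update flow; names are hypothetical. */
struct vm_pt_sketch {
	void *bo;                       /* backing directory BO, NULL if unused */
	struct vm_pt_sketch *entries;   /* sub-level array, NULL at leaf level */
	unsigned int last_entry_used;   /* highest valid index in entries[] */
};

/* Update one directory level, then recurse into its children. */
static int update_level_sketch(struct vm_pt_sketch *parent, unsigned int level)
{
	unsigned int idx;

	if (!parent->entries)   /* leaf level: real page tables, nothing below */
		return 0;

	/* ... write the PDEs of this level here (elided in this sketch) ... */

	/* Bounded recursion: the hierarchy has at most 5 levels. */
	for (idx = 0; idx <= parent->last_entry_used; ++idx) {
		int r;

		if (!parent->entries[idx].bo)
			continue;
		r = update_level_sketch(&parent->entries[idx], level + 1);
		if (r)
			return r;
	}
	return 0;
}

The key V2 behaviour is visible here: the descent into sub-levels happens
unconditionally after the current level is handled, even when none of the
current level's PDEs changed.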

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d9308cf..de1c4c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -787,7 +787,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo *bo;
 	int i, r;
 
-	r = amdgpu_vm_update_page_directory(adev, vm);
+	r = amdgpu_vm_update_directories(adev, vm);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 48ab967..008b8ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -691,7 +691,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+	r = amdgpu_vm_update_directories(adev, bo_va->vm);
 	if (r)
 		goto error;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fe3db17..5a62a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -625,24 +625,24 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 }
 
 /*
- * amdgpu_vm_update_pdes - make sure that page directory is valid
+ * amdgpu_vm_update_level - update a single level in the hierarchy
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
+ * @parent: parent directory
  *
- * Allocates new page tables if necessary
- * and updates the page directory.
+ * Makes sure all entries in @parent are up to date.
  * Returns 0 for success, error for failure.
  */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm)
+static int amdgpu_vm_update_level(struct amdgpu_device *adev,
+				  struct amdgpu_vm *vm,
+				  struct amdgpu_vm_pt *parent,
+				  unsigned level)
 {
 	struct amdgpu_bo *shadow;
 	struct amdgpu_ring *ring;
 	uint64_t pd_addr, shadow_addr;
-	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
 	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
@@ -651,16 +651,19 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	int r;
 
+	if (!parent->entries)
+		return 0;
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-	shadow = vm->root.bo->shadow;
 
 	/* padding, etc. */
 	ndw = 64;
 
 	/* assume the worst case */
-	ndw += vm->root.last_entry_used * 6;
+	ndw += parent->last_entry_used * 6;
+
+	pd_addr = amdgpu_bo_gpu_offset(parent->bo);
 
-	pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+	shadow = parent->bo->shadow;
 	if (shadow) {
 		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
 		if (r)
@@ -679,9 +682,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	params.adev = adev;
 	params.ib = &job->ibs[0];
 
-	/* walk over the address space and update the page directory */
-	for (pt_idx = 0; pt_idx <= vm->root.last_entry_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->root.entries[pt_idx].bo;
+	/* walk over the address space and update the directory */
+	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+		struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -697,10 +700,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (vm->root.entries[pt_idx].addr == pt)
+		if (parent->entries[pt_idx].addr == pt)
 			continue;
 
-		vm->root.entries[pt_idx].addr = pt;
+		parent->entries[pt_idx].addr = pt;
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
@@ -745,26 +748,39 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	if (params.ib->length_dw == 0) {
 		amdgpu_job_free(job);
-		return 0;
-	}
-
-	amdgpu_ring_pad_ib(ring, params.ib);
-	amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
-			 AMDGPU_FENCE_OWNER_VM);
-	if (shadow)
-		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+	} else {
+		amdgpu_ring_pad_ib(ring, params.ib);
+		amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
+		if (shadow)
+			amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+					 AMDGPU_FENCE_OWNER_VM);
 
-	WARN_ON(params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
-			      AMDGPU_FENCE_OWNER_VM, &fence);
-	if (r)
-		goto error_free;
+		WARN_ON(params.ib->length_dw > ndw);
+		r = amdgpu_job_submit(job, ring, &vm->entity,
+				AMDGPU_FENCE_OWNER_VM, &fence);
+		if (r)
+			goto error_free;
 
-	amdgpu_bo_fence(vm->root.bo, fence, true);
-	fence_put(vm->last_dir_update);
-	vm->last_dir_update = fence_get(fence);
-	fence_put(fence);
+		amdgpu_bo_fence(parent->bo, fence, true);
+		fence_put(vm->last_dir_update);
+		vm->last_dir_update = fence_get(fence);
+		fence_put(fence);
+	}
+	/*
+	 * Recurse into the subdirectories. This recursion is harmless because
+	 * we only have a maximum of 5 layers.
+	 */
+	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+
+		if (!entry->bo)
+			continue;
+
+		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
+		if (r)
+			return r;
+	}
 
 	return 0;
 
@@ -773,6 +789,21 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	return r;
 }
 
+/*
+ * amdgpu_vm_update_directories - make sure that all directories are valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Makes sure all directories are up to date.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+				 struct amdgpu_vm *vm)
+{
+	return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
+}
+
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 1f54563..117a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -191,8 +191,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm);
 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-- 
1.9.1
