[PATCH 10/19] drm/amdgpu: update pt shadow

Chunming Zhou David1.Zhou at amd.com
Fri Aug 5 09:38:37 UTC 2016


Update the page table's shadow BO along with the real page table:
amdgpu_vm_update_ptes() gains a 'shadow' flag that selects between a
page table BO and its shadow, and amdgpu_vm_bo_update_mapping() becomes
a wrapper that writes the shadow copy first and then the real one, so
the shadow can be used to restore the page tables after a GPU reset.
Shadow updates skip the sync steps and are submitted through the VM's
dedicated shadow entity; the resulting fence comes from the real page
table update.

Change-Id: I33b31cbe794c1c83b8e02e3069159c8204ac03e3
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
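Note for reviewers (kept below the '---' line, so git-am drops it): the
change follows the usual shadow-mirroring pattern, where every write
goes through one helper that takes a 'shadow' flag and is applied to
the shadow copy before the live one. A minimal stand-alone sketch of
that pattern, with made-up names rather than amdgpu code:

	#include <stdint.h>
	#include <stdio.h>

	struct table {
		uint64_t entries[8];
		struct table *shadow;	/* backup copy used for recovery */
	};

	/* common helper, selecting the target with a 'shadow' flag,
	 * as the patch does in amdgpu_vm_update_ptes() */
	static void table_write(struct table *t, unsigned idx,
				uint64_t val, int shadow)
	{
		struct table *dst = shadow ? t->shadow : t;

		dst->entries[idx] = val;
	}

	/* wrapper in the style of amdgpu_vm_bo_update_mapping():
	 * shadow first, then the real table, so the shadow never
	 * lags behind the live copy */
	static void table_update(struct table *t, unsigned idx,
				 uint64_t val)
	{
		table_write(t, idx, val, 1);
		table_write(t, idx, val, 0);
	}

	int main(void)
	{
		struct table shadow = { { 0 }, NULL };
		struct table live = { { 0 }, &shadow };

		table_update(&live, 3, 0x1234);
		printf("live %llx shadow %llx\n",
		       (unsigned long long)live.entries[3],
		       (unsigned long long)shadow.entries[3]);
		return 0;
	}
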
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 111 ++++++++++++++++++++++-------
 1 file changed, 72 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8eb91a0..f317b09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -869,7 +869,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 					*vm_update_params,
 				  struct amdgpu_vm *vm,
 				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
+				  uint64_t dst, uint32_t flags,
+				  bool shadow)
 {
 	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
@@ -883,7 +884,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = vm->page_tables[pt_idx].entry.robj;
+	pt = shadow ? vm->page_tables[pt_idx].entry.robj->shadow :
+		vm->page_tables[pt_idx].entry.robj;
 
 	if ((addr & ~mask) == (end & ~mask))
 		nptes = end - addr;
@@ -902,7 +904,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = vm->page_tables[pt_idx].entry.robj;
+		pt = shadow ? vm->page_tables[pt_idx].entry.robj->shadow :
+			vm->page_tables[pt_idx].entry.robj;
 
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
@@ -937,31 +940,18 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 			    cur_pe_end, cur_dst, flags);
 }
 
-/**
- * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
- *
- * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @vm: requested vm
- * @start: start of mapped range
- * @last: last mapped entry
- * @flags: flags for the entries
- * @addr: addr to set the area to
- * @fence: optional resulting fence
- *
- * Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
- */
-static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct fence *exclusive,
-				       uint64_t src,
-				       dma_addr_t *pages_addr,
-				       struct amdgpu_vm *vm,
-				       uint64_t start, uint64_t last,
-				       uint32_t flags, uint64_t addr,
-				       struct fence **fence)
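+/* Worker for amdgpu_vm_bo_update_mapping(), writing either the real
+ * page tables or their shadow copy, depending on the 'shadow' flag.
+ */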
+static int amdgpu_vm_bo_update_mapping_or_shadow(struct amdgpu_device *adev,
+						 struct fence *exclusive,
+						 uint64_t src,
+						 dma_addr_t *pages_addr,
+						 struct amdgpu_vm *vm,
+						 uint64_t start, uint64_t last,
+						 uint32_t flags, uint64_t addr,
+						 struct fence **fence,
+						 bool shadow)
 {
 	struct amdgpu_ring *ring;
 	void *owner = AMDGPU_FENCE_OWNER_VM;
@@ -1016,29 +1006,30 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	vm_update_params.ib = &job->ibs[0];
 
-	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
-	if (r)
-		goto error_free;
-
-	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
-			     owner);
-	if (r)
-		goto error_free;
+	if (!shadow) {
+		r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+		if (r)
+			goto error_free;
 
+		r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+				     owner);
+		if (r)
+			goto error_free;
+	}
 	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
 	if (r)
 		goto error_free;
 
 	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
-			      last + 1, addr, flags);
+			      last + 1, addr, flags, shadow);
 
 	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
 	WARN_ON(vm_update_params.ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, &vm->entity,
+	r = amdgpu_job_submit(job, ring,
+			      shadow ? &vm->shadow_entity : &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
-
 	amdgpu_bo_fence(vm->page_directory, f, true);
 	if (fence) {
 		fence_put(*fence);
@@ -1053,6 +1044,48 @@ error_free:
 }
 
 /**
+ * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
+ *
+ * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
+ * @vm: requested vm
+ * @start: start of mapped range
+ * @last: last mapped entry
+ * @flags: flags for the entries
+ * @addr: addr to set the area to
+ * @fence: optional resulting fence
+ *
+ * Fill in the page table entries between @start and @last.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+				       struct fence *exclusive,
+				       uint64_t src,
+				       dma_addr_t *pages_addr,
+				       struct amdgpu_vm *vm,
+				       uint64_t start, uint64_t last,
+				       uint32_t flags, uint64_t addr,
+				       struct fence **fence)
+{
+	int r;
+
+	r = amdgpu_vm_bo_update_mapping_or_shadow(adev, exclusive, src,
+						  pages_addr, vm,
+						  start, last,
+						  flags, addr,
+						  NULL, true);
+	if (r)
+		return r;
+	return amdgpu_vm_bo_update_mapping_or_shadow(adev, exclusive, src,
+						     pages_addr, vm,
+						     start, last,
+						     flags, addr,
+						     fence, false);
+}
+
+/**
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
-- 
1.9.1
