[PATCH 03/10] drm/amdgpu: implement vm recovery function from shadow V2

Chunming Zhou David1.Zhou at amd.com
Mon Aug 15 06:06:35 UTC 2016


V2:
a. Check whether the pd/pt BO has been evicted; an evicted pd/pt BO does not need recovery.
b. return fence to caller.

Change-Id: I46783043eecbe9fc9c2ce9230be1085aca3731bd
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 59 ++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 229bb32..50fd971 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1010,6 +1010,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+					     struct amdgpu_vm *vm,
+					     struct fence **fence);
 
 /*
  * context related structures
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2843132..915a41a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,65 @@ error_free:
 	return r;
 }
 
+int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
+					     struct amdgpu_vm *vm,
+					     struct fence **fence)
+{
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct fence *f;
+	uint64_t pt_idx;
+	uint32_t domain;
+	int r;
+
+	if (!vm->page_directory->shadow)
+		return 0;
+
+	r = amdgpu_bo_reserve(vm->page_directory, false);
+	if (r)
+		return r;
+	domain = amdgpu_mem_type_to_domain(vm->page_directory->tbo.mem.mem_type);
+	/* if bo has been evicted, then no need to recover */
+	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+		vm->page_directory->backup_shadow = AMDGPU_BO_SHADOW_TO_PARENT;
+		r = amdgpu_bo_backup_shadow(adev, ring,
+					    vm->page_directory,
+					    NULL, &f, true);
+		if (r) {
+			DRM_ERROR("recover page table failed!\n");
+			goto err;
+		}
+		if (fence) {
+			fence_put(*fence);
+			*fence = fence_get(f);
+		}
+	}
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
+
+		if (!bo)
+			continue;
+		domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+		/* if bo has been evicted, then no need to recover */
+		if (domain != AMDGPU_GEM_DOMAIN_VRAM)
+			continue;
+
+		bo->backup_shadow = AMDGPU_BO_SHADOW_TO_PARENT;
+		r = amdgpu_bo_backup_shadow(adev, ring, bo,
+					    NULL, &f, true);
+		if (r) {
+			DRM_ERROR("recover page table failed!\n");
+			goto err;
+		}
+		/* return the latest recovery fence to the caller, as above */
+		if (fence) {
+			fence_put(*fence);
+			*fence = fence_get(f);
+		}
+	}
+
+err:
+	amdgpu_bo_unreserve(vm->page_directory);
+	return r;
+}
 /**
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
-- 
1.9.1



More information about the amd-gfx mailing list