[PATCH 13/13] drm/amdgpu: add backup condition for shadow page table

Chunming Zhou David1.Zhou at amd.com
Tue Aug 2 07:49:07 UTC 2016


Shadow copies of the page directory and the page tables only exist
so they can be recovered after a GPU reset. Creating and updating
them is wasted effort when no reset can ever happen, so gate all
shadow handling behind a single amdgpu_vm_need_backup() check:
shadows are only used on dGPUs (not APUs), and only while
amdgpu_lockup_timeout is enabled.

Change-Id: I5a8c0f4c1e9b65d2310ccb0f669b478884072a11
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 71 +++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 23 deletions(-)
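
A minimal sketch of the pattern this patch applies (kept below the
"---" cut so git am ignores it): a single predicate decides whether
shadow BOs exist at all, and every creation site folds that same
predicate into its BO flags. Identifiers are taken from the patch;
the standalone helper below is illustrative, not the exact driver
code.

static bool need_backup(struct amdgpu_device *adev)
{
	/* An APU's VRAM is carved out of system memory, which is
	 * presumed to survive a GPU reset, so a shadow buys nothing.
	 */
	if (adev->flags & AMD_IS_APU)
		return false;

	/* With amdgpu_lockup_timeout disabled no reset is ever
	 * triggered, hence there is nothing to recover from a shadow.
	 */
	return amdgpu_lockup_timeout > 0;
}

/* Creation sites then derive their flags from the predicate, so the
 * validation lists and the PTE update paths can rely on the shadow
 * being present exactly when need_backup() says so:
 */
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
	(need_backup(adev) ? AMDGPU_GEM_CREATE_SHADOW : 0);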

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1305dc1..0e3f116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -112,6 +112,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 	list_add(&entry->tv.head, validated);
 }
 
+static bool amdgpu_vm_need_backup(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	return amdgpu_lockup_timeout > 0;
+}
+
 /**
  * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
  *
@@ -140,13 +148,18 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
 		struct amdgpu_bo_list_entry *entry_shadow = &vm->page_tables[i].entry_shadow;
 
-		if (!entry->robj || !entry_shadow->robj)
+		if (!entry->robj)
+			continue;
+
+		if (amdgpu_vm_need_backup(adev) && !entry_shadow->robj)
 			continue;
 
 		list_add(&entry->tv.head, duplicates);
-		list_add(&entry_shadow->tv.head, duplicates);
+		if (amdgpu_vm_need_backup(adev))
+			list_add(&entry_shadow->tv.head, duplicates);
 	}
-	list_add(&vm->pd_entry_shadow.tv.head, duplicates);
+	if (amdgpu_vm_need_backup(adev))
+		list_add(&vm->pd_entry_shadow.tv.head, duplicates);
 }
 
 /**
@@ -747,6 +760,8 @@ int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
 	uint64_t pt_idx;
 	int r;
 
+	if (!amdgpu_vm_need_backup(adev))
+		return 0;
 	/* bo and shadow use the same resv, so reserve only once */
 	r = amdgpu_bo_reserve(vm->page_directory, false);
 	if (unlikely(r != 0))
@@ -804,9 +819,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 {
 	int r;
 
-	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
-	if (r)
-		return r;
+	if (amdgpu_vm_need_backup(adev)) {
+		r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+		if (r)
+			return r;
+	}
+
 	return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
 }
 
@@ -1072,10 +1090,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
 	if (r)
 		goto error_free;
-	/* update shadow pt bo */
-	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
-			      last + 1, addr, flags, true);
-
+	if (amdgpu_vm_need_backup(adev)) {
+		/* update shadow pt bo */
+		amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+				      last + 1, addr, flags, true);
+	}
 	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
 			      last + 1, addr, flags, false);
 
@@ -1458,7 +1477,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW,
+				     (amdgpu_vm_need_backup(adev) ?
+				      AMDGPU_GEM_CREATE_SHADOW : 0),
 				     NULL, resv, &pt);
 		if (r)
 			goto error_free;
@@ -1481,12 +1501,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		entry->user_pages = NULL;
 		vm->page_tables[pt_idx].addr = 0;
 
-		entry_shadow->robj = pt->shadow;
-		entry_shadow->priority = 0;
-		entry_shadow->tv.bo = &entry_shadow->robj->tbo;
-		entry_shadow->tv.shared = true;
-		entry_shadow->user_pages = NULL;
-		vm->page_tables[pt_idx].addr_shadow = 0;
+		if (amdgpu_vm_need_backup(adev)) {
+			entry_shadow->robj = pt->shadow;
+			entry_shadow->priority = 0;
+			entry_shadow->tv.bo = &entry_shadow->robj->tbo;
+			entry_shadow->tv.shared = true;
+			entry_shadow->user_pages = NULL;
+			vm->page_tables[pt_idx].addr_shadow = 0;
+		}
 	}
 
 	return 0;
@@ -1667,7 +1689,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-			     AMDGPU_GEM_CREATE_SHADOW,
+			     (amdgpu_vm_need_backup(adev) ?
+			      AMDGPU_GEM_CREATE_SHADOW : 0),
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		goto error_free_sched_entity;
@@ -1682,11 +1705,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		goto error_free_page_directory;
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
 
-	vm->pd_entry_shadow.robj = vm->page_directory->shadow;
-	vm->pd_entry_shadow.priority = 0;
-	vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
-	vm->pd_entry_shadow.tv.shared = true;
-	vm->pd_entry_shadow.user_pages = NULL;
+	if (amdgpu_vm_need_backup(adev)) {
+		vm->pd_entry_shadow.robj = vm->page_directory->shadow;
+		vm->pd_entry_shadow.priority = 0;
+		vm->pd_entry_shadow.tv.bo = &vm->page_directory->shadow->tbo;
+		vm->pd_entry_shadow.tv.shared = true;
+		vm->pd_entry_shadow.user_pages = NULL;
+	}
 	spin_lock(&adev->vm_list_lock);
 	list_add_tail(&vm->list, &adev->vm_list);
 	spin_unlock(&adev->vm_list_lock);
-- 
1.9.1


