[PATCH 13/13] drm/amdgpu: free vm lru when vm fini

Chunming Zhou <david1.zhou@amd.com>
Wed May 9 06:45:43 UTC 2018


That means a BO is no longer a per-VM BO once its VM is finalized; it goes back to being a normal BO instead.
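
In other words, amdgpu_vm_lru_fini() clears abo->vm_lru for every BO still
on the per-VM LRUs, and the fallback in amdgpu_vm_add_to_lru() (second hunk
below) then routes such BOs onto the global TTM LRU again. A minimal sketch
of that fallback path, assuming the TTM structures of this era (one LRU list
per priority in ttm_mem_type_manager):

	/* Sketch: a BO without a per-VM LRU takes the ordinary TTM path. */
	if (bo->destroy == ttm_transfered_destroy || !abo->vm_lru) {
		struct ttm_mem_type_manager *man;

		man = &bo->bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
	}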

Change-Id: Ida56abd0351422dd0b4a4393545c9cdb0e1a6818
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 49 ++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a425d498f3fc..89c2cbbce436 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -150,10 +150,33 @@ int amdgpu_vm_lru_init(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev,
 int amdgpu_vm_lru_fini(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev)
 {
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
+	struct ttm_buffer_object *bo = NULL;
+	struct amdgpu_bo *abo = NULL;
+	struct rb_node *node;
+	int i, j;
+	bool locked;
 
+	locked = reservation_object_trylock(vm_lru->resv);
 	spin_lock(&glob->lru_lock);
 	list_del(&vm_lru->vm_lru_list);
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; i++) {
+		for (j = 0; j < TTM_NUM_MEM_TYPES; j++) {
+			list_for_each_entry(bo, &vm_lru->dynamic_lru[j][i], lru) {
+				abo = ttm_to_amdgpu_bo(bo);
+				abo->vm_lru = NULL;
+				abo->index = 0;
+			}
+			for (node = rb_first(&vm_lru->fixed_lru[j][i]);
+			     node; node = rb_next(node)) {
+				abo = rb_entry(node, struct amdgpu_bo, node);
+				abo->vm_lru = NULL;
+				abo->index = 0;
+			}
+		}
+	}
 	spin_unlock(&glob->lru_lock);
+	if (locked)
+		reservation_object_unlock(vm_lru->resv);
 
 	return 0;
 }
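
For context, the two walks above cover both per-VM LRU flavors added earlier
in this series. Reconstructed from the indexing in this hunk (a sketch, not
necessarily the exact definition), the bookkeeping presumably looks like:

	struct amdgpu_vm_lru {
		struct list_head	vm_lru_list;
		/* dynamic BOs: per mem type, per priority LRU lists */
		struct list_head	dynamic_lru[TTM_NUM_MEM_TYPES]
						   [TTM_MAX_BO_PRIORITY];
		/* fixed BOs: rbtrees keyed by abo->index */
		struct rb_root		fixed_lru[TTM_NUM_MEM_TYPES]
						 [TTM_MAX_BO_PRIORITY];
		struct reservation_object	*resv;
	};

Clearing abo->vm_lru and abo->index under glob->lru_lock is what turns each
BO back into a normal one; the trylock on vm_lru->resv is best effort, since
fini proceeds either way and only unlocks when the trylock succeeded.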
@@ -253,12 +277,16 @@ static void amdgpu_vm_bo_add_to_rb(struct amdgpu_bo *bo,
 void amdgpu_vm_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-	struct amdgpu_vm_lru *vm_lru = abo->vm_lru;
+	struct amdgpu_bo *abo;
+	struct amdgpu_vm_lru *vm_lru = NULL;
 	struct ttm_mem_type_manager *man;
 
+	if (bo->destroy != ttm_transfered_destroy) {
+		abo = ttm_to_amdgpu_bo(bo);
+		vm_lru = abo->vm_lru;
+	}
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		if (bo->destroy == ttm_transfered_destroy) {
+		if (bo->destroy == ttm_transfered_destroy || !vm_lru) {
 			BUG_ON(!list_empty(&bo->lru));
 
 			man = &bdev->man[bo->mem.mem_type];
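
The bo->destroy == ttm_transfered_destroy checks throughout this patch guard
the downcast: a "ghost" BO created by ttm_buffer_object_transfer() during a
move is a bare ttm_buffer_object that is not embedded in an amdgpu_bo, so
ttm_to_amdgpu_bo() must not be applied to it. The downcast itself is the
usual container_of() idiom from amdgpu_object.h:

	static inline struct amdgpu_bo *
	ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
	{
		return container_of(tbo, struct amdgpu_bo, tbo);
	}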
@@ -300,11 +328,15 @@ static struct amdgpu_bo *amdgpu_vm_bo_rb_find(struct rb_root *root, u64 index)
 
 void amdgpu_vm_del_from_lru(struct ttm_buffer_object *bo)
 {
-	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-	struct amdgpu_vm_lru *vm_lru = abo->vm_lru;
+	struct amdgpu_bo *abo;
+	struct amdgpu_vm_lru *vm_lru;
 
 	if (bo->destroy == ttm_transfered_destroy)
 		return;
+	abo = ttm_to_amdgpu_bo(bo);
+	vm_lru = abo->vm_lru;
+	if (!vm_lru)
+		return;
 	if (amdgpu_vm_bo_rb_find(&vm_lru->fixed_lru[bo->mem.mem_type][bo->priority],
 				 abo->index)) {
 		rb_erase(&abo->node,
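
amdgpu_vm_bo_rb_find() (added earlier in this series; its signature is
visible in the hunk header above) is presumably the classic rbtree lookup
keyed on abo->index, along the lines of:

	static struct amdgpu_bo *amdgpu_vm_bo_rb_find(struct rb_root *root,
						      u64 index)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct amdgpu_bo *abo = rb_entry(node, struct amdgpu_bo,
							 node);

			if (index < abo->index)
				node = node->rb_left;
			else if (index > abo->index)
				node = node->rb_right;
			else
				return abo;
		}
		return NULL;
	}

Only a successful lookup leads to rb_erase() above, which keeps the erase
safe for BOs that were never inserted into the fixed tree.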
@@ -315,12 +347,16 @@ void amdgpu_vm_del_from_lru(struct ttm_buffer_object *bo)
 
 void amdgpu_vm_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-	struct amdgpu_vm_lru *vm_lru = abo->vm_lru;
+	struct amdgpu_bo *abo;
+	struct amdgpu_vm_lru *vm_lru;
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
 	if (bo->destroy == ttm_transfered_destroy)
 		return;
+	abo = ttm_to_amdgpu_bo(bo);
+	vm_lru = abo->vm_lru;
+	if (!vm_lru)
+		return;
 	if (bo->resv == vm_lru->resv)
 		list_move_tail(&vm_lru->vm_lru_list, &adev->vm_lru_list);
 }
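
(Note the bo->resv == vm_lru->resv test in this last hunk: per-VM BOs share
the VM's reservation object, so only they bump the VM's slot on the
adev-wide vm_lru_list; after fini, the early !vm_lru return makes this a
no-op for the now-detached BOs.)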
-- 
2.14.1


