[PATCH 04/13] drm/amdgpu: init/fini vm lru

Chunming Zhou <david1.zhou@amd.com>
Wed May 9 06:45:34 UTC 2018

Add a per-VM LRU container: each struct amdgpu_vm_lru holds fixed and
dynamic BO LRU lists per (memory type, priority) pair plus an optional
reservation object, and is linked into a device-wide vm_lru_list under
the TTM global LRU lock. The device-global kernel_vm_lru is set up in
amdgpu_ttm_init() and torn down in amdgpu_ttm_fini(); each VM gets its
own LRU in amdgpu_vm_init()/amdgpu_vm_fini(), with its reservation
object pointing at the root page directory's.

Change-Id: Icba45a329e2e2094581ad6c4b8b9028a2e5c5faa
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     | 37 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     | 14 +++++++++++
 5 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2d7500921c0b..f186c8f29774 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1532,6 +1532,8 @@ struct amdgpu_device {
 	dma_addr_t			dummy_page_addr;
 	struct amdgpu_vm_manager	vm_manager;
 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
+	struct amdgpu_vm_lru		kernel_vm_lru;
+	struct list_head		vm_lru_list;
 
 	/* memory management */
 	struct amdgpu_mman		mman;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 887f7c9e84e0..feafcfa2633d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2266,6 +2266,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	spin_lock_init(&adev->audio_endpt_idx_lock);
 	spin_lock_init(&adev->mm_stats.lock);
 
+	INIT_LIST_HEAD(&adev->vm_lru_list);
 	INIT_LIST_HEAD(&adev->shadow_list);
 	mutex_init(&adev->shadow_list_lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0bbb1dfdceff..207f88f38b23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1417,6 +1417,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 	adev->mman.initialized = true;
+	amdgpu_vm_lru_init(&adev->kernel_vm_lru, adev, NULL);
 
 	/* We opt to avoid OOM on system pages allocations */
 	adev->mman.bdev.no_retry = true;
@@ -1537,6 +1538,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 		return;
 
 	amdgpu_ttm_debugfs_fini(adev);
+	amdgpu_vm_lru_fini(&adev->kernel_vm_lru, adev);
 	amdgpu_ttm_fw_reserve_vram_fini(adev);
 	if (adev->mman.aper_base_kaddr)
 		iounmap(adev->mman.aper_base_kaddr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index cc6093233ae7..72ff2d9c8686 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -124,6 +124,39 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	spin_unlock(&vm->status_lock);
 }
 
+int amdgpu_vm_lru_init(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev,
+		       struct reservation_object *resv)
+{
+	struct ttm_bo_global *glob = adev->mman.bdev.glob;
+	int i, j;
+
+	INIT_LIST_HEAD(&vm_lru->vm_lru_list);
+	for (i = 0; i < TTM_NUM_MEM_TYPES; i++) {
+		for (j = 0; j < TTM_MAX_BO_PRIORITY; j++) {
+			INIT_LIST_HEAD(&vm_lru->fixed_lru[i][j]);
+			INIT_LIST_HEAD(&vm_lru->dynamic_lru[i][j]);
+		}
+	}
+	spin_lock(&glob->lru_lock);
+	list_add_tail(&vm_lru->vm_lru_list, &adev->vm_lru_list);
+	spin_unlock(&glob->lru_lock);
+
+	vm_lru->resv = resv;
+
+	return 0;
+}
+
+int amdgpu_vm_lru_fini(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev)
+{
+	struct ttm_bo_global *glob = adev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	list_del(&vm_lru->vm_lru_list);
+	spin_unlock(&glob->lru_lock);
+
+	return 0;
+}
+
 struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 						     uint32_t mem_type,
 						     const struct ttm_place *place,
@@ -2413,6 +2446,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	uint64_t flags;
 	int r, i;
 
+	amdgpu_vm_lru_init(&vm->vm_lru, adev, NULL);
 	vm->va = RB_ROOT_CACHED;
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
@@ -2468,7 +2502,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	r = amdgpu_bo_create(adev, &bp, &root);
 	if (r)
 		goto error_free_sched_entity;
-
+	vm->vm_lru.resv = root->tbo.resv;
 	r = amdgpu_bo_reserve(root, true);
 	if (r)
 		goto error_free_root;
@@ -2672,6 +2706,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 				      adev->vm_manager.root_level);
 		amdgpu_bo_unreserve(root);
 	}
+	amdgpu_vm_lru_fini(&vm->vm_lru, adev);
 	amdgpu_bo_unref(&root);
 	dma_fence_put(vm->last_update);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 0c965683faba..66ee902614a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -29,6 +29,7 @@
 #include <linux/rbtree.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/drm_file.h>
+#include <drm/ttm/ttm_bo_driver.h>
 
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
@@ -135,6 +136,13 @@ enum amdgpu_vm_level {
 	AMDGPU_VM_PTB
 };
 
+struct amdgpu_vm_lru {
+	struct list_head vm_lru_list;
+	struct list_head fixed_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+	struct list_head dynamic_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+	struct reservation_object *resv;
+};
+
 /* base structure for tracking BO usage in a VM */
 struct amdgpu_vm_bo_base {
 	/* constant after initialization */
@@ -167,6 +175,7 @@ struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root_cached	va;
 
+	struct amdgpu_vm_lru	vm_lru;
 	/* protecting invalidated */
 	spinlock_t		status_lock;
 
@@ -256,6 +265,11 @@ struct amdgpu_vm_manager {
 	spinlock_t				pasid_lock;
 };
 
+int amdgpu_vm_lru_init(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev,
+		       struct reservation_object *resv);
+int amdgpu_vm_lru_fini(struct amdgpu_vm_lru *vm_lru,
+		       struct amdgpu_device *adev);
+
 struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 						     uint32_t mem_type,
 						     const struct ttm_place *place,
-- 
2.14.1
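
For readers skimming the archive, below is a minimal standalone userspace
sketch of the init/fini pattern this patch introduces: a per-VM LRU with
fixed and dynamic list heads for every (memory type, priority) pair,
linked into a device-wide list under a lock. The tiny list helpers, the
NUM_MEM_TYPES/MAX_BO_PRIORITY constants and the struct device/vm_lru
names are simplified stand-ins for the TTM/amdgpu ones, not the kernel
code itself (which also carries an optional reservation_object and
returns int from init/fini).

/* Standalone sketch only -- simplified stand-ins for the kernel types. */
#include <pthread.h>
#include <stdio.h>

#define NUM_MEM_TYPES   8	/* stand-in for TTM_NUM_MEM_TYPES */
#define MAX_BO_PRIORITY 4	/* stand-in for TTM_MAX_BO_PRIORITY */

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

struct vm_lru {
	struct list_head vm_lru_list;	/* link into the device-wide list */
	struct list_head fixed_lru[NUM_MEM_TYPES][MAX_BO_PRIORITY];
	struct list_head dynamic_lru[NUM_MEM_TYPES][MAX_BO_PRIORITY];
};

struct device {
	pthread_mutex_t lru_lock;	/* plays the role of glob->lru_lock */
	struct list_head vm_lru_list;	/* all per-VM LRUs on this device */
};

static void vm_lru_init(struct vm_lru *lru, struct device *dev)
{
	int i, j;

	INIT_LIST_HEAD(&lru->vm_lru_list);
	for (i = 0; i < NUM_MEM_TYPES; i++) {
		for (j = 0; j < MAX_BO_PRIORITY; j++) {
			INIT_LIST_HEAD(&lru->fixed_lru[i][j]);
			INIT_LIST_HEAD(&lru->dynamic_lru[i][j]);
		}
	}
	pthread_mutex_lock(&dev->lru_lock);
	list_add_tail(&lru->vm_lru_list, &dev->vm_lru_list);
	pthread_mutex_unlock(&dev->lru_lock);
}

static void vm_lru_fini(struct vm_lru *lru, struct device *dev)
{
	pthread_mutex_lock(&dev->lru_lock);
	list_del(&lru->vm_lru_list);
	pthread_mutex_unlock(&dev->lru_lock);
}

int main(void)
{
	struct device dev = { .lru_lock = PTHREAD_MUTEX_INITIALIZER };
	struct vm_lru kernel_lru, per_vm_lru;

	INIT_LIST_HEAD(&dev.vm_lru_list);
	vm_lru_init(&kernel_lru, &dev);	/* device-global, as in amdgpu_ttm_init() */
	vm_lru_init(&per_vm_lru, &dev);	/* per VM, as in amdgpu_vm_init() */

	vm_lru_fini(&per_vm_lru, &dev);
	vm_lru_fini(&kernel_lru, &dev);
	printf("init/fini pattern exercised\n");
	return 0;
}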