[PATCH 05/11] drm/ttm: add per process lru

Chunming Zhou david1.zhou at amd.com
Thu Apr 12 10:09:34 UTC 2018
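
Instead of keeping the LRU lists per memory type in ttm_mem_type_manager,
track buffer objects on per-process LRU lists.  For every memory type and
priority a ttm_process carries a fixed_lru list for BOs that share the
process reservation object and a dynamic_lru list for BOs that have their
own reservation object.  ttm_bo_add_to_lru() now inserts a BO into the
matching per-process list, and ttm_mem_evict_first() walks
bdev->process_list, scanning the dynamic lists ahead of the fixed ones when
picking an eviction candidate.  Also add ttm_process_fini() so a process
can be unregistered from the device again.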


Change-Id: Id2333f69119222a7e9bdb0357bbed97cf08636da
Signed-off-by: Chunming Zhou <david1.zhou at amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    | 64 +++++++++++++++++++++++++++++++++++++++++-------------------
 include/drm/ttm/ttm_bo_driver.h |  3 +--
 2 files changed, 46 insertions(+), 21 deletions(-)
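
For review, a quick sketch of the struct ttm_process layout this patch relies
on (the struct itself is already in the tree at this point in the series);
the field names are taken from the accesses below, the array bounds are an
assumption:

	struct ttm_process {
		/* link into bdev->process_list, protected by glob->lru_lock */
		struct list_head	process_list;
		/* reservation object shared by this process' "fixed" BOs */
		struct reservation_object *resv;
		/* BOs sharing process->resv, per memory type and priority */
		struct list_head	fixed_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
		/* BOs carrying their own reservation object */
		struct list_head	dynamic_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
	};

A driver is expected to pair the new ttm_process_fini() with ttm_process_init()
over the lifetime of its per-file data.  Hypothetical caller, fpriv and its
members are illustrative only and not part of this patch:

	/* open: register the process with the device and set up its LRUs */
	ret = ttm_process_init(&fpriv->ttm_process, bdev, fpriv->resv);
	if (ret)
		return ret;
	...
	/* release: unlink the process from bdev->process_list */
	ttm_process_fini(&fpriv->ttm_process, bdev);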

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b740d8f390ca..c1d0ec1238c6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -161,15 +161,20 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
+	struct ttm_process *ttm_process = bo->process;
 
 	reservation_object_assert_held(bo->resv);
 
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		BUG_ON(!list_empty(&bo->lru));
 
-		man = &bdev->man[bo->mem.mem_type];
-		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 		kref_get(&bo->list_kref);
+		if (bo->resv == ttm_process->resv)
+			list_add_tail(&bo->lru,
+				      &ttm_process->fixed_lru[bo->mem.mem_type][bo->priority]);
+		else
+			list_add_tail(&bo->lru,
+				      &ttm_process->dynamic_lru[bo->mem.mem_type][bo->priority]);
 
 		if (bo->ttm && !(bo->ttm->page_flags &
 				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
@@ -712,13 +717,31 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo = NULL;
+	struct ttm_process *process;
 	bool locked = false;
 	unsigned i;
 	int ret;
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		list_for_each_entry(bo, &man->lru[i], lru) {
+	list_for_each_entry(process, &bdev->process_list, process_list) {
+		list_for_each_entry(bo, &process->dynamic_lru[mem_type][i], lru) {
+			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
+				continue;
+
+			if (place && !bdev->driver->eviction_valuable(bo,
+								      place)) {
+				if (locked)
+					reservation_object_unlock(bo->resv);
+				continue;
+			}
+			break;
+		}
+		/* If the inner loop terminated early, we have our candidate */
+		if (&bo->lru != &process->dynamic_lru[mem_type][i])
+			break;
+		bo = NULL;
+		list_for_each_entry(bo, &process->fixed_lru[mem_type][i], lru) {
 			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
 				continue;
 
@@ -732,11 +755,14 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 		}
 
 		/* If the inner loop terminated early, we have our candidate */
-		if (&bo->lru != &man->lru[i])
+		if (&bo->lru != &process->fixed_lru[mem_type][i])
 			break;
 
 		bo = NULL;
 	}
+	if (bo)
+		break;
+	}
 
 	if (!bo) {
 		spin_unlock(&glob->lru_lock);
@@ -1318,13 +1344,11 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		while (!list_empty(&man->lru[i])) {
-			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
-			if (ret)
-				return ret;
-			spin_lock(&glob->lru_lock);
-		}
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+		if (ret)
+			return ret;
+		spin_lock(&glob->lru_lock);
 	}
 	spin_unlock(&glob->lru_lock);
 
@@ -1427,9 +1451,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	man->has_type = true;
 	man->use_type = true;
 	man->size = p_size;
 
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
-		INIT_LIST_HEAD(&man->lru[i]);
 	man->move = NULL;
 
 	return 0;
@@ -1518,13 +1540,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
 	if (ttm_bo_delayed_delete(bdev, true))
 		pr_debug("Delayed destroy list was clean\n");
 
-	spin_lock(&glob->lru_lock);
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
-		if (list_empty(&bdev->man[0].lru[0]))
-			pr_debug("Swap list %d was clean\n", i);
-	spin_unlock(&glob->lru_lock);
-
 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
 	return ret;
@@ -1591,6 +1607,16 @@ int ttm_process_init(struct ttm_process *process, struct ttm_bo_device *bdev,
 	return 0;
 }
 EXPORT_SYMBOL(ttm_process_init);
+
+int ttm_process_fini(struct ttm_process *process, struct ttm_bo_device *bdev)
+{
+	spin_lock(&bdev->glob->lru_lock);
+	list_del(&process->process_list);
+	spin_unlock(&bdev->glob->lru_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_process_fini);
 
 /*
  * buffer object vm functions.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 91120923de81..b6aa7fc5bf14 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -199,7 +199,5 @@ struct ttm_mem_type_manager {
 	 * Protected by the global->lru_lock.
 	 */
 
-	struct list_head lru[TTM_MAX_BO_PRIORITY];
-
 	/*
 	 * Protected by @move_lock.
@@ -585,6 +583,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
 
 int ttm_process_init(struct ttm_process *process, struct ttm_bo_device *bdev,
 		     struct reservation_object *resv);
+int ttm_process_fini(struct ttm_process *process, struct ttm_bo_device *bdev);
 /**
  * ttm_bo_unmap_virtual
  *
-- 
2.14.1


