[PATCH v4 3/5] drm/ttm: add bulk move function on LRU
Huang Rui
ray.huang at amd.com
Fri Aug 17 10:07:59 UTC 2018
This function allows us to bulk move a group of BOs to the tail of their LRU.
The position of each group of BOs is tracked by the (first, last) pointers of
a ttm_lru_bulk_move_pos structure.
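
For illustration, a driver using this records each BO's new position with
ttm_bo_move_to_lru_tail() and then splices whole groups at once while holding
the LRU lock. Rough sketch only, not part of this patch; the my_drv_* helper
is made up (the real user is added later in this series):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver helper, for illustration only. */
static void my_drv_bulk_move_to_lru_tail(struct ttm_bo_device *bdev,
					 struct ttm_buffer_object **bos,
					 unsigned int count)
{
	struct ttm_lru_bulk_move bulk;
	unsigned int i;

	memset(&bulk, 0, sizeof(bulk));

	spin_lock(&bdev->glob->lru_lock);
	/* Record the (first, last) position of each group while moving. */
	for (i = 0; i < count; ++i)
		ttm_bo_move_to_lru_tail(bos[i], &bulk);
	/* One list splice per (memory type, priority) instead of per BO. */
	ttm_bo_bulk_move_lru_tail(&bulk);
	spin_unlock(&bdev->glob->lru_lock);
}

This turns N per-BO list moves into a handful of constant-time splices, one
per (memory type, priority) pair.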
Signed-off-by: Christian König <christian.koenig at amd.com>
Signed-off-by: Huang Rui <ray.huang at amd.com>
Tested-by: Mike Lothian <mike at fireburn.co.uk>
Tested-by: Dieter Nützel <Dieter at nuetzel-hh.de>
Acked-by: Chunming Zhou <david1.zhou at amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang at amd.com>
---
drivers/gpu/drm/ttm/ttm_bo.c | 52 ++++++++++++++++++++++++++++++++++++++++++++
include/drm/ttm/ttm_bo_api.h | 10 +++++++++
2 files changed, 62 insertions(+)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7117b6b..39d9d55 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -247,6 +247,58 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
+static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos,
+				    struct list_head *lru, bool is_swap)
+{
+	struct list_head entries, before;
+	struct list_head *list1, *list2;
+
+	list1 = is_swap ? &pos->last->swap : &pos->last->lru;
+	list2 = is_swap ? pos->first->swap.prev : pos->first->lru.prev;
+
+	list_cut_position(&entries, lru, list1);
+	list_cut_position(&before, &entries, list2);
+	list_splice(&before, lru);
+	list_splice_tail(&entries, lru);
+}
+
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
+{
+	unsigned i;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_mem_type_manager *man;
+
+		if (!bulk->tt[i].first)
+			continue;
+
+		man = &bulk->tt[i].first->bdev->man[TTM_PL_TT];
+		ttm_bo_bulk_move_helper(&bulk->tt[i], &man->lru[i], false);
+	}
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_mem_type_manager *man;
+
+		if (!bulk->vram[i].first)
+			continue;
+
+		man = &bulk->vram[i].first->bdev->man[TTM_PL_VRAM];
+		ttm_bo_bulk_move_helper(&bulk->vram[i], &man->lru[i], false);
+	}
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
+		struct list_head *lru;
+
+		if (!pos->first)
+			continue;
+
+		lru = &pos->first->bdev->glob->swap_lru[i];
+		ttm_bo_bulk_move_helper(&bulk->swap[i], lru, true);
+	}
+}
+EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
+
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem, bool evict,
 				  struct ttm_operation_ctx *ctx)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0d4eb81..8c19470 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -417,6 +417,16 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 			     struct ttm_lru_bulk_move *bulk);
 
 /**
+ * ttm_bo_bulk_move_lru_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
+ * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ */
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+
+/**
  * ttm_bo_lock_delayed_workqueue
  *
  * Prevent the delayed workqueue from running.
--
2.7.4
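
A note on the list surgery in ttm_bo_bulk_move_helper(): the first
list_cut_position() detaches everything from the head of the LRU up to and
including the last BO of the group, the second cut separates off the part that
precedes the first BO, and the two splices put that leading part back and
append the [first, last] range at the tail. The same pattern on a plain list,
as a sketch (struct item is made up for illustration):

#include <linux/list.h>

struct item {
	struct list_head lru;
};

/*
 * Move the range [first, last] (both already on @lru, first before last)
 * to the tail of @lru, mirroring what ttm_bo_bulk_move_helper() does.
 */
static void move_range_to_tail(struct list_head *lru,
			       struct item *first, struct item *last)
{
	struct list_head entries, before;

	/* entries = head of @lru ... @last (inclusive) */
	list_cut_position(&entries, lru, &last->lru);
	/* before = the leading part of entries that precedes @first */
	list_cut_position(&before, &entries, first->lru.prev);
	/* put the leading part back at the front of @lru ... */
	list_splice(&before, lru);
	/* ... and append [first, last] at the tail */
	list_splice_tail(&entries, lru);
}

The cost is a fixed number of pointer updates, independent of how many BOs sit
between first and last.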