[CI 3/3] drm/ttm: Consider hitch moves within bulk sublist moves

Thomas Hellström thomas.hellstrom at linux.intel.com
Tue Feb 6 12:17:16 UTC 2024


To work around the problem of hitches moving along when bulk move
sublists are bumped, keep a second hitch while traversing a bulk
move sublist. This second hitch is inserted in the LRU list just
*after* the bulk move sublist, and if a sublist bump is detected
it is used as the continuation point of the list traversal.

Sublist bumps are detected by checking the sublist age, which is
incremented by one each time the sublist is bumped. That age is
compared against the age recorded when the iteration last returned
an item from within the sublist.

Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
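Not part of the change itself: below is a minimal sketch of the caller
side that the extra hitch is meant to keep working, assuming the usual
bdev->lru_lock protection. The bdev/man variables and the
do_something_with() helper are made up for the example; the rest uses
the cursor API as declared in ttm_resource.h. The bulk_hitch/age
handling is internal to ttm_resource_manager_next(), so a walk like
this needs no changes even if the bulk sublist it is traversing gets
bumped underneath it:

        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;

        spin_lock(&bdev->lru_lock);
        for (res = ttm_resource_manager_first(man, &cursor); res;
             res = ttm_resource_manager_next(&cursor)) {
                /*
                 * The hitch marks the current position, so callers may
                 * (carefully) drop and re-take the lru_lock in here and
                 * still continue from the right spot.
                 */
                if (do_something_with(res))
                        break;
        }
        ttm_resource_cursor_fini_locked(&cursor);
        spin_unlock(&bdev->lru_lock);
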
 drivers/gpu/drm/ttm/ttm_resource.c | 65 ++++++++++++++++++++++++++++--
 include/drm/ttm/ttm_resource.h     | 38 +++++++++--------
 2 files changed, 83 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index f45b8bd5e7c4..f299152b38a8 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -32,10 +32,18 @@
 
 #include <drm/drm_util.h>
 
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+	cursor->bulk = NULL;
+	list_del_init(&cursor->bulk_hitch.link);
+}
+
 void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor)
 {
 	lockdep_assert_held(&cursor->man->bdev->lru_lock);
-	list_del(&cursor->hitch.link);
+	list_del_init(&cursor->hitch.link);
+	ttm_resource_cursor_clear_bulk(cursor);
 }
 
 void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
@@ -88,6 +96,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
 					    &pos->last->lru.link);
 		}
 	}
+	atomic64_inc(&bulk->age);
 }
 EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
 
@@ -487,6 +496,52 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
 }
 EXPORT_SYMBOL(ttm_resource_manager_debug);
 
+static bool
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+			       struct ttm_lru_item *next_lru)
+{
+	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+	struct ttm_lru_bulk_move *bulk = NULL;
+	struct ttm_buffer_object *bo = next->bo;
+
+	if (bo && bo->resource == next)
+		bulk = bo->bulk_move;
+
+	if (!bulk) {
+		ttm_resource_cursor_clear_bulk(cursor);
+		return false;
+	}
+
+	/*
+	 * We encountered a bulk sublist. Record its age and
+	 * set a hitch after the sublist.
+	 */
+	if (cursor->bulk != bulk) {
+		struct ttm_lru_bulk_move_pos *pos =
+			ttm_lru_bulk_move_pos(bulk, next);
+
+		cursor->bulk = bulk;
+		cursor->bulk_age = atomic64_read(&bulk->age);
+		list_move(&cursor->bulk_hitch.link, &pos->last->lru.link);
+		return false;
+	}
+
+	/* Continue iterating down the bulk sublist */
+	if (cursor->bulk_age == atomic64_read(&bulk->age))
+		return false;
+
+	/*
+	 * The bulk sublist in which we had a hitch has moved and the
+	 * hitch moved with it. Restart iteration from a previously
+	 * set hitch after the bulk_move, and remove that backup
+	 * hitch.
+	 */
+	list_move(&cursor->hitch.link, &cursor->bulk_hitch.link);
+	ttm_resource_cursor_clear_bulk(cursor);
+
+	return true;
+}
+
 /**
  * ttm_resource_manager_next
  *
@@ -508,18 +563,21 @@ ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
 		lru = &cursor->hitch;
 		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
 			if (ttm_lru_item_is_res(lru)) {
+				if (ttm_resource_cursor_check_bulk(cursor, lru))
+					continue;
 				list_move(&cursor->hitch.link, &lru->link);
 				return ttm_lru_item_to_res(lru);
 			}
 		}
 
-		if (cursor->priority++ >= TTM_MAX_BO_PRIORITY)
+		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
 			break;
 
 		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+		ttm_resource_cursor_clear_bulk(cursor);
 	} while (true);
 
-	list_del_init(&cursor->hitch.link);
+	ttm_resource_cursor_fini_locked(cursor);
 
 	return NULL;
 }
@@ -541,6 +599,7 @@ ttm_resource_manager_first(struct ttm_resource_manager *man,
 	cursor->priority = 0;
 	cursor->man = man;
 	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+	ttm_lru_item_init(&cursor->bulk_hitch, TTM_LRU_HITCH);
 	list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
 
 	return ttm_resource_manager_next(cursor);
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 5becd784ba25..3e969b6b90b5 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -248,23 +248,6 @@ ttm_lru_item_to_res(struct ttm_lru_item *item)
 	return container_of(item, struct ttm_resource, lru);
 }
 
-/**
- * struct ttm_resource_cursor
- *
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
- */
-struct ttm_resource_cursor {
-	struct ttm_resource_manager *man;
-	struct ttm_lru_item hitch;
-	unsigned int priority;
-};
-
-void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor);
-
-void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
-
 /**
  * struct ttm_lru_bulk_move_pos
  *
@@ -288,8 +271,29 @@ struct ttm_lru_bulk_move_pos {
  */
 struct ttm_lru_bulk_move {
 	struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+	atomic64_t age;
 };
 
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+	struct ttm_resource_manager *man;
+	struct ttm_lru_item hitch;
+	struct ttm_lru_item bulk_hitch;
+	struct ttm_lru_bulk_move *bulk;
+	u64 bulk_age;
+	unsigned int priority;
+};
+
+void ttm_resource_cursor_fini_locked(struct ttm_resource_cursor *cursor);
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
 /**
  * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
  * struct sg_table backed struct ttm_resource.
-- 
2.43.0


