[PATCH 52/70] drm/i915: Replace priolist rbtree with a skiplist

Chris Wilson <chris@chris-wilson.co.uk>
Sun Dec 20 11:52:37 UTC 2020


Replace the priolist rbtree with a skiplist. The crucial difference is
that walking and removing the first element of a skiplist is O(1),
whereas for an rbtree it is O(lgN), as the tree must be rebalanced on
removal. That rebalancing is a hindrance to submission latency, since
it occurs between picking a request from the priolist and submitting it
to hardware.
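
As an aside for reviewers (not part of the change itself): removing the
head of a skiplist only requires redirecting the sentinel's forward
pointers past the departing node, which is what i915_priolist_advance()
below boils down to. A minimal sketch using the i915_priolist layout
introduced by this patch; priolist_pop_first() is a hypothetical helper
for illustration only:

    static struct i915_priolist *
    priolist_pop_first(struct i915_priolist_root *root)
    {
            struct i915_priolist *pl = root->sentinel.next[0];
            int lvl;

            if (pl == &root->sentinel) /* empty skiplist */
                    return NULL;

            /*
             * Unlink at each level the node occupies: bounded by the
             * node's height, with none of the rotations required by
             * rb_erase_cached().
             */
            for (lvl = 0; lvl <= pl->level; lvl++)
                    root->sentinel.next[lvl] = pl->next[lvl];

            return pl;
    }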

The downsides of skiplists are that lookup/insertion is only
probabilistically O(lgN) and that there is a significant memory
penalty, as each skip node is larger than its rbtree equivalent.
Furthermore, we don't use dynamic arrays for the skiplist, so the
allocation is fixed in size and imposes an upper bound on scalability
with respect to the number of inflight requests.
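
To make the probabilistic bound concrete (illustration only): each
element is assigned a random height by random_level() below, with
roughly a 1-in-4 chance of gaining each extra level, so a search that
starts at the sentinel's top level and drops a level whenever it would
overshoot is expected to take O(lgN) steps. The fixed
next[I915_PRIOLIST_HEIGHT] array (12 pointers on 64-bit) is both the
per-node memory cost and the cap on how many elements the list can
index efficiently. A rough sketch of that descent, assuming the
sentinel's deadline is initialised to I915_DEADLINE_NEVER (as in
init_priolist()) and the target deadline is below it (as the patch
asserts); priolist_lookup() is a hypothetical helper:

    static struct i915_priolist *
    priolist_lookup(struct i915_priolist_root *root, u64 deadline)
    {
            struct i915_priolist *pl = &root->sentinel;
            int lvl;

            for (lvl = root->sentinel.level; lvl >= 0; lvl--) {
                    /*
                     * Walk right while the next element has not passed
                     * the target, then drop down a level; the sentinel
                     * (deadline NEVER) terminates each level.
                     */
                    while (pl->next[lvl]->deadline <= deadline)
                            pl = pl->next[lvl];
            }

            /* Last element with deadline <= target, or NULL if none. */
            return pl == &root->sentinel ? NULL : pl;
    }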

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../drm/i915/gt/intel_execlists_submission.c  |  85 ++------
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  27 +--
 drivers/gpu/drm/i915/i915_priolist_types.h    |  29 ++-
 drivers/gpu/drm/i915/i915_scheduler.c         | 197 +++++++++++-------
 drivers/gpu/drm/i915/i915_scheduler.h         |  24 +--
 drivers/gpu/drm/i915/i915_scheduler_types.h   |   2 +-
 .../gpu/drm/i915/selftests/i915_scheduler.c   |   5 +-
 7 files changed, 181 insertions(+), 188 deletions(-)
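
Reviewer note (not part of the change itself): each consuming loop
below is converted from the "rb_first_cached(), erase, free" pattern to
the same shape, walking the skiplist and retiring every emptied element
with i915_priolist_advance(), which also caches it for reuse. Roughly,
with engine->active.lock held as before:

    struct i915_priolist *p;
    struct i915_request *rq, *rn;

    for_each_priolist(p, &engine->active.queue) {
            priolist_for_each_request_safe(rq, rn, p) {
                    /* ... submit, cancel or otherwise consume rq ... */
            }
            /* Step the sentinel past the now-empty element. */
            i915_priolist_advance(&engine->active.queue, p);
    }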

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index cfcfaa060e01..0f219576bf8d 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -365,11 +365,6 @@ ring_set_paused(const struct intel_engine_cs *engine, int state)
 		wmb();
 }
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
 static inline int rq_prio(const struct i915_request *rq)
 {
 	return rq->sched.attr.priority;
@@ -380,28 +375,20 @@ static inline u64 rq_deadline(const struct i915_request *rq)
 	return rq->sched.deadline;
 }
 
-static const struct i915_request *
-first_queue_request(struct intel_engine_cs *engine)
+static struct i915_request *first_queue_request(struct i915_sched_engine *se)
 {
-	struct i915_sched_engine *se = &engine->active;
-
-	do {
-		struct i915_priolist *p;
-		struct rb_node *rb;
-
-		rb = rb_first_cached(&se->queue);
-		if (!rb)
-			return NULL;
+	struct i915_priolist *p;
 
-		p = to_priolist(rb);
+	for_each_priolist(p, &se->queue) {
 		if (likely(!list_empty(&p->requests)))
 			return list_first_entry(&p->requests,
 						struct i915_request,
 						sched.link);
 
-		rb_erase_cached(&p->node, &se->queue);
-		i915_priolist_free(p);
-	} while (1);
+		i915_priolist_advance(&se->queue, p);
+	}
+
+	return NULL;
 }
 
 static struct virtual_engine *
@@ -414,23 +401,7 @@ first_virtual_engine(struct intel_engine_cs *engine)
 
 static struct i915_request *__first_virtual_request(struct virtual_engine *ve)
 {
-	struct rb_node *rb;
-
-	while ((rb = rb_first_cached(&ve->base.active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-
-		if (list_empty(&p->requests)) {
-			rb_erase_cached(&p->node, &ve->base.active.queue);
-			i915_priolist_free(p);
-			continue;
-		}
-
-		return list_first_entry(&p->requests,
-					struct i915_request,
-					sched.link);
-	}
-
-	return NULL;
+	return first_queue_request(&ve->base.active);
 }
 
 static const struct i915_request *
@@ -502,7 +473,7 @@ static inline bool need_preempt(struct intel_engine_cs *engine,
 	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
 	 * context, it's priority would not exceed ELSP[0] aka last_prio.
 	 */
-	next = first_queue_request(engine);
+	next = first_queue_request(&engine->active);
 	if (deadline_before(next, first))
 		first = next;
 
@@ -2018,7 +1989,8 @@ static void __virtual_dequeue(struct virtual_engine *ve,
 static void virtual_requeue(struct intel_engine_cs *engine,
 			    struct i915_request *last)
 {
-	const struct i915_request * const first = first_queue_request(engine);
+	const struct i915_request * const first =
+		first_queue_request(&engine->active);
 	struct virtual_engine *ve;
 
 	while ((ve = first_virtual_engine(engine))) {
@@ -2094,8 +2066,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
 	struct i915_request *last, * const *active;
-	struct list_head *free = NULL;
-	struct rb_node *rb;
+	struct i915_priolist *p;
 	bool submit = false;
 
 	/*
@@ -2214,13 +2185,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 	virtual_requeue(engine, last);
 
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
+	for_each_priolist(p, &engine->active.queue) {
 		struct i915_request *rq, *rn;
 
-		priolist_for_each_request_consume(rq, rn, p) {
+		priolist_for_each_request_safe(rq, rn, p) {
 			bool merge = true;
 
+			GEM_BUG_ON(rq_deadline(rq) != p->deadline);
 			GEM_BUG_ON(rq->engine != engine);
 
 			/*
@@ -2298,9 +2269,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			}
 		}
 
-		/* Remove the node, but defer the free for later */
-		rb_erase_cached(&p->node, &engine->active.queue);
-		free = i915_priolist_free_defer(p, free);
+		i915_priolist_advance(&engine->active.queue, p);
 	}
 done:
 	*port++ = i915_request_get(last);
@@ -2328,12 +2297,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			i915_request_put(*port);
 		*execlists->pending = NULL;
 	}
-
-	/*
-	 * We noticed that kmem_cache_free() may cause 1ms+ latencies, so
-	 * we defer the frees until after we have submitted the ELSP.
-	 */
-	i915_priolist_free_many(free);
 }
 
 static void execlists_dequeue_irq(struct intel_engine_cs *engine)
@@ -4051,6 +4014,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
+	struct i915_priolist *p;
 	struct rb_node *rb;
 	unsigned long flags;
 
@@ -4081,16 +4045,12 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-
-		priolist_for_each_request_consume(rq, rn, p) {
+	for_each_priolist(p, &engine->active.queue) {
+		priolist_for_each_request_safe(rq, rn, p) {
 			mark_eio(rq);
 			__i915_request_submit(rq);
 		}
-
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, p);
 	}
 	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
 
@@ -5052,6 +5012,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	struct i915_request *rq, *last;
+	struct i915_priolist *p;
 	unsigned long flags;
 	unsigned int count;
 	struct rb_node *rb;
@@ -5077,9 +5038,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 
 	last = NULL;
 	count = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-
+	for_each_priolist(p, &engine->active.queue) {
 		priolist_for_each_request(rq, p) {
 			if (count++ < max - 1)
 				show_request(m, rq, "\t\t", 0);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 5a1f6ed4b0c8..453fa8746c75 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -55,11 +55,6 @@
  *
  */
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
 static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
 {
 	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
@@ -292,8 +287,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	struct i915_request ** const last_port = first + execlists->port_mask;
 	struct i915_request *last = first[0];
 	struct i915_request **port;
+	struct i915_priolist *p;
 	bool submit = false;
-	struct rb_node *rb;
 
 	lockdep_assert_held(&engine->active.lock);
 
@@ -310,11 +305,10 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	 * event.
 	 */
 	port = first;
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
+	for_each_priolist(p, &engine->active.queue) {
 		struct i915_request *rq, *rn;
 
-		priolist_for_each_request_consume(rq, rn, p) {
+		priolist_for_each_request_safe(rq, rn, p) {
 			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
@@ -330,8 +324,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 			last = rq;
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, p);
 	}
 done:
 	if (submit) {
@@ -426,7 +419,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
-	struct rb_node *rb;
+	struct i915_priolist *p;
 	unsigned long flags;
 
 	ENGINE_TRACE(engine, "\n");
@@ -457,18 +450,14 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-
-		priolist_for_each_request_consume(rq, rn, p) {
+	for_each_priolist(p, &engine->active.queue) {
+		priolist_for_each_request_safe(rq, rn, p) {
 			list_del_init(&rq->sched.link);
 			__i915_request_submit(rq);
 			dma_fence_set_error(&rq->fence, -EIO);
 			i915_request_mark_complete(rq);
 		}
-
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, p);
 	}
 	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
 
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 43a0ac45295f..839c48f4b0e9 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -39,10 +39,37 @@ enum {
  */
 #define I915_PRIORITY_BARRIER INT_MAX
 
+#ifdef CONFIG_64BIT
+#define I915_PRIOLIST_HEIGHT 12
+#else
+#define I915_PRIOLIST_HEIGHT 11
+#endif
+
 struct i915_priolist {
 	struct list_head requests;
-	struct rb_node node;
 	u64 deadline;
+
+	int level;
+	struct i915_priolist *next[I915_PRIOLIST_HEIGHT];
+};
+
+struct i915_priolist_root {
+	struct i915_priolist sentinel;
+	u32 prng;
 };
 
+#define priolist_first(root) ((root)->sentinel.next[0])
+#define i915_priolist_is_empty(root) (priolist_first(root) == &(root)->sentinel)
+
+#define for_each_priolist(p, root) \
+	for ((p) = priolist_first(root); \
+	     (p) != &(root)->sentinel; \
+	     (p) = (p)->next[0])
+
+#define priolist_for_each_request(it, plist) \
+	list_for_each_entry(it, &(plist)->requests, sched.link)
+
+#define priolist_for_each_request_safe(it, n, plist) \
+	list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
+
 #endif /* _I915_PRIOLIST_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 1ad6fa0b80b3..2a2e8e16ca7d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -4,7 +4,9 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/prandom.h>
 
 #include "gt/intel_ring.h"
 #include "gt/intel_lrc_reg.h"
@@ -124,15 +126,25 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
 	ipi->list = NULL;
 }
 
+static void init_priolist(struct i915_priolist_root *const root)
+{
+	struct i915_priolist *pl = &root->sentinel;
+
+	memset_p((void **)pl->next, pl, ARRAY_SIZE(pl->next));
+	pl->deadline = I915_DEADLINE_NEVER;
+	INIT_LIST_HEAD(&pl->requests);
+	pl->level = 0;
+}
+
 void i915_sched_init_engine(struct i915_sched_engine *se,
 			    unsigned int subclass)
 {
 	spin_lock_init(&se->lock);
 	lockdep_set_subclass(&se->lock, subclass);
 
+	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
 	INIT_LIST_HEAD(&se->hold);
-	se->queue = RB_ROOT_CACHED;
 
 	i915_sched_init_ipi(&se->ipi);
 
@@ -149,8 +161,34 @@ void i915_sched_init_engine(struct i915_sched_engine *se,
 #endif
 }
 
+__maybe_unused
+static bool priolist_is_empty(struct i915_priolist_root *root)
+{
+	struct i915_priolist *pl = &root->sentinel;
+	int lvl;
+
+	for (lvl = 0; lvl < ARRAY_SIZE(pl->next); lvl++) {
+		if (pl->next[lvl] != pl) {
+			GEM_TRACE_ERR("root[%d] is not empty\n", lvl);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 void i915_sched_park_engine(struct i915_sched_engine *se)
 {
+	struct i915_priolist_root *root = &se->queue;
+	struct i915_priolist *pl, *n;
+
+	GEM_BUG_ON(!priolist_is_empty(root));
+
+	list_for_each_entry_safe(pl, n, &root->sentinel.requests, requests)
+		kmem_cache_free(global.slab_priorities, pl);
+	INIT_LIST_HEAD(&root->sentinel.requests);
+	root->sentinel.level = 0;
+
 	GEM_BUG_ON(!i915_sched_is_idle(se));
 	se->no_priolist = false;
 }
@@ -216,88 +254,72 @@ static inline bool node_signaled(const struct i915_sched_node *node)
 	return i915_request_completed(node_to_request(node));
 }
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+static void assert_priolists(struct i915_priolist_root * const root)
 {
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
-static void assert_priolists(struct intel_engine_cs * const engine)
-{
-	struct rb_node *rb;
+	struct i915_priolist *pl;
 	u64 last_deadline;
 
 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		return;
 
-	GEM_BUG_ON(rb_first_cached(&engine->active.queue) !=
-		   rb_first(&engine->active.queue.rb_root));
-
 	last_deadline = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
-		const struct i915_priolist *p = to_priolist(rb);
-
-		GEM_BUG_ON(p->deadline < last_deadline);
-		last_deadline = p->deadline;
+	for_each_priolist(pl, root) {
+		GEM_BUG_ON(pl->deadline < last_deadline);
+		last_deadline = pl->deadline;
 	}
 }
 
+static inline unsigned int random_level(struct i915_priolist_root *root)
+{
+	root->prng = next_pseudo_random32(root->prng);
+	return min_t(int, __ffs(root->prng) / 2, I915_PRIOLIST_HEIGHT - 1);
+}
+
 static struct list_head *
 lookup_priolist(struct intel_engine_cs *engine, u64 deadline)
 {
+	struct i915_priolist *update[I915_PRIOLIST_HEIGHT];
 	struct i915_sched_engine * const se = &engine->active;
-	struct list_head *free = NULL;
-	struct rb_node **parent, *rb;
-	struct i915_priolist *p;
-	bool first;
+	struct i915_priolist_root *root = &se->queue;
+	struct i915_priolist *pl, *tmp;
+	int lvl;
 
 	GEM_BUG_ON(deadline == I915_DEADLINE_NEVER);
 	lockdep_assert_held(&se->lock);
-	assert_priolists(engine);
+	assert_priolists(root);
 
 	if (unlikely(se->no_priolist))
 		deadline = 0;
 
-find_priolist:
-	/* Earliest deadline is scheduled first, equal deadlines fifo. */
-	rb = NULL;
-	first = true;
-	parent = &se->queue.rb_root.rb_node;
-	while (*parent) {
-		rb = *parent;
-		p = to_priolist(rb);
-
-		if (deadline == p->deadline)
-			goto out;
-
-		/*
-		 * Prune an empty priolist, we can reuse it if we need to
-		 * allocate. After removing this node and rotating the subtrees
-		 * beneath its parent, we need to restart our descent from the
-		 * parent.
-		 */
-		if (list_empty(&p->requests)) {
-			rb = rb_parent(&p->node);
-			parent = rb ? &rb : &se->queue.rb_root.rb_node;
-			rb_erase_cached(&p->node, &se->queue);
-			free = i915_priolist_free_defer(p, free);
-			continue;
-		}
+	for_each_priolist(pl, root) { /* recycle any empty elements before us */
+		if (pl->deadline >= deadline || !list_empty(&pl->requests))
+			break;
 
-		if (deadline < p->deadline)
-			parent = &rb->rb_left;
-		else
-			parent = &rb->rb_right, first = false;
+		i915_priolist_advance(root, pl);
 	}
 
-	if (!deadline) {
-		p = &se->default_priolist;
-	} else if (free) {
-		p = container_of(free, typeof(*p), requests);
-		free = p->requests.next;
+find_priolist:
+	pl = &root->sentinel;
+	lvl = pl->level;
+	do {
+		while (tmp = pl->next[lvl], tmp->deadline <= deadline)
+			pl = tmp;
+		if (pl->deadline == deadline)
+			goto out;
+		update[lvl] = pl;
+	} while (--lvl >= 0);
+
+	if (deadline == 0) {
+		pl = &se->default_priolist;
+	} else if (!list_empty(&root->sentinel.requests)) {
+		pl = list_first_entry(&root->sentinel.requests,
+				      struct i915_priolist,
+				      requests);
+		__list_del_entry(&pl->requests);
 	} else {
-		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+		pl = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
-		if (unlikely(!p)) {
+		if (unlikely(!pl)) {
 			deadline = 0; /* recurses just once */
 
 			/* To maintain ordering with all rendering, after an
@@ -313,35 +335,48 @@ lookup_priolist(struct intel_engine_cs *engine, u64 deadline)
 		}
 	}
 
-	p->deadline = deadline;
-	INIT_LIST_HEAD(&p->requests);
+	pl->deadline = deadline;
+	INIT_LIST_HEAD(&pl->requests);
+
+	lvl = random_level(root);
+	if (lvl > root->sentinel.level) {
+		lvl = ++root->sentinel.level;
+		update[lvl] = &root->sentinel;
+	}
 
-	rb_link_node(&p->node, rb, parent);
-	rb_insert_color_cached(&p->node, &se->queue, first);
-	GEM_BUG_ON(rb_first_cached(&se->queue) !=
-		   rb_first(&se->queue.rb_root));
+	pl->level = lvl;
+	do {
+		tmp = update[lvl];
+		pl->next[lvl] = update[lvl]->next[lvl];
+		tmp->next[lvl] = pl;
+	} while (--lvl >= 0);
+	assert_priolists(root);
 
 out:
-	i915_priolist_free_many(free);
-	return &p->requests;
+	GEM_BUG_ON(pl == &root->sentinel);
+	return &pl->requests;
 }
 
-void i915_priolist_free(struct i915_priolist *p)
+void i915_priolist_advance(struct i915_priolist_root *root,
+			   struct i915_priolist *pl)
 {
-	if (p->deadline)
-		kmem_cache_free(global.slab_priorities, p);
-}
+	int lvl;
 
-void i915_priolist_free_many(struct list_head *list)
-{
-	while (list) {
-		struct i915_priolist *p;
+	GEM_BUG_ON(!list_empty(&pl->requests));
+	GEM_BUG_ON(pl == &root->sentinel);
+
+	if (pl->deadline)
+		list_add(&pl->requests, &root->sentinel.requests);
 
-		p = container_of(list, typeof(*p), requests);
-		list = p->requests.next;
+	for (lvl = 0; lvl <= pl->level; lvl++)
+		root->sentinel.next[lvl] = pl->next[lvl];
 
-		GEM_BUG_ON(!p->deadline);
-		kmem_cache_free(global.slab_priorities, p);
+	if (pl->level == root->sentinel.level) {
+		pl = &root->sentinel;
+		lvl = pl->level;
+		while (lvl > 0 && pl->next[lvl] == pl)
+			lvl--;
+		pl->level = lvl;
 	}
 }
 
@@ -378,12 +413,12 @@ static void ipi_deadline(struct i915_request *rq, u64 deadline)
 }
 
 static bool is_first_priolist(const struct intel_engine_cs *engine,
-			      const struct list_head *plist)
+			      const struct list_head *requests)
 {
-	struct rb_node *node =
-		&container_of(plist, struct i915_priolist, requests)->node;
+	struct i915_priolist *pl =
+		container_of(requests, struct i915_priolist, requests);
 
-	return node == rb_first_cached(&engine->active.queue);
+	return priolist_first(&engine->active.queue) == pl;
 }
 
 static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 949d2757522f..5df3cf385bd1 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -16,12 +16,6 @@
 
 struct drm_printer;
 
-#define priolist_for_each_request(it, plist) \
-	list_for_each_entry(it, &(plist)->requests, sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist) \
-	list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
-
 void i915_sched_node_init(struct i915_sched_node *node);
 void i915_sched_node_reinit(struct i915_sched_node *node);
 
@@ -75,22 +69,9 @@ static inline u64 i915_sched_to_ns(u64 deadline)
 	return deadline << I915_SCHED_DEADLINE_SHIFT;
 }
 
-void i915_priolist_free(struct i915_priolist *p);
-void i915_priolist_free_many(struct list_head *list);
-
-static inline struct list_head *
-i915_priolist_free_defer(struct i915_priolist *p, struct list_head *free)
-{
-	if (p->deadline) {
-		p->requests.next = free;
-		free = &p->requests;
-	}
-	return free;
-}
-
 static inline bool i915_sched_is_idle(const struct i915_sched_engine *se)
 {
-	return RB_EMPTY_ROOT(&se->queue.rb_root);
+	return i915_priolist_is_empty(&se->queue);
 }
 
 static inline bool
@@ -120,6 +101,9 @@ static inline void i915_sched_kick(struct i915_sched_engine *se)
 	tasklet_hi_schedule(&se->tasklet);
 }
 
+void i915_priolist_advance(struct i915_priolist_root *root,
+			   struct i915_priolist *old);
+
 void i915_request_show_with_schedule(struct drm_printer *m,
 				     const struct i915_request *rq,
 				     const char *prefix,
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aed02f06c073..b8c484511185 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -112,7 +112,7 @@ struct i915_sched_engine {
 	/**
 	 * @queue: queue of requests, in priority lists
 	 */
-	struct rb_root_cached queue;
+	struct i915_priolist_root queue;
 
 	struct i915_sched_ipi ipi;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 88dc5848b6e6..8e07944d60e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -123,9 +123,9 @@ static int all_engines(struct drm_i915_private *i915,
 static bool check_context_order(struct intel_engine_cs *engine)
 {
 	u64 last_seqno, last_context;
+	struct i915_priolist *p;
 	unsigned long count;
 	bool result = false;
-	struct rb_node *rb;
 	int last_prio;
 
 	/* We expect the execution order to follow ascending fence-context */
@@ -135,8 +135,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
 	last_context = 0;
 	last_seqno = 0;
 	last_prio = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+	for_each_priolist(p, &engine->active.queue) {
 		struct i915_request *rq;
 
 		priolist_for_each_request(rq, p) {
-- 
2.20.1


