[PATCH 17/19] drm/i915: Replace priolist rbtree with a skiplist
Chris Wilson
chris at chris-wilson.co.uk
Thu Jan 28 14:52:38 UTC 2021
Replace the priolist rbtree with a skiplist. The crucial difference is
that walking and removing the first element of a skiplist is O(1), but
O(lgN) for an rbtree, as we need to rebalance on remove. This is a
hindrance for submission latency as it occurs between picking a request
from the priolist and submitting it to hardware, as well as effectively
tripling the number of O(lgN) operations required under the irqoff lock.
This is critical to reducing the latency jitter with multiple clients.
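The cheap dequeue is easy to see: removing the head of a skiplist only
relinks the sentinel's forward pointers around the departing node, with
no rebalancing. A minimal sketch (generic names and a fixed maximum
height that mirror the shape of the nodes this patch introduces, not the
exact i915 structures):

  #define SL_MAX_HEIGHT 12

  struct skipnode {
          int level;                            /* highest level this node occupies */
          struct skipnode *next[SL_MAX_HEIGHT];
  };

  struct skiplist {
          struct skipnode head;                 /* sentinel; head.next[] are the per-level heads */
  };

  /* Dequeue the first node: bounded by its height, O(1) on average. */
  static void skiplist_pop_first(struct skiplist *sl, struct skipnode *first)
  {
          int lvl;

          for (lvl = 0; lvl <= first->level; lvl++)
                  sl->head.next[lvl] = first->next[lvl];
  }

Erasing the leftmost node of an rbtree, by contrast, may require O(lgN)
recolouring and rotations to restore balance.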
The downsides to skiplists are that lookup/insertion is only
probabilistically O(lgN), and there is a significant memory penalty, as
each skip node is larger than the rbtree equivalent. Furthermore, we
don't use dynamic arrays for the skiplist, so the allocation is fixed,
and imposes an upper bound on the scalability wrt the number of
inflight requests.
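For a rough sense of that cost (a back-of-the-envelope sketch using the
64-bit constants added below, not measured numbers):

  struct rb_node links:             3 pointers * 8 bytes = 24 bytes per node
  I915_PRIOLIST_HEIGHT (12) links: 12 pointers * 8 bytes = 96 bytes per node

Each additional level is occupied with probability 1/4 (random_level()
returns __ffs(prng) / 2), so a fixed height of 12 should keep the
expected search depth logarithmic up to on the order of 4^12 entries,
far more than we expect to have queued at once.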
In the following patches, we introduce a new sort key to the scheduler,
a virtual deadline. This imposes a different structure on the tree.
Using a priority sort, we have very few priority levels active at any
time, most likely just the default priority, and so the rbtree degenerates
to a single element containing the list of all ready requests. The
deadlines, in contrast, are very sparse, and typically each request has a
unique deadline. Instead of being able to simply walk the list during
dequeue, with the deadline scheduler we have to iterate through the bst
on the critical submission path. Skiplists are vastly superior in this
instance due to the O(1) iteration during dequeue, with very similar
characteristics [on average] to the rbtree for insertion.
This means that by using skiplists we can introduce a sparse sort key
without degrading latency on the critical submission path.
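Concretely, consuming the queue in sort order is just a walk of the
skiplist's bottom level, whatever the key distribution looks like. A
sketch using the skipnode/skiplist types from the example above (the
for_each_priolist() helper added below has the same shape; consume() is
a stand-in for the per-element dequeue work):

  /*
   * Level 0 of the skiplist is a plain linked list in sort order, so
   * draining it is a pointer chase: no descent and no rebalancing
   * between elements.
   */
  static void skiplist_drain(struct skiplist *sl)
  {
          struct skipnode *node;

          for (node = sl->head.next[0]; node != &sl->head; node = node->next[0])
                  consume(node);
  }

With an rbtree keyed by (mostly unique) deadlines, the equivalent walk
pays the rb_next()/rb_erase() cost for every element on the submission
path.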
As an example, one simple case where we try to do lots of
semi-independent work without any priority management (gem_exec_parallel),
the lock hold times were:
   [worst]      [total]  [avg]
    973.05   6301584.84   0.35   # plain rbtree
    559.82   5424915.25   0.33   # best rbtree with pruning
    208.21   3898784.09   0.24   # skiplist
     34.05   5784106.01   0.32   # rbtree without deadlines
     23.35   4152999.80   0.24   # skiplist without deadlines
Based on the skiplist implementation by Dr Con Kolivas for MuQSS.
References: https://en.wikipedia.org/wiki/Skip_list
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
.../drm/i915/gt/intel_execlists_submission.c | 52 ++--
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 30 +-
drivers/gpu/drm/i915/i915_priolist_types.h | 28 +-
drivers/gpu/drm/i915/i915_scheduler.c | 275 ++++++++++++++----
drivers/gpu/drm/i915/i915_scheduler.h | 11 +-
drivers/gpu/drm/i915/i915_scheduler_types.h | 2 +-
.../drm/i915/selftests/i915_mock_selftests.h | 1 +
.../gpu/drm/i915/selftests/i915_scheduler.c | 53 +++-
8 files changed, 333 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 108691459aa2..6f15ef778af5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -244,11 +244,6 @@ static void ring_set_paused(const struct intel_engine_cs *engine, int state)
wmb();
}
-static struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
static int rq_prio(const struct i915_request *rq)
{
return READ_ONCE(rq->sched.attr.priority);
@@ -272,15 +267,27 @@ static int effective_prio(const struct i915_request *rq)
return prio;
}
-static int queue_prio(const struct i915_sched_engine *se)
+static struct i915_request *first_request(struct i915_sched_engine *se)
{
- struct rb_node *rb;
+ struct i915_priolist *pl = se->queue.sentinel.next[0];
- rb = rb_first_cached(&se->queue);
- if (!rb)
+ if (pl == &se->queue.sentinel)
+ return NULL;
+
+ return list_first_entry_or_null(&pl->requests,
+ struct i915_request,
+ sched.link);
+}
+
+static int queue_prio(struct i915_sched_engine *se)
+{
+ struct i915_request *rq;
+
+ rq = first_request(se);
+ if (!rq)
return INT_MIN;
- return to_priolist(rb)->priority;
+ return rq_prio(rq);
}
static int virtual_prio(const struct intel_engine_execlists *el)
@@ -290,7 +297,7 @@ static int virtual_prio(const struct intel_engine_execlists *el)
return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
}
-static bool need_preempt(const struct intel_engine_cs *engine,
+static bool need_preempt(struct intel_engine_cs *engine,
const struct i915_request *rq)
{
int last_prio;
@@ -1136,6 +1143,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct i915_request ** const last_port = port + execlists->port_mask;
struct i915_request *last, * const *active;
struct virtual_engine *ve;
+ struct i915_priolist *pl;
struct rb_node *rb;
bool submit = false;
@@ -1346,11 +1354,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
- while ((rb = rb_first_cached(&engine->active.queue))) {
- struct i915_priolist *p = to_priolist(rb);
+ for_each_priolist(pl, &engine->active.queue) {
struct i915_request *rq, *rn;
- priolist_for_each_request_consume(rq, rn, p) {
+ priolist_for_each_request_safe(rq, rn, pl) {
bool merge = true;
/*
@@ -1425,8 +1432,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- rb_erase_cached(&p->node, &engine->active.queue);
- i915_priolist_free(p);
+ i915_priolist_advance(&engine->active.queue, pl);
}
done:
*port++ = i915_request_get(last);
@@ -2631,6 +2637,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
+ struct i915_priolist *pl;
struct rb_node *rb;
unsigned long flags;
@@ -2661,16 +2668,12 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&engine->active.queue))) {
- struct i915_priolist *p = to_priolist(rb);
-
- priolist_for_each_request_consume(rq, rn, p) {
+ for_each_priolist(pl, &engine->active.queue) {
+ priolist_for_each_request_safe(rq, rn, pl) {
i915_request_mark_eio(rq);
__i915_request_submit(rq);
}
-
- rb_erase_cached(&p->node, &engine->active.queue);
- i915_priolist_free(p);
+ i915_priolist_advance(&engine->active.queue, pl);
}
GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
@@ -2703,7 +2706,6 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority_hint = INT_MIN;
- engine->active.queue = RB_ROOT_CACHED;
GEM_BUG_ON(__tasklet_is_enabled(&engine->active.tasklet));
engine->active.tasklet.callback = nop_submission_tasklet;
@@ -3088,6 +3090,8 @@ static void virtual_context_exit(struct intel_context *ce)
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
+
+ i915_sched_park_engine(&ve->base.active);
}
static const struct intel_context_ops virtual_context_ops = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f778a783d74b..900e296448df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -59,11 +59,6 @@
#define GUC_REQUEST_SIZE 64 /* bytes */
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
@@ -185,8 +180,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
struct i915_request ** const last_port = first + execlists->port_mask;
struct i915_request *last = first[0];
struct i915_request **port;
+ struct i915_priolist *pl;
bool submit = false;
- struct rb_node *rb;
lockdep_assert_held(&engine->active.lock);
@@ -203,11 +198,10 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
* event.
*/
port = first;
- while ((rb = rb_first_cached(&engine->active.queue))) {
- struct i915_priolist *p = to_priolist(rb);
+ for_each_priolist(pl, &engine->active.queue) {
struct i915_request *rq, *rn;
- priolist_for_each_request_consume(rq, rn, p) {
+ priolist_for_each_request_safe(rq, rn, pl) {
if (last && rq->context != last->context) {
if (port == last_port)
goto done;
@@ -223,12 +217,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
last = rq;
}
- rb_erase_cached(&p->node, &engine->active.queue);
- i915_priolist_free(p);
+ i915_priolist_advance(&engine->active.queue, pl);
}
done:
execlists->queue_priority_hint =
- rb ? to_priolist(rb)->priority : INT_MIN;
+ pl != &engine->active.queue.sentinel ? pl->priority : INT_MIN;
if (submit) {
*port = schedule_in(last, port - execlists->inflight);
*++port = NULL;
@@ -328,7 +321,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
- struct rb_node *rb;
+ struct i915_priolist *p;
unsigned long flags;
ENGINE_TRACE(engine, "\n");
@@ -356,25 +349,20 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
}
/* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&engine->active.queue))) {
- struct i915_priolist *p = to_priolist(rb);
-
- priolist_for_each_request_consume(rq, rn, p) {
+ for_each_priolist(p, &engine->active.queue) {
+ priolist_for_each_request_safe(rq, rn, p) {
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
-
- rb_erase_cached(&p->node, &engine->active.queue);
- i915_priolist_free(p);
+ i915_priolist_advance(&engine->active.queue, p);
}
GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority_hint = INT_MIN;
- engine->active.queue = RB_ROOT_CACHED;
spin_unlock_irqrestore(&engine->active.lock, flags);
}
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index bc2fa84f98a8..1200c3df6a4a 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -38,10 +38,36 @@ enum {
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
+#ifdef CONFIG_64BIT
+#define I915_PRIOLIST_HEIGHT 12
+#else
+#define I915_PRIOLIST_HEIGHT 11
+#endif
+
struct i915_priolist {
struct list_head requests;
- struct rb_node node;
int priority;
+
+ int level;
+ struct i915_priolist *next[I915_PRIOLIST_HEIGHT];
};
+struct i915_priolist_root {
+ struct i915_priolist sentinel;
+ u32 prng;
+};
+
+#define i915_priolist_is_empty(root) ((root)->sentinel.level < 0)
+
+#define for_each_priolist(p, root) \
+ for ((p) = (root)->sentinel.next[0]; \
+ (p) != &(root)->sentinel; \
+ (p) = (p)->next[0])
+
+#define priolist_for_each_request(it, plist) \
+ list_for_each_entry(it, &(plist)->requests, sched.link)
+
+#define priolist_for_each_request_safe(it, n, plist) \
+ list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
+
#endif /* _I915_PRIOLIST_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 595739363622..ce84fb264c78 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -4,7 +4,9 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/bitops.h>
#include <linux/mutex.h>
+#include <linux/prandom.h>
#include "gt/intel_ring.h"
#include "gt/intel_lrc_reg.h"
@@ -91,6 +93,16 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
ipi->list = NULL;
}
+static void init_priolist(struct i915_priolist_root *const root)
+{
+ struct i915_priolist *pl = &root->sentinel;
+
+ memset_p((void **)pl->next, pl, ARRAY_SIZE(pl->next));
+ pl->requests.prev = NULL;
+ pl->priority = INT_MIN;
+ pl->level = -1;
+}
+
void i915_sched_init_engine(struct i915_sched_engine *se,
const char *name,
unsigned long mask,
@@ -102,9 +114,9 @@ void i915_sched_init_engine(struct i915_sched_engine *se,
se->name = name;
se->mask = mask;
+ init_priolist(&se->queue);
INIT_LIST_HEAD(&se->requests);
INIT_LIST_HEAD(&se->hold);
- se->queue = RB_ROOT_CACHED;
i915_sched_init_ipi(&se->ipi);
@@ -121,8 +133,60 @@ void i915_sched_init_engine(struct i915_sched_engine *se,
#endif
}
+__maybe_unused static bool priolist_idle(struct i915_priolist_root *root)
+{
+ struct i915_priolist *pl = &root->sentinel;
+ int lvl;
+
+ for (lvl = 0; lvl < ARRAY_SIZE(pl->next); lvl++) {
+ if (pl->next[lvl] != pl) {
+ GEM_TRACE_ERR("root[%d] is not empty\n", lvl);
+ return false;
+ }
+ }
+
+ if (pl->level != -1) {
+ GEM_TRACE_ERR("root is not clear: %d\n", pl->level);
+ return false;
+ }
+
+ return true;
+}
+
+static bool pl_empty(struct list_head *st)
+{
+ return !st->prev;
+}
+
+static void pl_push(struct i915_priolist *pl, struct list_head *st)
+{
+ /* Keep list_empty(&pl->requests) valid for concurrent readers */
+ pl->requests.prev = st->prev;
+ st->prev = &pl->requests;
+ GEM_BUG_ON(pl_empty(st));
+}
+
+static struct i915_priolist *pl_pop(struct list_head *st)
+{
+ struct i915_priolist *pl;
+
+ GEM_BUG_ON(pl_empty(st));
+ pl = container_of(st->prev, typeof(*pl), requests);
+ st->prev = pl->requests.prev;
+
+ return pl;
+}
+
void i915_sched_park_engine(struct i915_sched_engine *se)
{
+ struct i915_priolist_root *root = &se->queue;
+ struct list_head *list = &root->sentinel.requests;
+
+ GEM_BUG_ON(!priolist_idle(root));
+
+ while (!pl_empty(list))
+ kmem_cache_free(global.slab_priorities, pl_pop(list));
+
GEM_BUG_ON(!i915_sched_is_idle(se));
se->no_priolist = false;
}
@@ -196,71 +260,55 @@ static inline bool node_signaled(const struct i915_sched_node *node)
return i915_request_completed(node_to_request(node));
}
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+static inline unsigned int random_level(struct i915_priolist_root *root)
{
- return rb_entry(rb, struct i915_priolist, node);
-}
-
-static void assert_priolists(struct i915_sched_engine * const se)
-{
- struct rb_node *rb;
- long last_prio;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
-
- GEM_BUG_ON(rb_first_cached(&se->queue) !=
- rb_first(&se->queue.rb_root));
-
- last_prio = INT_MAX;
- for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
- const struct i915_priolist *p = to_priolist(rb);
-
- GEM_BUG_ON(p->priority > last_prio);
- last_prio = p->priority;
- }
+ root->prng = next_pseudo_random32(root->prng);
+ return __ffs(root->prng) / 2;
}
static struct list_head *
lookup_priolist(struct intel_engine_cs *engine, int prio)
{
+ struct i915_priolist *update[I915_PRIOLIST_HEIGHT];
struct i915_sched_engine * const se = &engine->active;
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- lockdep_assert_held(&engine->active.lock);
- assert_priolists(se);
+ struct i915_priolist_root *root = &se->queue;
+ struct i915_priolist *pl, *tmp;
+ int lvl;
+ lockdep_assert_held(&se->lock);
if (unlikely(se->no_priolist))
prio = I915_PRIORITY_NORMAL;
+ for_each_priolist(pl, root) { /* recycle any empty elements before us */
+ if (pl->priority >= prio || !list_empty(&pl->requests))
+ break;
+
+ i915_priolist_advance(root, pl);
+ }
+
find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &se->queue.rb_root.rb_node;
- while (*parent) {
- rb = *parent;
- p = to_priolist(rb);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- return &p->requests;
- }
+ pl = &root->sentinel;
+ lvl = pl->level;
+ while (lvl >= 0) {
+ while (tmp = pl->next[lvl], tmp->priority >= prio)
+ pl = tmp;
+ if (pl->priority == prio)
+ goto out;
+ update[lvl--] = pl;
}
if (prio == I915_PRIORITY_NORMAL) {
- p = &se->default_priolist;
+ pl = &se->default_priolist;
+ } else if (!pl_empty(&root->sentinel.requests)) {
+ pl = pl_pop(&root->sentinel.requests);
} else {
- p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+ pl = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
+ if (unlikely(!pl)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
- /* To maintain ordering with all rendering, after an
+ /*
+ * To maintain ordering with all rendering, after an
* allocation failure we have to disable all scheduling.
* Requests will then be executed in fifo, and schedule
* will ensure that dependencies are emitted in fifo.
@@ -273,18 +321,124 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
}
}
- p->priority = prio;
- INIT_LIST_HEAD(&p->requests);
+ pl->priority = prio;
+ INIT_LIST_HEAD(&pl->requests);
- rb_link_node(&p->node, rb, parent);
- rb_insert_color_cached(&p->node, &se->queue, first);
+ lvl = random_level(root);
+ if (lvl > root->sentinel.level) {
+ if (root->sentinel.level < I915_PRIOLIST_HEIGHT - 1) {
+ lvl = ++root->sentinel.level;
+ update[lvl] = &root->sentinel;
+ } else {
+ lvl = I915_PRIOLIST_HEIGHT - 1;
+ }
+ }
+ GEM_BUG_ON(lvl < 0);
+ GEM_BUG_ON(lvl >= ARRAY_SIZE(pl->next));
- return &p->requests;
+ pl->level = lvl;
+ do {
+ tmp = update[lvl];
+ pl->next[lvl] = tmp->next[lvl];
+ tmp->next[lvl] = pl;
+ } while (--lvl >= 0);
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ struct i915_priolist *chk;
+
+ chk = &root->sentinel;
+ lvl = chk->level;
+ do {
+ while (tmp = chk->next[lvl], tmp->priority >= prio)
+ chk = tmp;
+ } while (--lvl >= 0);
+
+ GEM_BUG_ON(chk != pl);
+ }
+
+out:
+ GEM_BUG_ON(pl == &root->sentinel);
+ return &pl->requests;
}
-void __i915_priolist_free(struct i915_priolist *p)
+static void __remove_priolist(struct intel_engine_cs *engine,
+ struct list_head *plist)
{
- kmem_cache_free(global.slab_priorities, p);
+ struct i915_sched_engine * const se = &engine->active;
+ struct i915_priolist_root *root = &se->queue;
+ struct i915_priolist *pl, *tmp;
+ struct i915_priolist *old =
+ container_of(plist, struct i915_priolist, requests);
+ int prio = old->priority;
+ int lvl;
+
+ lockdep_assert_held(&se->lock);
+ GEM_BUG_ON(!list_empty(plist));
+
+ pl = &root->sentinel;
+ lvl = pl->level;
+ GEM_BUG_ON(lvl < 0);
+
+ if (prio != I915_PRIORITY_NORMAL)
+ pl_push(old, &pl->requests);
+
+ do {
+ while (tmp = pl->next[lvl], tmp->priority > prio)
+ pl = tmp;
+ if (lvl <= old->level) {
+ pl->next[lvl] = old->next[lvl];
+ if (pl == &root->sentinel && old->next[lvl] == pl) {
+ GEM_BUG_ON(pl->level != lvl);
+ pl->level--;
+ }
+ }
+ } while (--lvl >= 0);
+ GEM_BUG_ON(tmp != old);
+}
+
+static void remove_from_priolist(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ struct list_head *list,
+ bool tail)
+{
+ struct list_head *prev = rq->sched.link.prev;
+
+ GEM_BUG_ON(!i915_request_in_priority_queue(rq));
+
+ __list_del_entry(&rq->sched.link);
+ if (tail)
+ list_add_tail(&rq->sched.link, list);
+ else
+ list_add(&rq->sched.link, list);
+
+ /* If we just removed the last element in the old plist, delete it */
+ if (list_empty(prev))
+ __remove_priolist(engine, prev);
+}
+
+void i915_priolist_advance(struct i915_priolist_root *root,
+ struct i915_priolist *pl)
+{
+ struct i915_priolist * const s = &root->sentinel;
+ int lvl;
+
+ GEM_BUG_ON(!list_empty(&pl->requests));
+ GEM_BUG_ON(pl != s->next[0]);
+ GEM_BUG_ON(pl == s);
+
+ /* Keep pl->next[0] valid for for_each_priolist iteration */
+ if (pl->priority != I915_PRIORITY_NORMAL)
+ pl_push(pl, &s->requests);
+
+ lvl = pl->level;
+ GEM_BUG_ON(lvl < 0);
+ do {
+ s->next[lvl] = pl->next[lvl];
+ if (pl->next[lvl] == s) {
+ GEM_BUG_ON(s->level != lvl);
+ s->level--;
+ }
+ } while (--lvl >= 0);
}
static struct i915_request *
@@ -434,7 +588,7 @@ static void __i915_request_set_priority(struct i915_request *rq, int prio)
GEM_BUG_ON(rq->engine != engine);
if (i915_request_in_priority_queue(rq))
- list_move_tail(&rq->sched.link, plist);
+ remove_from_priolist(engine, rq, plist, true);
/* Defer (tasklet) submission until after all updates. */
kick_submission(engine, rq, prio);
@@ -567,8 +721,7 @@ void __i915_sched_defer_request(struct intel_engine_cs *engine,
/* Note list is reversed for waiters wrt signal hierarchy */
GEM_BUG_ON(rq->engine != engine);
- GEM_BUG_ON(!i915_request_in_priority_queue(rq));
- list_move(&rq->sched.link, &dfs);
+ remove_from_priolist(engine, rq, &dfs, false);
/* Track our visit, and prevent duplicate processing */
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1046,9 +1199,9 @@ void i915_sched_engine_show(struct drm_printer *m,
unsigned int max)
{
struct i915_request *rq, *last;
+ struct i915_priolist *pl;
unsigned long flags;
unsigned int count;
- struct rb_node *rb;
drm_printf(m, "\tTasklet queued? %s (%s)\n",
yesno(test_bit(TASKLET_STATE_SCHED, &se->tasklet.state)),
@@ -1075,10 +1228,8 @@ void i915_sched_engine_show(struct drm_printer *m,
last = NULL;
count = 0;
- for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-
- priolist_for_each_request(rq, p) {
+ for_each_priolist(pl, &se->queue) {
+ priolist_for_each_request(rq, pl) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index c5c9c9643def..38edc5510cab 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -16,12 +16,6 @@
struct drm_printer;
-#define priolist_for_each_request(it, plist) \
- list_for_each_entry(it, &(plist)->requests, sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist) \
- list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
-
void i915_sched_node_init(struct i915_sched_node *node);
void i915_sched_node_reinit(struct i915_sched_node *node);
@@ -71,7 +65,7 @@ static inline void i915_priolist_free(struct i915_priolist *p)
static inline bool i915_sched_is_idle(const struct i915_sched_engine *se)
{
- return RB_EMPTY_ROOT(&se->queue.rb_root);
+ return i915_priolist_is_empty(&se->queue);
}
static inline bool
@@ -107,6 +101,9 @@ static inline void i915_sched_flush(struct i915_sched_engine *se)
__i915_sched_flush(se, true);
}
+void i915_priolist_advance(struct i915_priolist_root *root,
+ struct i915_priolist *old);
+
void i915_request_show_with_schedule(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index ece0f8b828cc..b758523554dc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -33,7 +33,7 @@ struct i915_sched_engine {
/**
* @queue: queue of requests, in priority lists
*/
- struct rb_root_cached queue;
+ struct i915_priolist_root queue;
/**
* @tasklet: softirq tasklet for bottom half
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3db34d3eea58..946c93441c1f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -25,6 +25,7 @@ selftest(ring, intel_ring_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
+selftest(scheduler, i915_scheduler_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 359e058c0e90..29ca12cfbbd7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -12,6 +12,54 @@
#include "selftests/igt_spinner.h"
#include "selftests/i915_random.h"
+static int mock_skiplist_levels(void *dummy)
+{
+ struct i915_priolist_root root = {};
+ struct i915_priolist *pl = &root.sentinel;
+ IGT_TIMEOUT(end_time);
+ unsigned long total;
+ int count, lvl;
+
+ total = 0;
+ do {
+ for (count = 0; count < 16384; count++) {
+ lvl = random_level(&root);
+ if (lvl > pl->level) {
+ if (lvl < I915_PRIOLIST_HEIGHT - 1)
+ lvl = ++pl->level;
+ else
+ lvl = I915_PRIOLIST_HEIGHT - 1;
+ }
+
+ pl->next[lvl] = ptr_inc(pl->next[lvl]);
+ }
+ total += count;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("Total %9lu\n", total);
+ for (lvl = 0; lvl <= pl->level; lvl++) {
+ int x = ilog2((unsigned long)pl->next[lvl]);
+ char row[80];
+
+ memset(row, '*', x);
+ row[x] = '\0';
+
+ pr_info(" [%2d] %9lu %s\n",
+ lvl, (unsigned long)pl->next[lvl], row);
+ }
+
+ return 0;
+}
+
+int i915_scheduler_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(mock_skiplist_levels),
+ };
+
+ return i915_subtests(tests, NULL);
+}
+
static void scheduling_disable(struct intel_engine_cs *engine)
{
engine->props.preempt_timeout_ms = 0;
@@ -80,9 +128,9 @@ static int all_engines(struct drm_i915_private *i915,
static bool check_context_order(struct intel_engine_cs *engine)
{
u64 last_seqno, last_context;
+ struct i915_priolist *p;
unsigned long count;
bool result = false;
- struct rb_node *rb;
int last_prio;
/* We expect the execution order to follow ascending fence-context */
@@ -92,8 +140,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
last_context = 0;
last_seqno = 0;
last_prio = 0;
- for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ for_each_priolist(p, &engine->active.queue) {
struct i915_request *rq;
priolist_for_each_request(rq, p) {
--
2.20.1