[PATCH 30/60] drm/i915: Extract the ability to defer and rerun a request later
Chris Wilson
chris@chris-wilson.co.uk
Sun Jan 10 12:29:32 UTC 2021
Lift the ability to defer a request and rerun it later out of the
execlists backend and into the common scheduler layer.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../drm/i915/gt/intel_execlists_submission.c | 57 +++--------------
drivers/gpu/drm/i915/i915_scheduler.c | 63 +++++++++++++++++--
drivers/gpu/drm/i915/i915_scheduler.h | 5 +-
3 files changed, 67 insertions(+), 58 deletions(-)
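
[Editor's note, not part of the commit: for review purposes, below is a
minimal userspace model of the deferral walk that this patch moves into
the common layer. All names and types are invented stand-ins (struct req,
the fifo[] array); the kernel operates on struct i915_request, the engine
priolists and list_move_tail(), with its on-stack LIST_HEAD playing the
FIFO role modelled here. The toy assumes a simple dependency chain with
no duplicate edges; in the kernel, list_move_tail() re-moving an
already-queued waiter handles that case naturally.]

#include <stdio.h>

#define MAX_REQS 8

struct req {
	const char *name;
	int prio;
	int engine;	/* stand-in for rq->engine */
	int ready;	/* stand-in for i915_request_is_ready() */
	struct req *waiters[MAX_REQS];	/* NULL-terminated waiter edges */
};

/*
 * Collect into pl[] the order in which requests land at the tail of
 * their priority list: rq first, then its qualifying waiters, breadth
 * first. fifo[] plays the role of the kernel's on-stack LIST_HEAD.
 */
static void defer_request(struct req *rq, struct req *pl[], int *n)
{
	struct req *fifo[MAX_REQS];
	int head = 0, tail = 0;

	fifo[tail++] = rq;
	while (head < tail) {
		struct req *cur = fifo[head++];

		pl[(*n)++] = cur;	/* list_move_tail(&cur->sched.link, pl) */

		for (int i = 0; i < MAX_REQS && cur->waiters[i]; i++) {
			struct req *w = cur->waiters[i];

			if (w->engine != cur->engine)
				continue;	/* leave cross-engine semaphores spinning */
			if (!w->ready)
				continue;	/* not yet ready to be submitted */
			if (w->prio < cur->prio)
				continue;	/* already queued on a later priolist */
			fifo[tail++] = w;	/* equal priority: must run after cur */
		}
	}
}

int main(void)
{
	/* B waits on A, C waits on B; all prio 0, all on engine 0. */
	struct req c = { "C", 0, 0, 1, { 0 } };
	struct req b = { "B", 0, 0, 1, { &c } };
	struct req a = { "A", 0, 0, 1, { &b } };
	struct req *pl[MAX_REQS];
	int n = 0;

	defer_request(&a, pl, &n);
	for (int i = 0; i < n; i++)
		printf("%s%s", pl[i]->name, i + 1 < n ? " -> " : "\n");
	/* prints "A -> B -> C": signaler order is preserved at the tail */
	return 0;
}

Because the moved set is appended to the tail of its priority level, any
unmoved request at the same priority now runs first, which is exactly the
round-robin rotation the new comment in defer_active() describes.
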
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2881db065dd9..155df90859c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -982,54 +982,6 @@ static void virtual_xfer_context(struct virtual_engine *ve,
}
}
-static void defer_request(struct i915_request *rq, struct list_head * const pl)
-{
- LIST_HEAD(list);
-
- /*
- * We want to move the interrupted request to the back of
- * the round-robin list (i.e. its priority level), but
- * in doing so, we must then move all requests that were in
- * flight and were waiting for the interrupted request to
- * be run after it again.
- */
- do {
- struct i915_dependency *p;
-
- GEM_BUG_ON(i915_request_is_active(rq));
- list_move_tail(&rq->sched.link, pl);
-
- for_each_waiter(p, rq) {
- struct i915_request *w =
- container_of(p->waiter, typeof(*w), sched);
-
- if (p->flags & I915_DEPENDENCY_WEAK)
- continue;
-
- /* Leave semaphores spinning on the other engines */
- if (w->engine != rq->engine)
- continue;
-
- /* No waiter should start before its signaler */
- GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
- __i915_request_has_started(w) &&
- !__i915_request_is_complete(rq));
-
- GEM_BUG_ON(i915_request_is_active(w));
- if (!i915_request_is_ready(w))
- continue;
-
- if (rq_prio(w) < rq_prio(rq))
- continue;
-
- GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
- list_move_tail(&w->sched.link, &list);
- }
-
- rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
- } while (rq);
-}
-
static void defer_active(struct intel_engine_cs *engine)
{
struct i915_request *rq;
@@ -1038,7 +990,14 @@ static void defer_active(struct intel_engine_cs *engine)
if (!rq)
return;
- defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ __intel_engine_defer_request(engine, rq);
}
static bool
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 695ba75a1c9e..49c21af8d29e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -171,8 +171,8 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
}
}
-struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+static struct list_head *
+lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_priolist *p;
@@ -324,7 +324,7 @@ static void __i915_request_set_priority(struct i915_request *rq, int prio)
struct list_head *pos = &rq->sched.signalers_list;
struct list_head *plist;
- plist = i915_sched_lookup_priolist(engine, prio);
+ plist = lookup_priolist(engine, prio);
/*
* Recursively bump all dependent priorities to match the new request.
@@ -451,12 +451,63 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+void __intel_engine_defer_request(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ struct list_head *pl;
+ LIST_HEAD(list);
+
+ lockdep_assert_held(&engine->active.lock);
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
+
+ /*
+ * When we defer a request, we must maintain its order with respect
+ * to those that are waiting upon it. So we traverse its chain of
+ * waiters and move any that are earlier than the request to after it.
+ */
+ pl = lookup_priolist(engine, rq_prio(rq));
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (p->flags & I915_DEPENDENCY_WEAK)
+ continue;
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
+ __i915_request_has_started(w) &&
+ !__i915_request_is_complete(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
static void queue_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link,
- i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ list_add_tail(&rq->sched.link, lookup_priolist(engine, rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
@@ -555,7 +606,7 @@ __intel_engine_rewind_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
+ pl = lookup_priolist(engine, prio);
}
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
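
[Editor's note: the GEM_BUG_ON(rq_prio(w) > rq_prio(rq)) retained in the
walk above holds because priority bumping propagates to a request's
signalers before the request itself, so a signaler is never left below
any of its waiters. A toy model of that propagation follows; the names
are invented, and a single-signaler chain stands in for the full
signalers_list walk performed by __i915_request_set_priority().]

#include <stdio.h>

struct node {
	const char *name;
	int prio;
	struct node *signaler;	/* single signaler, for simplicity */
};

/* Bump @n to @prio, raising its signaler chain first. */
static void set_priority(struct node *n, int prio)
{
	if (n->prio >= prio)
		return;
	if (n->signaler)
		set_priority(n->signaler, prio);	/* dependencies first */
	n->prio = prio;
}

int main(void)
{
	struct node s = { "signaler", 0, NULL };
	struct node w = { "waiter", 0, &s };

	set_priority(&w, 2);
	printf("%s=%d %s=%d\n", s.name, s.prio, w.name, w.prio);
	/* prints "signaler=2 waiter=2": the signaler is never below w */
	return 0;
}

Hence by the time the defer walk runs, every same-engine waiter sits at
or below the deferred request's priority; the strictly lower ones already
live on a later priolist and are simply skipped.
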
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 421254cb8e8c..6eafda957f64 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -43,6 +43,8 @@ void i915_request_enqueue(struct i915_request *request);
struct i915_request *
__intel_engine_rewind_requests(struct intel_engine_cs *engine);
+void __intel_engine_defer_request(struct intel_engine_cs *engine,
+ struct i915_request *request);
bool __intel_engine_suspend_request(struct intel_engine_cs *engine,
struct i915_request *rq);
@@ -54,9 +56,6 @@ bool intel_engine_suspend_request(struct intel_engine_cs *engine,
void intel_engine_resume_request(struct intel_engine_cs *engine,
struct i915_request *rq);
-struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
-
void __i915_priolist_free(struct i915_priolist *p);
static inline void i915_priolist_free(struct i915_priolist *p)
{
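
[Editor's note: for backends picking up the new export, the calling
convention looks like the sketch below. It mirrors defer_active() in the
execlists hunk above, is illustrative only, and assumes, as the header
context suggests, that __intel_engine_rewind_requests() is the step that
puts the in-flight requests back onto their priolists with
I915_FENCE_FLAG_PQUEUE set.]

/* Illustrative caller, modelled on defer_active(); not part of the patch. */
static void backend_defer_active(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* __intel_engine_defer_request() asserts this lock is held. */
	lockdep_assert_held(&engine->active.lock);

	/* Unwind what was in flight back onto the priority lists. */
	rq = __intel_engine_rewind_requests(engine);
	if (!rq)
		return;

	/*
	 * Push rq and its ready, equal-priority waiters to the back of
	 * their priority level; they are resubmitted on the next dequeue,
	 * after their round-robin peers.
	 */
	__intel_engine_defer_request(engine, rq);
}
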
--
2.20.1