[PATCH 03/25] drm/i915/execlists: Store the highest priority context

Chris Wilson chris at chris-wilson.co.uk
Sat Jan 19 15:55:08 UTC 2019


In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues: after a CS completion event we may preempt the newly
executing context with itself, or something else may trigger a
tasklet_schedule and cause the same evaluation to preempt the active
context with itself.

To avoid these extra complications, record the highest-priority context
along with its priority so that we can elide no-effect preemption
requests.
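
As a rough illustration of the bookkeeping (a standalone user-space
sketch with hypothetical types, not the i915 code itself), the idea is
to remember both the highest pending priority and the context that
holds it, and only raise a preemption event when that context differs
from the one already executing:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct context { int id; };

	struct sched_state {
		int queue_priority;                  /* highest pending priority */
		const struct context *queue_context; /* context holding that priority */
		const struct context *active;        /* context currently executing */
	};

	/* Returns true if a preemption event needs to be raised. */
	static bool submit(struct sched_state *s,
			   const struct context *ctx, int prio)
	{
		if (prio <= s->queue_priority)
			return false;

		s->queue_priority = prio;
		s->queue_context = ctx;

		/* Preempting the active context with itself is a no-op: elide it. */
		return ctx != s->active;
	}

	int main(void)
	{
		struct context a = { 1 }, b = { 2 };
		struct sched_state s = { INT_MIN, NULL, &a };

		printf("%d\n", submit(&s, &a, 10)); /* 0: same context, elided */
		printf("%d\n", submit(&s, &b, 20)); /* 1: preempt with context b */
		return 0;
	}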

References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler.c   | 21 +++++++++++++++----
 drivers/gpu/drm/i915/intel_lrc.c        | 28 ++++++++++++++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +++++++
 3 files changed, 46 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 340faea6c08a..e0b177687bec 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -239,6 +239,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	return engine;
 }
 
+static bool inflight(const struct i915_request *rq,
+		     const struct intel_engine_cs *engine)
+{
+	const struct i915_request *active;
+
+	if (!rq->global_seqno)
+		return false;
+
+	active = port_request(engine->execlists.port);
+	return active->hw_context == rq->hw_context;
+}
+
 static void __i915_schedule(struct i915_request *rq,
 			    const struct i915_sched_attr *attr)
 {
@@ -356,17 +368,18 @@ static void __i915_schedule(struct i915_request *rq,
 		if (prio <= engine->execlists.queue_priority)
 			continue;
 
+		engine->execlists.queue_priority = prio;
+		engine->execlists.queue_context =
+			node_to_request(node)->hw_context;
+
 		/*
 		 * If we are already the currently executing context, don't
 		 * bother evaluating if we should preempt ourselves.
 		 */
-		if (node_to_request(node)->global_seqno &&
-		    i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
-				      node_to_request(node)->global_seqno))
+		if (inflight(node_to_request(node), engine))
 			continue;
 
 		/* Defer (tasklet) submission until after all of our updates. */
-		engine->execlists.queue_priority = prio;
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c0a42afaf177..379a43ed2f90 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -183,10 +183,12 @@ static inline int rq_prio(const struct i915_request *rq)
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
 				const struct i915_request *last,
-				int prio)
+				int prio,
+				const struct intel_context *context)
 {
 	return (intel_engine_has_preemption(engine) &&
 		__execlists_need_preempt(prio, rq_prio(last)) &&
+		last->hw_context != context &&
 		!i915_request_completed(last));
 }
 
@@ -579,7 +581,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
 			return;
 
-		if (need_preempt(engine, last, execlists->queue_priority)) {
+		if (need_preempt(engine, last,
+				 execlists->queue_priority,
+				 execlists->queue_context)) {
+			GEM_BUG_ON(execlists->queue_priority == INT_MIN);
 			inject_preempt_context(engine);
 			return;
 		}
@@ -696,6 +701,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * user, see queue_request(), the queue_priority is bumped to that
 	 * request triggering preemption on the next dequeue (or subsequent
 	 * interrupt for secondary ports).
+	 *
+	 * As we are guaranteed that the queue_priority does not preempt
+	 * the currently executing context, we can forgo resetting
+	 * queue_context here in the knowledge that it will be set before
+	 * any preemption.
 	 */
 	execlists->queue_priority =
 		port != execlists->port ? rq_prio(last) : INT_MIN;
@@ -1073,10 +1083,12 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 		tasklet_hi_schedule(&execlists->tasklet);
 }
 
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static void submit_queue(struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
 {
-	if (prio > engine->execlists.queue_priority) {
-		engine->execlists.queue_priority = prio;
+	if (rq_prio(rq) > engine->execlists.queue_priority) {
+		engine->execlists.queue_priority = rq_prio(rq);
+		engine->execlists.queue_context = rq->hw_context;
 		__submit_queue_imm(engine);
 	}
 }
@@ -1094,7 +1106,7 @@ static void execlists_submit_request(struct i915_request *request)
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
 
-	submit_queue(engine, rq_prio(request));
+	submit_queue(engine, request);
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
@@ -2736,7 +2748,9 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 
 	last = NULL;
 	count = 0;
-	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
+	if (execlists->queue_priority != INT_MIN)
+		drm_printf(m, "\t\tQueue priority: %d\n",
+			   execlists->queue_priority);
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		int i;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c3ef0f9bf321..c5975c67f74d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -308,6 +308,14 @@ struct intel_engine_execlists {
 	 */
 	int queue_priority;
 
+	/**
+	 * @queue_context: Highest pending context
+	 *
+	 * Record the context at the head of the queue so that we can
+	 * avoid preempting the active context with itself.
+	 */
+	const struct intel_context *queue_context;
+
 	/**
 	 * @queue: queue of requests, in priority lists
 	 */
-- 
2.20.1


