[PATCH 25/42] el-shrink-irqoff

Chris Wilson <chris@chris-wilson.co.uk>
Thu Jan 28 14:55:11 UTC 2021
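
What the diff below does: shrink the irq-off / engine->active.lock section of
execlists_dequeue(). The preempt-vs-timeslice decision is taken before the
lock is acquired and encoded in the low bits of the last pointer ((void *)1 to
rewind for preemption, (void *)3 to rewind and then defer an expired
timeslice); only once interrupts are disabled and the lock is held do we pause
the ring, rewind and optionally defer. The defer_active() helper and the
execlists_dequeue_irq() wrapper are folded into execlists_dequeue() itself,
with local_irq_disable() moved to just before the locked section and
local_irq_enable() to the end of the function. The presumable win is less time
spent with interrupts off while the priority queue is scanned.

A minimal, self-contained sketch of the low-bit sentinel idiom used above
(illustrative only, not the driver code; the struct and enum names are made up
for the example):

#include <stdbool.h>
#include <stdio.h>

struct request;	/* stand-in for struct i915_request, never dereferenced */

enum {
	ACT_PREEMPT   = 1,	/* bit0: rewind the in-flight requests */
	ACT_TIMESLICE = 3,	/* bit0 + bit1: rewind, then defer */
};

static void locked_section(struct request *last)
{
	/* imagine irqs are now off and the engine lock is held */
	if ((unsigned long)last & 1) {
		bool defer = (unsigned long)last & 2;

		printf("rewind%s\n", defer ? " + defer" : "");
		last = NULL;	/* sentinel consumed; back to a real pointer */
	}
}

int main(void)
{
	/* decision made outside the lock, carried in the pointer value */
	locked_section((struct request *)(unsigned long)ACT_PREEMPT);
	locked_section((struct request *)(unsigned long)ACT_TIMESLICE);
	return 0;
}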


---
 .../drm/i915/gt/intel_execlists_submission.c  | 88 ++++++++-----------
 1 file changed, 39 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index da155a0dd18f..bd14893fa023 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1008,24 +1008,6 @@ static void virtual_xfer_context(struct virtual_engine *ve,
 	}
 }
 
-static void defer_active(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = __i915_sched_rewind_requests(engine);
-	if (!rq)
-		return;
-
-	/*
-	 * We want to move the interrupted request to the back of
-	 * the round-robin list (i.e. its priority level), but
-	 * in doing so, we must then move all requests that were in
-	 * flight and were waiting for the interrupted request to
-	 * be run after it again.
-	 */
-	__i915_sched_defer_request(engine, rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1302,8 +1284,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock(&engine->active.lock);
-
 	/*
 	 * If the queue is higher priority than the last
 	 * request in the currently active context, submit afresh.
@@ -1326,24 +1306,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				     rq_deadline(last),
 				     rq_prio(last));
 			record_preemption(execlists);
-
-			/*
-			 * Don't let the RING_HEAD advance past the breadcrumb
-			 * as we unwind (and until we resubmit) so that we do
-			 * not accidentally tell it to go backwards.
-			 */
-			ring_set_paused(engine, 1);
-
-			/*
-			 * Note that we have not stopped the GPU at this point,
-			 * so we are unwinding the incomplete requests as they
-			 * remain inflight and so by the time we do complete
-			 * the preemption, some of the unwound requests may
-			 * complete!
-			 */
-			__i915_sched_rewind_requests(engine);
-
-			last = NULL;
+			last = (void *)1;
 		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
 				     "expired:%s last=%llx:%llu, deadline=%llu, now=%llu, yield?=%s\n",
@@ -1370,8 +1333,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * same context again, grant it a full timeslice.
 			 */
 			cancel_timer(&execlists->timer);
-			ring_set_paused(engine, 1);
-			defer_active(engine);
 
 			/*
 			 * Unlike for preemption, if we rewind and continue
@@ -1386,7 +1347,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * normal save/restore will preserve state and allow
 			 * us to later continue executing the same request.
 			 */
-			last = NULL;
+			last = (void *)3;
 		} else {
 			/*
 			 * Otherwise if we already have a request pending
@@ -1402,12 +1363,46 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				spin_unlock(&engine->active.lock);
 				return;
 			}
 		}
 	}
 
+	local_irq_disable(); /* irq remains off until after ELSP write */
+	spin_lock(&engine->active.lock);
+
+	if ((unsigned long)last & 1) {
+		bool defer = (unsigned long)last & 2;
+
+		/*
+		 * Don't let the RING_HEAD advance past the breadcrumb
+		 * as we unwind (and until we resubmit) so that we do
+		 * not accidentally tell it to go backwards.
+		 */
+		ring_set_paused(engine, (unsigned long)last);
+
+		/*
+		 * Note that we have not stopped the GPU at this point,
+		 * so we are unwinding the incomplete requests as they
+		 * remain inflight and so by the time we do complete
+		 * the preemption, some of the unwound requests may
+		 * complete!
+		 */
+		last = __i915_sched_rewind_requests(engine);
+
+		/*
+		 * We want to move the interrupted request to the back of
+		 * the round-robin list (i.e. its priority level), but
+		 * in doing so, we must then move all requests that were in
+		 * flight and were waiting for the interrupted request to
+		 * be run after it again.
+		 */
+		if (last && defer)
+			__i915_sched_defer_request(engine, last);
+
+		last = NULL;
+	}
+
 	if (!RB_EMPTY_ROOT(&execlists->virtual.rb_root))
 		virtual_requeue(engine, last);
 
@@ -1523,13 +1518,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			i915_request_put(*port);
 		*execlists->pending = NULL;
 	}
-}
 
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
-{
-	local_irq_disable(); /* Suspend interrupts across request submission */
-	execlists_dequeue(engine);
-	local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+	local_irq_enable();
 }
 
 static void clear_ports(struct i915_request **ports, int count)
@@ -2172,7 +2162,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 		execlists_reset(engine);
 
 	if (!engine->execlists.pending[0]) {
-		execlists_dequeue_irq(engine);
+		execlists_dequeue(engine);
 		start_timeslice(engine);
 	}
 
-- 
2.20.1


