[PATCH 26/44] el-shrink-irqoff

Chris Wilson chris at chris-wilson.co.uk
Wed Jan 27 01:10:43 UTC 2021
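
Take the preempt/timeslice decision in execlists_dequeue() before disabling
interrupts and taking engine->active.lock, recording the choice in the low
bits of 'last' ((void *)1 to rewind for preemption, (void *)3 to also defer
the interrupted request on timeslice expiry). The rewind and defer of the
incomplete requests are then performed under the lock, immediately before the
ELSP write, so the irq-off section only covers the submission itself. With the
interrupt disabling moved inside execlists_dequeue(), the
execlists_dequeue_irq() wrapper and the standalone defer_active() helper are
no longer needed and are removed.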


---
 .../drm/i915/gt/intel_execlists_submission.c  | 88 ++++++++-----------
 1 file changed, 39 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 0d0a45252b34..a0c1b7ce333b 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1002,24 +1002,6 @@ static void virtual_xfer_context(struct virtual_engine *ve,
 	}
 }
 
-static void defer_active(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = __intel_engine_rewind_requests(engine);
-	if (!rq)
-		return;
-
-	/*
-	 * We want to move the interrupted request to the back of
-	 * the round-robin list (i.e. its priority level), but
-	 * in doing so, we must then move all requests that were in
-	 * flight and were waiting for the interrupted request to
-	 * be run after it again.
-	 */
-	__intel_engine_defer_request(engine, rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1296,8 +1278,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock(&engine->active.lock);
-
 	/*
 	 * If the queue is higher priority than the last
 	 * request in the currently active context, submit afresh.
@@ -1320,24 +1300,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				     rq_deadline(last),
 				     rq_prio(last));
 			record_preemption(execlists);
-
-			/*
-			 * Don't let the RING_HEAD advance past the breadcrumb
-			 * as we unwind (and until we resubmit) so that we do
-			 * not accidentally tell it to go backwards.
-			 */
-			ring_set_paused(engine, 1);
-
-			/*
-			 * Note that we have not stopped the GPU at this point,
-			 * so we are unwinding the incomplete requests as they
-			 * remain inflight and so by the time we do complete
-			 * the preemption, some of the unwound requests may
-			 * complete!
-			 */
-			__intel_engine_rewind_requests(engine);
-
-			last = NULL;
+			last = (void *)1;
 		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
 				     "expired:%s last=%llx:%llu, deadline=%llu, now=%llu, yield?=%s\n",
@@ -1364,8 +1327,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * same context again, grant it a full timeslice.
 			 */
 			cancel_timer(&execlists->timer);
-			ring_set_paused(engine, 1);
-			defer_active(engine);
 
 			/*
 			 * Unlike for preemption, if we rewind and continue
@@ -1380,7 +1341,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * normal save/restore will preserve state and allow
 			 * us to later continue executing the same request.
 			 */
-			last = NULL;
+			last = (void *)3;
 		} else {
 			/*
 			 * Otherwise if we already have a request pending
@@ -1396,12 +1357,46 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				spin_unlock(&engine->active.lock);
 				return;
 			}
 		}
 	}
 
+	local_irq_disable(); /* irq remains off until after ELSP write */
+	spin_lock(&engine->active.lock);
+
+	if ((unsigned long)last & 1) {
+		bool defer = (unsigned long)last & 2;
+
+		/*
+		 * Don't let the RING_HEAD advance past the breadcrumb
+		 * as we unwind (and until we resubmit) so that we do
+		 * not accidentally tell it to go backwards.
+		 */
+		ring_set_paused(engine, 1);
+
+		/*
+		 * Note that we have not stopped the GPU at this point,
+		 * so we are unwinding the incomplete requests as they
+		 * remain inflight and so by the time we do complete
+		 * the preemption, some of the unwound requests may
+		 * complete!
+		 */
+		last = __intel_engine_rewind_requests(engine);
+
+		/*
+		 * We want to move the interrupted request to the back of
+		 * the round-robin list (i.e. its priority level), but
+		 * in doing so, we must then move all requests that were in
+		 * flight and were waiting for the interrupted request to
+		 * be run after it again.
+		 */
+		if (last && defer)
+			__intel_engine_defer_request(engine, last);
+
+		last = NULL;
+	}
+
 	if (!RB_EMPTY_ROOT(&execlists->virtual.rb_root))
 		virtual_requeue(engine, last);
 
@@ -1517,13 +1512,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			i915_request_put(*port);
 		*execlists->pending = NULL;
 	}
-}
 
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
-{
-	local_irq_disable(); /* Suspend interrupts across request submission */
-	execlists_dequeue(engine);
-	local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
+	local_irq_enable();
 }
 
 static void clear_ports(struct i915_request **ports, int count)
@@ -2168,7 +2158,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 		execlists_reset(engine);
 
 	if (!engine->execlists.pending[0]) {
-		execlists_dequeue_irq(engine);
+		execlists_dequeue(engine);
 		start_timeslice(engine);
 	}
 
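
For reference, the hunks above carry the "preempt vs. timeslice-defer"
decision in the low bits of the request pointer 'last' ((void *)1 and
(void *)3, later tested with "& 1" and "& 2") so the decision can be taken
before irqs are disabled and the lock is held, and acted upon afterwards.
Below is a minimal, standalone userspace sketch of that pattern; the names
(ACT_REWIND, ACT_DEFER, rewind_requests) are illustrative stand-ins and not
part of the driver. The real commit phase additionally pauses the ring with
ring_set_paused() before rewinding, which the sketch omits.

#include <stdbool.h>
#include <stdio.h>

struct request { unsigned long seqno; };

#define ACT_REWIND 0x1UL        /* bit0: rewind the in-flight requests */
#define ACT_DEFER  0x2UL        /* bit1: also defer the rewound request */

/* Stand-in for __intel_engine_rewind_requests(); returns the oldest rq. */
static struct request *rewind_requests(struct request *oldest)
{
        return oldest;
}

int main(void)
{
        struct request rq = { .seqno = 42 };
        struct request *last;

        /* Decision phase (no lock held): record what to do, don't do it yet. */
        last = (void *)(ACT_REWIND | ACT_DEFER);        /* i.e. (void *)3 */

        /* Commit phase (in the driver: irqs off, under engine->active.lock). */
        if ((unsigned long)last & ACT_REWIND) {
                bool defer = (unsigned long)last & ACT_DEFER;

                last = rewind_requests(&rq);
                if (last && defer)
                        printf("defer rq %lu to the back of its prio level\n",
                               last->seqno);

                last = NULL;
        }

        return 0;
}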
-- 
2.20.1


