[PATCH 09/10] drm/i915/execlists: Keep the engine awake until the tasklet is idle

Chris Wilson chris at chris-wilson.co.uk
Sun Aug 11 18:56:25 UTC 2019


The execlists tasklet follows the same principle as the guc: while it is
not completely interrupt driven, we do still expect to generate interrupts
(context-switches) while the tasklet is awake. Currently, we mark the GT
as awake while we expect the CS interrupts to continue, but it is better
to localise that to the engine, e.g. so that intel_engine_is_idle() will
flush the tasklet.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine_pm.h |  5 +++++
 drivers/gpu/drm/i915/gt/intel_lrc.c       | 22 +++++++++++++---------
 drivers/gpu/drm/i915/intel_wakeref.h      | 17 ++++++++++++++++-
 3 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 739c50fefcef..17ead1328d4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -26,6 +26,11 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
 	return intel_wakeref_get_if_active(&engine->wakeref);
 }
 
+static inline bool intel_engine_pm_put_if_awake(struct intel_engine_cs *engine)
+{
+	return intel_wakeref_put_if_active(&engine->wakeref);
+}
+
 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
 {
 	intel_wakeref_put(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index bb74954889dd..d1afb93696ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -553,6 +553,7 @@ execlists_schedule_in(struct i915_request *rq, int idx)
 	struct intel_context *ce = rq->hw_context;
 	int count;
 
+	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
 	trace_i915_request_in(rq, idx);
 
 	count = intel_context_inflight_count(ce);
@@ -560,7 +561,7 @@ execlists_schedule_in(struct i915_request *rq, int idx)
 		intel_context_get(ce);
 		ce->inflight = rq->engine;
 
-		intel_gt_pm_get(ce->inflight->gt);
+		intel_engine_pm_get(ce->inflight);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 		intel_engine_context_in(ce->inflight);
 	}
@@ -593,7 +594,7 @@ execlists_schedule_out(struct i915_request *rq)
 	if (!intel_context_inflight_count(ce)) {
 		intel_engine_context_out(ce->inflight);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-		intel_gt_pm_put(ce->inflight->gt);
+		intel_engine_pm_put(ce->inflight);
 
 		/*
 		 * If this is part of a virtual engine, its next request may
@@ -1356,7 +1357,6 @@ static void process_csb(struct intel_engine_cs *engine)
 	const u8 num_entries = execlists->csb_size;
 	u8 head, tail;
 
-	lockdep_assert_held(&engine->active.lock);
 	GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
 
 	/*
@@ -1479,8 +1479,6 @@ static void process_csb(struct intel_engine_cs *engine)
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
 	lockdep_assert_held(&engine->active.lock);
-
-	process_csb(engine);
 	if (!engine->execlists.pending[0])
 		execlists_dequeue(engine);
 }
@@ -1494,9 +1492,15 @@ static void execlists_submission_tasklet(unsigned long data)
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
-	__execlists_submission_tasklet(engine);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	if (!intel_engine_pm_is_awake(engine))
+		return;
+
+	process_csb(engine);
+	if (!engine->execlists.pending[0]) {
+		spin_lock_irqsave(&engine->active.lock, flags);
+		__execlists_submission_tasklet(engine);
+		spin_unlock_irqrestore(&engine->active.lock, flags);
+	}
 }
 
 static void execlists_submission_timer(struct timer_list *timer)
@@ -2728,7 +2732,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 
 static void execlists_park(struct intel_engine_cs *engine)
 {
-	del_timer_sync(&engine->execlists.timer);
+	del_timer(&engine->execlists.timer);
 }
 
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 5f0c972a80fb..ef6239f323fe 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -85,7 +85,7 @@ intel_wakeref_get(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_get_if_in_use: Acquire the wakeref
+ * intel_wakeref_get_if_active: Increase the active wakeref counter
  * @wf: the wakeref
  *
  * Acquire a hold on the wakeref, but only if the wakeref is already
@@ -99,6 +99,21 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
 	return atomic_inc_not_zero(&wf->count);
 }
 
+/**
+ * intel_wakeref_put_if_active: Decrease the active wakeref counter
+ * @wf: the wakeref
+ *
+ * Release a hold on the wakeref, but only if the wakeref will remain
+ * active after the release.
+ *
+ * Returns: true if the wakeref was released, false otherwise.
+ */
+static inline bool
+intel_wakeref_put_if_active(struct intel_wakeref *wf)
+{
+	return atomic_add_unless(&wf->count, -1, 1);
+}
+
 /**
  * intel_wakeref_put: Release the wakeref
  * @i915: the drm_i915_private device
-- 
2.23.0.rc1



More information about the Intel-gfx-trybot mailing list