[PATCH 20/46] drm/i915: Wrap access to engine->active

Chris Wilson <chris@chris-wilson.co.uk>
Sat Jan 30 12:52:11 UTC 2021


Now that we have separated out the scheduling lists from the engine
type, provide a helper for returning the scheduler interface from the
engine. We will want to make further changes in the near future and so
wish to minimise the noise.
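
For example, call sites that previously reached into engine->active
directly now fetch the scheduler interface once and operate on it; an
illustrative sketch of the pattern applied throughout this patch:

	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);

	spin_lock_irq(&se->lock);
	/* operate on se->requests, se->queue, se->tasklet, ... */
	spin_unlock_irq(&se->lock);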

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine.h        |  8 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     | 23 +++--
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 83 ++++++++++---------
 drivers/gpu/drm/i915/gt/intel_gt_irq.c        |  2 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   | 14 ++--
 drivers/gpu/drm/i915/gt/mock_engine.c         |  7 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c  | 29 ++++---
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  3 +-
 drivers/gpu/drm/i915/gt/selftest_lrc.c        | 13 ++-
 drivers/gpu/drm/i915/gt/selftest_reset.c      |  3 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++-----
 drivers/gpu/drm/i915/i915_gpu_error.c         |  5 +-
 drivers/gpu/drm/i915/i915_request.c           | 23 +++--
 drivers/gpu/drm/i915/i915_scheduler.c         |  2 +-
 .../gpu/drm/i915/selftests/i915_scheduler.c   | 44 +++++-----
 drivers/gpu/drm/i915/selftests/igt_spinner.c  |  2 +-
 17 files changed, 177 insertions(+), 132 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index f113bbf3ce2e..8a3a0a3cdcf8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -258,10 +258,16 @@ intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.heartbeat_interval_ms);
 }
 
+static inline struct i915_sched_engine *
+intel_engine_get_scheduler(struct intel_engine_cs *engine)
+{
+	return &engine->active;
+}
+
 static inline void
 intel_engine_flush_scheduler(struct intel_engine_cs *engine)
 {
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(intel_engine_get_scheduler(engine));
 }
 
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9c5e6c8fe329..9e24da6d35b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -727,6 +727,7 @@ struct measure_breadcrumb {
 static int measure_breadcrumb_dw(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine = ce->engine;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct measure_breadcrumb *frame;
 	int dw;
 
@@ -749,11 +750,11 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	frame->rq.ring = &frame->ring;
 
 	mutex_lock(&ce->timeline->mutex);
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&se->lock);
 
 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&se->lock);
 	mutex_unlock(&ce->timeline->mutex);
 
 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
@@ -1197,6 +1198,8 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
 	/* More white lies, if wedged, hw state is inconsistent */
 	if (intel_gt_is_wedged(engine->gt))
 		return true;
@@ -1206,10 +1209,10 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 
 	/* Waiting to drain ELSP? */
 	synchronize_hardirq(engine->i915->drm.pdev->irq);
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(se);
 
 	/* ELSP is empty, but there are ready requests? E.g. after reset */
-	if (!i915_sched_is_idle(&engine->active))
+	if (!i915_sched_is_idle(se))
 		return false;
 
 	/* Ring stopped? */
@@ -1521,6 +1524,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		       const char *header, ...)
 {
 	struct i915_gpu_error * const error = &engine->i915->gpu_error;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	unsigned long flags;
@@ -1562,7 +1566,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		struct intel_timeline *tl = get_timeline(rq);
@@ -1593,8 +1597,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
 		}
 	}
-	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	drm_printf(m, "\tOn hold?: %lu\n", list_count(&se->hold));
+	spin_unlock_irqrestore(&se->lock, flags);
 
 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
@@ -1662,6 +1666,7 @@ static bool match_ring(struct i915_request *rq)
 struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *request, *active = NULL;
 
 	/*
@@ -1675,7 +1680,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&se->lock);
 
 	rcu_read_lock();
 	request = execlists_active(&engine->execlists);
@@ -1693,7 +1698,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	if (active)
 		return active;
 
-	list_for_each_entry(request, &engine->active.requests, sched.link) {
+	list_for_each_entry(request, &se->requests, sched.link) {
 		if (__i915_request_is_complete(request))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index a79e99bd9218..0854159eeb5a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -277,7 +277,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (engine->park)
 		engine->park(engine);
 
-	i915_sched_park_engine(&engine->active);
+	i915_sched_park_engine(intel_engine_get_scheduler(engine));
 
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
 	intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index ad04d63d7b0e..a83b3bba2753 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1127,6 +1127,8 @@ static bool completed(const struct i915_request *rq)
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_sched_engine * const se =
+		intel_engine_get_scheduler(engine);
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
 	struct i915_request *last, * const *active;
@@ -1156,7 +1158,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock(&engine->active.lock);
+	spin_lock(&se->lock);
 
 	/*
 	 * If the queue is higher priority than the last
@@ -1256,7 +1258,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				spin_unlock(&engine->active.lock);
+				spin_unlock(&se->lock);
 				return;
 			}
 		}
@@ -1275,14 +1277,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq->engine != &ve->base);
 		GEM_BUG_ON(rq->context != &ve->context);
 
-		if (unlikely(rq_prio(rq) < queue_prio(&engine->active))) {
+		if (unlikely(rq_prio(rq) < queue_prio(se))) {
 			spin_unlock(&ve->base.active.lock);
 			break;
 		}
 
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
-			spin_unlock(&engine->active.lock);
+			spin_unlock(&se->lock);
 			return; /* leave this for another sibling */
 		}
 
@@ -1341,7 +1343,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			break;
 	}
 
-	while ((rb = rb_first_cached(&engine->active.queue))) {
+	while ((rb = rb_first_cached(&se->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
@@ -1420,7 +1422,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			}
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
+		rb_erase_cached(&p->node, &se->queue);
 		i915_priolist_free(p);
 	}
 done:
@@ -1442,8 +1444,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * request triggering preemption on the next dequeue (or subsequent
 	 * interrupt for secondary ports).
 	 */
-	execlists->queue_priority_hint = queue_prio(&engine->active);
-	spin_unlock(&engine->active.lock);
+	execlists->queue_priority_hint = queue_prio(se);
+	spin_unlock(&se->lock);
 
 	/*
 	 * We can skip poking the HW if we ended up with exactly the same set
@@ -2055,6 +2057,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 
 static noinline void execlists_reset(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
 	unsigned long *lock = &engine->gt->reset.flags;
 	unsigned long eir = fetch_and_zero(&engine->execlists.error_interrupt);
@@ -2078,13 +2081,13 @@ static noinline void execlists_reset(struct intel_engine_cs *engine)
 	ENGINE_TRACE(engine, "reset for %s\n", msg);
 
 	/* Mark this tasklet as disabled to avoid waiting for it to complete */
-	tasklet_disable_nosync(&engine->active.tasklet);
+	tasklet_disable_nosync(&se->tasklet);
 
 	ring_set_paused(engine, 1); /* Freeze the current request in place */
 	execlists_capture(engine);
 	intel_engine_reset(engine, msg);
 
-	tasklet_enable(&engine->active.tasklet);
+	tasklet_enable(&se->tasklet);
 	clear_and_wake_up_bit(bit, lock);
 }
 
@@ -2138,7 +2141,7 @@ static void __execlists_kick(struct intel_engine_execlists *execlists)
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
 
-	i915_sched_kick(&engine->active);
+	i915_sched_kick(intel_engine_get_scheduler(engine));
 }
 
 #define execlists_kick(t, member) \
@@ -2461,8 +2464,9 @@ static int execlists_resume(struct intel_engine_cs *engine)
 
 static void execlists_reset_prepare(struct intel_engine_cs *engine)
 {
-	ENGINE_TRACE(engine, "depth<-%d\n",
-		     atomic_read(&engine->active.tasklet.count));
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
+	ENGINE_TRACE(engine, "depth<-%d\n", atomic_read(&se->tasklet.count));
 
 	/*
 	 * Prevent request submission to the hardware until we have
@@ -2473,7 +2477,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
 	 */
-	i915_sched_disable(&engine->active);
+	i915_sched_disable(se);
 
 	/*
 	 * We stop engines, otherwise we might get failed reset and a
@@ -2606,6 +2610,7 @@ static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
 
 static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	unsigned long flags;
 
 	ENGINE_TRACE(engine, "\n");
@@ -2615,9 +2620,9 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rcu_read_lock();
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 	__i915_sched_rewind_requests(engine);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 	rcu_read_unlock();
 }
 
@@ -2633,6 +2638,7 @@ static void nop_submission_tasklet(struct tasklet_struct *t)
 static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
@@ -2656,15 +2662,15 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	execlists_reset_csb(engine, true);
 
 	rcu_read_lock();
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link)
+	list_for_each_entry(rq, &se->requests, sched.link)
 		i915_request_put(i915_request_mark_eio(rq));
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
+	while ((rb = rb_first_cached(&se->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 
 		priolist_for_each_request_consume(rq, rn, p) {
@@ -2674,13 +2680,13 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 			}
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
+		rb_erase_cached(&p->node, &se->queue);
 		i915_priolist_free(p);
 	}
-	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
+	GEM_BUG_ON(!i915_sched_is_idle(se));
 
 	/* On-hold requests will be flushed to timeline upon their release */
-	list_for_each_entry(rq, &engine->active.hold, sched.link)
+	list_for_each_entry(rq, &se->hold, sched.link)
 		i915_request_put(i915_request_mark_eio(rq));
 
 	/* Cancel all attached virtual engines */
@@ -2709,17 +2715,19 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
-	engine->active.queue = RB_ROOT_CACHED;
+	se->queue = RB_ROOT_CACHED;
 
-	GEM_BUG_ON(__tasklet_is_enabled(&engine->active.tasklet));
-	engine->active.tasklet.callback = nop_submission_tasklet;
+	GEM_BUG_ON(__tasklet_is_enabled(&se->tasklet));
+	se->tasklet.callback = nop_submission_tasklet;
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 	rcu_read_unlock();
 }
 
 static void execlists_reset_finish(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
 	/*
 	 * After a GPU reset, we may have requests to replay. Do so now while
 	 * we still have the forcewake to be sure that the GPU is not allowed
@@ -2731,10 +2739,9 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 	 * will declare the device wedged.
 	 */
 
-	i915_sched_enable(&engine->active);
+	i915_sched_enable(se);
 
-	ENGINE_TRACE(engine, "depth->%d\n",
-		     atomic_read(&engine->active.tasklet.count));
+	ENGINE_TRACE(engine, "depth->%d\n", atomic_read(&se->tasklet.count));
 }
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
@@ -2945,6 +2952,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 {
 	struct virtual_engine *ve =
 		container_of(wrk, typeof(*ve), rcu.work);
+	struct i915_sched_engine *se = intel_engine_get_scheduler(&ve->base);
 	unsigned int n;
 
 	GEM_BUG_ON(ve->context.inflight);
@@ -2953,7 +2961,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	if (unlikely(ve->request)) {
 		struct i915_request *old;
 
-		spin_lock_irq(&ve->base.active.lock);
+		spin_lock_irq(&se->lock);
 
 		old = fetch_and_zero(&ve->request);
 		if (old) {
@@ -2962,7 +2970,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 			i915_request_put(old);
 		}
 
-		spin_unlock_irq(&ve->base.active.lock);
+		spin_unlock_irq(&se->lock);
 	}
 
 	/*
@@ -2972,7 +2980,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	 * rbtrees as in the case it is running in parallel, it may reinsert
 	 * the rb_node into a sibling.
 	 */
-	i915_sched_fini_engine(&ve->base.active);
+	i915_sched_fini_engine(se);
 
 	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
@@ -2990,7 +2998,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 
 		spin_unlock_irq(&sibling->active.lock);
 	}
-	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.active.tasklet));
+	GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
 	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
 	lrc_fini(&ve->context);
@@ -3526,6 +3534,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   unsigned int max)
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request * const *port;
 	struct i915_request *rq, *last;
 	unsigned long flags;
@@ -3535,7 +3544,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	if (!execlists->port_mask)
 		return;
 
-	i915_sched_lock_bh(&engine->active);
+	i915_sched_lock_bh(se);
 	rcu_read_lock();
 
 	for (port = execlists->active; (rq = *port); port++) {
@@ -3567,7 +3576,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		i915_request_show(m, rq, hdr, 0);
 	}
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	last = NULL;
 	count = 0;
@@ -3592,10 +3601,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 
 	rcu_read_unlock();
-	i915_sched_unlock_bh(&engine->active);
+	i915_sched_unlock_bh(se);
 }
 
 bool
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 5f5e96da09b0..78235c64e1c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -59,7 +59,7 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 	}
 
 	if (tasklet)
-		i915_sched_kick(&engine->active);
+		i915_sched_kick(intel_engine_get_scheduler(engine));
 }
 
 static u32
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 9c2c605d7a92..5f0f5f17cedb 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -322,14 +322,15 @@ static void reset_prepare(struct intel_engine_cs *engine)
 
 static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *pos, *rq;
 	unsigned long flags;
 	u32 head;
 
 	rq = NULL;
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 	rcu_read_lock();
-	list_for_each_entry(pos, &engine->active.requests, sched.link) {
+	list_for_each_entry(pos, &se->requests, sched.link) {
 		if (!__i915_request_is_complete(pos)) {
 			rq = pos;
 			break;
@@ -384,7 +385,7 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	}
 	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -393,19 +394,20 @@ static void reset_finish(struct intel_engine_cs *engine)
 
 static void reset_cancel(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->active.requests, sched.link)
+	list_for_each_entry(request, &se->requests, sched.link)
 		i915_request_put(i915_request_mark_eio(request));
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 350191479c30..fa48716ff030 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -230,15 +230,16 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	unsigned long flags;
 
 	del_timer_sync(&mock->hw_delay);
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link)
+	list_for_each_entry(rq, &se->requests, sched.link)
 		i915_request_put(i915_request_mark_eio(rq));
 	intel_engine_signal_breadcrumbs(engine);
 
@@ -251,7 +252,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 	}
 	INIT_LIST_HEAD(&mock->hw_queue);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void mock_reset_finish(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index d7cedc7735ba..a470ec3c4cd9 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -42,8 +42,10 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			   struct i915_request *rq,
 			   unsigned long timeout)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
 	/* Ignore our own attempts to suppress excess tasklets */
-	i915_sched_kick(&engine->active);
+	i915_sched_kick(se);
 
 	timeout += jiffies;
 	do {
@@ -53,7 +55,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			return 0;
 
 		/* Wait until the HW has acknowleged the submission (or err) */
-		i915_sched_flush(&engine->active);
+		i915_sched_flush(se);
 		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
 			return 0;
 
@@ -568,6 +570,7 @@ static int live_hold_reset(void *arg)
 		return -ENOMEM;
 
 	for_each_engine(engine, gt, id) {
+		struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 		struct intel_context *ce;
 		struct i915_request *rq;
 
@@ -602,9 +605,9 @@ static int live_hold_reset(void *arg)
 			err = -EBUSY;
 			goto out;
 		}
-		tasklet_disable(&engine->active.tasklet);
+		tasklet_disable(&se->tasklet);
 
-		engine->active.tasklet.callback(&engine->active.tasklet);
+		se->tasklet.callback(&se->tasklet);
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
@@ -614,7 +617,7 @@ static int live_hold_reset(void *arg)
 		__intel_engine_reset_bh(engine, NULL);
 		GEM_BUG_ON(rq->fence.error != -EIO);
 
-		tasklet_enable(&engine->active.tasklet);
+		tasklet_enable(&se->tasklet);
 		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
 				      &gt->reset.flags);
 		local_bh_enable();
@@ -1176,8 +1179,8 @@ static int live_timeslice_rewind(void *arg)
 		while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
 			/* Wait for the timeslice to kick in */
 			del_timer(&engine->execlists.timer);
-			i915_sched_kick(&engine->active);
-			i915_sched_flush(&engine->active);
+			i915_sched_kick(intel_engine_get_scheduler(engine));
+			i915_sched_flush(intel_engine_get_scheduler(engine));
 		}
 		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
 		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
@@ -4525,6 +4528,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 				struct intel_engine_cs **siblings,
 				unsigned int nsibling)
 {
+	struct i915_sched_engine *se;
 	struct intel_engine_cs *engine;
 	struct intel_context *ve;
 	struct igt_spinner spin;
@@ -4565,6 +4569,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 
 	engine = rq->engine;
 	GEM_BUG_ON(engine == ve->engine);
+	se = intel_engine_get_scheduler(engine);
 
 	/* Take ownership of the reset and tasklet */
 	local_bh_disable();
@@ -4575,15 +4580,15 @@ static int reset_virtual_engine(struct intel_gt *gt,
 		err = -EBUSY;
 		goto out_heartbeat;
 	}
-	tasklet_disable(&engine->active.tasklet);
+	tasklet_disable(&se->tasklet);
 
-	engine->active.tasklet.callback(&engine->active.tasklet);
+	se->tasklet.callback(&se->tasklet);
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 	/* Fake a preemption event; failed of course */
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&se->lock);
 	__i915_sched_rewind_requests(engine);
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&se->lock);
 	GEM_BUG_ON(rq->engine != engine);
 
 	/* Reset the engine while keeping our active request on hold */
@@ -4594,7 +4599,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 	GEM_BUG_ON(rq->fence.error != -EIO);
 
 	/* Release our grasp on the engine, letting CS flow again */
-	tasklet_enable(&engine->active.tasklet);
+	tasklet_enable(&se->tasklet);
 	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
 	local_bh_enable();
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3d3f41b1271a..cdb0ceff3be1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1701,7 +1701,8 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
 				     const struct igt_atomic_section *p,
 				     const char *mode)
 {
-	struct tasklet_struct * const t = &engine->active.tasklet;
+	struct tasklet_struct * const t =
+		&intel_engine_get_scheduler(engine)->tasklet;
 	int err;
 
 	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 2f83ba504a99..13354d3e6351 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -48,8 +48,10 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			   struct i915_request *rq,
 			   unsigned long timeout)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
 	/* Ignore our own attempts to suppress excess tasklets */
-	i915_sched_kick(&engine->active);
+	i915_sched_kick(se);
 
 	timeout += jiffies;
 	do {
@@ -59,7 +61,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			return 0;
 
 		/* Wait until the HW has acknowleged the submission (or err) */
-		i915_sched_flush(&engine->active);
+		i915_sched_flush(se);
 		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
 			return 0;
 
@@ -1857,12 +1859,15 @@ static void garbage_reset(struct intel_engine_cs *engine,
 
 	local_bh_disable();
 	if (!test_and_set_bit(bit, lock)) {
-		tasklet_disable(&engine->active.tasklet);
+		struct i915_sched_engine *se =
+			intel_engine_get_scheduler(engine);
+
+		tasklet_disable(&se->tasklet);
 
 		if (!rq->fence.error)
 			__intel_engine_reset_bh(engine, NULL);
 
-		tasklet_enable(&engine->active.tasklet);
+		tasklet_enable(&se->tasklet);
 		clear_and_wake_up_bit(bit, lock);
 	}
 	local_bh_enable();
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 154a09ef075a..08594309a96d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -321,7 +321,8 @@ static int igt_atomic_engine_reset(void *arg)
 		goto out_unlock;
 
 	for_each_engine(engine, gt, id) {
-		struct tasklet_struct *t = &engine->active.tasklet;
+		struct tasklet_struct *t =
+			&intel_engine_get_scheduler(engine)->tasklet;
 
 		if (t->func)
 			tasklet_disable(t);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b2af0ea3cec6..42ad82308925 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -180,6 +180,8 @@ static void schedule_out(struct i915_request *rq)
 
 static void __guc_dequeue(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine * const se =
+		intel_engine_get_scheduler(engine);
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **first = execlists->inflight;
 	struct i915_request ** const last_port = first + execlists->port_mask;
@@ -188,7 +190,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&se->lock);
 
 	if (last) {
 		if (*++first)
@@ -203,7 +205,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	 * event.
 	 */
 	port = first;
-	while ((rb = rb_first_cached(&engine->active.queue))) {
+	while ((rb = rb_first_cached(&se->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
@@ -223,7 +225,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 			last = rq;
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
+		rb_erase_cached(&p->node, &se->queue);
 		i915_priolist_free(p);
 	}
 done:
@@ -239,13 +241,14 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
+	struct i915_sched_engine *se = from_tasklet(se, t, tasklet);
 	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, active.tasklet);
+		container_of(se, typeof(*engine), active);
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	for (port = execlists->inflight; (rq = *port); port++) {
 		if (!i915_request_completed(rq))
@@ -261,11 +264,13 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
 
 	__guc_dequeue(engine);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+
 	ENGINE_TRACE(engine, "\n");
 
 	/*
@@ -277,7 +282,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
 	 */
-	i915_sched_disable(&engine->active);
+	i915_sched_disable(se);
 }
 
 static void guc_reset_state(struct intel_context *ce,
@@ -304,10 +309,11 @@ static void guc_reset_state(struct intel_context *ce,
 
 static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rq = __i915_sched_rewind_requests(engine);
@@ -321,11 +327,12 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	guc_reset_state(rq->context, engine, rq->head, stalled);
 
 out_unlock:
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void guc_reset_cancel(struct intel_engine_cs *engine)
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
 	struct rb_node *rb;
@@ -347,16 +354,16 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 	 * submission's irq state, we also wish to remind ourselves that
 	 * it is irq state.)
 	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link) {
+	list_for_each_entry(rq, &se->requests, sched.link) {
 		i915_request_set_error_once(rq, -EIO);
 		i915_request_mark_complete(rq);
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
+	while ((rb = rb_first_cached(&se->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 
 		priolist_for_each_request_consume(rq, rn, p) {
@@ -366,25 +373,26 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 			i915_request_mark_complete(rq);
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
+		rb_erase_cached(&p->node, &se->queue);
 		i915_priolist_free(p);
 	}
-	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
+	GEM_BUG_ON(!i915_sched_is_idle(se));
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
-	engine->active.queue = RB_ROOT_CACHED;
+	se->queue = RB_ROOT_CACHED;
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void guc_reset_finish(struct intel_engine_cs *engine)
 {
-	i915_sched_enable(&engine->active);
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 
-	ENGINE_TRACE(engine, "depth->%d\n",
-		     atomic_read(&engine->active.tasklet.count));
+	i915_sched_enable(se);
+
+	ENGINE_TRACE(engine, "depth->%d\n", atomic_read(&se->tasklet.count));
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0cb3686ed91d..47249bff6705 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1434,6 +1434,7 @@ capture_engine(struct intel_engine_cs *engine,
 	       struct i915_vma_compress *compress)
 {
 	struct intel_engine_capture_vma *capture = NULL;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_engine_coredump *ee;
 	struct i915_request *rq;
 	unsigned long flags;
@@ -1442,12 +1443,12 @@ capture_engine(struct intel_engine_cs *engine,
 	if (!ee)
 		return NULL;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq)
 		capture = intel_engine_coredump_add_request(ee, rq,
 							    ATOMIC_MAYFAIL);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 	if (!capture) {
 		kfree(ee);
 		return NULL;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f68025a78ba2..37e3dc3ae795 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -533,12 +533,12 @@ struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	bool result = false;
 
 	RQ_TRACE(request, "\n");
 
-	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&se->lock);
 
 	/*
 	 * With the advent of preempt-to-busy, we frequently encounter
@@ -595,7 +595,7 @@ bool __i915_request_submit(struct i915_request *request)
 	result = true;
 
 	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-	list_move_tail(&request->sched.link, &engine->active.requests);
+	list_move_tail(&request->sched.link, &se->requests);
 active:
 	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
@@ -622,29 +622,25 @@ bool __i915_request_submit(struct i915_request *request)
 void i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	__i915_request_submit(request);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 void __i915_request_unsubmit(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
-
 	/*
 	 * Only unwind in reverse order, required so that the per-context list
 	 * is kept in seqno/ring order.
 	 */
 	RQ_TRACE(request, "\n");
 
-	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->active.lock);
-
 	/*
 	 * Before we remove this breadcrumb from the signal list, we have
 	 * to ensure that a concurrent dma_fence_enable_signaling() does not
@@ -673,14 +669,15 @@ void __i915_request_unsubmit(struct i915_request *request)
 void i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&se->lock, flags);
 
 	__i915_request_unsubmit(request);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static int __i915_sw_fence_call
@@ -1852,7 +1849,7 @@ long i915_request_wait(struct i915_request *rq,
 	 * for unhappy HW.
 	 */
 	if (i915_request_is_ready(rq))
-		__i915_sched_flush(&rq->engine->active, false);
+		__i915_sched_flush(intel_engine_get_scheduler(rq->engine), false);
 
 	for (;;) {
 		set_current_state(state);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2826ba5922fa..c3bb68b65a9e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -665,7 +665,7 @@ void i915_request_enqueue(struct i915_request *rq)
 	} else {
 		queue_request(engine, rq);
 
-		GEM_BUG_ON(i915_sched_is_idle(&engine->active));
+		GEM_BUG_ON(i915_sched_is_idle(se));
 
 		kick = submit_queue(engine, rq);
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index dda7c759374b..db0e28cb865d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -77,7 +77,7 @@ static int all_engines(struct drm_i915_private *i915,
 	return 0;
 }
 
-static bool check_context_order(struct intel_engine_cs *engine)
+static bool check_context_order(struct i915_sched_engine *se)
 {
 	u64 last_seqno, last_context;
 	unsigned long count;
@@ -86,13 +86,13 @@ static bool check_context_order(struct intel_engine_cs *engine)
 	int last_prio;
 
 	/* We expect the execution order to follow ascending fence-context */
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&se->lock);
 
 	count = 0;
 	last_context = 0;
 	last_seqno = 0;
 	last_prio = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
+	for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		struct i915_request *rq;
 
@@ -119,7 +119,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
 	}
 	result = true;
 out_unlock:
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&se->lock);
 
 	return result;
 }
@@ -128,6 +128,7 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
 			  bool (*fn)(struct i915_request *rq,
 				     unsigned long v, unsigned long e))
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_context *ce;
 	struct igt_spinner spin;
 	struct i915_request *rq;
@@ -170,12 +171,12 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
 		i915_request_add(rq);
 		count++;
 	}
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(se);
 
-	i915_sched_lock_bh(&engine->active);
-	if (fn(rq, count, count - 1) && !check_context_order(engine))
+	i915_sched_lock_bh(se);
+	if (fn(rq, count, count - 1) && !check_context_order(se))
 		err = -EINVAL;
-	i915_sched_unlock_bh(&engine->active);
+	i915_sched_unlock_bh(se);
 
 	igt_spinner_end(&spin);
 err_context:
@@ -191,6 +192,7 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
 			bool (*fn)(struct i915_request *rq,
 				   unsigned long v, unsigned long e))
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_context **ce;
 	struct i915_request **rq;
 	struct igt_spinner spin;
@@ -254,12 +256,12 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
 		}
 		i915_request_add(rq[i]);
 	}
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(se);
 
-	i915_sched_lock_bh(&engine->active);
-	if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+	i915_sched_lock_bh(se);
+	if (fn(rq[i - 1], i, count) && !check_context_order(se))
 		err = -EINVAL;
-	i915_sched_unlock_bh(&engine->active);
+	i915_sched_unlock_bh(se);
 
 	igt_spinner_end(&spin);
 err_free:
@@ -279,6 +281,7 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
 		       bool (*fn)(struct i915_request *rq,
 				  unsigned long v, unsigned long e))
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_context **ce;
 	struct i915_request **rq;
 	struct igt_spinner spin;
@@ -342,12 +345,12 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
 		}
 		i915_request_add(rq[i]);
 	}
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(se);
 
-	i915_sched_lock_bh(&engine->active);
-	if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+	i915_sched_lock_bh(se);
+	if (fn(rq[i - 1], i, count) && !check_context_order(se))
 		err = -EINVAL;
-	i915_sched_unlock_bh(&engine->active);
+	i915_sched_unlock_bh(se);
 
 	igt_spinner_end(&spin);
 err_free:
@@ -367,6 +370,7 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
 			  bool (*fn)(struct i915_request *rq,
 				     unsigned long v, unsigned long e))
 {
+	struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
 	struct intel_context **ce;
 	struct i915_request **rq;
 	struct igt_spinner spin;
@@ -447,12 +451,12 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
 		if (err)
 			break;
 	}
-	i915_sched_flush(&engine->active);
+	i915_sched_flush(se);
 
-	i915_sched_lock_bh(&engine->active);
-	if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+	i915_sched_lock_bh(se);
+	if (fn(rq[i - 1], i, count) && !check_context_order(se))
 		err = -EINVAL;
-	i915_sched_unlock_bh(&engine->active);
+	i915_sched_unlock_bh(se);
 
 	igt_spinner_end(&spin);
 err_free:
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1d23134a506a..d50615f3beba 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -221,7 +221,7 @@ void igt_spinner_fini(struct igt_spinner *spin)
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
 	if (i915_request_is_ready(rq))
-		i915_sched_flush(&rq->engine->active);
+		i915_sched_flush(intel_engine_get_scheduler(rq->engine));
 
 	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
 					       rq->fence.seqno),
-- 
2.20.1
