[PATCH 45/51] drm/i915: Extract the backend scheduling object from intel_engine_cs

Chris Wilson <chris@chris-wilson.co.uk>
Sat Feb 6 17:35:29 UTC 2021


In preparation for the guc, which wants to use a single scheduling channel
for submission onto a single guc workqueue controlling multiple physical
engines, pull the scheduling object out of the physical CS struct.
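
The net effect on the engine is to replace the embedded scheduler with a
pointer to a separately allocated, reference counted object (abridged from
the intel_engine_types.h hunk below):

	struct intel_engine_cs {
		struct drm_i915_private *i915;
		struct intel_gt *gt;
		struct intel_uncore *uncore;
		struct i915_sched *sched; /* was: struct i915_sched sched; */
		...
	};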

Over the last few patches, we have inverted the old hierarchy in which we
scheduled and submitted using the intel_engine_cs; we now use the
scheduling object itself as the primary interface into the scheduler.
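
Each backend now creates its own scheduler and recovers it from the
tasklet via the opaque priv pointer, e.g. (condensed from the ring
scheduler hunks below; the execlists and guc backends follow the same
pattern):

	engine->sched =
		i915_sched_create(engine->i915->drm.dev,
				  engine->name,
				  engine->mask,
				  submission_tasklet, engine,
				  ENGINE_PHYSICAL);
	if (!engine->sched)
		return -ENOMEM;

	static void submission_tasklet(struct tasklet_struct *t)
	{
		struct i915_sched *se = from_tasklet(se, t, tasklet);
		struct intel_engine_cs *engine = se->priv;
		...
	}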

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  16 +--
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +-
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 121 ++++++++++--------
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |  27 ++--
 .../gpu/drm/i915/gt/intel_ring_submission.c   |  27 ++--
 drivers/gpu/drm/i915/gt/mock_engine.c         |  23 ++--
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |   4 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  23 ++--
 drivers/gpu/drm/i915/i915_gem.c               |   6 +-
 drivers/gpu/drm/i915/i915_scheduler.c         |  43 ++++++-
 drivers/gpu/drm/i915/i915_scheduler.h         |  41 +++++-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  10 ++
 13 files changed, 226 insertions(+), 123 deletions(-)
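
Aside for reviewers, below the fold: the scheduler's lifetime is now
governed by a kref, so teardown is a put rather than an explicit fini. A
condensed sketch of the intended lifecycle, assuming a single owner:

	se = i915_sched_create(dev, name, mask, tasklet, priv, subclass);

	/* a second owner would pin the object with i915_sched_get(se) */

	/* parking now only occurs while we hold the last reference */
	i915_sched_trypark(se);

	/* the final put flushes the tasklet and frees the object */
	i915_sched_put(se);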

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 863c76126b02..c1763bf643bb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -427,10 +427,14 @@ void intel_engines_free(struct intel_gt *gt)
 	enum intel_engine_id id;
 
 	/* Free the requests! dma-resv keeps fences around for an eternity */
-	rcu_barrier();
+	rcu_barrier(); /* once for delayed dma_fence_put */
+	rcu_barrier(); /* twice for i915_fence_free */
+	/* Hopefully the rabbit hole doesn't go any deeper! */
 
 	for_each_engine(engine, gt, id) {
+		i915_sched_put(engine->sched);
 		kfree(engine);
+
 		gt->engine[id] = NULL;
 	}
 }
@@ -690,13 +694,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 		goto err_status;
 	}
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_PHYSICAL);
-
-	intel_engine_init_execlists(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
@@ -915,8 +912,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-	i915_sched_fini(intel_engine_get_scheduler(engine));
-
+	intel_engine_flush_scheduler(engine);
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
 	intel_engine_fini_retire(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index cfcc9b491faf..1d3a8d801da3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -273,7 +273,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (engine->park)
 		engine->park(engine);
 
-	i915_sched_park(intel_engine_get_scheduler(engine));
+	i915_sched_trypark(intel_engine_get_scheduler(engine));
 
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
 	intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 5b4eed376f81..b74265af2d50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -258,6 +258,8 @@ struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	struct intel_gt *gt;
 	struct intel_uncore *uncore;
+	struct i915_sched *sched;
+
 	char name[INTEL_ENGINE_CS_MAX_NAME];
 
 	enum intel_engine_id id;
@@ -294,8 +296,6 @@ struct intel_engine_cs {
 
 	struct intel_sseu sseu;
 
-	struct i915_sched sched;
-
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
@@ -560,7 +560,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
 static inline struct i915_sched *
 intel_engine_get_scheduler(struct intel_engine_cs *engine)
 {
-	return &engine->sched;
+	return engine->sched;
 }
 
 #endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 007a46e96fc4..58464120614a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -287,7 +287,7 @@ first_virtual(const struct intel_engine_cs *engine)
 	if (!ve)
 		return NULL;
 
-	return first_request(&ve->base.sched);
+	return first_request(ve->base.sched);
 }
 
 static const struct i915_request *
@@ -308,7 +308,7 @@ dl_before(const struct i915_request *next, const struct i915_request *prev)
 static bool need_preempt(const struct intel_engine_cs *engine,
 			 const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 	const struct i915_request *first = NULL;
 	const struct i915_request *next;
 
@@ -568,8 +568,8 @@ static void kick_siblings(struct intel_engine_cs *engine,
 	    rq->execution_mask != engine->mask)
 		resubmit_virtual_request(rq, ve);
 
-	if (!i915_sched_is_idle(&ve->base.sched))
-		i915_sched_kick(&ve->base.sched);
+	if (!i915_sched_is_idle(ve->base.sched))
+		i915_sched_kick(ve->base.sched);
 }
 
 static void __execlists_schedule_out(struct intel_engine_cs *engine,
@@ -1053,7 +1053,7 @@ timeslice_yield(const struct intel_engine_execlists *el,
 static bool needs_timeslice(const struct intel_engine_cs *engine,
 			    const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 
 	if (!i915_sched_has_timeslices(se))
 		return false;
@@ -1179,7 +1179,7 @@ static void __virtual_dequeue(struct virtual_engine *ve,
 	rb_erase_cached(&node->rb, &sibling->execlists.virtual);
 	RB_CLEAR_NODE(&node->rb);
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!virtual_matches(ve, rq, sibling))
 		return;
 
@@ -1245,7 +1245,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			     yesno(engine != ve->siblings[0]));
 
 		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-		if (__i915_request_requeue(rq, &engine->sched)) {
+		if (__i915_request_requeue(rq, engine->sched)) {
 			/*
 			 * Only after we confirm that we will submit
 			 * this request (i.e. it has not already
@@ -2098,7 +2098,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	if (!i915_sched_suspend_request(&engine->sched, cap->rq))
+	if (!i915_sched_suspend_request(engine->sched, cap->rq))
 		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
@@ -2167,8 +2167,8 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, sched.tasklet);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -2745,9 +2745,9 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		rb_erase_cached(rb, &execlists->virtual);
 		RB_CLEAR_NODE(rb);
 
-		spin_lock(&ve->base.sched.lock);
-		__i915_sched_cancel_queue(&ve->base.sched);
-		spin_unlock(&ve->base.sched.lock);
+		spin_lock(&ve->base.sched->lock);
+		__i915_sched_cancel_queue(ve->base.sched);
+		spin_unlock(&ve->base.sched->lock);
 	}
 
 	spin_unlock_irqrestore(&se->lock, flags);
@@ -2792,8 +2792,7 @@ static void execlists_park(struct intel_engine_cs *engine)
 
 static struct i915_request *execlists_active_request(struct i915_sched *se)
 {
-	struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *rq;
 
 	rq = execlists_active(&engine->execlists);
@@ -2806,8 +2805,8 @@ static struct i915_request *execlists_active_request(struct i915_sched *se)
 static bool execlists_is_executing(const struct i915_request *rq)
 {
 	struct i915_sched *se = i915_request_get_scheduler(rq);
-	struct intel_engine_execlists *el =
-		&container_of(se, struct intel_engine_cs, sched)->execlists;
+	struct intel_engine_cs *engine = se->priv;
+	struct intel_engine_execlists *el = &engine->execlists;
 	struct i915_request * const *port, *p;
 	bool inflight = false;
 
@@ -2923,7 +2922,7 @@ static bool can_preempt(struct intel_engine_cs *engine)
 
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3036,28 +3035,39 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
 	}
 }
 
-static void init_execlists(struct intel_engine_cs *engine)
+static int init_execlists(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	engine->sched.active_request = execlists_active_request;
-	engine->sched.is_executing = execlists_is_executing;
-	engine->sched.revoke_context = execlists_revoke_context;
-	engine->sched.show = execlists_show;
-	tasklet_setup(&engine->sched.tasklet, execlists_submission_tasklet);
+	engine->sched =
+		i915_sched_create(i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  execlists_submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
 
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_DEADLINE);
+	engine->sched->submit_request = i915_request_enqueue;
+	engine->sched->active_request = execlists_active_request;
+	engine->sched->is_executing = execlists_is_executing;
+	engine->sched->revoke_context = execlists_revoke_context;
+	engine->sched->show = execlists_show;
+
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_DEADLINE);
+
+	intel_engine_init_execlists(engine);
 
 	if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION) &&
 	    intel_engine_has_preemption(engine))
-		__set_bit(I915_SCHED_TIMESLICE_BIT, &engine->sched.flags);
+		__set_bit(I915_SCHED_TIMESLICE_BIT, &engine->sched->flags);
 
 	if (intel_engine_has_preemption(engine)) {
-		__set_bit(I915_SCHED_BUSYWAIT_BIT, &engine->sched.flags);
-		__set_bit(I915_SCHED_PREEMPT_RESET_BIT, &engine->sched.flags);
+		__set_bit(I915_SCHED_BUSYWAIT_BIT, &engine->sched->flags);
+		__set_bit(I915_SCHED_PREEMPT_RESET_BIT, &engine->sched->flags);
 	}
 
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
@@ -3089,6 +3099,8 @@ static void init_execlists(struct intel_engine_cs *engine)
 		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
 	}
+
+	return 0;
 }
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
@@ -3099,7 +3111,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	if (engine->class == RENDER_CLASS)
 		rcs_submission_override(engine);
 
-	init_execlists(engine);
+	if (init_execlists(engine))
+		return -ENOMEM;
 
 	lrc_init_wa_ctx(engine);
 
@@ -3145,15 +3158,16 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irq(&sibling->sched.lock);
+		spin_lock_irq(&sibling->sched->lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irq(&sibling->sched.lock);
+		spin_unlock_irq(&sibling->sched->lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
+	i915_sched_put(se);
 
 	lrc_fini(&ve->context);
 	intel_context_fini(&ve->context);
@@ -3278,7 +3292,7 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 {
 	struct i915_request *rq;
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!rq)
 		return NULL;
 
@@ -3300,8 +3314,8 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
-	struct virtual_engine * const ve =
-		from_tasklet(ve, t, base.sched.tasklet);
+	struct i915_sched *se = from_tasklet(se, t, tasklet);
+	struct virtual_engine * const ve = se->priv;
 	struct i915_request *rq;
 	unsigned int n;
 	u64 deadline;
@@ -3476,7 +3490,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->sched.tasklet.callback !=
+		if (sibling->sched->tasklet.callback !=
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
@@ -3487,7 +3501,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 		ve->siblings[ve->num_siblings++] = sibling;
 		ve->base.mask |= sibling->mask;
-		sched &= sibling->sched.flags;
+		sched &= sibling->sched->flags;
 
 		/*
 		 * All physical engines must be compatible for their emission
@@ -3524,16 +3538,22 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
-	i915_sched_init(&ve->base.sched,
-			ve->base.i915->drm.dev,
-			ve->base.name,
-			ve->base.mask,
-			ENGINE_VIRTUAL);
-	ve->base.sched.flags = sched;
+	ve->base.sched =
+		i915_sched_create(ve->base.i915->drm.dev,
+				  ve->base.name,
+				  ve->base.mask,
+				  virtual_submission_tasklet, ve,
+				  ENGINE_VIRTUAL);
+	if (!ve->base.sched) {
+		err = -ENOMEM;
+		goto err_put;
+	}
 
-	ve->base.sched.submit_request = i915_request_enqueue;
-	ve->base.sched.revoke_context = execlists_revoke_context;
-	tasklet_setup(&ve->base.sched.tasklet, virtual_submission_tasklet);
+	ve->base.sched->flags = sched;
+
+	ve->base.sched->submit_request = i915_request_enqueue;
+	ve->base.sched->revoke_context = execlists_revoke_context;
+	tasklet_setup(&ve->base.sched->tasklet, virtual_submission_tasklet);
 
 	ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
 
@@ -3587,7 +3607,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (n == ve->num_siblings)
 		return -EINVAL;
 
-	bond = virtual_find_bond(ve, &master->sched);
+	bond = virtual_find_bond(ve, master->sched);
 	if (bond) {
 		bond->sibling_mask |= sibling->mask;
 		return 0;
@@ -3599,7 +3619,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (!bond)
 		return -ENOMEM;
 
-	bond[ve->num_bonds].master = &master->sched;
+	bond[ve->num_bonds].master = master->sched;
 	bond[ve->num_bonds].sibling_mask = sibling->mask;
 
 	ve->bonds = bond;
@@ -3648,11 +3668,10 @@ static void execlists_show(struct drm_printer *m,
 						int indent),
 			   unsigned int max)
 {
-	const struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	const struct intel_engine_cs *engine = se->priv;
 	const struct intel_engine_execlists *el = &engine->execlists;
-	const u64 *hws = el->csb_status;
 	const u8 num_entries = el->csb_size;
+	const u64 *hws = el->csb_status;
 	struct i915_request * const *port;
 	struct i915_request *rq, *last;
 	intel_wakeref_t wakeref;
@@ -3671,7 +3690,7 @@ static void execlists_show(struct drm_printer *m,
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
-		rq = first_request(&ve->base.sched);
+		rq = first_request(ve->base.sched);
 		if (rq) {
 			if (count++ < max - 1)
 				show_request(m, rq, "\t\t", 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 95185d0d4c69..d4c54ebdf13b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -615,8 +615,7 @@ process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
 static void submission_tasklet(struct tasklet_struct *t)
 {
 	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -661,9 +660,8 @@ cancel_port_requests(struct intel_engine_execlists * const el,
 	return inactive;
 }
 
-static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
+static void __ring_rewind(struct i915_sched *se, bool stalled)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	unsigned long flags;
 
@@ -696,7 +694,7 @@ static void ring_reset_csb(struct intel_engine_cs *engine)
 static void ring_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	ring_reset_csb(engine);
-	__ring_rewind(engine, stalled);
+	__ring_rewind(engine->sched, stalled);
 }
 
 static void ring_reset_cancel(struct intel_engine_cs *engine)
@@ -952,7 +950,7 @@ static const struct intel_context_ops ring_context_ops = {
 
 static void set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1150,10 +1148,6 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
-	tasklet_setup(&engine->sched.tasklet, submission_tasklet);
-
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_DEADLINE);
-
 	setup_common(engine);
 
 	switch (engine->class) {
@@ -1174,6 +1168,19 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_DEADLINE);
+
 	ring = intel_engine_create_ring(engine, global_ring_size());
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index e38028a6517e..6475fb1f3432 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -921,7 +921,7 @@ static const struct intel_context_ops ring_context_ops = {
 
 static void set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1204,8 +1204,7 @@ static void write_tail(struct intel_engine_cs *engine,
 static void passthrough_tasklet(struct tasklet_struct *t)
 {
 	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *last = NULL;
 	struct i915_request *rq, *rn;
 	struct i915_priolist *pl;
@@ -1228,12 +1227,20 @@ static void passthrough_tasklet(struct tasklet_struct *t)
 	local_irq_enable();
 }
 
-static int init_sched(struct intel_engine_cs *engine)
+static struct i915_sched *init_sched(struct intel_engine_cs *engine)
 {
-	tasklet_setup(&engine->sched.tasklet, passthrough_tasklet);
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_NONE);
+	struct i915_sched *se;
 
-	return 0;
+	se = i915_sched_create(engine->i915->drm.dev,
+			       engine->name, engine->mask,
+			       passthrough_tasklet, engine,
+			       ENGINE_PHYSICAL);
+	if (!se)
+		return NULL;
+
+	i915_sched_select_mode(se, I915_SCHED_MODE_NONE);
+
+	return se;
 }
 
 int intel_ring_submission_setup(struct intel_engine_cs *engine)
@@ -1262,9 +1269,11 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
-	err = init_sched(engine);
-	if (err)
+	engine->sched = init_sched(engine);
+	if (!engine->sched) {
+		err = -ENOMEM;
 		goto err;
+	}
 
 	timeline = intel_timeline_create_from_engine(engine,
 						     I915_GEM_HWS_SEQNO_ADDR);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 58d829865735..2b4219e3b3f8 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -309,8 +309,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
 
-	engine->base.sched.submit_request = mock_submit_request;
-
 	engine->base.reset.prepare = mock_reset_prepare;
 	engine->base.reset.rewind = mock_reset_rewind;
 	engine->base.reset.cancel = mock_reset_cancel;
@@ -335,20 +333,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_MOCK);
-	engine->sched.submit_request = mock_submit_request;
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  NULL, engine,
+				  ENGINE_MOCK);
+	if (!engine->sched)
+		return -ENOMEM;
+
+	engine->sched->submit_request = mock_submit_request;
 
-	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
 
 	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
 	if (!engine->breadcrumbs)
-		return -ENOMEM;
+		goto err_scheduler;
 
 	ce = create_kernel_context(engine);
 	if (IS_ERR(ce))
@@ -362,6 +363,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
 
 err_breadcrumbs:
 	intel_breadcrumbs_free(engine->breadcrumbs);
+err_scheduler:
+	i915_sched_put(engine->sched);
 	return -ENOMEM;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index bdec2c02410d..1fe716d9f344 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
-		i915_sched_suspend_request(&engine->sched, rq);
+		i915_sched_suspend_request(se, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		/* But is resubmitted on release */
-		i915_sched_resume_request(&engine->sched, rq);
+		i915_sched_resume_request(se, rq);
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("%s: held request did not complete!\n",
 			       engine->name);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 881c6d62cf47..ac3c20a51b1a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -229,9 +229,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -308,7 +307,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __i915_sched_rewind_requests(&engine->sched);
+	rq = __i915_sched_rewind_requests(se);
 	if (!rq)
 		goto out_unlock;
 
@@ -545,7 +544,7 @@ static int guc_resume(struct intel_engine_cs *engine)
 
 static void guc_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void guc_release(struct intel_engine_cs *engine)
@@ -618,17 +617,21 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
 
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	/*
 	 * The setup relies on several assumptions (e.g. irqs always enabled)
 	 * that are only valid on gen11+
 	 */
-	GEM_BUG_ON(INTEL_GEN(i915) < 11);
+	GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
 
-	tasklet_setup(&engine->sched.tasklet, guc_submission_tasklet);
+	engine->sched = i915_sched_create(engine->i915->drm.dev,
+					  engine->name,
+					  engine->mask,
+					  guc_submission_tasklet, engine,
+					  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
 
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_PRIORITY);
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_PRIORITY);
 
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b2e3b5cfccb4..ac70064c0502 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1086,13 +1086,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
 
 	i915_gem_suspend_late(dev_priv);
-	intel_gt_driver_remove(&dev_priv->gt);
-	dev_priv->uabi_engines = RB_ROOT;
 
 	/* Flush any outstanding unpin_work. */
 	i915_gem_drain_workqueue(dev_priv);
-
 	i915_gem_drain_freed_objects(dev_priv);
+
+	intel_gt_driver_remove(&dev_priv->gt);
+	dev_priv->uabi_engines = RB_ROOT;
 }
 
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index e695081d8adc..efcc180acdd2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -231,12 +231,16 @@ static void init_priolist(struct i915_priolist_root *const root)
 	pl->level = -1;
 }
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass)
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass)
 {
+	kref_init(&se->kref);
 	spin_lock_init(&se->lock);
 	lockdep_set_subclass(&se->lock, subclass);
 	mark_lock_used_irq(&se->lock);
@@ -246,6 +250,9 @@ void i915_sched_init(struct i915_sched *se,
 
 	se->mask = mask;
 
+	tasklet_setup(&se->tasklet, tasklet);
+	se->priv = priv;
+
 	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
 	INIT_LIST_HEAD(&se->hold);
@@ -257,6 +264,23 @@ void i915_sched_init(struct i915_sched *se,
 	se->revoke_context = i915_sched_default_revoke_context;
 }
 
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass)
+{
+	struct i915_sched *se;
+
+	se = kzalloc(sizeof(*se), GFP_KERNEL);
+	if (se)
+		i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+
+	return se;
+}
+
 __maybe_unused static bool priolist_idle(struct i915_priolist_root *root)
 {
 	struct i915_priolist *pl = &root->sentinel;
@@ -319,12 +343,17 @@ void i915_sched_park(struct i915_sched *se)
 				fetch_and_zero(&se->request_pool));
 }
 
-void i915_sched_fini(struct i915_sched *se)
+void i915_sched_destroy(struct kref *kref)
 {
-	GEM_BUG_ON(!list_empty(&se->requests));
+	struct i915_sched *se = container_of(kref, typeof(*se), kref);
 
 	tasklet_kill(&se->tasklet); /* flush the callback */
 	i915_sched_park(se);
+
+	GEM_BUG_ON(!list_empty(&se->requests));
+	GEM_BUG_ON(!i915_sched_is_idle(se));
+
+	kfree(se);
 }
 
 static void __ipi_add(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 1891aeae1d1e..915082cdf195 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -37,14 +37,41 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 
 void i915_sched_node_retire(struct i915_sched_node *node);
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass);
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass);
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass);
 void i915_sched_park(struct i915_sched *se);
-void i915_sched_fini(struct i915_sched *se);
+void i915_sched_destroy(struct kref *kref);
+
+static inline void i915_sched_trypark(struct i915_sched *se)
+{
+	if (kref_read(&se->kref) == 1)
+		i915_sched_park(se);
+}
+
+static inline struct i915_sched *i915_sched_get(struct i915_sched *se)
+{
+	kref_get(&se->kref);
+	return se;
+}
+
+static inline void i915_sched_put(struct i915_sched *se)
+{
+	if (se)
+		kref_put(&se->kref, i915_sched_destroy);
+}
 
 void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 5adf51093366..c687864b00c2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -66,6 +66,11 @@ enum i915_sched_mode {
 struct i915_sched {
 	spinlock_t lock; /* protects the scheduling lists and queue */
 
+	/**
+	 * @priv: private opaque pointer reserved for use by the owner.
+	 */
+	void *priv;
+
 	unsigned long flags;
 	unsigned long mask; /* available scheduling channels */
 
@@ -148,6 +153,11 @@ struct i915_sched {
 	/* keep a request in reserve for a [pm] barrier under oom */
 	struct i915_request *request_pool;
 
+	/**
+	 * @kref: reference count
+	 */
+	struct kref kref;
+
 	/* Pretty device names for debug messages */
 	struct {
 		struct device *dev;
-- 
2.20.1


