[PATCH 68/69] sched->engine

Chris Wilson <chris@chris-wilson.co.uk>
Mon Feb 1 07:59:48 UTC 2021

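Allocate the i915_sched separately from the engine and manage its
lifetime with a reference count. Backends now construct their scheduler
with i915_sched_create(), supplying the submission tasklet and an opaque
backpointer up front; the tasklets then recover their engine via
se->priv instead of upcasting with container_of() from the embedded
struct. i915_sched_fini() is replaced by i915_sched_put(), with the
final reference flushing the tasklet and freeing the struct, and engine
parking switches to i915_sched_trypark() so that the scheduler is only
parked once we hold the last reference.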

---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |   9 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +-
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 114 ++++++++++--------
 drivers/gpu/drm/i915/gt/intel_reset.c         |   2 +-
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |  27 ++--
 .../gpu/drm/i915/gt/intel_ring_submission.c   |  14 ++-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  24 ++--
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |   4 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  26 ++--
 drivers/gpu/drm/i915/i915_scheduler.c         |  45 +++++--
 drivers/gpu/drm/i915/i915_scheduler.h         |  40 +++++-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  10 ++
 13 files changed, 213 insertions(+), 110 deletions(-)
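
For reviewers, a sketch of the new ownership model (illustrative only;
"my_engine", "my_tasklet" and MY_LOCK_SUBCLASS are stand-in names, not
code from this patch):

    /* The tasklet recovers its backend via se->priv, set at create. */
    static void my_tasklet(struct tasklet_struct *t)
    {
            struct i915_sched *se = from_tasklet(se, t, tasklet);
            struct my_engine *me = se->priv;

            /* drain se->queue into the backend's submission ports */
    }

    me->sched = i915_sched_create(dev, me->name, me->mask,
                                  my_tasklet, me, MY_LOCK_SUBCLASS);
    if (!me->sched)
            return -ENOMEM;

    /* on teardown */
    i915_sched_put(me->sched); /* last ref kills the tasklet and frees */

Note that __engine_park() now uses i915_sched_trypark(), which skips
the park unless kref_read(&se->kref) == 1, i.e. unless the engine holds
the only remaining reference to the scheduler.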

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index dd27d9b063d4..60b58fe8e562 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -689,13 +689,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 		goto err_status;
 	}
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_PHYSICAL);
-
-	intel_engine_init_execlists(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
@@ -914,7 +907,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-	i915_sched_fini(intel_engine_get_scheduler(engine));
+	i915_sched_put(intel_engine_get_scheduler(engine));
 
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index cfcc9b491faf..1d3a8d801da3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -273,7 +273,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (engine->park)
 		engine->park(engine);
 
-	i915_sched_park(intel_engine_get_scheduler(engine));
+	i915_sched_trypark(intel_engine_get_scheduler(engine));
 
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
 	intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index d367a0fc19e5..ab5f8b9c2b3a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -258,6 +258,8 @@ struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	struct intel_gt *gt;
 	struct intel_uncore *uncore;
+	struct i915_sched *sched;
+
 	char name[INTEL_ENGINE_CS_MAX_NAME];
 
 	enum intel_engine_id id;
@@ -294,8 +296,6 @@ struct intel_engine_cs {
 
 	struct intel_sseu sseu;
 
-	struct i915_sched sched;
-
 	/* keep a request in reserve for a [pm] barrier under oom */
 	struct i915_request *request_pool;
 
@@ -560,7 +560,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
 static inline struct i915_sched *
 intel_engine_get_scheduler(struct intel_engine_cs *engine)
 {
-	return &engine->sched;
+	return engine->sched;
 }
 
 #endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index fc0587e51349..984eb234fafb 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -285,7 +285,7 @@ first_virtual(const struct intel_engine_cs *engine)
 	if (!ve)
 		return NULL;
 
-	return first_request(&ve->base.sched);
+	return first_request(ve->base.sched);
 }
 
 static const struct i915_request *
@@ -306,7 +306,7 @@ dl_before(const struct i915_request *next, const struct i915_request *prev)
 static bool need_preempt(const struct intel_engine_cs *engine,
 			 const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 	const struct i915_request *first = NULL;
 	const struct i915_request *next;
 
@@ -566,8 +566,8 @@ static void kick_siblings(struct intel_engine_cs *engine,
 	    rq->execution_mask != engine->mask)
 		resubmit_virtual_request(rq, ve);
 
-	if (!i915_sched_is_idle(&ve->base.sched))
-		i915_sched_kick(&ve->base.sched);
+	if (!i915_sched_is_idle(ve->base.sched))
+		i915_sched_kick(ve->base.sched);
 }
 
 static void __execlists_schedule_out(struct intel_engine_cs *engine,
@@ -1051,7 +1051,7 @@ timeslice_yield(const struct intel_engine_execlists *el,
 static bool needs_timeslice(const struct intel_engine_cs *engine,
 			    const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 
 	if (!i915_sched_has_timeslices(se))
 		return false;
@@ -1177,7 +1177,7 @@ static void __virtual_dequeue(struct virtual_engine *ve,
 	rb_erase_cached(&node->rb, &sibling->execlists.virtual);
 	RB_CLEAR_NODE(&node->rb);
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!virtual_matches(ve, rq, sibling))
 		return;
 
@@ -1243,7 +1243,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			     yesno(engine != ve->siblings[0]));
 
 		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-		if (__i915_request_requeue(rq, &engine->sched)) {
+		if (__i915_request_requeue(rq, engine->sched)) {
 			/*
 			 * Only after we confirm that we will submit
 			 * this request (i.e. it has not already
@@ -2102,7 +2102,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	if (!i915_sched_suspend_request(&engine->sched, cap->rq))
+	if (!i915_sched_suspend_request(engine->sched, cap->rq))
 		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
@@ -2171,8 +2171,8 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, sched.tasklet);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -2751,17 +2751,17 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		rb_erase_cached(rb, &execlists->virtual);
 		RB_CLEAR_NODE(rb);
 
-		spin_lock(&ve->base.sched.lock);
-		for_each_priolist(pl, &ve->base.sched.queue) {
+		spin_lock(&ve->base.sched->lock);
+		for_each_priolist(pl, &ve->base.sched->queue) {
 			priolist_for_each_request_safe(rq, rn, pl) {
 				if (i915_request_mark_eio(rq)) {
 					__i915_request_submit(rq, engine);
 					i915_request_put(rq);
 				}
 			}
-			i915_priolist_advance(&ve->base.sched.queue, pl);
+			i915_priolist_advance(&ve->base.sched->queue, pl);
 		}
-		spin_unlock(&ve->base.sched.lock);
+		spin_unlock(&ve->base.sched->lock);
 	}
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -2807,8 +2807,7 @@ static void execlists_park(struct intel_engine_cs *engine)
 static const struct i915_request *
 execlists_active_request(struct i915_sched *se)
 {
-	struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *rq;
 
 	rq = execlists_active(&engine->execlists);
@@ -2821,8 +2820,8 @@ execlists_active_request(struct i915_sched *se)
 static bool execlists_is_executing(const struct i915_request *rq)
 {
 	struct i915_sched *se = i915_request_get_scheduler(rq);
-	struct intel_engine_execlists *el =
-		&container_of(se, struct intel_engine_cs, sched)->execlists;
+	struct intel_engine_cs *engine = se->priv;
+	struct intel_engine_execlists *el = &engine->execlists;
 	struct i915_request * const *port, *p;
 	bool inflight = false;
 
@@ -2894,7 +2893,7 @@ static bool can_preempt(struct intel_engine_cs *engine)
 
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -2968,13 +2967,13 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION) &&
 	    intel_engine_has_preemption(engine))
 		__set_bit(I915_SCHED_HAS_TIMESLICES_BIT,
-			  &engine->sched.flags);
+			  &engine->sched->flags);
 
 	if (intel_engine_has_preemption(engine)) {
 		__set_bit(I915_SCHED_USE_BUSYWAIT_BIT,
-			  &engine->sched.flags);
+			  &engine->sched->flags);
 		__set_bit(I915_SCHED_HAS_PREEMPT_RESET_BIT,
-			  &engine->sched.flags);
+			  &engine->sched->flags);
 	}
 }
 
@@ -3025,11 +3024,23 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	engine->sched.active_request = execlists_active_request;
-	engine->sched.is_executing = execlists_is_executing;
-	engine->sched.show = execlists_show;
-	tasklet_setup(&engine->sched.tasklet, execlists_submission_tasklet);
-	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched.flags);
+	engine->sched =
+		i915_sched_create(i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  execlists_submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
+
+	engine->sched->submit_request = i915_request_enqueue;
+	engine->sched->active_request = execlists_active_request;
+	engine->sched->is_executing = execlists_is_executing;
+	engine->sched->show = execlists_show;
+	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched->flags);
+
+	intel_engine_init_execlists(engine);
+
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
 	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
@@ -3107,7 +3118,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	 * rbtrees as in the case it is running in parallel, it may reinsert
 	 * the rb_node into a sibling.
 	 */
-	i915_sched_fini(se);
+	tasklet_kill(&se->tasklet);
 
 	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
@@ -3117,15 +3128,16 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irq(&sibling->sched.lock);
+		spin_lock_irq(&sibling->sched->lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irq(&sibling->sched.lock);
+		spin_unlock_irq(&sibling->sched->lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
+	i915_sched_put(se);
 
 	lrc_fini(&ve->context);
 	intel_context_fini(&ve->context);
@@ -3252,7 +3264,7 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 {
 	struct i915_request *rq;
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!rq)
 		return NULL;
 
@@ -3274,8 +3286,8 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
-	struct virtual_engine * const ve =
-		from_tasklet(ve, t, base.sched.tasklet);
+	struct i915_sched *se = from_tasklet(se, t, tasklet);
+	struct virtual_engine * const ve = se->priv;
 	struct i915_request *rq;
 	unsigned int n;
 	u64 deadline;
@@ -3450,7 +3462,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->sched.tasklet.callback !=
+		if (sibling->sched->tasklet.callback !=
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
@@ -3461,7 +3473,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 		ve->siblings[ve->num_siblings++] = sibling;
 		ve->base.mask |= sibling->mask;
-		sched &= sibling->sched.flags;
+		sched &= sibling->sched->flags;
 
 		/*
 		 * All physical engines must be compatible for their emission
@@ -3498,15 +3510,20 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
-	i915_sched_init(&ve->base.sched,
-			ve->base.i915->drm.dev,
-			ve->base.name,
-			ve->base.mask,
-			ENGINE_VIRTUAL);
-	ve->base.sched.flags = sched;
+	ve->base.sched =
+		i915_sched_create(ve->base.i915->drm.dev,
+				  ve->base.name,
+				  ve->base.mask,
+				  virtual_submission_tasklet, ve,
+				  ENGINE_VIRTUAL);
+	if (!ve->base.sched) {
+		err = -ENOMEM;
+		goto err_put;
+	}
 
-	ve->base.sched.submit_request = i915_request_enqueue;
-	tasklet_setup(&ve->base.sched.tasklet, virtual_submission_tasklet);
+	ve->base.sched->flags = sched;
+
+	ve->base.sched->submit_request = i915_request_enqueue;
 
 	ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
 
@@ -3560,7 +3577,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (n == ve->num_siblings)
 		return -EINVAL;
 
-	bond = virtual_find_bond(ve, &master->sched);
+	bond = virtual_find_bond(ve, master->sched);
 	if (bond) {
 		bond->sibling_mask |= sibling->mask;
 		return 0;
@@ -3572,7 +3589,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (!bond)
 		return -ENOMEM;
 
-	bond[ve->num_bonds].master = &master->sched;
+	bond[ve->num_bonds].master = master->sched;
 	bond[ve->num_bonds].sibling_mask = sibling->mask;
 
 	ve->bonds = bond;
@@ -3621,11 +3638,10 @@ static void execlists_show(struct drm_printer *m,
 						int indent),
 			   unsigned int max)
 {
-	const struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	const struct intel_engine_cs *engine = se->priv;
 	const struct intel_engine_execlists *el = &engine->execlists;
-	const u64 *hws = el->csb_status;
 	const u8 num_entries = el->csb_size;
+	const u64 *hws = el->csb_status;
 	struct i915_request * const *port;
 	struct i915_request *rq, *last;
 	intel_wakeref_t wakeref;
@@ -3644,7 +3660,7 @@ static void execlists_show(struct drm_printer *m,
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
-		rq = first_request(&ve->base.sched);
+		rq = first_request(ve->base.sched);
 		if (rq) {
 			if (count++ < max - 1)
 				show_request(m, rq, "\t\t", 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a2f64a488310..e8629db85cae 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -822,7 +822,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 		__intel_gt_reset(gt, ALL_ENGINES);
 
 	for_each_engine(engine, gt, id)
-		engine->sched.submit_request = nop_submit_request;
+		engine->sched->submit_request = nop_submit_request;
 
 	/*
 	 * Make sure no request can slip through without getting completed by
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 7b01237d050f..a2a62d50f71e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -621,8 +621,7 @@ process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
 static void submission_tasklet(struct tasklet_struct *t)
 {
 	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -670,9 +669,8 @@ cancel_port_requests(struct intel_engine_execlists * const el,
 	return inactive;
 }
 
-static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
+static void __ring_rewind(struct i915_sched *se, bool stalled)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	unsigned long flags;
 
@@ -705,7 +703,7 @@ static void ring_reset_csb(struct intel_engine_cs *engine)
 static void ring_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	ring_reset_csb(engine);
-	__ring_rewind(engine, stalled);
+	__ring_rewind(engine->sched, stalled);
 }
 
 static void ring_reset_cancel(struct intel_engine_cs *engine)
@@ -986,7 +984,7 @@ static const struct intel_context_ops ring_context_ops = {
 
 static void set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1182,10 +1180,6 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
-	tasklet_setup(&engine->sched.tasklet, submission_tasklet);
-	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched.flags);
-	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched.flags);
-
 	setup_common(engine);
 
 	switch (engine->class) {
@@ -1206,6 +1200,19 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched) {
+		err = -ENOMEM;
+		goto err;
+	}
+	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched->flags);
+	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched->flags);
+
 	ring = intel_engine_create_ring(engine, global_ring_size());
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 089097cafbf7..ad7c5ec63f8a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -980,12 +980,12 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 
 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i9xx_submit_request;
+	engine->sched->submit_request = i9xx_submit_request;
 }
 
 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = gen6_bsd_submit_request;
+	engine->sched->submit_request = gen6_bsd_submit_request;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1228,6 +1228,16 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
+	engine->sched = i915_sched_create(engine->i915->drm.dev,
+					  engine->name,
+					  engine->mask,
+					  NULL, engine,
+					  ENGINE_PHYSICAL);
+	if (!engine->sched) {
+		err = -ENOMEM;
+		goto err;
+	}
+
 	timeline = intel_timeline_create_from_engine(engine,
 						     I915_GEM_HWS_SEQNO_ADDR);
 	if (IS_ERR(timeline)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 9c2cdd8e18ce..d2a4221a8b9e 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -274,6 +274,7 @@ static void mock_engine_release(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
+	i915_sched_put(engine->sched);
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
 	intel_context_unpin(engine->kernel_context);
@@ -310,8 +311,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
 
-	engine->base.sched.submit_request = mock_submit_request;
-
 	engine->base.reset.prepare = mock_reset_prepare;
 	engine->base.reset.rewind = mock_reset_rewind;
 	engine->base.reset.cancel = mock_reset_cancel;
@@ -336,20 +335,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_MOCK);
-	engine->sched.submit_request = mock_submit_request;
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  NULL, engine,
+				  ENGINE_MOCK);
+	if (!engine->sched)
+		return -ENOMEM;
+
+	engine->sched->submit_request = mock_submit_request;
 
-	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
 
 	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
 	if (!engine->breadcrumbs)
-		return -ENOMEM;
+		goto err_scheduler;
 
 	ce = create_kernel_context(engine);
 	if (IS_ERR(ce))
@@ -363,6 +365,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
 
 err_breadcrumbs:
 	intel_breadcrumbs_free(engine->breadcrumbs);
+err_scheduler:
+	i915_sched_put(engine->sched);
 	return -ENOMEM;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index b65269f4da3b..7a7175a24fd8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
-		i915_sched_suspend_request(&engine->sched, rq);
+		i915_sched_suspend_request(se, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		/* But is resubmitted on release */
-		i915_sched_resume_request(&engine->sched, rq);
+		i915_sched_resume_request(se, rq);
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("%s: held request did not complete!\n",
 			       engine->name);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a6320f90bd4d..2b79d073ac37 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -233,9 +233,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -304,7 +303,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __i915_sched_rewind_requests(&engine->sched);
+	rq = __i915_sched_rewind_requests(se);
 	if (!rq)
 		goto out_unlock;
 
@@ -560,7 +559,7 @@ static int guc_resume(struct intel_engine_cs *engine)
 
 static void guc_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void guc_release(struct intel_engine_cs *engine)
@@ -632,17 +631,22 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
 
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	/*
 	 * The setup relies on several assumptions (e.g. irqs always enabled)
 	 * that are only valid on gen11+
 	 */
-	GEM_BUG_ON(INTEL_GEN(i915) < 11);
+	GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
 
-	tasklet_setup(&engine->sched.tasklet, guc_submission_tasklet);
-	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched.flags);
-	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched.flags);
+	engine->sched = i915_sched_create(engine->i915->drm.dev,
+					  engine->name,
+					  engine->mask,
+					  guc_submission_tasklet, engine,
+					  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
+
+	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched->flags);
+	__set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched->flags);
 
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 93fb6fd959c6..e362c7b8d3af 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -170,12 +170,16 @@ static void init_priolist(struct i915_priolist_root *const root)
 	pl->level = -1;
 }
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass)
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass)
 {
+	kref_init(&se->kref);
 	spin_lock_init(&se->lock);
 	lockdep_set_subclass(&se->lock, subclass);
 
@@ -184,6 +188,9 @@ void i915_sched_init(struct i915_sched *se,
 
 	se->mask = mask;
 
+	tasklet_setup(&se->tasklet, tasklet);
+	se->priv = priv;
+
 	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
 	INIT_LIST_HEAD(&se->hold);
@@ -207,6 +214,25 @@ void i915_sched_init(struct i915_sched *se,
 #endif
 }
 
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass)
+{
+	struct i915_sched *se;
+
+	se = kzalloc(sizeof(*se), GFP_KERNEL);
+	if (!se)
+		return NULL;
+
+	i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+
+	return se;
+}
+
 __maybe_unused static bool priolist_idle(struct i915_priolist_root *root)
 {
 	struct i915_priolist *pl = &root->sentinel;
@@ -265,12 +291,17 @@ void i915_sched_park(struct i915_sched *se)
 	se->no_priolist = false;
 }
 
-void i915_sched_fini(struct i915_sched *se)
+void i915_sched_destroy(struct kref *kref)
 {
-	GEM_BUG_ON(!list_empty(&se->requests));
+	struct i915_sched *se = container_of(kref, typeof(*se), kref);
 
 	tasklet_kill(&se->tasklet); /* flush the callback */
 	i915_sched_park(se);
+
+	GEM_BUG_ON(!list_empty(&se->requests));
+	GEM_BUG_ON(!i915_sched_is_idle(se));
+
+	kfree(se);
 }
 
 static void __ipi_add(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index c7e0f479e8df..a18b9b94b870 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -37,13 +37,41 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 
 void i915_sched_node_retire(struct i915_sched_node *node);
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass);
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass);
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass);
 void i915_sched_park(struct i915_sched *se);
-void i915_sched_fini(struct i915_sched *se);
+void i915_sched_destroy(struct kref *kref);
+
+static inline void i915_sched_trypark(struct i915_sched *se)
+{
+	if (kref_read(&se->kref) == 1)
+		i915_sched_park(se);
+}
+
+static inline struct i915_sched *i915_sched_get(struct i915_sched *se)
+{
+	kref_get(&se->kref);
+	return se;
+}
+
+static inline void i915_sched_put(struct i915_sched *se)
+{
+	if (se)
+		kref_put(&se->kref, i915_sched_destroy);
+}
 
 void i915_request_set_priority(struct i915_request *request, int prio);
 void i915_request_set_deadline(struct i915_request *request, u64 deadline);
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 1b13642f5458..e6a2d3254059 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -36,6 +36,11 @@ enum {
 struct i915_sched {
 	spinlock_t lock; /* protects the scheduling lists and queue */
 
+	/**
+	 * @priv: private opaque pointer reserved for use by the owner.
+	 */
+	void *priv;
+
 	unsigned long flags;
 	unsigned long mask; /* available scheduling channels */
 
@@ -91,6 +96,11 @@ struct i915_sched {
 	 */
 	bool no_priolist;
 
+	/**
+	 * @kref: reference count
+	 */
+	struct kref kref;
+
 	/* Pretty device names for debug messages */
 	struct {
 		struct device *dev;
-- 
2.20.1


