[PATCH 68/75] sched->engine

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 2 00:36:34 UTC 2021

Extract the scheduler from the engine and make it a stand-alone,
reference-counted object. engine->sched becomes a pointer, allocated by
the new i915_sched_create(), which now also takes the submission
tasklet and an opaque priv cookie so that the tasklet callbacks can
recover their owner from se->priv instead of using container_of() on an
embedded struct. Lifetimes are managed with i915_sched_get() and
i915_sched_put(), the final put calling i915_sched_destroy() to kill
the tasklet, park the scheduler and free it. Engine parking switches to
i915_sched_trypark(), which only parks the scheduler if no one else
holds a reference.
---
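For reviewers, a minimal sketch of the intended backend lifecycle; the
example_* names below are placeholders, only the i915_sched_*() calls
and their signatures are taken from this patch:

static int example_submission_setup(struct intel_engine_cs *engine)
{
	/*
	 * Backends now allocate their scheduler rather than initialising
	 * an embedded one; the submission tasklet and its priv cookie are
	 * bound at creation.
	 */
	engine->sched = i915_sched_create(engine->i915->drm.dev,
					  engine->name,
					  engine->mask,
					  example_submission_tasklet,
					  engine,
					  ENGINE_PHYSICAL);
	if (!engine->sched)
		return -ENOMEM;

	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_DEADLINE);
	return 0;
}

static void example_submission_release(struct intel_engine_cs *engine)
{
	/*
	 * Drop the reference; the final i915_sched_put() invokes
	 * i915_sched_destroy(), which kills the tasklet, parks the
	 * scheduler and frees it.
	 */
	i915_sched_put(intel_engine_get_scheduler(engine));
}
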
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |   9 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +-
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 121 ++++++++++--------
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |  29 +++--
 .../gpu/drm/i915/gt/intel_ring_submission.c   |  14 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  24 ++--
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |   4 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  26 ++--
 drivers/gpu/drm/i915/i915_scheduler.c         |  43 ++++++-
 drivers/gpu/drm/i915/i915_scheduler.h         |  40 +++++-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  10 ++
 12 files changed, 216 insertions(+), 112 deletions(-)
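
The mechanical knock-on is that tasklet callbacks can no longer
container_of() from an embedded sched back to their engine; they
recover the owner from the priv cookie instead. A sketch of the pattern
used throughout (callback name hypothetical):

static void example_submission_tasklet(struct tasklet_struct *t)
{
	/* The i915_sched embeds the tasklet, so from_tasklet() finds it... */
	struct i915_sched * const se = from_tasklet(se, t, tasklet);
	/* ...and priv, stashed by i915_sched_create(), names the owner */
	struct intel_engine_cs *engine = se->priv;

	/* backend-specific dequeue/submission work goes here */
}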

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1e20466204ec..0eeedf39c530 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -680,13 +680,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 		goto err_status;
 	}
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_PHYSICAL);
-
-	intel_engine_init_execlists(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
@@ -905,7 +898,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-	i915_sched_fini(intel_engine_get_scheduler(engine));
+	i915_sched_put(intel_engine_get_scheduler(engine));
 
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index cfcc9b491faf..1d3a8d801da3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -273,7 +273,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (engine->park)
 		engine->park(engine);
 
-	i915_sched_park(intel_engine_get_scheduler(engine));
+	i915_sched_trypark(intel_engine_get_scheduler(engine));
 
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
 	intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index c9279357fc4c..75d82c8e8ec0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -258,6 +258,8 @@ struct intel_engine_cs {
 	struct drm_i915_private *i915;
 	struct intel_gt *gt;
 	struct intel_uncore *uncore;
+	struct i915_sched *sched;
+
 	char name[INTEL_ENGINE_CS_MAX_NAME];
 
 	enum intel_engine_id id;
@@ -294,8 +296,6 @@ struct intel_engine_cs {
 
 	struct intel_sseu sseu;
 
-	struct i915_sched sched;
-
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
@@ -557,7 +557,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
 static inline struct i915_sched *
 intel_engine_get_scheduler(struct intel_engine_cs *engine)
 {
-	return &engine->sched;
+	return engine->sched;
 }
 
 #endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8dae1be62e4a..d312cf2d6790 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -286,7 +286,7 @@ first_virtual(const struct intel_engine_cs *engine)
 	if (!ve)
 		return NULL;
 
-	return first_request(&ve->base.sched);
+	return first_request(ve->base.sched);
 }
 
 static const struct i915_request *
@@ -307,7 +307,7 @@ dl_before(const struct i915_request *next, const struct i915_request *prev)
 static bool need_preempt(const struct intel_engine_cs *engine,
 			 const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 	const struct i915_request *first = NULL;
 	const struct i915_request *next;
 
@@ -567,8 +567,8 @@ static void kick_siblings(struct intel_engine_cs *engine,
 	    rq->execution_mask != engine->mask)
 		resubmit_virtual_request(rq, ve);
 
-	if (!i915_sched_is_idle(&ve->base.sched))
-		i915_sched_kick(&ve->base.sched);
+	if (!i915_sched_is_idle(ve->base.sched))
+		i915_sched_kick(ve->base.sched);
 }
 
 static void __execlists_schedule_out(struct intel_engine_cs *engine,
@@ -1048,7 +1048,7 @@ timeslice_yield(const struct intel_engine_execlists *el,
 static bool needs_timeslice(const struct intel_engine_cs *engine,
 			    const struct i915_request *rq)
 {
-	const struct i915_sched *se = &engine->sched;
+	const struct i915_sched *se = engine->sched;
 
 	if (!i915_sched_has_timeslices(se))
 		return false;
@@ -1174,7 +1174,7 @@ static void __virtual_dequeue(struct virtual_engine *ve,
 	rb_erase_cached(&node->rb, &sibling->execlists.virtual);
 	RB_CLEAR_NODE(&node->rb);
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!virtual_matches(ve, rq, sibling))
 		return;
 
@@ -1240,7 +1240,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			     yesno(engine != ve->siblings[0]));
 
 		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-		if (__i915_request_requeue(rq, &engine->sched)) {
+		if (__i915_request_requeue(rq, engine->sched)) {
 			/*
 			 * Only after we confirm that we will submit
 			 * this request (i.e. it has not already
@@ -2099,7 +2099,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	if (!i915_sched_suspend_request(&engine->sched, cap->rq))
+	if (!i915_sched_suspend_request(engine->sched, cap->rq))
 		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
@@ -2168,8 +2168,8 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, sched.tasklet);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -2728,17 +2728,17 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		rb_erase_cached(rb, &execlists->virtual);
 		RB_CLEAR_NODE(rb);
 
-		spin_lock(&ve->base.sched.lock);
-		for_each_priolist(pl, &ve->base.sched.queue) {
+		spin_lock(&ve->base.sched->lock);
+		for_each_priolist(pl, &ve->base.sched->queue) {
 			priolist_for_each_request_safe(rq, rn, pl) {
 				if (i915_request_mark_eio(rq)) {
 					__i915_request_submit(rq, engine);
 					i915_request_put(rq);
 				}
 			}
-			i915_priolist_advance(&ve->base.sched.queue, pl);
+			i915_priolist_advance(&ve->base.sched->queue, pl);
 		}
-		spin_unlock(&ve->base.sched.lock);
+		spin_unlock(&ve->base.sched->lock);
 	}
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -2784,8 +2784,7 @@ static void execlists_park(struct intel_engine_cs *engine)
 static const struct i915_request *
 execlists_active_request(struct i915_sched *se)
 {
-	struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *rq;
 
 	rq = execlists_active(&engine->execlists);
@@ -2798,8 +2797,8 @@ execlists_active_request(struct i915_sched *se)
 static bool execlists_is_executing(const struct i915_request *rq)
 {
 	struct i915_sched *se = i915_request_get_scheduler(rq);
-	struct intel_engine_execlists *el =
-		&container_of(se, struct intel_engine_cs, sched)->execlists;
+	struct intel_engine_cs *engine = se->priv;
+	struct intel_engine_execlists *el = &engine->execlists;
 	struct i915_request * const *port, *p;
 	bool inflight = false;
 
@@ -2915,7 +2914,7 @@ static bool can_preempt(struct intel_engine_cs *engine)
 
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3027,28 +3026,39 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
 	}
 }
 
-static void init_execlists(struct intel_engine_cs *engine)
+static int init_execlists(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	engine->sched.active_request = execlists_active_request;
-	engine->sched.is_executing = execlists_is_executing;
-	engine->sched.revoke_context = execlists_revoke_context;
-	engine->sched.show = execlists_show;
-	tasklet_setup(&engine->sched.tasklet, execlists_submission_tasklet);
+	engine->sched =
+		i915_sched_create(i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  execlists_submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
 
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_DEADLINE);
+	engine->sched->submit_request = i915_request_enqueue;
+	engine->sched->active_request = execlists_active_request;
+	engine->sched->is_executing = execlists_is_executing;
+	engine->sched->revoke_context = execlists_revoke_context;
+	engine->sched->show = execlists_show;
+
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_DEADLINE);
+
+	intel_engine_init_execlists(engine);
 
 	if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION) &&
 	    intel_engine_has_preemption(engine))
-		__set_bit(I915_SCHED_TIMESLICE_BIT, &engine->sched.flags);
+		__set_bit(I915_SCHED_TIMESLICE_BIT, &engine->sched->flags);
 
 	if (intel_engine_has_preemption(engine)) {
-		__set_bit(I915_SCHED_BUSYWAIT_BIT, &engine->sched.flags);
-		__set_bit(I915_SCHED_PREEMPT_RESET_BIT, &engine->sched.flags);
+		__set_bit(I915_SCHED_BUSYWAIT_BIT, &engine->sched->flags);
+		__set_bit(I915_SCHED_PREEMPT_RESET_BIT, &engine->sched->flags);
 	}
 
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
@@ -3080,6 +3090,8 @@ static void init_execlists(struct intel_engine_cs *engine)
 		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
 	}
+
+	return 0;
 }
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
@@ -3090,7 +3102,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	if (engine->class == RENDER_CLASS)
 		rcs_submission_override(engine);
 
-	init_execlists(engine);
+	if (init_execlists(engine))
+		return -ENOMEM;
 
 	lrc_init_wa_ctx(engine);
 
@@ -3143,15 +3156,16 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irq(&sibling->sched.lock);
+		spin_lock_irq(&sibling->sched->lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irq(&sibling->sched.lock);
+		spin_unlock_irq(&sibling->sched->lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
+	i915_sched_put(se);
 
 	lrc_fini(&ve->context);
 	intel_context_fini(&ve->context);
@@ -3276,7 +3290,7 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 {
 	struct i915_request *rq;
 
-	rq = first_request(&ve->base.sched);
+	rq = first_request(ve->base.sched);
 	if (!rq)
 		return NULL;
 
@@ -3298,8 +3312,8 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
-	struct virtual_engine * const ve =
-		from_tasklet(ve, t, base.sched.tasklet);
+	struct i915_sched *se = from_tasklet(se, t, tasklet);
+	struct virtual_engine * const ve = se->priv;
 	struct i915_request *rq;
 	unsigned int n;
 	u64 deadline;
@@ -3474,7 +3488,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->sched.tasklet.callback !=
+		if (sibling->sched->tasklet.callback !=
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
@@ -3485,7 +3499,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 		ve->siblings[ve->num_siblings++] = sibling;
 		ve->base.mask |= sibling->mask;
-		sched &= sibling->sched.flags;
+		sched &= sibling->sched->flags;
 
 		/*
 		 * All physical engines must be compatible for their emission
@@ -3522,15 +3536,20 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
 
-	i915_sched_init(&ve->base.sched,
-			ve->base.i915->drm.dev,
-			ve->base.name,
-			ve->base.mask,
-			ENGINE_VIRTUAL);
-	ve->base.sched.flags = sched;
+	ve->base.sched =
+		i915_sched_create(ve->base.i915->drm.dev,
+				  ve->base.name,
+				  ve->base.mask,
+				  virtual_submission_tasklet, ve,
+				  ENGINE_VIRTUAL);
+	if (!ve->base.sched) {
+		err = -ENOMEM;
+		goto err_put;
+	}
 
-	ve->base.sched.submit_request = i915_request_enqueue;
-	tasklet_setup(&ve->base.sched.tasklet, virtual_submission_tasklet);
+	ve->base.sched->flags = sched;
+
+	ve->base.sched->submit_request = i915_request_enqueue;
 
 	ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
 
@@ -3584,7 +3604,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (n == ve->num_siblings)
 		return -EINVAL;
 
-	bond = virtual_find_bond(ve, &master->sched);
+	bond = virtual_find_bond(ve, master->sched);
 	if (bond) {
 		bond->sibling_mask |= sibling->mask;
 		return 0;
@@ -3596,7 +3616,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (!bond)
 		return -ENOMEM;
 
-	bond[ve->num_bonds].master = &master->sched;
+	bond[ve->num_bonds].master = master->sched;
 	bond[ve->num_bonds].sibling_mask = sibling->mask;
 
 	ve->bonds = bond;
@@ -3645,11 +3665,10 @@ static void execlists_show(struct drm_printer *m,
 						int indent),
 			   unsigned int max)
 {
-	const struct intel_engine_cs *engine =
-		container_of(se, typeof(*engine), sched);
+	const struct intel_engine_cs *engine = se->priv;
 	const struct intel_engine_execlists *el = &engine->execlists;
-	const u64 *hws = el->csb_status;
 	const u8 num_entries = el->csb_size;
+	const u64 *hws = el->csb_status;
 	struct i915_request * const *port;
 	struct i915_request *rq, *last;
 	intel_wakeref_t wakeref;
@@ -3668,7 +3687,7 @@ static void execlists_show(struct drm_printer *m,
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
-		rq = first_request(&ve->base.sched);
+		rq = first_request(ve->base.sched);
 		if (rq) {
 			if (count++ < max - 1)
 				show_request(m, rq, "\t\t", 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 91e1b7f82a4f..deb8e52c1cc8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -621,8 +621,7 @@ process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
 static void submission_tasklet(struct tasklet_struct *t)
 {
 	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct intel_engine_cs *engine = se->priv;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
 
@@ -670,9 +669,8 @@ cancel_port_requests(struct intel_engine_execlists * const el,
 	return inactive;
 }
 
-static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
+static void __ring_rewind(struct i915_sched *se, bool stalled)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq;
 	unsigned long flags;
 
@@ -705,7 +703,7 @@ static void ring_reset_csb(struct intel_engine_cs *engine)
 static void ring_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 {
 	ring_reset_csb(engine);
-	__ring_rewind(engine, stalled);
+	__ring_rewind(engine->sched, stalled);
 }
 
 static void ring_reset_cancel(struct intel_engine_cs *engine)
@@ -986,7 +984,7 @@ static const struct intel_context_ops ring_context_ops = {
 
 static void set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1182,11 +1180,6 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
-	tasklet_setup(&engine->sched.tasklet, submission_tasklet);
-
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_DEADLINE);
-	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched.flags);
-
 	setup_common(engine);
 
 	switch (engine->class) {
@@ -1207,6 +1200,20 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  submission_tasklet, engine,
+				  ENGINE_PHYSICAL);
+	if (!engine->sched) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_DEADLINE);
+	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched->flags);
+
 	ring = intel_engine_create_ring(engine, global_ring_size());
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 089097cafbf7..ad7c5ec63f8a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -980,12 +980,12 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 
 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i9xx_submit_request;
+	engine->sched->submit_request = i9xx_submit_request;
 }
 
 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = gen6_bsd_submit_request;
+	engine->sched->submit_request = gen6_bsd_submit_request;
 }
 
 static void ring_release(struct intel_engine_cs *engine)
@@ -1228,6 +1228,16 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
+	engine->sched = i915_sched_create(engine->i915->drm.dev,
+					  engine->name,
+					  engine->mask,
+					  NULL, engine,
+					  ENGINE_PHYSICAL);
+	if (!engine->sched) {
+		err = -ENOMEM;
+		goto err;
+	}
+
 	timeline = intel_timeline_create_from_engine(engine,
 						     I915_GEM_HWS_SEQNO_ADDR);
 	if (IS_ERR(timeline)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 9c2cdd8e18ce..d2a4221a8b9e 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -274,6 +274,7 @@ static void mock_engine_release(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
+	i915_sched_put(engine->sched);
 	intel_breadcrumbs_free(engine->breadcrumbs);
 
 	intel_context_unpin(engine->kernel_context);
@@ -310,8 +311,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
 
-	engine->base.sched.submit_request = mock_submit_request;
-
 	engine->base.reset.prepare = mock_reset_prepare;
 	engine->base.reset.rewind = mock_reset_rewind;
 	engine->base.reset.cancel = mock_reset_cancel;
@@ -336,20 +335,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
-	i915_sched_init(&engine->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			ENGINE_MOCK);
-	engine->sched.submit_request = mock_submit_request;
+	engine->sched =
+		i915_sched_create(engine->i915->drm.dev,
+				  engine->name,
+				  engine->mask,
+				  NULL, engine,
+				  ENGINE_MOCK);
+	if (!engine->sched)
+		return -ENOMEM;
+
+	engine->sched->submit_request = mock_submit_request;
 
-	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
 
 	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
 	if (!engine->breadcrumbs)
-		return -ENOMEM;
+		goto err_scheduler;
 
 	ce = create_kernel_context(engine);
 	if (IS_ERR(ce))
@@ -363,6 +365,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
 
 err_breadcrumbs:
 	intel_breadcrumbs_free(engine->breadcrumbs);
+err_scheduler:
+	i915_sched_put(engine->sched);
 	return -ENOMEM;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index b65269f4da3b..7a7175a24fd8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
-		i915_sched_suspend_request(&engine->sched, rq);
+		i915_sched_suspend_request(se, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		/* But is resubmitted on release */
-		i915_sched_resume_request(&engine->sched, rq);
+		i915_sched_resume_request(se, rq);
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("%s: held request did not complete!\n",
 			       engine->name);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fa81fa8f97cd..006db849993a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -233,9 +233,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct i915_sched *se = from_tasklet(se, t, tasklet);
-	struct intel_engine_cs * const engine =
-		container_of(se, typeof(*engine), sched);
+	struct i915_sched * const se = from_tasklet(se, t, tasklet);
+	struct intel_engine_cs *engine = se->priv;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -304,7 +303,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __i915_sched_rewind_requests(&engine->sched);
+	rq = __i915_sched_rewind_requests(se);
 	if (!rq)
 		goto out_unlock;
 
@@ -560,7 +559,7 @@ static int guc_resume(struct intel_engine_cs *engine)
 
 static void guc_set_default_submission(struct intel_engine_cs *engine)
 {
-	engine->sched.submit_request = i915_request_enqueue;
+	engine->sched->submit_request = i915_request_enqueue;
 }
 
 static void guc_release(struct intel_engine_cs *engine)
@@ -632,18 +631,23 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
 
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	/*
 	 * The setup relies on several assumptions (e.g. irqs always enabled)
 	 * that are only valid on gen11+
 	 */
-	GEM_BUG_ON(INTEL_GEN(i915) < 11);
+	GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
 
-	tasklet_setup(&engine->sched.tasklet, guc_submission_tasklet);
-	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched.flags);
+	engine->sched = i915_sched_create(engine->i915->drm.dev,
+					  engine->name,
+					  engine->mask,
+					  guc_submission_tasklet, engine,
+					  ENGINE_PHYSICAL);
+	if (!engine->sched)
+		return -ENOMEM;
 
-	i915_sched_select_mode(&engine->sched, I915_SCHED_MODE_PRIORITY);
+	__set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched->flags);
+
+	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_PRIORITY);
 
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 88e9c4076f83..0194189c5305 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -224,12 +224,16 @@ void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode)
 	}
 }
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass)
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass)
 {
+	kref_init(&se->kref);
 	spin_lock_init(&se->lock);
 	lockdep_set_subclass(&se->lock, subclass);
 	mark_lock_used_irq(&se->lock);
@@ -239,6 +243,9 @@ void i915_sched_init(struct i915_sched *se,
 
 	se->mask = mask;
 
+	tasklet_setup(&se->tasklet, tasklet);
+	se->priv = priv;
+
 	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
 	INIT_LIST_HEAD(&se->hold);
@@ -251,6 +258,23 @@ void i915_sched_init(struct i915_sched *se,
 	se->revoke_context = revoke_context;
 }
 
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass)
+{
+	struct i915_sched *se;
+
+	se = kzalloc(sizeof(*se), GFP_KERNEL);
+	if (se)
+		i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+
+	return se;
+}
+
 __maybe_unused static bool priolist_idle(struct i915_priolist_root *root)
 {
 	struct i915_priolist *pl = &root->sentinel;
@@ -313,12 +337,17 @@ void i915_sched_park(struct i915_sched *se)
 				fetch_and_zero(&se->request_pool));
 }
 
-void i915_sched_fini(struct i915_sched *se)
+void i915_sched_destroy(struct kref *kref)
 {
-	GEM_BUG_ON(!list_empty(&se->requests));
+	struct i915_sched *se = container_of(kref, typeof(*se), kref);
 
 	tasklet_kill(&se->tasklet); /* flush the callback */
 	i915_sched_park(se);
+
+	GEM_BUG_ON(!list_empty(&se->requests));
+	GEM_BUG_ON(!i915_sched_is_idle(se));
+
+	kfree(se);
 }
 
 static void __ipi_add(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 8ca273c73ef3..e704bb6ae5dd 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -37,13 +37,41 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 
 void i915_sched_node_retire(struct i915_sched_node *node);
 
-void i915_sched_init(struct i915_sched *se,
-		     struct device *dev,
-		     const char *name,
-		     unsigned long mask,
-		     unsigned int subclass);
+void
+i915_sched_init(struct i915_sched *se,
+		struct device *dev,
+		const char *name,
+		unsigned long mask,
+		void (*tasklet)(struct tasklet_struct *t),
+		void *priv,
+		unsigned int subclass);
+struct i915_sched *
+i915_sched_create(struct device *dev,
+		  const char *name,
+		  unsigned long mask,
+		  void (*tasklet)(struct tasklet_struct *t),
+		  void *priv,
+		  unsigned int subclass);
 void i915_sched_park(struct i915_sched *se);
-void i915_sched_fini(struct i915_sched *se);
+void i915_sched_destroy(struct kref *kref);
+
+static inline void i915_sched_trypark(struct i915_sched *se)
+{
+	if (kref_read(&se->kref) == 1)
+		i915_sched_park(se);
+}
+
+static inline struct i915_sched *i915_sched_get(struct i915_sched *se)
+{
+	kref_get(&se->kref);
+	return se;
+}
+
+static inline void i915_sched_put(struct i915_sched *se)
+{
+	if (se)
+		kref_put(&se->kref, i915_sched_destroy);
+}
 
 void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 86c93c5e4ef3..050ae6cbd6fc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -46,6 +46,11 @@ enum i915_sched_mode {
 struct i915_sched {
 	spinlock_t lock; /* protects the scheduling lists and queue */
 
+	/**
+	 * @priv: private opaque pointer reserved for use by the owner.
+	 */
+	void *priv;
+
 	unsigned long flags;
 	unsigned long mask; /* available scheduling channels */
 
@@ -108,6 +113,11 @@ struct i915_sched {
 	/* keep a request in reserve for a [pm] barrier under oom */
 	struct i915_request *request_pool;
 
+	/**
+	 * @kref: reference count
+	 */
+	struct kref kref;
+
 	/* Pretty device names for debug messages */
 	struct {
 		struct device *dev;
-- 
2.20.1


