[PATCH 57/57] drm/i915: Write protect the scheduler vfuncs

Chris Wilson <chris@chris-wilson.co.uk>
Fri Feb 5 01:18:55 UTC 2021


Move the backend vfuncs out of the mutable struct i915_sched and into
const struct i915_sched_ops tables, so the function pointers live in
read-only memory and cannot be overwritten at runtime.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
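Note (not part of the patch): below is a minimal, standalone sketch of the
const-ops pattern adopted here, using invented names (example_sched,
example_ops, EXAMPLE_DEFAULT_OPS) rather than the i915 code. It only
illustrates why the callbacks end up in read-only memory and why the
-Woverride-init pragmas are needed when a backend overrides one of the
defaults filled in by the macro.

#include <stdio.h>

struct example_sched;

struct example_ops {
	int mode;
	void (*submit)(struct example_sched *sched);
	void (*show)(const struct example_sched *sched);
};

struct example_sched {
	const struct example_ops *ops;	/* const table, lives in rodata */
	const char *name;
};

static void default_submit(struct example_sched *sched)
{
	printf("%s: default submit\n", sched->name);
}

static void default_show(const struct example_sched *sched)
{
	printf("%s: default state dump\n", sched->name);
}

/* Common defaults, expanded as designated initializers. */
#define EXAMPLE_DEFAULT_OPS(mode__) \
	.mode = (mode__), \
	.submit = default_submit, \
	.show = default_show

static void fancy_show(const struct example_sched *sched)
{
	printf("%s: fancy state dump\n", sched->name);
}

/*
 * A backend overriding one of the defaults. Re-initialising .show is
 * legal C (the last initializer wins), but GCC warns about it under
 * -Woverride-init, which is why the real tables in this patch are
 * wrapped in "#pragma GCC diagnostic ignored" push/pop.
 */
static const struct example_ops fancy_ops = {
	EXAMPLE_DEFAULT_OPS(1),
	.show = fancy_show,
};

static void sched_init(struct example_sched *sched, const char *name,
		       const struct example_ops *ops)
{
	sched->name = name;
	sched->ops = ops;
}

int main(void)
{
	struct example_sched sched;

	sched_init(&sched, "rcs0", &fancy_ops);
	sched.ops->submit(&sched);	/* dispatch through the const table */
	sched.ops->show(&sched);
	return 0;
}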
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  4 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 46 +++++++++++-------
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    | 18 +++----
 .../gpu/drm/i915/gt/intel_ring_submission.c   | 23 ++++-----
 drivers/gpu/drm/i915/gt/mock_engine.c         | 10 +++-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 14 +++---
 drivers/gpu/drm/i915/i915_gpu_error.c         |  2 +-
 drivers/gpu/drm/i915/i915_request.h           |  4 +-
 drivers/gpu/drm/i915/i915_scheduler.c         | 37 +++++++--------
 drivers/gpu/drm/i915/i915_scheduler.h         | 22 +++++----
 drivers/gpu/drm/i915/i915_scheduler_types.h   | 47 ++++++++++++-------
 11 files changed, 131 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index b53ec6d7cdbe..2c6d1f47c002 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -401,7 +401,9 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
 		if (ban && intel_context_set_banned(ce))
 			continue;
 
-		se->revoke_context(ce, ban ? engines->ctx->name : NULL, error);
+		se->ops->revoke_context(ce,
+					ban ? engines->ctx->name : NULL,
+					error);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 9ff1b4002b9d..c94eac5e38b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3195,6 +3195,23 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
 	}
 }
 
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverride-init"
+
+static const struct i915_sched_ops execlists_ops = {
+	I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+	.tasklet = execlists_submission_tasklet,
+
+	.active_request = execlists_active_request,
+	.is_executing = execlists_is_executing,
+	.revoke_context = execlists_revoke_context,
+
+	.show = execlists_show,
+};
+
+#pragma GCC diagnostic pop
+
 static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
 {
 	struct intel_uncore *uncore = engine->uncore;
@@ -3206,19 +3223,9 @@ static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
 	if (!el)
 		return NULL;
 
-	i915_sched_init(&el->sched,
-			i915->drm.dev,
-			engine->name,
-			engine->mask,
-			execlists_submission_tasklet, engine,
-			ENGINE_PHYSICAL);
-
-	el->sched.active_request = execlists_active_request;
-	el->sched.is_executing = execlists_is_executing;
-	el->sched.revoke_context = execlists_revoke_context;
-	el->sched.show = execlists_show;
-
-	i915_sched_select_mode(&el->sched, I915_SCHED_MODE_DEADLINE);
+	i915_sched_init(&el->sched, i915->drm.dev,
+			engine->name, engine->mask,
+			&execlists_ops, engine);
 
 	if (intel_engine_has_preemption(engine)) {
 		__set_bit(I915_SCHED_BUSYWAIT_BIT, &el->sched.flags);
@@ -3610,6 +3617,13 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 	to_request(signal)->sched.execution &= ~allowed;
 }
 
+static const struct i915_sched_ops virtual_ops = {
+	I915_SCHED_DEFAULT_OPS(NONE), /* mode inherited from siblings */
+
+	.tasklet = virtual_submission_tasklet,
+	.subclass = ENGINE_VIRTUAL,
+};
+
 struct intel_context *
 intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count)
@@ -3719,15 +3733,13 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		i915_sched_create(ve->base.i915->drm.dev,
 				  ve->base.name,
 				  ve->base.mask,
-				  virtual_submission_tasklet, ve,
-				  ENGINE_VIRTUAL);
+				  &virtual_ops, ve);
 	if (!ve->base.sched) {
 		err = -ENOMEM;
 		goto err_put;
 	}
 
-	ve->base.sched->flags = sched;
-	tasklet_setup(&ve->base.sched->tasklet, virtual_submission_tasklet);
+	ve->base.sched->flags |= sched; /* override submission method */
 
 	ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 25652f665e97..304e4868d89a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -13,6 +13,7 @@
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
 #include "i915_mitigations.h"
+#include "i915_scheduler.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
@@ -1190,6 +1191,12 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
 	return err;
 }
 
+static const struct i915_sched_ops ring_ops = {
+	I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+	.tasklet = submission_tasklet,
+};
+
 static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
 {
 	struct ring_sched *rs;
@@ -1198,14 +1205,9 @@ static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
 	if (!rs)
 		return NULL;
 
-	i915_sched_init(&rs->sched,
-			engine->i915->drm.dev,
-			engine->name,
-			engine->mask,
-			submission_tasklet, engine,
-			ENGINE_PHYSICAL);
-
-	i915_sched_select_mode(&rs->sched, I915_SCHED_MODE_DEADLINE);
+	i915_sched_init(&rs->sched, engine->i915->drm.dev,
+			engine->name, engine->mask,
+			&ring_ops, engine);
 
 	rs->active = rs->inflight;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 10723bb98bc5..2612c25b02ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -355,7 +355,7 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	 * subsequent hangs.
 	 */
 
-	rq = se->active_request(se);
+	rq = se->ops->active_request(se);
 	if (rq) {
 		/*
 		 * Try to restore the logical GPU state to match the
@@ -1226,20 +1226,17 @@ static void passthrough_tasklet(struct tasklet_struct *t)
 	local_irq_enable();
 }
 
+static const struct i915_sched_ops ring_ops = {
+	I915_SCHED_DEFAULT_OPS(NONE),
+
+	.tasklet = passthrough_tasklet
+};
+
 static struct i915_sched *init_sched(struct intel_engine_cs *engine)
 {
-	struct i915_sched *se;
-
-	se = i915_sched_create(engine->i915->drm.dev,
-			       engine->name, engine->mask,
-			       passthrough_tasklet, engine,
-			       ENGINE_PHYSICAL);
-	if (!se)
-		return NULL;
-
-	i915_sched_select_mode(se, I915_SCHED_MODE_NONE);
-
-	return se;
+	return i915_sched_create(engine->i915->drm.dev,
+				 engine->name, engine->mask,
+				 &ring_ops, engine);
 }
 
 int intel_ring_submission_setup(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 78a12b9bcc0e..323c28cbe401 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -321,6 +321,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	return engine;
 }
 
+static const struct i915_sched_ops mock_ops = {
+	I915_SCHED_DEFAULT_OPS(NONE),
+
+	.tasklet = submission_tasklet,
+	.subclass = ENGINE_MOCK,
+};
+
 int mock_engine_init(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
@@ -336,8 +343,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
 			engine->i915->drm.dev,
 			engine->name,
 			engine->mask,
-			submission_tasklet, engine,
-			ENGINE_MOCK);
+			&mock_ops, engine);
 
 	i915_sched_enable(&se->sched);
 	engine->sched = &se->sched;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 4f2cc608d514..093f6ffd1030 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -505,6 +505,12 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
 	intel_engine_set_irq_handler(engine, cs_irq_handler);
 }
 
+static const struct i915_sched_ops guc_sched_ops = {
+	I915_SCHED_DEFAULT_OPS(PRIORITY),
+
+	.tasklet = guc_submission_tasklet,
+};
+
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 {
 	/*
@@ -514,15 +520,11 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 	GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
 
 	engine->sched = i915_sched_create(engine->i915->drm.dev,
-					  engine->name,
-					  engine->mask,
-					  guc_submission_tasklet, engine,
-					  ENGINE_PHYSICAL);
+					  engine->name, engine->mask,
+					  &guc_sched_ops, engine);
 	if (!engine->sched)
 		return -ENOMEM;
 
-	i915_sched_select_mode(engine->sched, I915_SCHED_MODE_PRIORITY);
-
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b682c8c63d20..4cdd47be55d7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1381,7 +1381,7 @@ capture_engine(struct intel_engine_cs *engine,
 
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
-	rq = se->active_request(se);
+	rq = se->ops->active_request(se);
 	if (rq)
 		capture = intel_engine_coredump_add_request(ee, rq,
 							    ATOMIC_MAYFAIL);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6877962cf1..fdc4557556e1 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -646,7 +646,7 @@ static inline bool i915_request_is_executing(const struct i915_request *rq)
 		return true;
 
 	se = i915_request_get_scheduler(rq);
-	if (!se->is_executing)
+	if (!se->ops->is_executing)
 		return false;
 
 	if (!i915_request_is_ready(rq)) /* Not yet ready for execution */
@@ -659,7 +659,7 @@ static inline bool i915_request_is_executing(const struct i915_request *rq)
 	 * need to double check with the backend for it to query the HW
 	 * to see if the request is still executing.
 	 */
-	return se->is_executing(rq);
+	return se->ops->is_executing(rq);
 }
 
 static inline bool i915_request_use_semaphores(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2c059ad18f86..de4ebe91ac14 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -142,8 +142,7 @@ static bool match_ring(const struct i915_request *rq)
 	return ENGINE_READ(engine, RING_START) == i915_ggtt_offset(ring->vma);
 }
 
-static struct i915_request *
-i915_sched_default_active_request(struct i915_sched *se)
+struct i915_request *i915_sched_default_active_request(struct i915_sched *se)
 {
 	struct i915_request *request, *active = NULL;
 
@@ -185,7 +184,7 @@ static bool context_active(struct intel_context *ce)
 	return i915_active_fence_isset(&ce->timeline->last_request);
 }
 
-static void
+void
 i915_sched_default_revoke_context(struct intel_context *ce,
 				  const char *force,
 				  int error)
@@ -199,7 +198,7 @@ i915_sched_default_revoke_context(struct intel_context *ce,
 				      "context revoked from %s", force);
 }
 
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode)
+static void select_mode(struct i915_sched *se, enum i915_sched_mode mode)
 {
 	switch (min_t(int, mode, CONFIG_DRM_I915_SCHED)) {
 	case I915_SCHED_MODE_DEADLINE:
@@ -231,22 +230,22 @@ i915_sched_init(struct i915_sched *se,
 		struct device *dev,
 		const char *name,
 		unsigned long mask,
-		void (*tasklet)(struct tasklet_struct *t),
-		void *priv,
-		unsigned int subclass)
+		const struct i915_sched_ops *ops,
+		void *priv)
 {
 	kref_init(&se->kref);
 	spin_lock_init(&se->lock);
-	lockdep_set_subclass(&se->lock, subclass);
+	lockdep_set_subclass(&se->lock, ops->subclass);
 	mark_lock_used_irq(&se->lock);
 
 	se->dbg.dev = dev;
 	se->dbg.name = name;
 
-	se->mask = mask;
-
-	tasklet_setup(&se->tasklet, tasklet);
+	se->ops = ops;
 	se->priv = priv;
+	se->mask = mask;
+	tasklet_setup(&se->tasklet, ops->tasklet);
+	se->flags = ops->flags;
 
 	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
@@ -254,23 +253,21 @@ i915_sched_init(struct i915_sched *se,
 
 	i915_sched_init_ipi(&se->ipi);
 
-	se->active_request = i915_sched_default_active_request;
-	se->revoke_context = i915_sched_default_revoke_context;
+	select_mode(se, ops->mode);
 }
 
 struct i915_sched *
 i915_sched_create(struct device *dev,
 		  const char *name,
 		  unsigned long mask,
-		  void (*tasklet)(struct tasklet_struct *t),
-		  void *priv,
-		  unsigned int subclass)
+		  const struct i915_sched_ops *ops,
+		  void *priv)
 {
 	struct i915_sched *se;
 
 	se = kzalloc(sizeof(*se), GFP_KERNEL);
 	if (se)
-		i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+		i915_sched_init(se, dev, name, mask, ops, priv);
 
 	return se;
 }
@@ -1852,7 +1849,7 @@ void i915_sched_show(struct drm_printer *m,
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
 
-	rq = se->active_request(se);
+	rq = se->ops->active_request(se);
 	if (rq) {
 		struct intel_ring *ring = i915_request_get_ring(rq);
 
@@ -1937,8 +1934,8 @@ void i915_sched_show(struct drm_printer *m,
 	spin_unlock_irqrestore(&se->lock, flags);
 	rcu_read_unlock();
 
-	if (se->show)
-		se->show(m, se, show_request, max);
+	if (se->ops->show)
+		se->ops->show(m, se, show_request, max);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index f20ca081db0b..95e53c11f651 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -47,16 +47,14 @@ i915_sched_init(struct i915_sched *se,
 		struct device *dev,
 		const char *name,
 		unsigned long mask,
-		void (*tasklet)(struct tasklet_struct *t),
-		void *priv,
-		unsigned int subclass);
+		const struct i915_sched_ops *ops,
+		void *priv);
 struct i915_sched *
 i915_sched_create(struct device *dev,
 		  const char *name,
 		  unsigned long mask,
-		  void (*tasklet)(struct tasklet_struct *t),
-		  void *priv,
-		  unsigned int subclass);
+		  const struct i915_sched_ops *ops,
+		  void *priv);
 void i915_sched_park(struct i915_sched *se);
 void i915_sched_destroy(struct kref *kref);
 
@@ -78,8 +76,6 @@ static inline void i915_sched_put(struct i915_sched *se)
 		kref_put(&se->kref, i915_sched_destroy);
 }
 
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
-
 void i915_request_set_priority(struct i915_request *request, int prio);
 void i915_request_set_deadline(struct i915_request *request, u64 deadline);
 
@@ -207,4 +203,14 @@ void i915_sched_show(struct drm_printer *m,
 					  int indent),
 		     unsigned int max);
 
+struct i915_request *i915_sched_default_active_request(struct i915_sched *se);
+void i915_sched_default_revoke_context(struct intel_context *ce,
+				       const char *force,
+				       int error);
+
+#define I915_SCHED_DEFAULT_OPS(mode__) \
+	.mode = I915_SCHED_MODE_##mode__, \
+	.active_request = i915_sched_default_active_request, \
+	.revoke_context = i915_sched_default_revoke_context
+
 #endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 10a50ee4e050..0a4f62649bad 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -17,6 +17,7 @@
 
 struct drm_printer;
 struct i915_request;
+struct i915_sched;
 struct intel_context;
 
 enum {
@@ -56,24 +57,8 @@ enum i915_sched_mode {
 	I915_SCHED_MODE_DEADLINE, /* reorder to meet soft deadlines; fair */
 };
 
-/**
- * struct i915_sched - funnels requests towards hardware
- *
- * The struct i915_sched captures all the requests as they become ready
- * to execute (on waking the i915_request.submit fence) puts them into
- * a queue where they may be reordered according to priority and then
- * wakes the backend tasklet to feed the queue to HW.
- */
-struct i915_sched {
-	spinlock_t lock; /* protects the scheduling lists and queue */
-
-	/**
-	 * @priv: private opaque pointer reserved for use by the owner.
-	 */
-	void *priv;
-
-	unsigned long flags;
-	unsigned long mask; /* available scheduling channels */
+struct i915_sched_ops {
+	enum i915_sched_mode mode;
 
 	struct i915_request *(*active_request)(struct i915_sched *se);
 
@@ -91,6 +76,32 @@ struct i915_sched {
 					  int indent),
 		     unsigned int max);
 
+	void (*tasklet)(struct tasklet_struct *t);
+	unsigned long flags;
+	int subclass;
+};
+
+/**
+ * struct i915_sched - funnels requests towards hardware
+ *
+ * The struct i915_sched captures all the requests as they become ready
+ * to execute (on waking the i915_request.submit fence) puts them into
+ * a queue where they may be reordered according to priority and then
+ * wakes the backend tasklet to feed the queue to HW.
+ */
+struct i915_sched {
+	const struct i915_sched_ops *ops;
+
+	/**
+	 * @priv: private opaque pointer reserved for use by the owner.
+	 */
+	void *priv;
+
+	unsigned long flags;
+	unsigned long mask; /* available scheduling channels */
+
+	spinlock_t lock; /* protects the scheduling lists and queue */
+
 	struct list_head requests; /* active request, on HW */
 	struct list_head hold; /* ready requests, but on hold */
 	/**
-- 
2.20.1
