[PATCH 57/57] drm/i915: Write-protect the scheduler vfuncs
Chris Wilson
chris at chris-wilson.co.uk
Wed Feb 3 23:49:08 UTC 2021
Move the backend vfuncs to const i915_sched_ops objects, so that the
function pointer tables are stored in read-only memory and cannot be
overwritten at runtime.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 4 +-
.../drm/i915/gt/intel_execlists_submission.c | 46 +++++++++++-------
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 18 +++----
.../gpu/drm/i915/gt/intel_ring_submission.c | 21 ++++-----
drivers/gpu/drm/i915/gt/mock_engine.c | 10 +++-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 14 +++---
drivers/gpu/drm/i915/i915_gpu_error.c | 2 +-
drivers/gpu/drm/i915/i915_request.h | 2 +-
drivers/gpu/drm/i915/i915_scheduler.c | 44 +++++++++--------
drivers/gpu/drm/i915/i915_scheduler.h | 25 ++++++----
drivers/gpu/drm/i915/i915_scheduler_types.h | 47 ++++++++++++-------
11 files changed, 136 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index b53ec6d7cdbe..2c6d1f47c002 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -401,7 +401,9 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
if (ban && intel_context_set_banned(ce))
continue;
- se->revoke_context(ce, ban ? engines->ctx->name : NULL, error);
+ se->ops->revoke_context(ce,
+ ban ? engines->ctx->name : NULL,
+ error);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 3bd89e385a3c..07e416cc6b57 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3196,6 +3196,23 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
}
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverride-init"
+
+static const struct i915_sched_ops execlists_ops = {
+ I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+ .tasklet = execlists_submission_tasklet,
+
+ .active_request = execlists_active_request,
+ .is_executing = execlists_is_executing,
+ .revoke_context = execlists_revoke_context,
+
+ .show = execlists_show,
+};
+
+#pragma GCC diagnostic pop
+
static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -3207,19 +3224,9 @@ static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
if (!el)
return NULL;
- i915_sched_init(&el->sched,
- i915->drm.dev,
- engine->name,
- engine->mask,
- execlists_submission_tasklet, engine,
- ENGINE_PHYSICAL);
-
- el->sched.active_request = execlists_active_request;
- el->sched.is_executing = execlists_is_executing;
- el->sched.revoke_context = execlists_revoke_context;
- el->sched.show = execlists_show;
-
- i915_sched_select_mode(&el->sched, I915_SCHED_MODE_DEADLINE);
+ i915_sched_init(&el->sched, i915->drm.dev,
+ engine->name, engine->mask,
+ &execlists_ops, engine);
if (INTEL_GEN(i915) >= 12)
el->flags |= GEN12_CSB_PARSE;
@@ -3608,6 +3615,13 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
to_request(signal)->sched.execution &= ~allowed;
}
+static const struct i915_sched_ops virtual_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE), /* mode inherited from siblings */
+
+ .tasklet = virtual_submission_tasklet,
+ .subclass = ENGINE_VIRTUAL,
+};
+
struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
@@ -3717,15 +3731,13 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
i915_sched_create(ve->base.i915->drm.dev,
ve->base.name,
ve->base.mask,
- virtual_submission_tasklet, ve,
- ENGINE_VIRTUAL);
+ &virtual_ops, ve);
if (!ve->base.sched) {
err = -ENOMEM;
goto err_put;
}
- ve->base.sched->flags = sched;
- tasklet_setup(&ve->base.sched->tasklet, virtual_submission_tasklet);
+ ve->base.sched->flags |= sched; /* override submission method */
ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index a53e01b937cf..ec971851e0bd 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -13,6 +13,7 @@
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
+#include "i915_scheduler.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
@@ -1202,6 +1203,12 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
return err;
}
+static const struct i915_sched_ops ring_ops = {
+ I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+ .tasklet = submission_tasklet,
+};
+
static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
{
struct ring_sched *rs;
@@ -1210,14 +1217,9 @@ static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
if (!rs)
return NULL;
- i915_sched_init(&rs->sched,
- engine->i915->drm.dev,
- engine->name,
- engine->mask,
- submission_tasklet, engine,
- ENGINE_PHYSICAL);
-
- i915_sched_select_mode(&rs->sched, I915_SCHED_MODE_DEADLINE);
+ i915_sched_init(&rs->sched, engine->i915->drm.dev,
+ engine->name, engine->mask,
+ &ring_ops, engine);
rs->active = rs->inflight;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 98a36c764536..931f175ce250 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1234,20 +1234,17 @@ static void passthrough_tasklet(struct tasklet_struct *t)
local_irq_enable();
}
+static const struct i915_sched_ops ring_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE),
+
+ .tasklet = passthrough_tasklet
+};
+
static struct i915_sched *init_sched(struct intel_engine_cs *engine)
{
- struct i915_sched *se;
-
- se = i915_sched_create(engine->i915->drm.dev,
- engine->name, engine->mask,
- passthrough_tasklet, engine,
- ENGINE_PHYSICAL);
- if (!se)
- return NULL;
-
- i915_sched_select_mode(se, I915_SCHED_MODE_NONE);
-
- return se;
+ return i915_sched_create(engine->i915->drm.dev,
+ engine->name, engine->mask,
+ &ring_ops, engine);
}
int intel_ring_submission_setup(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 78a12b9bcc0e..323c28cbe401 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -321,6 +321,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
return engine;
}
+static const struct i915_sched_ops mock_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE),
+
+ .tasklet = submission_tasklet,
+ .subclass = ENGINE_MOCK,
+};
+
int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -336,8 +343,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
engine->i915->drm.dev,
engine->name,
engine->mask,
- submission_tasklet, engine,
- ENGINE_MOCK);
+ &mock_ops, engine);
i915_sched_enable(&se->sched);
engine->sched = &se->sched;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 4f2cc608d514..093f6ffd1030 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -505,6 +505,12 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
intel_engine_set_irq_handler(engine, cs_irq_handler);
}
+static const struct i915_sched_ops guc_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(PRIORITY),
+
+ .tasklet = guc_submission_tasklet,
+};
+
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
/*
@@ -514,15 +520,11 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
engine->sched = i915_sched_create(engine->i915->drm.dev,
- engine->name,
- engine->mask,
- guc_submission_tasklet, engine,
- ENGINE_PHYSICAL);
+ engine->name, engine->mask,
+ &guc_sched_ops, engine);
if (!engine->sched)
return -ENOMEM;
- i915_sched_select_mode(engine->sched, I915_SCHED_MODE_PRIORITY);
-
guc_default_vfuncs(engine);
guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b682c8c63d20..4cdd47be55d7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1381,7 +1381,7 @@ capture_engine(struct intel_engine_cs *engine,
rcu_read_lock();
spin_lock_irqsave(&se->lock, flags);
- rq = se->active_request(se);
+ rq = se->ops->active_request(se);
if (rq)
capture = intel_engine_coredump_add_request(ee, rq,
ATOMIC_MAYFAIL);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index babe7ff8a181..02c16b47b01e 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -641,7 +641,7 @@ static inline bool i915_request_is_executing(const struct i915_request *rq)
if (i915_request_is_active(rq))
return true;
- return i915_request_get_scheduler(rq)->is_executing(rq);
+ return i915_request_get_scheduler(rq)->ops->is_executing(rq);
}
static inline bool i915_request_use_semaphores(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 3a22832acbe5..72063f8003b1 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -142,7 +142,8 @@ static bool match_ring(const struct i915_request *rq)
return ENGINE_READ(engine, RING_START) == i915_ggtt_offset(ring->vma);
}
-static const struct i915_request *active_request(struct i915_sched *se)
+const struct i915_request *
+i915_sched_default_active_request(struct i915_sched *se)
{
struct i915_request *request, *active = NULL;
@@ -182,9 +183,9 @@ static bool context_active(struct intel_context *ce)
return i915_active_fence_isset(&ce->timeline->last_request);
}
-static void revoke_context(struct intel_context *ce,
- const char *force,
- int error)
+void i915_sched_default_revoke_context(struct intel_context *ce,
+ const char *force,
+ int error)
{
/*
* Without backend support, we cannot remove the context from the
@@ -195,7 +196,7 @@ static void revoke_context(struct intel_context *ce,
"context revoked from %s", force);
}
-static bool not_executing(const struct i915_request *rq)
+bool i915_sched_default_is_executing(const struct i915_request *rq)
{
return false;
}
@@ -210,7 +211,7 @@ static void init_priolist(struct i915_priolist_root *const root)
pl->level = -1;
}
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode)
+static void select_mode(struct i915_sched *se, enum i915_sched_mode mode)
{
switch (min_t(int, mode, CONFIG_DRM_I915_SCHED)) {
case 2:
@@ -230,22 +231,22 @@ i915_sched_init(struct i915_sched *se,
struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass)
+ const struct i915_sched_ops *ops,
+ void *priv)
{
kref_init(&se->kref);
spin_lock_init(&se->lock);
- lockdep_set_subclass(&se->lock, subclass);
+ lockdep_set_subclass(&se->lock, ops->subclass);
mark_lock_used_irq(&se->lock);
se->dbg.dev = dev;
se->dbg.name = name;
- se->mask = mask;
-
- tasklet_setup(&se->tasklet, tasklet);
+ se->ops = ops;
se->priv = priv;
+ se->mask = mask;
+ tasklet_setup(&se->tasklet, ops->tasklet);
+ se->flags = ops->flags;
init_priolist(&se->queue);
INIT_LIST_HEAD(&se->requests);
@@ -253,24 +254,21 @@ i915_sched_init(struct i915_sched *se,
i915_sched_init_ipi(&se->ipi);
- se->active_request = active_request;
- se->is_executing = not_executing;
- se->revoke_context = revoke_context;
+ select_mode(se, ops->mode);
}
struct i915_sched *
i915_sched_create(struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass)
+ const struct i915_sched_ops *ops,
+ void *priv)
{
struct i915_sched *se;
se = kzalloc(sizeof(*se), GFP_KERNEL);
if (se)
- i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+ i915_sched_init(se, dev, name, mask, ops, priv);
return se;
}
@@ -1852,7 +1850,7 @@ void i915_sched_show(struct drm_printer *m,
rcu_read_lock();
spin_lock_irqsave(&se->lock, flags);
- rq = se->active_request(se);
+ rq = se->ops->active_request(se);
if (rq) {
struct intel_ring *ring = i915_request_get_ring(rq);
@@ -1937,8 +1935,8 @@ void i915_sched_show(struct drm_printer *m,
spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
- if (se->show)
- se->show(m, se, show_request, max);
+ if (se->ops->show)
+ se->ops->show(m, se, show_request, max);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index f20ca081db0b..2c387b1d7a54 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -47,16 +47,14 @@ i915_sched_init(struct i915_sched *se,
struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass);
+ const struct i915_sched_ops *ops,
+ void *priv);
struct i915_sched *
i915_sched_create(struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass);
+ const struct i915_sched_ops *ops,
+ void *priv);
void i915_sched_park(struct i915_sched *se);
void i915_sched_destroy(struct kref *kref);
@@ -78,8 +76,6 @@ static inline void i915_sched_put(struct i915_sched *se)
kref_put(&se->kref, i915_sched_destroy);
}
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
-
void i915_request_set_priority(struct i915_request *request, int prio);
void i915_request_set_deadline(struct i915_request *request, u64 deadline);
@@ -207,4 +203,17 @@ void i915_sched_show(struct drm_printer *m,
int indent),
unsigned int max);
+const struct i915_request *
+i915_sched_default_active_request(struct i915_sched *se);
+bool i915_sched_default_is_executing(const struct i915_request *rq);
+void i915_sched_default_revoke_context(struct intel_context *ce,
+ const char *force,
+ int error);
+
+#define I915_SCHED_DEFAULT_OPS(mode__) \
+ .mode = I915_SCHED_MODE_##mode__, \
+ .active_request = i915_sched_default_active_request, \
+ .is_executing = i915_sched_default_is_executing, \
+ .revoke_context = i915_sched_default_revoke_context
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index dcb3e8e76f80..d76a068f9e9d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -17,6 +17,7 @@
struct drm_printer;
struct i915_request;
+struct i915_sched;
struct intel_context;
enum {
@@ -36,24 +37,8 @@ enum i915_sched_mode {
I915_SCHED_MODE_DEADLINE, /* reorder to meet soft deadlines; fair */
};
-/**
- * struct i915_sched - funnels requests towards hardware
- *
- * The struct i915_sched captures all the requests as they become ready
- * to execute (on waking the i915_request.submit fence) puts them into
- * a queue where they may be reordered according to priority and then
- * wakes the backend tasklet to feed the queue to HW.
- */
-struct i915_sched {
- spinlock_t lock; /* protects the scheduling lists and queue */
-
- /**
- * @priv: private opaque pointer reserved for use by the owner.
- */
- void *priv;
-
- unsigned long flags;
- unsigned long mask; /* available scheduling channels */
+struct i915_sched_ops {
+ enum i915_sched_mode mode;
const struct i915_request *(*active_request)(struct i915_sched *se);
@@ -71,6 +56,32 @@ struct i915_sched {
int indent),
unsigned int max);
+ void (*tasklet)(struct tasklet_struct *t);
+ unsigned long flags;
+ int subclass;
+};
+
+/**
+ * struct i915_sched - funnels requests towards hardware
+ *
+ * The struct i915_sched captures all the requests as they become ready
+ * to execute (on waking the i915_request.submit fence) puts them into
+ * a queue where they may be reordered according to priority and then
+ * wakes the backend tasklet to feed the queue to HW.
+ */
+struct i915_sched {
+ const struct i915_sched_ops *ops;
+
+ /**
+ * @priv: private opaque pointer reserved for use by the owner.
+ */
+ void *priv;
+
+ unsigned long flags;
+ unsigned long mask; /* available scheduling channels */
+
+ spinlock_t lock; /* protects the scheduling lists and queue */
+
struct list_head requests; /* active request, on HW */
struct list_head hold; /* ready requests, but on hold */
/**
--
2.20.1
More information about the Intel-gfx-trybot
mailing list