[PATCH 46/46] drm/i915: Write protect the scheduler vfuncs
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sun, 7 Feb 2021 20:00:07 UTC
Move the scheduler backend vfuncs out of struct i915_sched and into
static const struct i915_sched_ops tables, so the function pointers are
write protected (kept in rodata) and each backend selects its scheduling
mode at init time instead of via a later i915_sched_select_mode() call.
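For illustration, a backend now declares its ops roughly like this
(sketch only, not part of the diff below; example_sched_ops and
example_submission_tasklet are placeholder names, the real tables are in
the hunks that follow):

    static const struct i915_sched_ops example_sched_ops = {
    	/* mode plus the default active_request/revoke_context hooks */
    	I915_SCHED_DEFAULT_OPS(DEADLINE),

    	/* backend-specific submission tasklet */
    	.tasklet = example_submission_tasklet,
    };

    /* hand the read-only ops table to the scheduler at init */
    i915_sched_init(&se->sched, i915->drm.dev,
    		    engine->name, engine->mask,
    		    &example_sched_ops, engine);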
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 4 +-
.../drm/i915/gt/intel_execlists_submission.c | 53 ++++++++++++-------
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 18 ++++---
.../gpu/drm/i915/gt/intel_ring_submission.c | 21 ++++----
drivers/gpu/drm/i915/gt/mock_engine.c | 10 +++-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 14 ++---
drivers/gpu/drm/i915/i915_scheduler.c | 34 ++++++------
drivers/gpu/drm/i915/i915_scheduler.h | 27 ++++++----
drivers/gpu/drm/i915/i915_scheduler_types.h | 47 +++++++++-------
9 files changed, 135 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index b53ec6d7cdbe..2c6d1f47c002 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -401,7 +401,9 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban)
if (ban && intel_context_set_banned(ce))
continue;
- se->revoke_context(ce, ban ? engines->ctx->name : NULL, error);
+ se->ops->revoke_context(ce,
+ ban ? engines->ctx->name : NULL,
+ error);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index e159d5ff82a8..90d053ed58c2 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3082,6 +3082,22 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
}
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverride-init"
+
+static const struct i915_sched_ops execlists_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+ .tasklet = execlists_submission_tasklet,
+
+ .active_request = execlists_active_request,
+ .revoke_context = execlists_revoke_context,
+
+ .show = execlists_show,
+};
+
+#pragma GCC diagnostic pop
+
static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -3093,18 +3109,9 @@ static struct i915_sched *init_execlists(struct intel_engine_cs *engine)
if (!el)
return NULL;
- i915_sched_init(&el->sched,
- i915->drm.dev,
- engine->name,
- engine->mask,
- execlists_submission_tasklet, engine,
- ENGINE_PHYSICAL);
-
- el->sched.active_request = execlists_active_request;
- el->sched.revoke_context = execlists_revoke_context;
- el->sched.show = execlists_show;
-
- i915_sched_select_mode(&el->sched, I915_SCHED_MODE_DEADLINE);
+ i915_sched_init(&el->sched, i915->drm.dev,
+ engine->name, engine->mask,
+ &execlists_sched_ops, engine);
if (intel_engine_has_preemption(engine)) {
__set_bit(I915_SCHED_BUSYWAIT_BIT, &el->sched.flags);
@@ -3488,6 +3495,20 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
to_request(signal)->sched.execution &= ~allowed;
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverride-init"
+
+static const struct i915_sched_ops virtual_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE), /* mode inherited from siblings */
+
+ .revoke_context = execlists_revoke_context,
+
+ .tasklet = virtual_submission_tasklet,
+ .subclass = ENGINE_VIRTUAL,
+};
+
+#pragma GCC diagnostic pop
+
struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
@@ -3597,17 +3618,13 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
i915_sched_create(ve->base.i915->drm.dev,
ve->base.name,
ve->base.mask,
- virtual_submission_tasklet, ve,
- ENGINE_VIRTUAL);
+ &virtual_sched_ops, ve);
if (!ve->base.sched) {
err = -ENOMEM;
goto err_put;
}
- ve->base.sched->flags = sched;
-
- ve->base.sched->revoke_context = execlists_revoke_context;
- tasklet_setup(&ve->base.sched->tasklet, virtual_submission_tasklet);
+ ve->base.sched->flags |= sched; /* override submission method */
ve->base.breadcrumbs = virtual_engine_initial_hint(ve)->breadcrumbs;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 9be7f981ee19..933e62f6c054 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -13,6 +13,7 @@
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
+#include "i915_scheduler.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
@@ -1167,6 +1168,12 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
return err;
}
+static const struct i915_sched_ops ring_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(DEADLINE),
+
+ .tasklet = submission_tasklet,
+};
+
static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
{
struct ring_sched *rs;
@@ -1175,14 +1182,9 @@ static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
if (!rs)
return NULL;
- i915_sched_init(&rs->sched,
- engine->i915->drm.dev,
- engine->name,
- engine->mask,
- submission_tasklet, engine,
- ENGINE_PHYSICAL);
-
- i915_sched_select_mode(&rs->sched, I915_SCHED_MODE_DEADLINE);
+ i915_sched_init(&rs->sched, engine->i915->drm.dev,
+ engine->name, engine->mask,
+ &ring_sched_ops, engine);
rs->active = rs->inflight;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 4011365fa0a3..b5ebac3ad7fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1231,20 +1231,17 @@ static void passthrough_tasklet(struct tasklet_struct *t)
local_irq_enable();
}
+static const struct i915_sched_ops ring_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE),
+
+ .tasklet = passthrough_tasklet
+};
+
static struct i915_sched *init_sched(struct intel_engine_cs *engine)
{
- struct i915_sched *se;
-
- se = i915_sched_create(engine->i915->drm.dev,
- engine->name, engine->mask,
- passthrough_tasklet, engine,
- ENGINE_PHYSICAL);
- if (!se)
- return NULL;
-
- i915_sched_select_mode(se, I915_SCHED_MODE_NONE);
-
- return se;
+ return i915_sched_create(engine->i915->drm.dev,
+ engine->name, engine->mask,
+ &ring_sched_ops, engine);
}
int intel_ring_submission_setup(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 6ae6ae539d35..23607501a578 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -300,6 +300,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
return engine;
}
+static const struct i915_sched_ops mock_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(NONE),
+
+ .tasklet = submission_tasklet,
+ .subclass = ENGINE_MOCK,
+};
+
int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -315,8 +322,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
engine->i915->drm.dev,
engine->name,
engine->mask,
- submission_tasklet, engine,
- ENGINE_MOCK);
+ &mock_sched_ops, engine);
i915_sched_enable(&se->sched);
engine->sched = &se->sched;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 89d8a89789a2..f7bf4e5f582d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -500,6 +500,12 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
intel_engine_set_irq_handler(engine, cs_irq_handler);
}
+static const struct i915_sched_ops guc_sched_ops = {
+ I915_SCHED_DEFAULT_OPS(PRIORITY),
+
+ .tasklet = guc_submission_tasklet,
+};
+
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
/*
@@ -509,15 +515,11 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(INTEL_GEN(engine->i915) < 11);
engine->sched = i915_sched_create(engine->i915->drm.dev,
- engine->name,
- engine->mask,
- guc_submission_tasklet, engine,
- ENGINE_PHYSICAL);
+ engine->name, engine->mask,
+ &guc_sched_ops, engine);
if (!engine->sched)
return -ENOMEM;
- i915_sched_select_mode(engine->sched, I915_SCHED_MODE_PRIORITY);
-
guc_default_vfuncs(engine);
guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 012905b5a959..db1c26cd33c3 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -131,7 +131,7 @@ static void init_ipi(struct i915_sched_ipi *ipi)
ipi->list = NULL;
}
-static struct i915_request *
+struct i915_request *
i915_sched_default_active_request(const struct i915_sched *se)
{
struct i915_request *rq, *active = NULL;
@@ -159,7 +159,7 @@ static bool context_active(struct intel_context *ce)
return i915_active_fence_isset(&ce->timeline->last_request);
}
-static void
+void
i915_sched_default_revoke_context(struct intel_context *ce,
const char *force,
int error)
@@ -173,7 +173,7 @@ i915_sched_default_revoke_context(struct intel_context *ce,
"context revoked from %s", force);
}
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode)
+static void select_mode(struct i915_sched *se, enum i915_sched_mode mode)
{
switch (min_t(int, mode, CONFIG_DRM_I915_SCHED)) {
case I915_SCHED_MODE_DEADLINE:
@@ -205,22 +205,22 @@ i915_sched_init(struct i915_sched *se,
struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass)
+ const struct i915_sched_ops *ops,
+ void *priv)
{
kref_init(&se->kref);
spin_lock_init(&se->lock);
- lockdep_set_subclass(&se->lock, subclass);
+ lockdep_set_subclass(&se->lock, ops->subclass);
mark_lock_used_irq(&se->lock);
se->dbg.dev = dev;
se->dbg.name = name;
- se->mask = mask;
-
- tasklet_setup(&se->tasklet, tasklet);
+ se->ops = ops;
se->priv = priv;
+ se->mask = mask;
+ tasklet_setup(&se->tasklet, ops->tasklet);
+ se->flags = ops->flags;
init_priolist(&se->queue);
INIT_LIST_HEAD(&se->requests);
@@ -228,23 +228,21 @@ i915_sched_init(struct i915_sched *se,
init_ipi(&se->ipi);
- se->active_request = i915_sched_default_active_request;
- se->revoke_context = i915_sched_default_revoke_context;
+ select_mode(se, ops->mode);
}
struct i915_sched *
i915_sched_create(struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass)
+ const struct i915_sched_ops *ops,
+ void *priv)
{
struct i915_sched *se;
se = kzalloc(sizeof(*se), GFP_KERNEL);
if (se)
- i915_sched_init(se, dev, name, mask, tasklet, priv, subclass);
+ i915_sched_init(se, dev, name, mask, ops, priv);
return se;
}
@@ -1913,8 +1911,8 @@ void i915_sched_show(struct drm_printer *m,
spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
- if (se->show)
- se->show(m, se, show_request, max);
+ if (se->ops->show)
+ se->ops->show(m, se, show_request, max);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 382637f25bd1..0c95715307cf 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -47,16 +47,14 @@ i915_sched_init(struct i915_sched *se,
struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass);
+ const struct i915_sched_ops *ops,
+ void *priv);
struct i915_sched *
i915_sched_create(struct device *dev,
const char *name,
unsigned long mask,
- void (*tasklet)(struct tasklet_struct *t),
- void *priv,
- unsigned int subclass);
+ const struct i915_sched_ops *ops,
+ void *priv);
void i915_sched_park(struct i915_sched *se);
void i915_sched_destroy(struct kref *kref);
@@ -78,8 +76,6 @@ static inline void i915_sched_put(struct i915_sched *se)
kref_put(&se->kref, i915_sched_destroy);
}
-void i915_sched_select_mode(struct i915_sched *se, enum i915_sched_mode mode);
-
void i915_request_set_priority(struct i915_request *request, int prio);
void i915_request_set_deadline(struct i915_request *request, u64 deadline);
@@ -197,8 +193,8 @@ i915_sched_get_active_request(const struct i915_sched *se)
{
lockdep_assert_held(&se->lock);
- if (se->active_request)
- return se->active_request(se);
+ if (se->ops->active_request)
+ return se->ops->active_request(se);
return NULL;
}
@@ -224,4 +220,15 @@ void i915_sched_show(struct drm_printer *m,
int indent),
unsigned int max);
+struct i915_request *
+i915_sched_default_active_request(const struct i915_sched *se);
+void i915_sched_default_revoke_context(struct intel_context *ce,
+ const char *force,
+ int error);
+
+#define I915_SCHED_DEFAULT_OPS(mode__) \
+ .mode = I915_SCHED_MODE_##mode__, \
+ .active_request = i915_sched_default_active_request, \
+ .revoke_context = i915_sched_default_revoke_context
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 5552b137fff5..79ecee43698a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -17,6 +17,7 @@
struct drm_printer;
struct i915_request;
+struct i915_sched;
struct intel_context;
enum {
@@ -56,24 +57,8 @@ enum i915_sched_mode {
I915_SCHED_MODE_DEADLINE, /* reorder to meet soft deadlines; fair */
};
-/**
- * struct i915_sched - funnels requests towards hardware
- *
- * The struct i915_sched captures all the requests as they become ready
- * to execute (on waking the i915_request.submit fence) puts them into
- * a queue where they may be reordered according to priority and then
- * wakes the backend tasklet to feed the queue to HW.
- */
-struct i915_sched {
- spinlock_t lock; /* protects the scheduling lists and queue */
-
- /**
- * @priv: private opaque pointer reserved for use by the owner.
- */
- void *priv;
-
- unsigned long flags;
- unsigned long mask; /* available scheduling channels */
+struct i915_sched_ops {
+ enum i915_sched_mode mode;
struct i915_request *(*active_request)(const struct i915_sched *se);
@@ -89,6 +74,32 @@ struct i915_sched {
int indent),
unsigned int max);
+ void (*tasklet)(struct tasklet_struct *t);
+ unsigned long flags;
+ int subclass;
+};
+
+/**
+ * struct i915_sched - funnels requests towards hardware
+ *
+ * The struct i915_sched captures all the requests as they become ready
+ * to execute (on waking the i915_request.submit fence) puts them into
+ * a queue where they may be reordered according to priority and then
+ * wakes the backend tasklet to feed the queue to HW.
+ */
+struct i915_sched {
+ const struct i915_sched_ops *ops;
+
+ /**
+ * @priv: private opaque pointer reserved for use by the owner.
+ */
+ void *priv;
+
+ unsigned long flags;
+ unsigned long mask; /* available scheduling channels */
+
+ spinlock_t lock; /* protects the scheduling lists and queue */
+
struct list_head requests; /* active request, on HW */
struct list_head hold; /* ready requests, but on hold */
--
2.20.1