[PATCH 70/75] legacy-passthru
Chris Wilson
chris at chris-wilson.co.uk
Tue Feb 2 00:36:36 UTC 2021
---
drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 +
.../gpu/drm/i915/gt/intel_ring_submission.c | 155 +++++++++++-------
2 files changed, 95 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 917341469150..f50ed6c23f94 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -303,6 +303,7 @@ struct intel_engine_cs {
#define I915_ENGINE_IS_VIRTUAL BIT(4)
#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(5)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(6)
+#define I915_ENGINE_NEEDS_WA_TAIL_WRITE BIT(7)
unsigned int flags;
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index ad7c5ec63f8a..1e377685c491 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -411,17 +411,6 @@ static void reset_cancel(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&se->lock, flags);
}
-static void i9xx_submit_request(struct i915_request *request)
-{
- struct intel_engine_cs *engine = i915_request_get_engine(request);
-
- i915_request_submit(request, engine);
- wmb(); /* paranoid flush writes out of the WCB before mmio */
-
- ENGINE_WRITE(engine, RING_TAIL,
- intel_ring_set_tail(request->ring, request->tail));
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
i915_vma_put(ce->state);
@@ -940,52 +929,9 @@ static const struct intel_context_ops ring_context_ops = {
.destroy = ring_context_destroy,
};
-static void gen6_bsd_submit_request(struct i915_request *request)
+static void set_default_submission(struct intel_engine_cs *engine)
{
- struct intel_uncore *uncore = i915_request_get_engine(request)->uncore;
-
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- /* Every tail move must follow the sequence below */
-
- /* Disable notification that the ring is IDLE. The GT
- * will then assume that it is busy and bring it out of rc6.
- */
- intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
-
- /* Clear the context id. Here be magic! */
- intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
-
- /* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (__intel_wait_for_register_fw(uncore,
- GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_INDICATOR,
- 0,
- 1000, 0, NULL))
- drm_err(&uncore->i915->drm,
- "timed out waiting for the BSD ring to wake up\n");
-
- /* Now that the ring is fully powered up, update the tail */
- i9xx_submit_request(request);
-
- /* Let the ring send IDLE messages to the GT again,
- * and so let it sleep to conserve power when idle.
- */
- intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
-
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-}
-
-static void i9xx_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->sched->submit_request = i9xx_submit_request;
-}
-
-static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
-{
- engine->sched->submit_request = gen6_bsd_submit_request;
+ engine->sched->submit_request = i915_request_enqueue;
}
static void ring_release(struct intel_engine_cs *engine)
@@ -1071,7 +1017,7 @@ static void setup_common(struct intel_engine_cs *engine)
else
engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
- engine->set_default_submission = i9xx_set_default_submission;
+ engine->set_default_submission = set_default_submission;
}
static void setup_rcs(struct intel_engine_cs *engine)
@@ -1108,9 +1054,8 @@ static void setup_vcs(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
if (INTEL_GEN(i915) >= 6) {
- /* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN(i915, 6))
- engine->set_default_submission = gen6_bsd_set_default_submission;
+ if (IS_GEN(engine->i915, 6))
+ engine->flags |= I915_ENGINE_NEEDS_WA_TAIL_WRITE;
engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
@@ -1202,6 +1147,94 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
return err;
}
+/*
+ * __write_tail - post the request's tail to the engine's RING_TAIL register,
+ * kicking the GPU to execute the ring contents up to rq->tail. Callers are
+ * responsible for any ordering (wmb) or power/wake workarounds; see
+ * write_tail() and wa_write_tail().
+ */
+static void __write_tail(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ ENGINE_WRITE(engine, RING_TAIL,
+ intel_ring_set_tail(rq->ring, rq->tail));
+}
+
+/*
+ * wa_write_tail - RING_TAIL update with the Gen6 BSD (VCS) wake workaround.
+ *
+ * Moved verbatim from the old gen6_bsd_submit_request(): the ring must be
+ * taken out of its power-saving sleep state before the tail write lands,
+ * otherwise the update can be lost. Selected at runtime via the
+ * I915_ENGINE_NEEDS_WA_TAIL_WRITE engine flag (set for VCS on Gen6 only).
+ */
+static void wa_write_tail(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ struct intel_uncore *uncore = engine->uncore;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 1000, 0, NULL))
+ drm_err(&uncore->i915->drm,
+ "timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ __write_tail(engine, rq);
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+/*
+ * write_tail - flush pending ring writes, then update RING_TAIL via the
+ * plain or workaround path depending on the engine's flags.
+ *
+ * The wmb() was previously in i9xx_submit_request(); it must precede the
+ * mmio tail write so command data in the write-combining buffer is visible
+ * to the GPU before it starts executing.
+ */
+static void write_tail(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
+
+ if (engine->flags & I915_ENGINE_NEEDS_WA_TAIL_WRITE)
+ wa_write_tail(engine, rq);
+ else
+ __write_tail(engine, rq);
+}
+
+/*
+ * passthrough_tasklet - scheduler tasklet for the legacy ring backend.
+ *
+ * Replaces the direct i9xx_submit_request() path: requests are now enqueued
+ * through i915_request_enqueue() and this tasklet drains the scheduler's
+ * priority queue in order, submitting every request to the engine. Only a
+ * single RING_TAIL write is issued, for the last request drained, since the
+ * tail covers everything submitted before it.
+ */
+static void passthrough_tasklet(struct tasklet_struct *t)
+{
+ struct i915_sched *se = from_tasklet(se, t, tasklet);
+ struct intel_engine_cs *engine = se->priv;
+ struct i915_request *last = NULL;
+ struct i915_priolist *pl;
+
+ /* Cheap unlocked peek; a later enqueue rekicks the tasklet. */
+ if (i915_sched_is_idle(se))
+ return;
+
+ /*
+ * Keep irqs off across both the queue drain and the tail write so the
+ * submission appears atomic with respect to the interrupt handler.
+ * NOTE(review): tail is written after dropping se->lock — presumably
+ * safe because only this tasklet writes RING_TAIL; confirm no other
+ * path races with it.
+ */
+ local_irq_disable();
+
+ spin_lock(&se->lock);
+ for_each_priolist(pl, &se->queue) {
+ struct i915_request *rq, *rn;
+
+ priolist_for_each_request_safe(rq, rn, pl) {
+ __i915_request_submit(rq, engine);
+ last = rq;
+ }
+
+ i915_priolist_advance(&se->queue, pl);
+ }
+ spin_unlock(&se->lock);
+
+ /* One mmio write for the whole batch: the last tail covers them all. */
+ if (last)
+ write_tail(engine, last);
+
+ local_irq_enable();
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1231,7 +1264,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
engine->sched = i915_sched_create(engine->i915->drm.dev,
engine->name,
engine->mask,
- NULL, engine,
+ passthrough_tasklet, engine,
ENGINE_PHYSICAL);
if (!engine->sched) {
err = -ENOMEM;
--
2.20.1
More information about the Intel-gfx-trybot
mailing list