[PATCH 69/69] el-sched
Chris Wilson <chris@chris-wilson.co.uk>
Mon Feb 1 07:59:49 UTC 2021
---
drivers/gpu/drm/i915/gt/intel_engine.h | 27 -
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 13 -
drivers/gpu/drm/i915/gt/intel_engine_types.h | 130 +--
.../drm/i915/gt/intel_execlists_submission.c | 803 +++++++++---------
.../gpu/drm/i915/gt/intel_execlists_types.h | 157 ++++
drivers/gpu/drm/i915/gt/intel_gt_irq.c | 15 +-
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 184 ++--
drivers/gpu/drm/i915/gt/selftest_execlists.c | 30 +-
drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 119 +--
drivers/gpu/drm/i915/i915_gpu_error.c | 60 --
drivers/gpu/drm/i915/i915_gpu_error.h | 3 -
12 files changed, 720 insertions(+), 823 deletions(-)
create mode 100644 drivers/gpu/drm/i915/gt/intel_execlists_types.h
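For orientation, since the patch carries no commit-message body above the diffstat: this step drops struct intel_engine_execlists from struct intel_engine_cs and gathers the execlist submission state into a standalone struct intel_execlists, declared in the new intel_execlists_types.h, which embeds the struct i915_sched that drives it. The submission tasklet and its helpers are rewritten to operate on that container instead of on the engine. Below is a rough sketch of the new type, reconstructed only from the members the hunks in this patch touch; field order, comments, and anything the patch does not reference are assumptions, not the verbatim header.

/*
 * Sketch of struct intel_execlists as implied by the hunks below; not a
 * copy of intel_execlists_types.h.
 */
struct intel_execlists {
        struct i915_sched sched;        /* embedded scheduler: tasklet, lock, queue, priv */

        unsigned int id;                /* engine id; BIT(id) replaces engine->mask tests */
        unsigned long flags;            /* e.g. GEN12_CSB_PARSE instead of an INTEL_GEN() check */

        struct timer_list timer;        /* timeslice expiry */
        struct timer_list preempt;      /* forced-preemption timeout */

        u32 ccid;
        u32 yield;
        u32 error_interrupt;
        u32 reset_ccid;

        u32 *pause;                     /* was status_page.addr[I915_GEM_HWS_PREEMPT] */
        u32 __iomem *submit_reg;
        u32 __iomem *ctrl_reg;

        struct i915_request * const *active;
        struct i915_request *inflight[EXECLIST_MAX_PORTS + 1];
        struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
        unsigned int port_mask;

        struct rb_root_cached virtual;

        u32 *csb_write;
        u64 *csb_status;
        u8 csb_size;
        u8 csb_head;

        I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};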
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index bc07c96ab48c..143255c34543 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -99,31 +99,6 @@ struct intel_gt;
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-static inline unsigned int
-execlists_num_ports(const struct intel_engine_execlists * const execlists)
-{
- return execlists->port_mask + 1;
-}
-
-static inline struct i915_request *
-execlists_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *cur, * const *old, *active;
-
- cur = READ_ONCE(execlists->active);
- smp_rmb(); /* pairs with overwrite protection in process_csb() */
- do {
- old = cur;
-
- active = READ_ONCE(*cur);
- cur = READ_ONCE(execlists->active);
-
- smp_rmb(); /* and complete the seqlock retry */
- } while (unlikely(cur != old));
-
- return active;
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -206,8 +181,6 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone);
-void intel_engine_init_execlists(struct intel_engine_cs *engine);
-
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
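The execlists_active() helper deleted above is a seqlock-style read: sample the active pointer, dereference it, then re-sample, and retry if the CSB handler republished active (switching it between pending[] and inflight[]) in the meantime. The writer side of that pairing survives in this patch, in process_csb() and cancel_port_requests() further down, where the new array is fully written before active is redirected behind an smp_wmb(). A minimal sketch of the pattern with generic names (not the i915 API; the real helper presumably reappears next to the new struct intel_execlists, outside this section):

/* Illustrative publish/retry pairing; generic names, kernel-style barriers. */
struct request;

struct el_ports {
        struct request * const *active;         /* points into inflight[] or pending[] */
        struct request *inflight[3];
        struct request *pending[3];
};

/* Writer (cf. process_csb() below): fill the new array, then republish. */
static void publish(struct el_ports *el)
{
        /* el->pending[] has already been populated by the dequeue path */
        smp_wmb();                              /* order the array writes before the publish */
        WRITE_ONCE(el->active, el->pending);
}

/* Reader (the helper removed above): retry until the snapshot is stable. */
static struct request *read_active(const struct el_ports *el)
{
        struct request * const *cur, * const *old, *rq;

        cur = READ_ONCE(el->active);
        smp_rmb();                              /* pairs with the writer's smp_wmb() */
        do {
                old = cur;
                rq = READ_ONCE(*cur);
                cur = READ_ONCE(el->active);    /* did the writer move it meanwhile? */
                smp_rmb();
        } while (unlikely(cur != old));

        return rq;
}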
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 60b58fe8e562..4e8398600628 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -563,19 +563,6 @@ int intel_engines_init_mmio(struct intel_gt *gt)
return err;
}
-void intel_engine_init_execlists(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- execlists->port_mask = 1;
- GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
- GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
-
- memset(execlists->pending, 0, sizeof(execlists->pending));
- execlists->active =
- memset(execlists->inflight, 0, sizeof(execlists->inflight));
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
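intel_engine_init_execlists() goes away with the engine-embedded state; the equivalent setup presumably happens wherever the new struct intel_execlists is constructed, which is not visible in this section. A hypothetical sketch of that initialisation, transplanted from the deleted helper (the function name and its call site are assumptions):

/*
 * Hypothetical: mirrors the deleted intel_engine_init_execlists(), retargeted
 * at the standalone container. Where this actually lives is not shown here.
 */
static void init_execlists_ports(struct intel_execlists *el)
{
        el->port_mask = 1;                      /* two ports: ELSP[0] and ELSP[1] */
        GEM_BUG_ON(!is_power_of_2(execlists_num_ports(el)));
        GEM_BUG_ON(execlists_num_ports(el) > EXECLIST_MAX_PORTS);

        memset(el->pending, 0, sizeof(el->pending));
        el->active = memset(el->inflight, 0, sizeof(el->inflight));
}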
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ab5f8b9c2b3a..83d7629eae65 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -20,7 +20,6 @@
#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
-#include "i915_scheduler_types.h"
#include "i915_selftest.h"
#include "intel_breadcrumbs_types.h"
#include "intel_sseu.h"
@@ -59,6 +58,7 @@ struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
+struct i915_sched;
struct i915_sched_attr;
struct intel_gt;
struct intel_ring;
@@ -126,132 +126,6 @@ enum intel_engine_id {
/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
-struct st_preempt_hang {
- struct completion completion;
- unsigned int count;
-};
-
-/**
- * struct intel_engine_execlists - execlist submission queue and port state
- *
- * The struct intel_engine_execlists represents the combined logical state of
- * driver and the hardware state for execlist mode of submission.
- */
-struct intel_engine_execlists {
- /**
- * @timer: kick the current context if its timeslice expires
- */
- struct timer_list timer;
-
- /**
- * @preempt: reset the current context if it fails to give way
- */
- struct timer_list preempt;
-
- /**
- * @ccid: identifier for contexts submitted to this engine
- */
- u32 ccid;
-
- /**
- * @yield: CCID at the time of the last semaphore-wait interrupt.
- *
- * Instead of leaving a semaphore busy-spinning on an engine, we would
- * like to switch to another ready context, i.e. yielding the semaphore
- * timeslice.
- */
- u32 yield;
-
- /**
- * @error_interrupt: CS Master EIR
- *
- * The CS generates an interrupt when it detects an error. We capture
- * the first error interrupt, record the EIR and schedule the tasklet.
- * In the tasklet, we process the pending CS events to ensure we have
- * the guilty request, and then reset the engine.
- *
- * Low 16b are used by HW, with the upper 16b used as the enabling mask.
- * Reserve the upper 16b for tracking internal errors.
- */
- u32 error_interrupt;
-#define ERROR_CSB BIT(31)
-#define ERROR_PREEMPT BIT(30)
-
- /**
- * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
- */
- u32 reset_ccid;
-
- /**
- * @submit_reg: gen-specific execlist submission register
- * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
- * the ExecList Submission Queue Contents register array for Gen11+
- */
- u32 __iomem *submit_reg;
-
- /**
- * @ctrl_reg: the enhanced execlists control register, used to load the
- * submit queue on the HW and to request preemptions to idle
- */
- u32 __iomem *ctrl_reg;
-
-#define EXECLIST_MAX_PORTS 2
- /**
- * @active: the currently known context executing on HW
- */
- struct i915_request * const *active;
- /**
- * @inflight: the set of contexts submitted and acknowledged by HW
- *
- * The set of inflight contexts is managed by reading CS events
- * from the HW. On a context-switch event (not preemption), we
- * know the HW has transitioned from port0 to port1, and we
- * advance our inflight/active tracking accordingly.
- */
- struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
- /**
- * @pending: the next set of contexts submitted to ELSP
- *
- * We store the array of contexts that we submit to HW (via ELSP) and
- * promote them to the inflight array once HW has signaled the
- * preemption or idle-to-active event.
- */
- struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
-
- /**
- * @port_mask: number of execlist ports - 1
- */
- unsigned int port_mask;
-
- struct rb_root_cached virtual;
-
- /**
- * @csb_write: control register for Context Switch buffer
- *
- * Note this register may be either mmio or HWSP shadow.
- */
- u32 *csb_write;
-
- /**
- * @csb_status: status array for Context Switch buffer
- *
- * Note these registers may be either mmio or HWSP shadow.
- */
- u64 *csb_status;
-
- /**
- * @csb_size: context status buffer FIFO size
- */
- u8 csb_size;
-
- /**
- * @csb_head: context status buffer head
- */
- u8 csb_head;
-
- I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
-};
-
#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
@@ -414,8 +288,6 @@ struct intel_engine_cs {
void (*release)(struct intel_engine_cs *engine);
- struct intel_engine_execlists execlists;
-
/*
* Keep track of completed timelines on this engine for early
* retirement with the goal of quickly enabling powersaving as
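With the execlists member removed from struct intel_engine_cs, engine-facing entry points reach the execlists state through the embedded scheduler, and the submission code gets back to the engine through the sched.priv backpointer; that is what to_execlists() does in the submission hunks below. A condensed sketch of the two directions (to_engine() is only an illustrative name here; the patch itself just reads el->sched.priv inline):

/* Navigation after this patch: engine -> execlists via the embedded
 * scheduler, execlists -> engine via sched.priv.
 */
static struct intel_execlists *to_execlists(struct intel_engine_cs *engine)
{
        return container_of(engine->sched, struct intel_execlists, sched);
}

static struct intel_engine_cs *to_engine(struct intel_execlists *el)
{
        return el->sched.priv;                  /* engine backpointer held by the scheduler */
}

This is why ENGINE_TRACE(), intel_engine_pm_is_awake() and similar callers in the hunks below are now fed el->sched.priv where they previously took the engine directly.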
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 984eb234fafb..92d6258bb690 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -117,6 +117,7 @@
#include "intel_engine_pm.h"
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
+#include "intel_execlists_types.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -233,7 +234,7 @@ active_request(const struct intel_timeline * const tl, struct i915_request *rq)
return __active_request(tl, rq, 0);
}
-static void ring_set_paused(const struct intel_engine_cs *engine, int state)
+static void ring_set_paused(const struct intel_execlists *el, int state)
{
/*
* We inspect HWS_PREEMPT with a semaphore inside
@@ -241,7 +242,7 @@ static void ring_set_paused(const struct intel_engine_cs *engine, int state)
* the ring is paused as the semaphore will busywait
* until the dword is false.
*/
- engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ WRITE_ONCE(*el->pause, state);
if (state)
wmb();
}
@@ -269,19 +270,19 @@ static struct i915_request *first_request(const struct i915_sched *se)
}
static struct virtual_engine *
-first_virtual_engine(const struct intel_engine_cs *engine)
+first_virtual_engine(const struct intel_execlists *el)
{
- return rb_entry_safe(rb_first_cached(&engine->execlists.virtual),
+ return rb_entry_safe(rb_first_cached(&el->virtual),
struct virtual_engine,
- nodes[engine->id].rb);
+ nodes[el->id].rb);
}
static const struct i915_request *
-first_virtual(const struct intel_engine_cs *engine)
+first_virtual(const struct intel_execlists *el)
{
struct virtual_engine *ve;
- ve = first_virtual_engine(engine);
+ ve = first_virtual_engine(el);
if (!ve)
return NULL;
@@ -303,14 +304,13 @@ dl_before(const struct i915_request *next, const struct i915_request *prev)
return !prev || (next && rq_deadline(next) < rq_deadline(prev));
}
-static bool need_preempt(const struct intel_engine_cs *engine,
+static bool need_preempt(const struct intel_execlists *el,
const struct i915_request *rq)
{
- const struct i915_sched *se = engine->sched;
const struct i915_request *first = NULL;
const struct i915_request *next;
- if (!i915_sched_use_busywait(se))
+ if (!i915_sched_use_busywait(&el->sched))
return false;
/*
@@ -328,7 +328,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
- next = next_elsp_request(se, rq);
+ next = next_elsp_request(&el->sched, rq);
if (dl_before(next, first))
first = next;
@@ -342,11 +342,11 @@ static bool need_preempt(const struct intel_engine_cs *engine,
* ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
* context, its priority would not exceed ELSP[0] aka last_prio.
*/
- next = first_request(se);
+ next = first_request(&el->sched);
if (dl_before(next, first))
first = next;
- next = first_virtual(engine);
+ next = first_virtual(el);
if (dl_before(next, first))
first = next;
@@ -360,7 +360,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
* switching between contexts is noticeable, so we try to keep
* the deadline shuffling only to timeslice boundaries.
*/
- ENGINE_TRACE(engine,
+ ENGINE_TRACE(el->sched.priv,
"preempt for first=%llx:%llu, dl=%llu, prio=%d?\n",
first->fence.context,
first->fence.seqno,
@@ -370,7 +370,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
}
__maybe_unused static bool
-assert_priority_queue(const struct intel_engine_cs *engine,
+assert_priority_queue(const struct intel_execlists *el,
const struct i915_request *prev,
const struct i915_request *next)
{
@@ -387,7 +387,7 @@ assert_priority_queue(const struct intel_engine_cs *engine,
if (rq_deadline(prev) <= rq_deadline(next))
return true;
- ENGINE_TRACE(engine,
+ ENGINE_TRACE(el->sched.priv,
"next %llx:%lld dl %lld is before prev %llx:%lld dl %lld\n",
next->fence.context, next->fence.seqno, rq_deadline(next),
prev->fence.context, prev->fence.seqno, rq_deadline(prev));
@@ -411,9 +411,9 @@ execlists_context_status_change(struct intel_engine_cs *engine,
status, rq);
}
-static void reset_active(struct i915_request *rq,
- struct intel_engine_cs *engine)
+static void reset_active(struct i915_request *rq, struct intel_execlists *el)
{
+ struct intel_engine_cs *engine = el->sched.priv;
struct intel_context * const ce = rq->context;
u32 head;
@@ -450,8 +450,9 @@ static void reset_active(struct i915_request *rq,
}
static struct intel_engine_cs *
-__execlists_schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
+__execlists_schedule_in(struct intel_execlists *el, struct i915_request *rq)
{
+ struct intel_engine_cs *engine = el->sched.priv;
struct intel_context * const ce = rq->context;
intel_context_get(ce);
@@ -461,7 +462,7 @@ __execlists_schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
intel_context_set_banned(ce);
if (unlikely(intel_context_is_banned(ce)))
- reset_active(rq, engine);
+ reset_active(rq, el);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
lrc_check_regs(ce, engine, "before");
@@ -481,7 +482,7 @@ __execlists_schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
}
- ce->lrc.ccid |= engine->execlists.ccid;
+ ce->lrc.ccid |= el->ccid;
__intel_gt_pm_get(engine->gt);
if (engine->fw_domain && !engine->fw_active++)
@@ -495,33 +496,34 @@ __execlists_schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
}
static void
-execlists_schedule_in(struct intel_engine_cs *engine,
+execlists_schedule_in(struct intel_execlists *el,
struct i915_request *rq,
int idx)
{
struct intel_context * const ce = rq->context;
struct intel_engine_cs *old;
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(el->sched.priv));
trace_i915_request_in(rq, idx);
old = ce->inflight;
if (!__intel_context_inflight_count(old))
- old = __execlists_schedule_in(engine, rq);
+ old = __execlists_schedule_in(el, rq);
WRITE_ONCE(ce->inflight, ptr_inc(old));
- GEM_BUG_ON(intel_context_inflight(ce) != engine);
+ GEM_BUG_ON(intel_context_inflight(ce) != el->sched.priv);
}
static void
-resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+resubmit_virtual_request(struct intel_execlists *el,
+ struct i915_request *rq,
+ struct virtual_engine *ve)
{
struct i915_sched *se = intel_engine_get_scheduler(&ve->base);
- struct i915_sched *pv = i915_request_get_scheduler(rq);
struct i915_request *pos = rq;
struct intel_timeline *tl;
- spin_lock_irq(&pv->lock);
+ spin_lock_irq(&el->sched.lock);
if (__i915_request_is_complete(rq))
goto unlock;
@@ -547,10 +549,10 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
spin_unlock(&se->lock);
unlock:
- spin_unlock_irq(&pv->lock);
+ spin_unlock_irq(&el->sched.lock);
}
-static void kick_siblings(struct intel_engine_cs *engine,
+static void kick_siblings(struct intel_execlists *el,
struct i915_request *rq,
struct intel_context *ce)
{
@@ -563,17 +565,18 @@ static void kick_siblings(struct intel_engine_cs *engine,
* same as other native request.
*/
if (i915_request_in_priority_queue(rq) &&
- rq->execution_mask != engine->mask)
- resubmit_virtual_request(rq, ve);
+ rq->execution_mask != BIT(el->id))
+ resubmit_virtual_request(el, rq, ve);
if (!i915_sched_is_idle(ve->base.sched))
i915_sched_kick(ve->base.sched);
}
-static void __execlists_schedule_out(struct intel_engine_cs *engine,
+static void __execlists_schedule_out(struct intel_execlists *el,
struct i915_request * const rq,
struct intel_context * const ce)
{
+ struct intel_engine_cs *engine = el->sched.priv;
unsigned int ccid;
/*
@@ -635,14 +638,14 @@ static void __execlists_schedule_out(struct intel_engine_cs *engine,
* each virtual tree and kick everyone again.
*/
if (ce->engine != engine)
- kick_siblings(engine, rq, ce);
+ kick_siblings(el, rq, ce);
WRITE_ONCE(ce->inflight, NULL);
intel_context_put(ce);
}
static inline void
-execlists_schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
+execlists_schedule_out(struct intel_execlists *el, struct i915_request *rq)
{
struct intel_context * const ce = rq->context;
@@ -651,7 +654,7 @@ execlists_schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
GEM_BUG_ON(!ce->inflight);
ce->inflight = ptr_dec(ce->inflight);
if (!__intel_context_inflight_count(ce->inflight))
- __execlists_schedule_out(engine, rq, ce);
+ __execlists_schedule_out(el, rq, ce);
i915_request_put(rq);
}
@@ -703,14 +706,14 @@ static u64 execlists_update_context(struct i915_request *rq)
return desc;
}
-static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+static void write_desc(struct intel_execlists *el, u64 desc, u32 port)
{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ if (el->ctrl_reg) {
+ writel(lower_32_bits(desc), el->submit_reg + port * 2);
+ writel(upper_32_bits(desc), el->submit_reg + port * 2 + 1);
} else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ writel(upper_32_bits(desc), el->submit_reg);
+ writel(lower_32_bits(desc), el->submit_reg);
}
}
@@ -733,52 +736,48 @@ dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
}
static __maybe_unused noinline void
-trace_ports(const struct intel_engine_execlists *execlists,
+trace_ports(const struct intel_execlists *el,
const char *msg,
struct i915_request * const *ports)
{
- const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
char __maybe_unused p0[40], p1[40];
if (!ports[0])
return;
- ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+ ENGINE_TRACE(el->sched.priv, "%s { %s%s }\n", msg,
dump_port(p0, sizeof(p0), "", ports[0]),
dump_port(p1, sizeof(p1), ", ", ports[1]));
}
static __maybe_unused noinline bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
+assert_pending_valid(const struct intel_execlists *el,
const char *msg)
{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
bool sentinel = false;
u32 ccid = -1;
- trace_ports(execlists, msg, execlists->pending);
+ trace_ports(el, msg, el->pending);
/* We may be messing around with the lists during reset, lalala */
- if (i915_sched_is_disabled(intel_engine_get_scheduler(engine)))
+ if (i915_sched_is_disabled(&el->sched))
return true;
- if (!execlists->pending[0]) {
+ if (!el->pending[0]) {
GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
- engine->name);
+ el->sched.dbg.name);
return false;
}
- if (execlists->pending[execlists_num_ports(execlists)]) {
+ if (el->pending[execlists_num_ports(el)]) {
GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
+ el->sched.dbg.name, execlists_num_ports(el));
return false;
}
- for (port = execlists->pending; (rq = *port); port++) {
+ for (port = el->pending; (rq = *port); port++) {
unsigned long flags;
bool ok = true;
@@ -787,18 +786,18 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
if (ce == rq->context) {
GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
ce = rq->context;
if (ccid == ce->lrc.ccid) {
GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ccid, ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
ccid = ce->lrc.ccid;
@@ -810,9 +809,9 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
*/
if (sentinel) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
sentinel = i915_request_has_sentinel(rq);
@@ -822,12 +821,11 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
* that they are never stuck behind a hog and can be immediately
* transferred onto the next idle engine.
*/
- if (rq->execution_mask != engine->mask &&
- port != execlists->pending) {
+ if (rq->execution_mask != BIT(el->id) && port != el->pending) {
GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
@@ -841,27 +839,27 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
if (i915_active_is_idle(&ce->active) &&
!intel_context_is_barrier(ce)) {
GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
if (!i915_vma_is_pinned(ce->state)) {
GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
- engine->name,
+ el->sched.dbg.name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
@@ -875,12 +873,11 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
return ce;
}
-static void execlists_submit_ports(struct intel_engine_cs *engine)
+static void execlists_submit_ports(struct intel_execlists *el)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
unsigned int n;
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+ GEM_BUG_ON(!assert_pending_valid(el, "submit"));
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -890,7 +887,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(el->sched.priv));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -898,17 +895,15 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* currently ensured by the fact that we always write the same number
* of elsq entries, keep this in mind before changing the loop below.
*/
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
+ for (n = execlists_num_ports(el); n--; ) {
+ struct i915_request *rq = el->pending[n];
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
+ write_desc(el, rq ? execlists_update_context(rq) : 0, n);
}
/* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+ if (el->ctrl_reg)
+ writel(EL_CTRL_LOAD, el->ctrl_reg);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
@@ -940,12 +935,12 @@ static unsigned long i915_request_flags(const struct i915_request *rq)
return READ_ONCE(rq->fence.flags);
}
-static bool can_merge_rq(const struct intel_engine_cs *engine,
+static bool can_merge_rq(const struct intel_execlists *el,
const struct i915_request *prev,
const struct i915_request *next)
{
GEM_BUG_ON(prev == next);
- GEM_BUG_ON(!assert_priority_queue(engine, prev, next));
+ GEM_BUG_ON(!assert_priority_queue(el, prev, next));
/*
* We do not submit known completed requests. Therefore if the next
@@ -1030,7 +1025,7 @@ static void virtual_xfer_context(struct virtual_engine *ve,
}
static bool
-timeslice_yield(const struct intel_engine_execlists *el,
+timeslice_yield(const struct intel_execlists *el,
const struct i915_request *rq)
{
/*
@@ -1048,12 +1043,10 @@ timeslice_yield(const struct intel_engine_execlists *el,
return rq->context->lrc.ccid == READ_ONCE(el->yield);
}
-static bool needs_timeslice(const struct intel_engine_cs *engine,
+static bool needs_timeslice(const struct intel_execlists *el,
const struct i915_request *rq)
{
- const struct i915_sched *se = engine->sched;
-
- if (!i915_sched_has_timeslices(se))
+ if (!i915_sched_has_timeslices(&el->sched))
return false;
/* If not currently active, or about to switch, wait for next event */
@@ -1061,23 +1054,24 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
return false;
/* We do not need to start the timeslice until after the ACK */
- if (READ_ONCE(engine->execlists.pending[0]))
+ if (READ_ONCE(el->pending[0]))
return false;
/* If ELSP[1] is occupied, always check to see if worth slicing */
- if (!i915_sched_is_last_request(se, rq)) {
- ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+ if (!i915_sched_is_last_request(&el->sched, rq)) {
+ ENGINE_TRACE(el->sched.priv,
+ "timeslice required for second inflight context\n");
return true;
}
/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
- if (!i915_sched_is_idle(se)) {
- ENGINE_TRACE(engine, "timeslice required for queue\n");
+ if (!i915_sched_is_idle(&el->sched)) {
+ ENGINE_TRACE(el->sched.priv, "timeslice required for queue\n");
return true;
}
- if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
- ENGINE_TRACE(engine, "timeslice required for virtual\n");
+ if (!RB_EMPTY_ROOT(&el->virtual.rb_root)) {
+ ENGINE_TRACE(el->sched.priv, "timeslice required for virtual\n");
return true;
}
@@ -1085,14 +1079,12 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
}
static bool
-timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+timeslice_expired(struct intel_execlists *el, const struct i915_request *rq)
{
- const struct intel_engine_execlists *el = &engine->execlists;
-
if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
return false;
- if (!needs_timeslice(engine, rq))
+ if (!needs_timeslice(el, rq))
return false;
return timer_expired(&el->timer) || timeslice_yield(el, rq);
@@ -1103,14 +1095,13 @@ static unsigned long timeslice(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.timeslice_duration_ms);
}
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_execlists *el)
{
- struct intel_engine_execlists *el = &engine->execlists;
unsigned long duration;
/* Disable the timer if there is nothing to switch to */
duration = 0;
- if (needs_timeslice(engine, *el->active)) {
+ if (needs_timeslice(el, *el->active)) {
/* Avoid continually prolonging an active timeslice */
if (timer_active(&el->timer)) {
/*
@@ -1119,19 +1110,19 @@ static void start_timeslice(struct intel_engine_cs *engine)
* its timeslice, so recheck.
*/
if (!timer_pending(&el->timer))
- intel_engine_kick_scheduler(engine);
+ i915_sched_kick(&el->sched);
return;
}
- duration = timeslice(engine);
+ duration = timeslice(el->sched.priv);
}
set_timer_ms(&el->timer, duration);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+static void record_preemption(struct intel_execlists *l)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ (void)I915_SELFTEST_ONLY(l->preempt_hang.count++);
}
static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
@@ -1147,14 +1138,13 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
return READ_ONCE(engine->props.preempt_timeout_ms);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine,
+static void set_preempt_timeout(struct intel_execlists *el,
const struct i915_request *rq)
{
- if (!intel_engine_has_preempt_reset(engine))
+ if (!intel_engine_has_preempt_reset(el->sched.priv))
return;
- set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine, rq));
+ set_timer_ms(&el->preempt, active_preempt_timeout(el->sched.priv, rq));
}
static bool completed(const struct i915_request *rq)
@@ -1165,25 +1155,30 @@ static bool completed(const struct i915_request *rq)
return __i915_request_is_complete(rq);
}
-static void __virtual_dequeue(struct virtual_engine *ve,
- struct intel_engine_cs *sibling)
+static struct intel_execlists *to_execlists(struct intel_engine_cs *e)
{
- struct ve_node * const node = &ve->nodes[sibling->id];
+ return container_of(e->sched, struct intel_execlists, sched);
+}
+
+static void __virtual_dequeue(struct virtual_engine *ve,
+ struct intel_execlists *el)
+{
+ struct ve_node * const node = &ve->nodes[el->id];
struct rb_node **parent, *rb;
struct i915_request *rq;
u64 deadline;
bool first;
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ rb_erase_cached(&node->rb, &el->virtual);
RB_CLEAR_NODE(&node->rb);
rq = first_request(ve->base.sched);
- if (!virtual_matches(ve, rq, sibling))
+ if (!virtual_matches(ve, rq, el->sched.priv))
return;
rb = NULL;
first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
+ parent = &el->virtual.rb_root.rb_node;
deadline = rq_deadline(rq);
while (*parent) {
struct ve_node *other;
@@ -1199,24 +1194,23 @@ static void __virtual_dequeue(struct virtual_engine *ve,
}
rb_link_node(&node->rb, rb, parent);
- rb_insert_color_cached(&node->rb, &sibling->execlists.virtual, first);
+ rb_insert_color_cached(&node->rb, &el->virtual, first);
}
-static void virtual_requeue(struct intel_engine_cs *engine,
+static void virtual_requeue(struct intel_execlists *el,
struct i915_request *last)
{
- const struct i915_request * const first =
- first_request(intel_engine_get_scheduler(engine));
+ const struct i915_request * const first = first_request(&el->sched);
struct virtual_engine *ve;
- while ((ve = first_virtual_engine(engine))) {
+ while ((ve = first_virtual_engine(el))) {
struct i915_sched *se = intel_engine_get_scheduler(&ve->base);
struct i915_request *rq;
spin_lock(&se->lock);
rq = first_request(se);
- if (unlikely(!virtual_matches(ve, rq, engine)))
+ if (unlikely(!virtual_matches(ve, rq, el->sched.priv)))
/* lost the race to a sibling */
goto unlock;
@@ -1232,7 +1226,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
return;
}
- ENGINE_TRACE(engine,
+ ENGINE_TRACE(el->sched.priv,
"virtual rq=%llx:%lld%s, dl %lld, new engine? %s\n",
rq->fence.context,
rq->fence.seqno,
@@ -1240,10 +1234,10 @@ static void virtual_requeue(struct intel_engine_cs *engine,
__i915_request_has_started(rq) ? "*" :
"",
rq_deadline(rq),
- yesno(engine != ve->siblings[0]));
+ yesno(el->sched.priv != ve->siblings[0]));
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- if (__i915_request_requeue(rq, engine->sched)) {
+ GEM_BUG_ON(!(rq->execution_mask & BIT(el->id)));
+ if (__i915_request_requeue(rq, &el->sched)) {
/*
* Only after we confirm that we will submit
* this request (i.e. it has not already
@@ -1257,30 +1251,29 @@ static void virtual_requeue(struct intel_engine_cs *engine,
* we may be using ve->siblings[] in
* virtual_context_enter / virtual_context_exit.
*/
- virtual_xfer_context(ve, engine);
+ virtual_xfer_context(ve, el->sched.priv);
/* Bind this ve before we release the lock */
if (!ve->context.inflight)
- WRITE_ONCE(ve->context.inflight, engine);
+ WRITE_ONCE(ve->context.inflight,
+ el->sched.priv);
- GEM_BUG_ON(ve->siblings[0] != engine);
- GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
+ GEM_BUG_ON(ve->siblings[0] != el->sched.priv);
+ GEM_BUG_ON(intel_context_inflight(rq->context) != el->sched.priv);
last = rq;
}
unlock:
- __virtual_dequeue(ve, engine);
+ __virtual_dequeue(ve, el);
spin_unlock(&se->lock);
}
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static void execlists_dequeue(struct intel_execlists *el)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request **port = el->pending;
+ struct i915_request ** const last_port = port + el->port_mask;
struct i915_request *last, * const *active;
struct i915_priolist *pl;
bool submit = false;
@@ -1316,28 +1309,28 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of trouble.
*
*/
- active = execlists->active;
+ active = el->active;
while ((last = *active) && completed(last))
active++;
if (last) {
- if (need_preempt(engine, last)) {
- ENGINE_TRACE(engine,
+ if (need_preempt(el, last)) {
+ ENGINE_TRACE(el->sched.priv,
"preempting last=%llx:%llu, dl=%llu, prio=%d\n",
last->fence.context,
last->fence.seqno,
rq_deadline(last),
rq_prio(last));
- record_preemption(execlists);
+ record_preemption(el);
last = (void *)1;
- } else if (timeslice_expired(engine, last)) {
- ENGINE_TRACE(engine,
+ } else if (timeslice_expired(el, last)) {
+ ENGINE_TRACE(el->sched.priv,
"expired:%s last=%llx:%llu, deadline=%llu, now=%llu, yield?=%s\n",
- yesno(timer_expired(&execlists->timer)),
+ yesno(timer_expired(&el->timer)),
last->fence.context, last->fence.seqno,
rq_deadline(last),
i915_sched_to_ticks(ktime_get()),
- yesno(timeslice_yield(execlists, last)));
+ yesno(timeslice_yield(el, last)));
/*
* Consume this timeslice; ensure we start a new one.
@@ -1355,7 +1348,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* consumption of this timeslice, if we submit the
* same context again, grant it a full timeslice.
*/
- cancel_timer(&execlists->timer);
+ cancel_timer(&el->timer);
/*
* Unlike for preemption, if we rewind and continue
@@ -1392,7 +1385,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
local_irq_disable(); /* irq remains off until after ELSP write */
- spin_lock(&se->lock);
+ spin_lock(&el->sched.lock);
if ((unsigned long)last & 1) {
bool defer = (unsigned long)last & 2;
@@ -1402,7 +1395,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* as we unwind (and until we resubmit) so that we do
* not accidentally tell it to go backwards.
*/
- ring_set_paused(engine, (unsigned long)last);
+ ring_set_paused(el, (unsigned long)last);
/*
* Note that we have not stopped the GPU at this point,
@@ -1411,7 +1404,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the preemption, some of the unwound requests may
* complete!
*/
- last = __i915_sched_rewind_requests(se);
+ last = __i915_sched_rewind_requests(&el->sched);
/*
* We want to move the interrupted request to the back of
@@ -1421,21 +1414,21 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* be run after it again.
*/
if (last && defer)
- __i915_sched_defer_request(se, last);
+ __i915_sched_defer_request(&el->sched, last);
last = NULL;
}
- if (!RB_EMPTY_ROOT(&execlists->virtual.rb_root))
- virtual_requeue(engine, last);
+ if (!RB_EMPTY_ROOT(&el->virtual.rb_root))
+ virtual_requeue(el, last);
- for_each_priolist(pl, &se->queue) {
+ for_each_priolist(pl, &el->sched.queue) {
struct i915_request *rq, *rn;
priolist_for_each_request_safe(rq, rn, pl) {
bool merge = true;
- GEM_BUG_ON(i915_request_get_scheduler(rq) != se);
+ GEM_BUG_ON(i915_request_get_engine(rq) != el->sched.priv);
/*
* Can we combine this request with the current port?
@@ -1448,7 +1441,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last && !can_merge_rq(engine, last, rq)) {
+ if (last && !can_merge_rq(el, last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -1474,11 +1467,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the request immediately to another engine
* rather than wait for the primary request.
*/
- if (rq->execution_mask != engine->mask)
+ if (rq->execution_mask != BIT(el->id))
goto done;
- if (unlikely(dl_before(first_virtual(engine),
- rq)))
+ if (unlikely(dl_before(first_virtual(el), rq)))
goto done;
/*
@@ -1495,7 +1487,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
merge = false;
}
- if (__i915_request_submit(rq, engine)) {
+ if (__i915_request_submit(rq, el->sched.priv)) {
if (!merge) {
*port++ = i915_request_get(last);
last = NULL;
@@ -1513,11 +1505,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- i915_priolist_advance(&se->queue, pl);
+ i915_priolist_advance(&el->sched.queue, pl);
}
done:
*port++ = i915_request_get(last);
- spin_unlock(&se->lock);
+ spin_unlock(&el->sched.lock);
/*
* We can skip poking the HW if we ended up with exactly the same set
@@ -1526,21 +1518,20 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
if (submit &&
memcmp(active,
- execlists->pending,
- (port - execlists->pending) * sizeof(*port))) {
+ el->pending,
+ (port - el->pending) * sizeof(*port))) {
*port = NULL;
- while (port-- != execlists->pending)
- execlists_schedule_in(engine, *port,
- port - execlists->pending);
+ while (port-- != el->pending)
+ execlists_schedule_in(el, *port, port - el->pending);
- WRITE_ONCE(execlists->yield, -1);
- set_preempt_timeout(engine, *active);
- execlists_submit_ports(engine);
+ WRITE_ONCE(el->yield, -1);
+ set_preempt_timeout(el, *active);
+ execlists_submit_ports(el);
} else {
- ring_set_paused(engine, 0);
- while (port-- != execlists->pending)
+ ring_set_paused(el, 0);
+ while (port-- != el->pending)
i915_request_put(*port);
- *execlists->pending = NULL;
+ *el->pending = NULL;
}
local_irq_enable();
@@ -1560,27 +1551,27 @@ copy_ports(struct i915_request **dst, struct i915_request **src, int count)
}
static struct i915_request **
-cancel_port_requests(struct intel_engine_execlists * const execlists,
+cancel_port_requests(struct intel_execlists * const el,
struct i915_request **inactive)
{
struct i915_request * const *port;
- for (port = execlists->pending; *port; port++)
+ for (port = el->pending; *port; port++)
*inactive++ = *port;
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+ clear_ports(el->pending, ARRAY_SIZE(el->pending));
/* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ for (port = xchg(&el->active, el->pending); *port; port++)
*inactive++ = *port;
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ clear_ports(el->inflight, ARRAY_SIZE(el->inflight));
smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
+ WRITE_ONCE(el->active, el->inflight);
/* Having cancelled all outstanding process_csb(), stop their timers */
- GEM_BUG_ON(execlists->pending[0]);
- cancel_timer(&execlists->timer);
- cancel_timer(&execlists->preempt);
+ GEM_BUG_ON(el->pending[0]);
+ cancel_timer(&el->timer);
+ cancel_timer(&el->preempt);
return inactive;
}
@@ -1650,7 +1641,7 @@ static bool gen8_csb_parse(const u64 csb)
}
static noinline u64
-wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+wa_csb_read(const struct intel_execlists *el, u64 * const csb)
{
u64 entry;
@@ -1665,7 +1656,8 @@ wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
*/
preempt_disable();
if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
- int idx = csb - engine->execlists.csb_status;
+ struct intel_engine_cs *engine = el->sched.priv;
+ int idx = csb - el->csb_status;
int status;
status = GEN8_EXECLISTS_STATUS_BUF;
@@ -1683,7 +1675,7 @@ wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
return entry;
}
-static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+static u64 csb_read(const struct intel_execlists *el, u64 * const csb)
{
u64 entry = READ_ONCE(*csb);
@@ -1699,7 +1691,7 @@ static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
* tgl:HSDES#22011248461
*/
if (unlikely(entry == -1))
- entry = wa_csb_read(engine, csb);
+ entry = wa_csb_read(el, csb);
/* Consume this entry so that we can spot its future reuse. */
WRITE_ONCE(*csb, -1);
@@ -1708,18 +1700,22 @@ static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
return entry;
}
-static void new_timeslice(struct intel_engine_execlists *el)
+static void new_timeslice(struct intel_execlists *el)
{
/* By cancelling, we will start afresh in start_timeslice() */
cancel_timer(&el->timer);
}
-static struct i915_request **
-process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+static void process_csb_delay(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+}
+
+static struct i915_request **
+process_csb(struct intel_execlists *el, struct i915_request **inactive)
+{
+ u64 * const buf = el->csb_status;
+ const u8 num_entries = el->csb_size;
struct i915_request **prev;
u8 head, tail;
@@ -1733,8 +1729,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* to use explicit shifting and masking, and probably bifurcating
* the code to handle the legacy mmio read).
*/
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
+ head = el->csb_head;
+ tail = READ_ONCE(*el->csb_write);
if (unlikely(head == tail))
return inactive;
@@ -1754,8 +1750,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* simplest way is to stop processing the event queue and force the
* engine to reset.
*/
- execlists->csb_head = tail;
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+ el->csb_head = tail;
+ ENGINE_TRACE(el->sched.priv, "cs-irq head=%d, tail=%d\n", head, tail);
/*
* Hopefully paired with a wmb() in HW!
@@ -1796,53 +1792,52 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* status notifier.
*/
- csb = csb_read(engine, buf + head);
- ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ csb = csb_read(el, buf + head);
+ ENGINE_TRACE(el->sched.priv, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
- if (INTEL_GEN(engine->i915) >= 12)
+ if (el->flags & GEN12_CSB_PARSE)
promote = gen12_csb_parse(csb);
else
promote = gen8_csb_parse(csb);
if (promote) {
- struct i915_request * const *old = execlists->active;
+ struct i915_request * const *old = el->active;
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
+ if (GEM_WARN_ON(!*el->pending)) {
+ el->error_interrupt |= ERROR_CSB;
break;
}
- ring_set_paused(engine, 0);
+ ring_set_paused(el, 0);
/* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
+ WRITE_ONCE(el->active, el->pending);
smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
+ trace_ports(el, "preempted", old);
while (*old)
*inactive++ = *old++;
/* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
+ GEM_BUG_ON(!assert_pending_valid(el, "promote"));
+ copy_ports(el->inflight, el->pending,
+ execlists_num_ports(el));
smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
+ WRITE_ONCE(el->active, el->inflight);
/* XXX Magic delay for tgl */
- ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+ process_csb_delay(el->sched.priv);
- WRITE_ONCE(execlists->pending[0], NULL);
+ WRITE_ONCE(el->pending[0], NULL);
} else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
+ if (GEM_WARN_ON(!*el->active)) {
+ el->error_interrupt |= ERROR_CSB;
break;
}
/* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
+ trace_ports(el, "completed", el->active);
/*
* We rely on the hardware being strongly
@@ -1855,10 +1850,12 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* itself...
*/
if (GEM_SHOW_DEBUG() &&
- !__i915_request_is_complete(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
+ !__i915_request_is_complete(*el->active)) {
+ struct i915_request *rq = *el->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
+ struct intel_engine_cs *engine __maybe_unused =
+ el->sched.priv;
ENGINE_TRACE(engine,
"context completed before request!\n");
@@ -1883,10 +1880,10 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
regs[CTX_RING_TAIL]);
}
- *inactive++ = *execlists->active++;
+ *inactive++ = *el->active++;
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
+ GEM_BUG_ON(el->active - el->inflight >
+ execlists_num_ports(el));
}
} while (head != tail);
@@ -1908,7 +1905,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* and merits a fresh timeslice. We reinstall the timer after
* inspecting the queue to see if we need to resubmit.
*/
- if (*prev != *execlists->active) { /* elide lite-restores */
+ if (*prev != *el->active) { /* elide lite-restores */
/*
* Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU
@@ -1921,20 +1918,20 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
*/
if (*prev)
lrc_runtime_stop((*prev)->context);
- if (*execlists->active)
- lrc_runtime_start((*execlists->active)->context);
- new_timeslice(execlists);
+ if (*el->active)
+ lrc_runtime_start((*el->active)->context);
+ new_timeslice(el);
}
return inactive;
}
-static void post_process_csb(struct intel_engine_cs *engine,
+static void post_process_csb(struct intel_execlists *el,
struct i915_request **port,
struct i915_request **last)
{
while (port != last)
- execlists_schedule_out(engine, *port++);
+ execlists_schedule_out(el, *port++);
}
struct execlists_capture {
@@ -2009,9 +2006,8 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
}
static struct i915_request *
-active_context(struct intel_engine_cs *engine, u32 ccid)
+active_context(struct intel_execlists *el, u32 ccid)
{
- const struct intel_engine_execlists * const el = &engine->execlists;
struct i915_request * const *port, *rq;
/*
@@ -2022,7 +2018,7 @@ active_context(struct intel_engine_cs *engine, u32 ccid)
for (port = el->active; (rq = *port); port++) {
if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
+ ENGINE_TRACE(el->sched.priv,
"ccid:%x found at active:%zd\n",
ccid, port - el->active);
return rq;
@@ -2031,14 +2027,14 @@ active_context(struct intel_engine_cs *engine, u32 ccid)
for (port = el->pending; (rq = *port); port++) {
if (rq->context->lrc.ccid == ccid) {
- ENGINE_TRACE(engine,
+ ENGINE_TRACE(el->sched.priv,
"ccid:%x found at pending:%zd\n",
ccid, port - el->pending);
return rq;
}
}
- ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+ ENGINE_TRACE(el->sched.priv, "ccid:%x not found\n", ccid);
return NULL;
}
@@ -2047,7 +2043,7 @@ static u32 active_ccid(struct intel_engine_cs *engine)
return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
}
-static void execlists_capture(struct intel_engine_cs *engine)
+static void execlists_capture(struct intel_execlists *el)
{
struct execlists_capture *cap;
struct i915_request *rq;
@@ -2055,7 +2051,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
return;
- rq = active_context(engine, active_ccid(engine));
+ rq = active_context(el, active_ccid(el->sched.priv));
/*
* If the context is closed or already banned, assume no one is
@@ -2073,7 +2069,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
* We are inside an atomic section (softirq) here and we are delaying
* the forced preemption event.
*/
- cap = capture_regs(engine);
+ cap = capture_regs(el->sched.priv);
if (!cap)
return;
@@ -2102,7 +2098,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
* simply hold that request accountable for being non-preemptible
* long enough to force the reset.
*/
- if (!i915_sched_suspend_request(engine->sched, cap->rq))
+ if (!i915_sched_suspend_request(&el->sched, cap->rq))
goto err_rq;
INIT_WORK(&cap->work, execlists_capture_work);
@@ -2116,12 +2112,12 @@ static void execlists_capture(struct intel_engine_cs *engine)
kfree(cap);
}
-static noinline void execlists_reset(struct intel_engine_cs *engine)
+static noinline void execlists_reset(struct intel_execlists *el)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct intel_engine_cs *engine = el->sched.priv;
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- unsigned long eir = fetch_and_zero(&engine->execlists.error_interrupt);
+ unsigned long eir = fetch_and_zero(&el->error_interrupt);
const char *msg;
if (!intel_has_reset_engine(engine->gt))
@@ -2142,19 +2138,19 @@ static noinline void execlists_reset(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "reset for %s\n", msg);
/* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&se->tasklet);
+ tasklet_disable_nosync(&el->sched.tasklet);
- ring_set_paused(engine, 1); /* Freeze the current request in place */
- execlists_capture(engine);
- intel_engine_reset(engine, msg);
+ ring_set_paused(el, 1); /* Freeze the current request in place */
+ execlists_capture(el);
+ intel_engine_reset(el->sched.priv, msg);
- tasklet_enable(&se->tasklet);
+ tasklet_enable(&el->sched.tasklet);
clear_and_wake_up_bit(bit, lock);
}
-static bool preempt_timeout(const struct intel_engine_cs *const engine)
+static bool preempt_timeout(const struct intel_execlists *const el)
{
- const struct timer_list *t = &engine->execlists.preempt;
+ const struct timer_list *t = &el->preempt;
if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return false;
@@ -2162,7 +2158,7 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
if (!timer_expired(t))
return false;
- return engine->execlists.pending[0];
+ return el->pending[0];
}
/*
@@ -2171,8 +2167,7 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
*/
static void execlists_submission_tasklet(struct tasklet_struct *t)
{
- struct i915_sched * const se = from_tasklet(se, t, tasklet);
- struct intel_engine_cs *engine = se->priv;
+ struct intel_execlists *el = from_tasklet(el, t, sched.tasklet);
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
@@ -2183,36 +2178,33 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
*/
rcu_read_lock();
- inactive = process_csb(engine, post);
+ inactive = process_csb(el, post);
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
- if (unlikely(preempt_timeout(engine))) {
- cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (unlikely(preempt_timeout(el))) {
+ cancel_timer(&el->preempt);
+ el->error_interrupt |= ERROR_PREEMPT;
}
- if (unlikely(READ_ONCE(engine->execlists.error_interrupt)))
- execlists_reset(engine);
+ if (unlikely(READ_ONCE(el->error_interrupt)))
+ execlists_reset(el);
- if (!engine->execlists.pending[0]) {
- execlists_dequeue(engine);
- start_timeslice(engine);
+ if (!el->pending[0]) {
+ execlists_dequeue(el);
+ start_timeslice(el);
}
- post_process_csb(engine, post, inactive);
+ post_process_csb(el, post, inactive);
rcu_read_unlock();
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+static void __execlists_kick(struct intel_execlists *el)
{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- intel_engine_kick_scheduler(engine);
+ i915_sched_kick(&el->sched);
}
#define execlists_kick(t, member) \
- __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+ __execlists_kick(container_of(t, struct intel_execlists, member))
static void execlists_timeslice(struct timer_list *timer)
{
@@ -2354,12 +2346,12 @@ static const struct intel_context_ops execlists_context_ops = {
.destroy = lrc_destroy,
};
-static void reset_csb_pointers(struct intel_engine_cs *engine)
+static void reset_csb_pointers(struct intel_execlists *el)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
+ struct intel_engine_cs *engine = el->sched.priv;
+ const unsigned int reset_value = el->csb_size - 1;
- ring_set_paused(engine, 0);
+ ring_set_paused(el, 0);
/*
* Sometimes Icelake forgets to reset its pointers on a GPU reset.
@@ -2378,21 +2370,21 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
+ el->csb_head = reset_value;
+ WRITE_ONCE(*el->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
/* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ memset(el->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&el->csb_status[0],
+ &el->csb_status[reset_value]);
/* Once more for luck and our trusty paranoia */
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
0xffff << 16 | reset_value << 8 | reset_value);
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+ GEM_BUG_ON(READ_ONCE(*el->csb_write) != reset_value);
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -2405,7 +2397,7 @@ static void sanitize_hwsp(struct intel_engine_cs *engine)
static void execlists_sanitize(struct intel_engine_cs *engine)
{
- GEM_BUG_ON(*engine->execlists.active);
+ GEM_BUG_ON(*to_execlists(engine)->active);
/*
* Poison residual state on resume, in case the suspend didn't!
@@ -2419,7 +2411,7 @@ static void execlists_sanitize(struct intel_engine_cs *engine)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
- reset_csb_pointers(engine);
+ reset_csb_pointers(to_execlists(engine));
/*
* The kernel_context HWSP is stored in the status_page. As above,
@@ -2438,7 +2430,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)
/* Flush ongoing GT interrupts before touching interrupt state */
synchronize_hardirq(engine->i915->drm.irq);
- engine->execlists.error_interrupt = 0;
+ to_execlists(engine)->error_interrupt = 0;
ENGINE_WRITE(engine, RING_EMR, ~0u);
ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
@@ -2534,6 +2526,8 @@ static int execlists_resume(struct intel_engine_cs *engine)
static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
+ struct intel_execlists *el = to_execlists(engine);
+
/*
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
@@ -2543,7 +2537,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
- i915_sched_disable(intel_engine_get_scheduler(engine));
+ i915_sched_disable(&el->sched);
/*
* We stop engines, otherwise we might get failed reset and a
@@ -2557,31 +2551,29 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- ring_set_paused(engine, 1);
+ ring_set_paused(el, 1);
intel_engine_stop_cs(engine);
- engine->execlists.reset_ccid = active_ccid(engine);
+ el->reset_ccid = active_ccid(engine);
}
static struct i915_request **
-reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+reset_csb(struct intel_execlists *el, struct i915_request **inactive)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
+ clflush(el->csb_write);
mb();
- inactive = process_csb(engine, inactive); /* drain preemption events */
+ inactive = process_csb(el, inactive); /* drain preemption events */
/* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(engine);
+ reset_csb_pointers(el);
return inactive;
}
static void
-execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+execlists_reset_active(struct intel_execlists *el, bool stalled)
{
struct intel_context *ce;
struct i915_request *rq;
@@ -2592,7 +2584,7 @@ execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- rq = active_context(engine, engine->execlists.reset_ccid);
+ rq = active_context(el, el->reset_ccid);
if (!rq)
return;
@@ -2606,7 +2598,7 @@ execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
}
/* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(el->sched.priv));
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
@@ -2652,50 +2644,48 @@ execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
* to recreate its own state.
*/
out_replay:
- ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ ENGINE_TRACE(el->sched.priv, "replay {head:%04x, tail:%04x}\n",
head, ce->ring->tail);
- lrc_reset_regs(ce, engine);
- ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+ lrc_reset_regs(ce, el->sched.priv);
+ ce->lrc.lrca = lrc_update_regs(ce, el->sched.priv, head);
}
-static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+static void execlists_reset_csb(struct intel_execlists *el, bool stalled)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
rcu_read_lock();
- inactive = reset_csb(engine, post);
+ inactive = reset_csb(el, post);
- execlists_reset_active(engine, true);
+ execlists_reset_active(el, true);
- inactive = cancel_port_requests(execlists, inactive);
- post_process_csb(engine, post, inactive);
+ inactive = cancel_port_requests(el, inactive);
+ post_process_csb(el, post, inactive);
rcu_read_unlock();
}
static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct intel_execlists *el = to_execlists(engine);
unsigned long flags;
ENGINE_TRACE(engine, "\n");
/* Process the csb, find the guilty context and throw away */
- execlists_reset_csb(engine, stalled);
+ execlists_reset_csb(el, stalled);
/* Push back any incomplete requests for replay after the reset. */
rcu_read_lock();
- spin_lock_irqsave(&se->lock, flags);
- __i915_sched_rewind_requests(se);
- spin_unlock_irqrestore(&se->lock, flags);
+ spin_lock_irqsave(&el->sched.lock, flags);
+ __i915_sched_rewind_requests(&el->sched);
+ spin_unlock_irqrestore(&el->sched.lock, flags);
rcu_read_unlock();
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct intel_execlists * const el = to_execlists(engine);
struct i915_request *rq, *rn;
struct i915_priolist *pl;
struct rb_node *rb;
@@ -2717,38 +2707,38 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- execlists_reset_csb(engine, true);
+ execlists_reset_csb(el, true);
rcu_read_lock();
- spin_lock_irqsave(&se->lock, flags);
+ spin_lock_irqsave(&el->sched.lock, flags);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &se->requests, sched.link)
+ list_for_each_entry(rq, &el->sched.requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
- for_each_priolist(pl, &se->queue) {
+ for_each_priolist(pl, &el->sched.queue) {
priolist_for_each_request_safe(rq, rn, pl) {
if (i915_request_mark_eio(rq)) {
__i915_request_submit(rq, engine);
i915_request_put(rq);
}
}
- i915_priolist_advance(&se->queue, pl);
+ i915_priolist_advance(&el->sched.queue, pl);
}
- GEM_BUG_ON(!i915_sched_is_idle(se));
+ GEM_BUG_ON(!i915_sched_is_idle(&el->sched));
/* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &se->hold, sched.link)
+ list_for_each_entry(rq, &el->sched.hold, sched.link)
i915_request_put(i915_request_mark_eio(rq));
/* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
+ while ((rb = rb_first_cached(&el->virtual))) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- rb_erase_cached(rb, &execlists->virtual);
+ rb_erase_cached(rb, &el->virtual);
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.sched->lock);
@@ -2766,7 +2756,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&se->lock, flags);
+ spin_unlock_irqrestore(&el->sched.lock, flags);
rcu_read_unlock();
}
@@ -2800,17 +2790,38 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
static void execlists_park(struct intel_engine_cs *engine)
{
- cancel_timer(&engine->execlists.timer);
- cancel_timer(&engine->execlists.preempt);
+ struct intel_execlists *el = to_execlists(engine);
+
+ cancel_timer(&el->timer);
+ cancel_timer(&el->preempt);
+}
+
+static inline struct i915_request *
+execlists_active(const struct intel_execlists *el)
+{
+ struct i915_request * const *cur, * const *old, *active;
+
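+	/*
+	 * el->active is republished by process_csb() as the hardware retires
+	 * or promotes contexts; sample the pointer before and after reading
+	 * the request and retry if it moved, so we never return an entry
+	 * from a slot that was overwritten underneath us.
+	 */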
+ cur = READ_ONCE(el->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(el->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
static const struct i915_request *
execlists_active_request(struct i915_sched *se)
{
- struct intel_engine_cs *engine = se->priv;
+ struct intel_execlists *el = container_of(se, typeof(*el), sched);
struct i915_request *rq;
- rq = execlists_active(&engine->execlists);
+ rq = execlists_active(el);
if (rq)
rq = active_request(rq->context->timeline, rq);
@@ -2819,9 +2830,9 @@ execlists_active_request(struct i915_sched *se)
static bool execlists_is_executing(const struct i915_request *rq)
{
- struct i915_sched *se = i915_request_get_scheduler(rq);
- struct intel_engine_cs *engine = se->priv;
- struct intel_engine_execlists *el = &engine->execlists;
+ struct intel_execlists *el =
+ container_of(i915_request_get_scheduler(rq),
+ typeof(*el), sched);
struct i915_request * const *port, *p;
bool inflight = false;
@@ -2896,18 +2907,18 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->sched->submit_request = i915_request_enqueue;
}
-static void execlists_shutdown(struct intel_engine_cs *engine)
+static void execlists_shutdown(struct intel_execlists *el)
{
/* Synchronise with residual timers and any softirq they raise */
- del_timer_sync(&engine->execlists.timer);
- del_timer_sync(&engine->execlists.preempt);
+ del_timer_sync(&el->timer);
+ del_timer_sync(&el->preempt);
}
static void execlists_release(struct intel_engine_cs *engine)
{
engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
- execlists_shutdown(engine);
+ execlists_shutdown(to_execlists(engine));
intel_engine_cleanup_common(engine);
lrc_fini_wa_ctx(engine);
@@ -3017,33 +3028,80 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
}
}
+static struct i915_sched *create_execlists(struct intel_engine_cs *engine)
+{
+ struct intel_uncore *uncore = engine->uncore;
+ struct drm_i915_private *i915 = engine->i915;
+ u32 base = engine->mmio_base;
+ struct intel_execlists *el;
+
+ el = kzalloc(sizeof(*el), GFP_KERNEL);
+ if (!el)
+ return NULL;
+
+ i915_sched_init(&el->sched,
+ i915->drm.dev,
+ engine->name,
+ engine->mask,
+ execlists_submission_tasklet, engine,
+ ENGINE_PHYSICAL);
+
+ el->sched.submit_request = i915_request_enqueue;
+ el->sched.active_request = execlists_active_request;
+ el->sched.is_executing = execlists_is_executing;
+ el->sched.show = execlists_show;
+ __set_bit(I915_SCHED_ACTIVE_BIT, &el->sched.flags);
+
+ if (INTEL_GEN(i915) >= 12)
+ el->flags |= GEN12_CSB_PARSE;
+
+ el->id = engine->id;
+ el->pause = &engine->status_page.addr[I915_GEM_HWS_PREEMPT];
+
+ el->port_mask = 1;
+ GEM_BUG_ON(!is_power_of_2(execlists_num_ports(el)));
+ GEM_BUG_ON(execlists_num_ports(el) > EXECLIST_MAX_PORTS);
+ el->active = el->inflight;
+
+ timer_setup(&el->timer, execlists_timeslice, 0);
+ timer_setup(&el->preempt, execlists_preempt, 0);
+
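+	/*
+	 * Choose the submission interface: the ELSQ contents array plus its
+	 * control register where available, otherwise the legacy ELSP port
+	 * (see @submit_reg and @ctrl_reg in intel_execlists_types.h).
+	 */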
+ if (HAS_LOGICAL_RING_ELSQ(i915)) {
+ el->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ el->ctrl_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+ } else {
+ el->submit_reg = uncore->regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
+ }
+
+ el->csb_status =
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+
+ el->csb_write =
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
+
+ if (INTEL_GEN(i915) < 11)
+ el->csb_size = GEN8_CSB_ENTRIES;
+ else
+ el->csb_size = GEN11_CSB_ENTRIES;
+
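+	/*
+	 * Gen11+ also reports the engine class/instance in the CCID, so fold
+	 * them into el->ccid for matching against the HW status.
+	 */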
+ engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
+ if (INTEL_GEN(i915) >= 11) {
+ el->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ el->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ }
+
+ return &el->sched;
+}
+
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
-
- engine->sched =
- i915_sched_create(i915->drm.dev,
- engine->name,
- engine->mask,
- execlists_submission_tasklet, engine,
- ENGINE_PHYSICAL);
+ engine->sched = create_execlists(engine);
if (!engine->sched)
return -ENOMEM;
- engine->sched->submit_request = i915_request_enqueue;
- engine->sched->active_request = execlists_active_request;
- engine->sched->is_executing = execlists_is_executing;
- engine->sched->show = execlists_show;
- __set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched->flags);
-
- intel_engine_init_execlists(engine);
-
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
-
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -3052,33 +3110,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
lrc_init_wa_ctx(engine);
- if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
- } else {
- execlists->submit_reg = uncore->regs +
- i915_mmio_reg_offset(RING_ELSP(base));
- }
-
- execlists->csb_status =
- (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
-
- execlists->csb_write =
- &engine->status_page.addr[intel_hws_csb_write_index(i915)];
-
- if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
- else
- execlists->csb_size = GEN11_CSB_ENTRIES;
-
- engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
- if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
- }
-
/* Finally, take ownership and responsibility for cleanup! */
engine->sanitize = execlists_sanitize;
engine->release = execlists_release;
@@ -3132,7 +3163,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
- rb_erase_cached(node, &sibling->execlists.virtual);
+ rb_erase_cached(node, &to_execlists(sibling)->virtual);
spin_unlock_irq(&sibling->sched->lock);
}
@@ -3299,7 +3330,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
- struct i915_sched *se = intel_engine_get_scheduler(sibling);
+ struct intel_execlists *el = to_execlists(sibling);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
@@ -3308,8 +3339,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
if (unlikely(!virtual_matches(ve, rq, sibling))) {
if (!RB_EMPTY_NODE(&node->rb)) {
- rb_erase_cached(&node->rb,
- &sibling->execlists.virtual);
+ rb_erase_cached(&node->rb, &el->virtual);
RB_CLEAR_NODE(&node->rb);
}
@@ -3321,18 +3351,17 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
* Cheat and avoid rebalancing the tree if we can
* reuse this node in situ.
*/
- first = rb_first_cached(&sibling->execlists.virtual) ==
- &node->rb;
+ first = rb_first_cached(&el->virtual) == &node->rb;
if (deadline == node->deadline ||
(deadline < node->deadline && first))
goto submit_engine;
- rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ rb_erase_cached(&node->rb, &el->virtual);
}
rb = NULL;
first = true;
- parent = &sibling->execlists.virtual.rb_root.rb_node;
+ parent = &el->virtual.rb_root.rb_node;
while (*parent) {
struct ve_node *other;
@@ -3348,7 +3377,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
rb_link_node(&node->rb, rb, parent);
rb_insert_color_cached(&node->rb,
- &sibling->execlists.virtual,
+ &to_execlists(sibling)->virtual,
first);
submit_engine:
@@ -3434,8 +3463,6 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
- intel_engine_init_execlists(&ve->base);
-
ve->base.cops = &virtual_context_ops;
ve->base.bond_execute = virtual_bond_execute;
@@ -3639,8 +3666,8 @@ static void execlists_show(struct drm_printer *m,
int indent),
unsigned int max)
{
+ struct intel_execlists *el = container_of(se, typeof(*el), sched);
const struct intel_engine_cs *engine = se->priv;
- const struct intel_engine_execlists *el = &engine->execlists;
const u8 num_entries = el->csb_size;
const u64 *hws = el->csb_status;
struct i915_request * const *port;
@@ -3658,7 +3685,7 @@ static void execlists_show(struct drm_printer *m,
count = 0;
for (rb = rb_first_cached(&el->virtual); rb; rb = rb_next(rb)) {
struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ rb_entry(rb, typeof(*ve), nodes[el->id].rb);
struct i915_request *rq;
rq = first_request(ve->base.sched);
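With the embedded intel_engine_execlists gone, the backend state is now
recovered from engine->sched via container_of() (as the to_ring_sched() and
cs_irq_handler() hunks below do). A self-contained plain-C sketch of that
embedding pattern, with stand-in types and a simplified container_of():

#include <stddef.h>
#include <stdio.h>

/* simplified container_of(); the kernel version adds type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct i915_sched {		/* stand-in for the common scheduler */
	const char *name;
};

struct intel_execlists {	/* stand-in for the backend state */
	struct i915_sched sched;	/* embedded; engine->sched points here */
	unsigned int port_mask;
};

static struct intel_execlists *to_execlists(struct i915_sched *sched)
{
	return container_of(sched, struct intel_execlists, sched);
}

int main(void)
{
	struct intel_execlists el = {
		.sched = { .name = "rcs0" },
		.port_mask = 1,
	};
	struct i915_sched *se = &el.sched;	/* what engine->sched would hold */

	printf("%s: %u ports\n", se->name, to_execlists(se)->port_mask + 1);
	return 0;
}

The ring scheduler uses the same layout for its ring_sched further below.
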
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_types.h b/drivers/gpu/drm/i915/gt/intel_execlists_types.h
new file mode 100644
index 000000000000..073659e6556b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_types.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_EXECLISTS_TYPES__
+#define __INTEL_EXECLISTS_TYPES__
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "i915_scheduler_types.h"
+#include "i915_selftest.h"
+
+struct st_preempt_hang {
+ struct completion completion;
+ unsigned int count;
+};
+
+/**
+ * struct intel_execlists - execlist submission queue and port state
+ *
+ * The struct intel_execlists represents the combined logical state of the
+ * driver and the hardware for execlist mode of submission.
+ */
+struct intel_execlists {
+ struct i915_sched sched;
+
+ unsigned long flags;
+#define GEN12_CSB_PARSE BIT(0)
+
+ unsigned int id;
+ u32 *pause;
+
+ /**
+ * @timer: kick the current context if its timeslice expires
+ */
+ struct timer_list timer;
+
+ /**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
+ * @ccid: identifier for contexts submitted to this engine
+ */
+ u32 ccid;
+
+ /**
+ * @yield: CCID at the time of the last semaphore-wait interrupt.
+ *
+ * Instead of leaving a semaphore busy-spinning on an engine, we would
+ * like to switch to another ready context, i.e. yielding the semaphore
+ * timeslice.
+ */
+ u32 yield;
+
+ /**
+ * @error_interrupt: CS Master EIR
+ *
+ * The CS generates an interrupt when it detects an error. We capture
+ * the first error interrupt, record the EIR and schedule the tasklet.
+ * In the tasklet, we process the pending CS events to ensure we have
+ * the guilty request, and then reset the engine.
+ *
+ * Low 16b are used by HW, with the upper 16b of EIR the enabling mask.
+ * Reserve the upper 16b here for tracking internal errors.
+ */
+ u32 error_interrupt;
+#define ERROR_CSB BIT(31)
+#define ERROR_PREEMPT BIT(30)
+
+ /**
+ * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
+ */
+ u32 reset_ccid;
+
+ /**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+#define EXECLIST_MAX_PORTS 2
+ /**
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
+ /**
+ * @inflight: the set of contexts submitted and acknowledged by HW
+ *
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
+ */
+ struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
+ /**
+ * @pending: the next set of contexts submitted to ELSP
+ *
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
+ */
+ struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ struct rb_root_cached virtual;
+
+ /**
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
+ */
+ u32 *csb_write;
+
+ /**
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
+ */
+ u64 *csb_status;
+
+ /**
+ * @csb_size: context status buffer FIFO size
+ */
+ u8 csb_size;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
+
+static inline unsigned int
+execlists_num_ports(const struct intel_execlists * const el)
+{
+ return el->port_mask + 1;
+}
+
+#endif /* __INTEL_EXECLISTS_TYPES__ */
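
A minimal single-threaded sketch of the pending[]/inflight[] promotion
documented above, in plain C with stand-in types (the barriers, RCU and the
WRITE_ONCE dance the real process_csb() performs are deliberately omitted):

#include <stdio.h>
#include <string.h>

#define MAX_PORTS 2	/* stand-in for EXECLIST_MAX_PORTS */

struct request { int id; };	/* stand-in for struct i915_request */

struct ports {
	struct request *const *active;			/* currently known executing set */
	struct request *inflight[MAX_PORTS + 1];	/* +1 for the NULL sentinel */
	struct request *pending[MAX_PORTS + 1];
};

static void submit(struct ports *p, struct request **rq, int count)
{
	/* the ELSP/ELSQ write: publish the NULL-terminated next set */
	memcpy(p->pending, rq, count * sizeof(*rq));
	p->pending[count] = NULL;
}

static void csb_ack(struct ports *p)
{
	/* idle-to-active or preemption event: promote pending to inflight */
	memcpy(p->inflight, p->pending, sizeof(p->pending));
	p->active = p->inflight;
	p->pending[0] = NULL;
}

int main(void)
{
	struct request a = { 1 }, b = { 2 };
	struct request *batch[] = { &a, &b };
	struct ports p = { 0 };

	p.active = p.inflight;		/* idle: nothing executing */
	submit(&p, batch, 2);
	csb_ack(&p);

	for (struct request *const *port = p.active; *port; port++)
		printf("inflight rq%d\n", (*port)->id);
	return 0;
}

Only the data flow is modelled; the real code republishes active with
WRITE_ONCE() and memory barriers so that execlists_active() can detect and
retry a concurrent update.
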
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 270dbebc4c18..102817a31581 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
+#include "intel_execlists_types.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_lrc_reg.h"
@@ -23,6 +24,8 @@ static void guc_irq_handler(struct intel_guc *guc, u16 iir)
static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
+ struct intel_execlists *el =
+ container_of(engine->sched, typeof(*el), sched);
bool tasklet = false;
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
@@ -36,17 +39,17 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (likely(eir)) {
ENGINE_WRITE(engine, RING_EMR, ~0u);
ENGINE_WRITE(engine, RING_EIR, eir);
- WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ WRITE_ONCE(el->error_interrupt, eir);
tasklet = true;
}
}
if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
- WRITE_ONCE(engine->execlists.yield,
+ WRITE_ONCE(el->yield,
ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
ENGINE_TRACE(engine, "semaphore yield: %08x\n",
- engine->execlists.yield);
- if (del_timer(&engine->execlists.timer))
+ el->yield);
+ if (del_timer(&el->timer))
tasklet = true;
}
@@ -55,11 +58,11 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_signal_breadcrumbs(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
+ tasklet |= i915_sched_needs_breadcrumb_tasklet(&el->sched);
}
if (tasklet)
- intel_engine_kick_scheduler(engine);
+ i915_sched_kick(&el->sched);
}
void gen2_engine_cs_irq(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index a2a62d50f71e..26a95fadc0dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -31,6 +31,41 @@
*/
#define LEGACY_REQUEST_SIZE 200
+struct ring_sched {
+ struct i915_sched sched;
+
+ unsigned long flags;
+ unsigned int id;
+
+#define MAX_PORTS 2
+ /**
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
+ /**
+ * @inflight: the set of contexts submitted and acknowledged by HW
+ *
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
+ */
+ struct i915_request *inflight[MAX_PORTS + 1 /* sentinel */];
+ /**
+ * @pending: the next set of contexts submitted to ELSP
+ *
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
+ */
+ struct i915_request *pending[MAX_PORTS + 1];
+};
+
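+/*
+ * The ring backend embeds the common scheduler; engine->sched points at
+ * the embedded member, so container_of() recovers the backend state.
+ */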
+static inline struct ring_sched *to_ring_sched(struct intel_engine_cs *engine)
+{
+ return container_of(engine->sched, struct ring_sched, sched);
+}
+
static void
set_current_context(struct intel_context **ptr, struct intel_context *ce)
{
@@ -72,9 +107,9 @@ static inline void runtime_stop(struct intel_context *ce)
}
static struct intel_engine_cs *
-__schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
+__schedule_in(struct ring_sched *rs, struct intel_context *ce)
{
- struct intel_context *ce = rq->context;
+ struct intel_engine_cs *engine = rs->sched.priv;
intel_context_get(ce);
@@ -89,19 +124,19 @@ __schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
return engine;
}
-static void schedule_in(struct intel_engine_cs *engine, struct i915_request *rq)
+static void schedule_in(struct ring_sched *rs, struct i915_request *rq)
{
struct intel_context * const ce = rq->context;
struct intel_engine_cs *old;
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rs->sched.priv));
old = ce->inflight;
if (!old)
- old = __schedule_in(engine, rq);
+ old = __schedule_in(rs, ce);
WRITE_ONCE(ce->inflight, ptr_inc(old));
- GEM_BUG_ON(intel_context_inflight(ce) != engine);
+ GEM_BUG_ON(intel_context_inflight(ce) != rs->sched.priv);
GEM_BUG_ON(!intel_context_inflight_count(ce));
}
@@ -531,14 +566,13 @@ static inline void write_tail(const struct intel_engine_cs *engine)
wa_write_tail(engine);
}
-static void dequeue(struct i915_sched *se, struct intel_engine_cs *engine)
+static void dequeue(struct ring_sched *rs)
{
- struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request ** const last_port = el->pending + el->port_mask;
+ struct i915_request ** const last_port = rs->pending + MAX_PORTS - 1;
struct i915_request **port, **first, *last;
struct i915_priolist *p;
- first = copy_active(el->pending, el->active);
+ first = copy_active(rs->pending, rs->active);
if (first > last_port)
return;
@@ -546,8 +580,8 @@ static void dequeue(struct i915_sched *se, struct intel_engine_cs *engine)
last = NULL;
port = first;
- spin_lock(&se->lock);
- for_each_priolist(p, &se->queue) {
+ spin_lock(&rs->sched.lock);
+ for_each_priolist(p, &rs->sched.queue) {
struct i915_request *rq, *rn;
priolist_for_each_request_safe(rq, rn, p) {
@@ -559,33 +593,33 @@ static void dequeue(struct i915_sched *se, struct intel_engine_cs *engine)
*port++ = i915_request_get(last);
}
- last = ring_submit(engine, rq);
+ last = ring_submit(rs->sched.priv, rq);
}
- i915_priolist_advance(&se->queue, p);
+ i915_priolist_advance(&rs->sched.queue, p);
}
done:
- spin_unlock(&se->lock);
+ spin_unlock(&rs->sched.lock);
if (last) {
*port++ = i915_request_get(last);
*port = NULL;
- if (!*el->active)
- runtime_start((*el->pending)->context);
- WRITE_ONCE(el->active, el->pending);
+ if (!*rs->active)
+ runtime_start((*rs->pending)->context);
+ WRITE_ONCE(rs->active, rs->pending);
- copy_ports(el->inflight, el->pending, port - el->pending + 1);
+ copy_ports(rs->inflight, rs->pending, port - rs->pending + 1);
while (port-- != first)
- schedule_in(engine, *port);
+ schedule_in(rs, *port);
- write_tail(engine);
+ write_tail(rs->sched.priv);
- WRITE_ONCE(el->active, el->inflight);
- GEM_BUG_ON(!*el->active);
+ WRITE_ONCE(rs->active, rs->inflight);
+ GEM_BUG_ON(!*rs->active);
}
- WRITE_ONCE(el->pending[0], NULL);
+ WRITE_ONCE(rs->pending[0], NULL);
local_irq_enable(); /* flush irq_work *after* RING_TAIL write */
}
@@ -599,18 +633,18 @@ static void post_process_csb(struct intel_engine_cs *engine,
}
static struct i915_request **
-process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
+process_csb(struct ring_sched *rs, struct i915_request **inactive)
{
struct i915_request *rq;
- while ((rq = *el->active)) {
+ while ((rq = *rs->active)) {
if (!__i915_request_is_complete(rq)) {
runtime_start(rq->context);
break;
}
*inactive++ = rq;
- el->active++;
+ rs->active++;
runtime_stop(rq->context);
}
@@ -620,19 +654,18 @@ process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
static void submission_tasklet(struct tasklet_struct *t)
{
- struct i915_sched *se = from_tasklet(se, t, tasklet);
- struct intel_engine_cs *engine = se->priv;
- struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct ring_sched *rs = from_tasklet(rs, t, sched.tasklet);
+ struct i915_request *post[2 * MAX_PORTS];
struct i915_request **inactive;
rcu_read_lock();
- inactive = process_csb(&engine->execlists, post);
+ inactive = process_csb(rs, post);
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
- if (!i915_sched_is_idle(se))
- dequeue(se, engine);
+ if (!i915_sched_is_idle(&rs->sched))
+ dequeue(rs);
- post_process_csb(engine, post, inactive);
+ post_process_csb(rs->sched.priv, post, inactive);
rcu_read_unlock();
}
@@ -651,46 +684,45 @@ static inline void clear_ports(struct i915_request **ports, int count)
}
static struct i915_request **
-cancel_port_requests(struct intel_engine_execlists * const el,
+cancel_port_requests(struct ring_sched * const rs,
struct i915_request **inactive)
{
struct i915_request * const *port;
- clear_ports(el->pending, ARRAY_SIZE(el->pending));
+ clear_ports(rs->pending, ARRAY_SIZE(rs->pending));
/* Mark the end of active before we overwrite *active */
- for (port = xchg(&el->active, el->pending); *port; port++)
+ for (port = xchg(&rs->active, rs->pending); *port; port++)
*inactive++ = *port;
- clear_ports(el->inflight, ARRAY_SIZE(el->inflight));
+ clear_ports(rs->inflight, ARRAY_SIZE(rs->inflight));
- smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(el->active, el->inflight);
+ WRITE_ONCE(rs->active, rs->inflight);
return inactive;
}
-static void __ring_rewind(struct i915_sched *se, bool stalled)
+static void __ring_rewind(struct ring_sched *rs, bool stalled)
{
struct i915_request *rq;
unsigned long flags;
rcu_read_lock();
- spin_lock_irqsave(&se->lock, flags);
- rq = __i915_sched_rewind_requests(se);
- spin_unlock_irqrestore(&se->lock, flags);
+ spin_lock_irqsave(&rs->sched.lock, flags);
+ rq = __i915_sched_rewind_requests(&rs->sched);
+ spin_unlock_irqrestore(&rs->sched.lock, flags);
if (rq && __i915_request_has_started(rq))
__i915_request_reset(rq, stalled);
rcu_read_unlock();
}
-static void ring_reset_csb(struct intel_engine_cs *engine)
+static void ring_reset_csb(struct ring_sched *rs)
{
- struct intel_engine_execlists * const el = &engine->execlists;
- struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+ struct intel_engine_cs *engine = rs->sched.priv;
+ struct i915_request *post[2 * MAX_PORTS];
struct i915_request **inactive;
rcu_read_lock();
- inactive = cancel_port_requests(el, post);
+ inactive = cancel_port_requests(rs, post);
/* Clear the global submission state, we will submit from scratch */
intel_ring_reset(engine->legacy.ring, 0);
@@ -702,40 +734,42 @@ static void ring_reset_csb(struct intel_engine_cs *engine)
static void ring_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
- ring_reset_csb(engine);
- __ring_rewind(engine->sched, stalled);
+ struct ring_sched *rs = to_ring_sched(engine);
+
+ ring_reset_csb(rs);
+ __ring_rewind(rs, stalled);
}
static void ring_reset_cancel(struct intel_engine_cs *engine)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct ring_sched *rs = to_ring_sched(engine);
struct i915_request *rq, *rn;
struct i915_priolist *p;
unsigned long flags;
- ring_reset_csb(engine);
+ ring_reset_csb(rs);
rcu_read_lock();
- spin_lock_irqsave(&se->lock, flags);
+ spin_lock_irqsave(&rs->sched.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(rq, &se->requests, sched.link)
+ list_for_each_entry(rq, &rs->sched.requests, sched.link)
i915_request_mark_eio(rq);
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
- for_each_priolist(p, &se->queue) {
+ for_each_priolist(p, &rs->sched.queue) {
priolist_for_each_request_safe(rq, rn, p) {
i915_request_mark_eio(rq);
__i915_request_submit(rq, engine);
}
- i915_priolist_advance(&se->queue, p);
+ i915_priolist_advance(&rs->sched.queue, p);
}
- GEM_BUG_ON(!i915_sched_is_idle(se));
+ GEM_BUG_ON(!i915_sched_is_idle(&rs->sched));
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&se->lock, flags);
+ spin_unlock_irqrestore(&rs->sched.lock, flags);
rcu_read_unlock();
}
@@ -1118,7 +1152,7 @@ static void setup_vecs(struct intel_engine_cs *engine)
static unsigned int global_ring_size(void)
{
/* Enough space to hold 2 clients and the context switch */
- return roundup_pow_of_two(EXECLIST_MAX_PORTS * SZ_16K + SZ_4K);
+ return roundup_pow_of_two(MAX_PORTS * SZ_16K + SZ_4K);
}
static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
@@ -1173,6 +1207,29 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
return err;
}
+static struct i915_sched *create_ring_sched(struct intel_engine_cs *engine)
+{
+ struct ring_sched *rs;
+
+ rs = kzalloc(sizeof(*rs), GFP_KERNEL);
+ if (!rs)
+ return NULL;
+
+ i915_sched_init(&rs->sched,
+ engine->i915->drm.dev,
+ engine->name,
+ engine->mask,
+ submission_tasklet, engine,
+ ENGINE_PHYSICAL);
+
+ __set_bit(I915_SCHED_ACTIVE_BIT, &rs->sched.flags);
+ __set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &rs->sched.flags);
+
+ rs->active = rs->inflight;
+
+ return &rs->sched;
+}
+
int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
{
struct intel_ring *ring;
@@ -1200,18 +1257,11 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
return -ENODEV;
}
- engine->sched =
- i915_sched_create(engine->i915->drm.dev,
- engine->name,
- engine->mask,
- submission_tasklet, engine,
- ENGINE_PHYSICAL);
+ engine->sched = create_ring_sched(engine);
if (!engine->sched) {
err = -ENOMEM;
goto err;
}
- __set_bit(I915_SCHED_ACTIVE_BIT, &engine->sched->flags);
- __set_bit(I915_SCHED_NEEDS_BREADCRUMB_BIT, &engine->sched->flags);
ring = intel_engine_create_ring(engine, global_ring_size());
if (IS_ERR(ring)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 7a7175a24fd8..26841934fafa 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -54,7 +54,8 @@ static int wait_for_submit(struct intel_engine_cs *engine,
/* Wait until the HW has acknowleged the submission (or err) */
intel_engine_flush_scheduler(engine);
- if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ if (!READ_ONCE(to_execlists(engine)->pending[0]) &&
+ is_active(rq))
return 0;
if (done)
@@ -74,7 +75,7 @@ static int wait_for_reset(struct intel_engine_cs *engine,
cond_resched();
intel_engine_flush_scheduler(engine);
- if (READ_ONCE(engine->execlists.pending[0]))
+ if (READ_ONCE(to_execlists(engine)->pending[0]))
continue;
if (i915_request_completed(rq))
@@ -606,7 +607,6 @@ static int live_hold_reset(void *arg)
tasklet_disable(&se->tasklet);
se->tasklet.callback(&se->tasklet);
- GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
i915_sched_suspend_request(se, rq);
@@ -1184,7 +1184,7 @@ static int live_timeslice_rewind(void *arg)
while (i915_request_is_active(rq[A2]) &&
time_before(jiffies, timeout)) { /* semaphore yield! */
/* Wait for the timeslice to kick in */
- del_timer(&engine->execlists.timer);
+ del_timer(&to_execlists(engine)->timer);
intel_engine_kick_scheduler(engine);
intel_engine_flush_scheduler(engine);
@@ -1375,7 +1375,7 @@ static int live_timeslice_queue(void *arg)
do {
cond_resched();
intel_engine_flush_scheduler(engine);
- } while (READ_ONCE(engine->execlists.pending[0]));
+ } while (READ_ONCE(to_execlists(engine)->pending[0]));
/* Timeslice every jiffy, so within 2 we should signal */
if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
@@ -1971,7 +1971,7 @@ static int live_nopreempt(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- engine->execlists.preempt_hang.count = 0;
+ to_execlists(engine)->preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
a.ctx, engine,
@@ -2018,9 +2018,9 @@ static int live_nopreempt(void *arg)
igt_spinner_end(&b.spin);
- if (engine->execlists.preempt_hang.count) {
+ if (to_execlists(engine)->preempt_hang.count) {
pr_err("Preemption recorded x%d; should have been suppressed!\n",
- engine->execlists.preempt_hang.count);
+ to_execlists(engine)->preempt_hang.count);
err = -EINVAL;
goto err_wedged;
}
@@ -2345,9 +2345,9 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
force_reset_timeout(engine);
/* force preempt reset [failure] */
- while (!engine->execlists.pending[0])
+ while (!to_execlists(engine)->pending[0])
intel_engine_flush_scheduler(engine);
- del_timer_sync(&engine->execlists.preempt);
+ del_timer_sync(&to_execlists(engine)->preempt);
intel_engine_flush_scheduler(engine);
cancel_reset_timeout(engine);
@@ -2463,7 +2463,7 @@ static int live_suppress_self_preempt(void *arg)
goto err_wedged;
st_engine_heartbeat_disable(engine);
- engine->execlists.preempt_hang.count = 0;
+ to_execlists(engine)->preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
a.ctx, engine,
@@ -2482,7 +2482,7 @@ static int live_suppress_self_preempt(void *arg)
}
/* Keep postponing the timer to avoid premature slicing */
- mod_timer(&engine->execlists.timer, jiffies + HZ);
+ mod_timer(&to_execlists(engine)->timer, jiffies + HZ);
for (depth = 0; depth < 8; depth++) {
rq_b = spinner_create_request(&b.spin,
b.ctx, engine,
@@ -2509,10 +2509,10 @@ static int live_suppress_self_preempt(void *arg)
}
igt_spinner_end(&a.spin);
- if (engine->execlists.preempt_hang.count) {
+ if (to_execlists(engine)->preempt_hang.count) {
pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
engine->name,
- engine->execlists.preempt_hang.count,
+ to_execlists(engine)->preempt_hang.count,
depth);
st_engine_heartbeat_enable(engine);
err = -EINVAL;
@@ -3419,7 +3419,7 @@ static int live_preempt_timeout(void *arg)
}
/* Flush the previous CS ack before changing timeouts */
- while (READ_ONCE(engine->execlists.pending[0]))
+ while (READ_ONCE(to_execlists(engine)->pending[0]))
cpu_relax();
saved_timeout = engine->props.preempt_timeout_ms;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 391a14cc135f..fa7e7bf3cf09 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -60,7 +60,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
/* Wait until the HW has acknowleged the submission (or err) */
intel_engine_flush_scheduler(engine);
- if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+ if (is_active(rq))
return 0;
if (done)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2b79d073ac37..30e746272237 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -131,131 +131,22 @@ static void flush_ggtt_writes(struct i915_vma *vma)
GUC_STATUS);
}
-static void guc_submit(struct intel_engine_cs *engine,
- struct i915_request **out,
- struct i915_request **end)
+static void guc_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
-
- do {
- struct i915_request *rq = *out++;
-
- flush_ggtt_writes(rq->ring->vma);
- guc_add_request(guc, rq);
- } while (out != end);
-}
-
-static inline int rq_prio(const struct i915_request *rq)
-{
- return rq->sched.attr.priority;
-}
-
-static struct i915_request *
-schedule_in(struct intel_engine_cs *engine, struct i915_request *rq, int idx)
-{
- trace_i915_request_in(rq, idx);
-
- /*
- * Currently we are not tracking the rq->context being inflight
- * (ce->inflight = rq->engine). It is only used by the execlists
- * backend at the moment, a similar counting strategy would be
- * required if we generalise the inflight tracking.
- */
-
- __intel_gt_pm_get(engine->gt);
- return i915_request_get(rq);
-}
-
-static void
-schedule_out(struct intel_engine_cs *engine, struct i915_request *rq)
-{
- trace_i915_request_out(rq);
-
- i915_request_put(rq);
- intel_gt_pm_put_async(engine->gt);
-}
-
-static void __guc_dequeue(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
- struct i915_request **first = execlists->inflight;
- struct i915_request ** const last_port = first + execlists->port_mask;
- struct i915_request *last = first[0];
- struct i915_request **port;
+ struct i915_sched *se = from_tasklet(se, t, tasklet);
struct i915_priolist *pl;
- bool submit = false;
- lockdep_assert_held(&se->lock);
-
- if (last) {
- if (*++first)
- return;
-
- last = NULL;
- }
-
- /*
- * We write directly into the execlists->inflight queue and don't use
- * the execlists->pending queue, as we don't have a distinct switch
- * event.
- */
- port = first;
for_each_priolist(pl, &se->queue) {
struct i915_request *rq, *rn;
priolist_for_each_request_safe(rq, rn, pl) {
- if (last && rq->context != last->context) {
- if (port == last_port)
- goto done;
-
- *port = schedule_in(engine, last,
- port - execlists->inflight);
- port++;
- }
-
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq, engine);
- submit = true;
- last = rq;
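+			/*
+			 * No ELSP port tracking with GuC submission: submit
+			 * each ready request, flush its GGTT writes and hand
+			 * it straight to the GuC.
+			 */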
+ __i915_request_submit(rq, rq->context->engine);
+ flush_ggtt_writes(rq->context->state);
+ guc_add_request(se->priv, rq);
}
i915_priolist_advance(&se->queue, pl);
}
-done:
- if (submit) {
- *port = schedule_in(engine, last, port - execlists->inflight);
- *++port = NULL;
- guc_submit(engine, first, port);
- }
- execlists->active = execlists->inflight;
-}
-
-static void guc_submission_tasklet(struct tasklet_struct *t)
-{
- struct i915_sched * const se = from_tasklet(se, t, tasklet);
- struct intel_engine_cs *engine = se->priv;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port, *rq;
- unsigned long flags;
-
- spin_lock_irqsave(&se->lock, flags);
-
- for (port = execlists->inflight; (rq = *port); port++) {
- if (!i915_request_completed(rq))
- break;
-
- schedule_out(engine, rq);
- }
- if (port != execlists->inflight) {
- int idx = port - execlists->inflight;
- int rem = ARRAY_SIZE(execlists->inflight) - idx;
- memmove(execlists->inflight, port, rem * sizeof(*port));
- }
-
- __guc_dequeue(engine);
-
- spin_unlock_irqrestore(&se->lock, flags);
}
static void guc_reset_prepare(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0c5adca7994f..ed20fb2181c2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -464,23 +464,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.slice_common_extra[1]);
}
-static void error_print_request(struct drm_i915_error_state_buf *m,
- const char *prefix,
- const struct i915_request_coredump *erq)
-{
- if (!erq->seqno)
- return;
-
- err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
- prefix, erq->pid, erq->context, erq->seqno,
- test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &erq->flags) ? "!" : "",
- test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &erq->flags) ? "+" : "",
- erq->sched_attr.priority,
- erq->head, erq->tail);
-}
-
static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
@@ -513,7 +496,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee)
{
struct i915_vma_coredump *batch;
- int n;
err_printf(m, "%s command stream:\n", ee->engine->name);
err_printf(m, " CCID: 0x%08x\n", ee->ccid);
@@ -570,11 +552,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hung: %u\n", ee->hung);
err_printf(m, " engine reset count: %u\n", ee->reset_count);
- for (n = 0; n < ee->num_ports; n++) {
- err_printf(m, " ELSP[%d]:", n);
- error_print_request(m, " ", &ee->execlist[n]);
- }
-
error_print_context(m, " Active context: ", &ee->context);
}
@@ -1220,42 +1197,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
}
}
-static void record_request(const struct i915_request *request,
- struct i915_request_coredump *erq)
-{
- erq->flags = request->fence.flags;
- erq->context = request->fence.context;
- erq->seqno = request->fence.seqno;
- erq->sched_attr = request->sched.attr;
- erq->head = request->head;
- erq->tail = request->tail;
-
- erq->pid = 0;
- rcu_read_lock();
- if (!intel_context_is_closed(request->context)) {
- const struct i915_gem_context *ctx;
-
- ctx = rcu_dereference(request->context->gem_context);
- if (ctx)
- erq->pid = I915_SELFTEST_ONLY(!ctx->client) ?
- 0 :
- pid_nr(i915_drm_client_pid(ctx->client));
- }
- rcu_read_unlock();
-}
-
-static void engine_record_execlists(struct intel_engine_coredump *ee)
-{
- const struct intel_engine_execlists * const el = &ee->engine->execlists;
- struct i915_request * const *port = el->active;
- unsigned int n = 0;
-
- while (*port)
- record_request(*port++, &ee->execlist[n++]);
-
- ee->num_ports = n;
-}
-
static bool record_context(struct i915_gem_context_coredump *e,
const struct i915_request *rq)
{
@@ -1355,7 +1296,6 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
ee->engine = engine;
engine_record_registers(ee);
- engine_record_execlists(ee);
return ee;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 2d8debabfe28..c31955407d18 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -101,9 +101,6 @@ struct intel_engine_coredump {
struct i915_vma_coredump *vma;
- struct i915_request_coredump execlist[EXECLIST_MAX_PORTS];
- unsigned int num_ports;
-
struct {
u32 gfx_mode;
union {
--
2.20.1