[Intel-gfx] [PATCH 4/5] drm/i915/execlists: Cancel banned contexts on schedule-out
Chris Wilson
chris at chris-wilson.co.uk
Tue Aug 6 13:47:24 UTC 2019
On schedule-out (completion) of a banned context, scrub the context
image so that we do not replay the active payload. The intent is that we
skip banned payloads on request submission so that timeline advancement
continues in the background. However, if we are returning to a
preempted request, i915_request_skip() is ineffective and instead we
need to patch up the context image so that it continues from the start
of the next request.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
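For reviewers, a condensed, step-annotated sketch of the scrub sequence
that cancel_active() in the diff below implements. It reuses the
identifiers from the patch and omits the early-completion check and
tracing; treat it as a reading aid, not a drop-in replacement for the
hunk itself.

	/* Sketch of the scrub flow in cancel_active() (reading aid only) */
	struct intel_engine_cs *engine = ce->inflight;
	u32 *regs = ce->lrc_reg_state;

	/* 1. Replace the saved register image with the default context state. */
	if (engine->pinned_default_state)
		memcpy(regs, /* skip restoring the vanilla PPHWSP */
		       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
		       engine->context_size - PAGE_SIZE);
	execlists_init_reg_state(regs, ce, engine, ce->ring);

	/* 2. Point RING_HEAD past the banned payload and force a full restore. */
	ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
	__execlists_update_reg_state(ce, engine);
	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;

	/* 3. Report the request as complete (-EIO) and wake any waiters. */
	rq->fence.error = -EIO;
	*(u32 *)ce->ring->timeline->hwsp_seqno = rq->fence.seqno;
	intel_engine_queue_breadcrumbs(engine);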
drivers/gpu/drm/i915/gt/intel_lrc.c | 39 +++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 59a7e4eb7e2a..3f166e799398 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -222,6 +222,9 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static void
+__execlists_update_reg_state(struct intel_context *ce,
+ struct intel_engine_cs *engine);
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
@@ -575,6 +578,39 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
tasklet_schedule(&ve->base.execlists.tasklet);
}
+static void cancel_active(struct intel_context *ce, struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = ce->inflight;
+ u32 *regs = ce->lrc_reg_state;
+
+ if (i915_request_completed(rq))
+ return;
+
+ GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+ __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+ }
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
+
+ /* Ring will be advanced on retire; here we need to reset the context */
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+
+ /* Let everyone know that the request may now be retired */
+ rq->fence.error = -EIO;
+ *(u32 *)ce->ring->timeline->hwsp_seqno = rq->fence.seqno;
+ GEM_BUG_ON(!i915_request_completed(rq));
+ intel_engine_queue_breadcrumbs(engine);
+}
+
static inline void
execlists_schedule_out(struct i915_request *rq)
{
@@ -589,6 +625,9 @@ execlists_schedule_out(struct i915_request *rq)
intel_engine_context_out(ce->inflight);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ cancel_active(ce, rq);
+
/*
* If this is part of a virtual engine, its next request may
* have been blocked waiting for access to the active context.
--
2.23.0.rc1