[PATCH 5/5] drm/i915: Keep ring tight to avoid garbage over resume

Chris Wilson <chris@chris-wilson.co.uk>
Sat Apr 6 22:14:12 UTC 2019


Avoid the need to unwind the rings after resume by keeping their
bookkeeping tight during retirement: when retiring the last request on a
ring, advance right up to ring->emit (consuming the trailing workaround
noops) instead of stopping at the stale request->tail. As nothing is
then left between what we have accounted for and what we actually wrote
into the ring, there is no junk for the GPU to execute on resume, and
the gt.resume hook (intel_lr_context_resume /
intel_legacy_submission_resume) that existed solely to reset the rings
can be removed.

A side effect is that the GuC preempt client can no longer use
ring->emit == 0 as its "freshly loaded/resumed" test, so for now it
rewrites its preemption breadcrumb on every injection (marked XXX to be
restricted to load/resume again later).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
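A note for reviewers: the crux of the change is the retirement path in
i915_request.c. A simplified sketch of the affected branch as it reads
after this patch (surrounding locking and list handling elided, names
as in the hunk below):

	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * Retiring the last request on the ring: consume
		 * everything emitted so far, trailing workaround noops
		 * included, so that no stale commands sit between our
		 * bookkeeping and ring->emit across suspend/resume.
		 */
		tail = ring->emit;
		list_del(&ring->active_link);
	} else {
		/* More requests queued; consume up to this one's end. */
		tail = request->postfix;
	}
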
 drivers/gpu/drm/i915/gt/intel_engine.h      |  2 -
 drivers/gpu/drm/i915/gt/intel_lrc.c         | 25 -----------
 drivers/gpu/drm/i915/gt/intel_lrc.h         |  1 -
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c  | 10 -----
 drivers/gpu/drm/i915/i915_drv.h             |  1 -
 drivers/gpu/drm/i915/i915_gem.c             | 14 +-----
 drivers/gpu/drm/i915/i915_request.c         | 10 +----
 drivers/gpu/drm/i915/intel_guc_submission.c | 47 ++++++++++-----------
 8 files changed, 25 insertions(+), 85 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index e58d6f04177b..ab8faf2afe5d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -268,8 +268,6 @@ static inline void intel_ring_put(struct intel_ring *ring)
 void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
 
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-
 int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 
 u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 85bdae1db76c..3f0eb9c823aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2762,31 +2762,6 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
 	return ret;
 }
 
-void intel_lr_context_resume(struct drm_i915_private *i915)
-{
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
-
-	/*
-	 * Because we emit WA_TAIL_DWORDS there may be a disparity
-	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
-	 * that stored in context. As we only write new commands from
-	 * ce->ring->tail onwards, everything before that is junk. If the GPU
-	 * starts reading from its RING_HEAD from the context, it may try to
-	 * execute that junk and die.
-	 *
-	 * So to avoid that we reset the context images upon resume. For
-	 * simplicity, we just zero everything out.
-	 */
-	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		list_for_each_entry(ce, &ctx->active_engines, active_link) {
-			GEM_BUG_ON(!ce->ring);
-			intel_ring_reset(ce->ring, 0);
-			__execlists_update_reg_state(ce, ce->engine);
-		}
-	}
-}
-
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   struct drm_printer *m,
 				   void (*show_request)(struct drm_printer *m,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index b3274e707423..a598f2d56de3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -100,7 +100,6 @@ struct drm_printer;
 
 struct drm_i915_private;
 
-void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
 
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index be83cb8167f3..d3d9111c8642 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1580,16 +1580,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	kfree(engine);
 }
 
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/* Restart from the beginning of the rings for convenience */
-	for_each_engine(engine, dev_priv, id)
-		intel_ring_reset(engine->buffer, 0);
-}
-
 static int load_pd_dir(struct i915_request *rq,
 		       const struct i915_hw_ppgtt *ppgtt)
 {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 68b536500a04..53b3339ba861 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2003,7 +2003,6 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
 		struct i915_gt_timelines {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1b1721152c58..b55d55ca325d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4508,13 +4508,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
-	/*
-	 * As we didn't flush the kernel context before suspend, we cannot
-	 * guarantee that the context image is complete. So let's just reset
-	 * it and start again.
-	 */
-	i915->gt.resume(i915);
-
 	if (i915_gem_init_hw(i915))
 		goto err_wedged;
 
@@ -4853,13 +4846,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		dev_priv->gt.resume = intel_lr_context_resume;
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
-	} else {
-		dev_priv->gt.resume = intel_legacy_submission_resume;
+	else
 		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-	}
 
 	i915_timelines_init(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 339e065da554..ea51f0f1ed37 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -158,15 +158,7 @@ static void advance_ring(struct i915_request *request)
 	 */
 	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
 	if (list_is_last(&request->ring_link, &ring->request_list)) {
-		/*
-		 * We may race here with execlists resubmitting this request
-		 * as we retire it. The resubmission will move the ring->tail
-		 * forwards (to request->wa_tail). We either read the
-		 * current value that was written to hw, or the value that
-		 * is just about to be. Either works, if we miss the last two
-		 * noops - they are safe to be replayed on a reset.
-		 */
-		tail = READ_ONCE(request->tail);
+		tail = ring->emit;
 		list_del(&ring->active_link);
 	} else {
 		tail = request->postfix;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1ed568852395..1bb5ae43d64f 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -569,34 +569,31 @@ static void inject_preempt_context(struct work_struct *work)
 	struct intel_guc_client *client = guc->preempt_client;
 	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
 	struct intel_context *ce = intel_context_lookup(client->owner, engine);
-	u32 data[7];
-
-	if (!ce->ring->emit) { /* recreate upon load/resume */
-		u32 addr = intel_hws_preempt_done_address(engine);
-		u32 *cs;
-
-		cs = ce->ring->vaddr;
-		if (engine->class == RENDER_CLASS) {
-			cs = gen8_emit_ggtt_write_rcs(cs,
-						      GUC_PREEMPT_FINISHED,
-						      addr,
-						      PIPE_CONTROL_CS_STALL);
-		} else {
-			cs = gen8_emit_ggtt_write(cs,
-						  GUC_PREEMPT_FINISHED,
-						  addr,
-						  0);
-			*cs++ = MI_NOOP;
-			*cs++ = MI_NOOP;
-		}
-		*cs++ = MI_USER_INTERRUPT;
+	u32 addr = intel_hws_preempt_done_address(engine);
+	u32 data[7], *cs;
+
+	/* XXX only recreate upon load/resume */
+	cs = ce->ring->vaddr;
+	if (engine->class == RENDER_CLASS) {
+		cs = gen8_emit_ggtt_write_rcs(cs,
+					      GUC_PREEMPT_FINISHED,
+					      addr,
+					      PIPE_CONTROL_CS_STALL);
+	} else {
+		cs = gen8_emit_ggtt_write(cs,
+					  GUC_PREEMPT_FINISHED,
+					  addr,
+					  0);
 		*cs++ = MI_NOOP;
+		*cs++ = MI_NOOP;
+	}
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
 
-		ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
-		GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
+	ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
+	GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
 
-		flush_ggtt_writes(ce->ring->vma);
-	}
+	flush_ggtt_writes(ce->ring->vma);
 
 	spin_lock_irq(&client->wq_lock);
 	guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
-- 
2.20.1


