[PATCH 6/7] drm/i915/ringbuffer: Move irq seqno barrier to the GPU for gen5

Chris Wilson chris at chris-wilson.co.uk
Wed Dec 19 11:09:39 UTC 2018


The irq_seqno_barrier is a tradeoff between doing work on every request
(on the GPU) and doing work after every interrupt (on the CPU). We
presume we have many more requests than interrupts! However, for
Ironlake, the workaround is a pretty hideous usleep() and so even though
it was found that we need to repeat the MI_STORE_DWORD_INDEX write 8 times,
doing so is preferable to requiring a usleep!
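For reference, a minimal sketch (illustration only, not kernel code; the
counts simply mirror the diff below) of the dword budget that gives the new
gen5_emit_breadcrumb_sz constant:

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          const int flush = 1;       /* MI_FLUSH */
          const int stores = 8 * 3;  /* 8 x (MI_STORE_DWORD_INDEX, hws offset, seqno) */
          const int irq = 1;         /* MI_USER_INTERRUPT */
          const int sz = flush + stores + irq;

          assert(sz == 3 * 8 + 2);   /* 26 dwords, i.e. gen5_emit_breadcrumb_sz */
          printf("gen5 breadcrumb: %d dwords\n", sz);
          return 0;
  }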

Testcase: igt/gem_sync
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 37 +++++++++++++------------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e3d0199ea10d..bd73641f1ce6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -853,26 +853,26 @@ static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	rq->tail = intel_ring_offset(rq, cs);
 	assert_ring_tail_valid(rq->ring, rq->tail);
 }
-
 static const int i9xx_emit_breadcrumb_sz = 6;
 
-static void
-gen5_seqno_barrier(struct intel_engine_cs *engine)
+static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	/* MI_STORE are internally buffered by the GPU and not flushed
-	 * either by MI_FLUSH or SyncFlush or any other combination of
-	 * MI commands.
-	 *
-	 * "Only the submission of the store operation is guaranteed.
-	 * The write result will be complete (coherent) some time later
-	 * (this is practically a finite period but there is no guaranteed
-	 * latency)."
-	 *
-	 * Empirically, we observe that we need a delay of at least 75us to
-	 * be sure that the seqno write is visible by the CPU.
-	 */
-	usleep_range(125, 250);
+	int i;
+
+	*cs++ = MI_FLUSH;
+
+	for (i = 0; i < 8; i++) {
+		*cs++ = MI_STORE_DWORD_INDEX;
+		*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+		*cs++ = rq->global_seqno;
+	}
+
+	*cs++ = MI_USER_INTERRUPT;
+
+	rq->tail = intel_ring_offset(rq, cs);
+	assert_ring_tail_valid(rq->ring, rq->tail);
 }
+static const int gen5_emit_breadcrumb_sz = 3 * 8 + 2;
 
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
@@ -2105,7 +2105,6 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 	} else if (INTEL_GEN(dev_priv) >= 5) {
 		engine->irq_enable = gen5_irq_enable;
 		engine->irq_disable = gen5_irq_disable;
-		engine->irq_seqno_barrier = gen5_seqno_barrier;
 	} else if (INTEL_GEN(dev_priv) >= 3) {
 		engine->irq_enable = i9xx_irq_enable;
 		engine->irq_disable = i9xx_irq_disable;
@@ -2148,6 +2147,10 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 
 	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
 	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
+	if (IS_GEN(dev_priv, 5)) {
+		engine->emit_breadcrumb = gen5_emit_breadcrumb;
+		engine->emit_breadcrumb_sz = gen5_emit_breadcrumb_sz;
+	}
 
 	engine->set_default_submission = i9xx_set_default_submission;
 
-- 
2.20.0


