[Intel-gfx] [PATCH 1/2] drm/i915/execlists: HWS is uncached on !llc platforms

Chris Wilson <chris@chris-wilson.co.uk>
Fri Oct 23 10:43:31 PDT 2015


As the HWS is mapped into the GPU as uncached, writes into that page do
not snoop the CPU cache, so if we want to read coherent values we must
first clear the stale cacheline. Note that we already carried a
workaround for an identical issue on BXT-A, but since we never enable
snooping of the HWS, the issue in fact applies to all non-LLC platforms.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 70 ++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 35 deletions(-)
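
For reference, the access pattern being generalised here is roughly the
following (a minimal sketch, not the driver code: wc_read_seqno() and
wc_write_seqno() are illustrative names, and it is assumed that
intel_flush_status_page() reduces to drm_clflush_virt_range() on the
seqno's cacheline):

	/*
	 * Illustrative sketch only. The GPU writes the seqno through an
	 * uncached mapping, so nothing snoops the CPU cache on its
	 * behalf; the CPU must therefore discard its stale copy of the
	 * cacheline before each coherent read. As the CPU side also
	 * clflushes after every write, the line is known to be clean,
	 * and the flush-before-read is in effect a pure invalidate.
	 */
	static u32 wc_read_seqno(u32 *seqno_slot, bool lazy_coherency)
	{
		if (!lazy_coherency)
			drm_clflush_virt_range(seqno_slot, sizeof(*seqno_slot));

		return READ_ONCE(*seqno_slot);
	}

	static void wc_write_seqno(u32 *seqno_slot, u32 seqno)
	{
		WRITE_ONCE(*seqno_slot, seqno);

		/* Write the value back so the line is clean again. */
		drm_clflush_virt_range(seqno_slot, sizeof(*seqno_slot));
	}

The lazy_coherency flag lets hot paths tolerate a slightly stale value
and skip the clflush; waiters that must observe the final seqno pass
lazy_coherency=false and pay for the flush.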

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3308337bb6c2..7393dd00d26d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1751,41 +1751,36 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 llc_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void llc_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 wc_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
-
 	/*
 	 * On BXT A steppings there is a HW coherency issue whereby the
 	 * MI_STORE_DATA_IMM storing the completed request's seqno
-	 * occasionally doesn't invalidate the CPU cache. Work around this by
-	 * clflushing the corresponding cacheline whenever the caller wants
-	 * the coherency to be guaranteed. Note that this cacheline is known
-	 * to be clean at this point, since we only write it in
-	 * bxt_a_set_seqno(), where we also do a clflush after the write. So
-	 * this clflush in practice becomes an invalidate operation.
+	 * occasionally doesn't invalidate the CPU cache. However, as
+	 * we do not mark the context for snoopable access, we have to flush
+	 * the stale cacheline before seeing new values anyway.
 	 */
-
 	if (!lazy_coherency)
 		intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void wc_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 
-	/* See bxt_a_get_seqno() explaining the reason for the clflush. */
+	/* See wc_get_seqno() for an explanation of the clflush. */
 	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
@@ -1973,12 +1968,12 @@ static int logical_render_ring_init(struct drm_device *dev)
 		ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
+	if (!HAS_LLC(dev_priv)) {
+		ring->get_seqno = wc_get_seqno;
+		ring->set_seqno = wc_set_seqno;
 	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
+		ring->get_seqno = llc_get_seqno;
+		ring->set_seqno = llc_set_seqno;
 	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush_render;
@@ -2025,12 +2020,12 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
+	if (!HAS_LLC(dev_priv)) {
+		ring->get_seqno = wc_get_seqno;
+		ring->set_seqno = wc_set_seqno;
 	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
+		ring->get_seqno = llc_get_seqno;
+		ring->set_seqno = llc_set_seqno;
 	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
@@ -2055,8 +2050,13 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	ring->get_seqno = gen8_get_seqno;
-	ring->set_seqno = gen8_set_seqno;
+	if (!HAS_LLC(dev_priv)) {
+		ring->get_seqno = wc_get_seqno;
+		ring->set_seqno = wc_set_seqno;
+	} else {
+		ring->get_seqno = llc_get_seqno;
+		ring->set_seqno = llc_set_seqno;
+	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
 	ring->irq_get = gen8_logical_ring_get_irq;
@@ -2080,12 +2080,12 @@ static int logical_blt_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
+	if (!HAS_LLC(dev_priv)) {
+		ring->get_seqno = wc_get_seqno;
+		ring->set_seqno = wc_set_seqno;
 	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
+		ring->get_seqno = llc_get_seqno;
+		ring->set_seqno = llc_set_seqno;
 	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
@@ -2110,12 +2110,12 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-		ring->get_seqno = bxt_a_get_seqno;
-		ring->set_seqno = bxt_a_set_seqno;
+	if (!HAS_LLC(dev_priv)) {
+		ring->get_seqno = wc_get_seqno;
+		ring->set_seqno = wc_set_seqno;
 	} else {
-		ring->get_seqno = gen8_get_seqno;
-		ring->set_seqno = gen8_set_seqno;
+		ring->get_seqno = llc_get_seqno;
+		ring->set_seqno = llc_set_seqno;
 	}
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
-- 
2.6.1


