[Intel-gfx] [PATCH v2 1/4] drm/i915: Initialize hardware semaphore state on ring init

Mika Kuoppala mika.kuoppala at linux.intel.com
Tue Dec 18 18:26:18 CET 2012


The hardware status page needs to have a proper seqno set, as our
initial seqno can be arbitrary. If the initial seqno is close to the
wrap boundary on init and i915_seqno_passed() (31 bit comparison space)
is handed a hw status page that still reads zero, an erroneous result
is returned: requests appear completed before the hardware has
executed them.
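
To make the failure mode concrete, here is a small standalone sketch
(not driver code; the helper mirrors i915_seqno_passed() from
i915_drv.h and the values are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Roughly what i915_seqno_passed() does: compare seqnos via a signed
 * difference so that ordering survives 32 bit wraparound. */
static bool seqno_passed(uint32_t hw_seqno, uint32_t req_seqno)
{
	return (int32_t)(hw_seqno - req_seqno) >= 0;
}

int main(void)
{
	uint32_t req = (uint32_t)~0 - 0x1000;	/* initial seqno near the wrap boundary */
	uint32_t hws = 0;			/* status page never seeded, reads back 0 */

	/* 0 - 0xffffefff == 0x1001 as u32, which is positive as s32, so
	 * the request looks complete even though the hw never ran it. */
	printf("request passed: %d\n", seqno_passed(hws, req));
	return 0;
}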

v2: clear the mboxes and set the hws page directly instead of going
through the rings. Suggested by Chris Wilson.

v3: the hws page needs to be updated for all gens. Noticed by Chris
Wilson.
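
The v2 change follows from the comment being removed in the diff below:
a gen6+ semaphore wait completes once the mailbox value has reached the
wait target, so a stale pre-wrap value satisfies any small post-wrap
target. A standalone sketch of that arithmetic (illustrative values;
the hardware comparison is modelled here as a plain unsigned >=):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stale_mbox = (uint32_t)~0 - 0x1000;	/* pre-wrap seqno left in the mbox */
	uint32_t wait_for   = 0x10;			/* post-wrap semaphore target */

	/* The stale value already exceeds the target, so the wait would
	 * fall through immediately instead of blocking on the other ring. */
	printf("wait satisfied immediately: %d\n", stale_mbox >= wait_for);
	return 0;
}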

References: https://bugs.freedesktop.org/show_bug.cgi?id=58230
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         |    8 +++-----
 drivers/gpu/drm/i915/intel_ringbuffer.c |   24 +++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   10 +++++++++-
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d15c862..396be20 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1954,9 +1954,7 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
 
 	/* Finally reset hw state */
 	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_handle_seqno_wrap(ring);
-		if (ret)
-			return ret;
+		intel_ring_init_seqno(ring, 0);
 
 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
 			ring->sync_seqno[j] = 0;
@@ -3933,6 +3931,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
 	i915_gem_init_swizzling(dev);
 
+	dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
+
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		return ret;
@@ -3949,8 +3949,6 @@ i915_gem_init_hw(struct drm_device *dev)
 			goto cleanup_bsd_ring;
 	}
 
-	dev_priv->next_seqno = (u32)-1 - 0x1000;
-
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 69bbe7b..6274469 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1171,6 +1171,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
+	intel_ring_init_seqno(ring, dev_priv->last_seqno);
+
 	return 0;
 
 err_unmap:
@@ -1418,26 +1420,18 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
 }
 
-int intel_ring_handle_seqno_wrap(struct intel_ring_buffer *ring)
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	int ret;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	BUG_ON(ring->outstanding_lazy_request);
 
-	if (INTEL_INFO(ring->dev)->gen < 6)
-		return 0;
-
-	ret = __intel_ring_begin(ring, 6 * sizeof(uint32_t));
-	if (ret)
-		return ret;
-
-	/* Leaving a stale, pre-wrap seqno behind in the mboxes will result in
-	 * post-wrap semaphore waits completing immediately. Clear them. */
-	update_mboxes(ring, ring->signal_mbox[0]);
-	update_mboxes(ring, ring->signal_mbox[1]);
-	intel_ring_advance(ring);
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+	}
 
-	return 0;
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b4a533e..52ee0e8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -166,6 +166,14 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 	return ring->status_page.page_addr[reg];
 }
 
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+			int reg, u32 value)
+{
+	ring->status_page.page_addr[reg] = value;
+	barrier();
+}
+
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -196,7 +204,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 }
 void intel_ring_advance(struct intel_ring_buffer *ring);
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-int __must_check intel_ring_handle_seqno_wrap(struct intel_ring_buffer *ring);
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
-- 
1.7.9.5