[Intel-gfx] [PATCH 47/49] drm/i915/bdw: Always write seqno to default context

oscar.mateo at intel.com
Thu Mar 27 19:00:16 CET 2014


From: Oscar Mateo <oscar.mateo at intel.com>

Even though we have one Hardware Status Page per context, seqnos are
still managed per engine. Therefore, the sequence number must be
written to a place that is consistent for all contexts on a given
engine: the Hardware Status Page of that engine's global default
context.
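
Concretely, both gen8 add_request paths below aim their post-sync /
store-dword write at the usual HWS seqno slot inside the default
context's backing object, i.e. an address along the lines of:

	addr = i915_gem_obj_ggtt_offset(ring->default_context->obj) +
	       (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);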

Signed-off-by: Thomas Daniel <thomas.daniel at intel.com>

v2: Since get_seqno and set_seqno now look for the seqno in the engine's
status page, they don't need to be changed.
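
A minimal sketch of what that (unchanged) readback looks like, assuming
the engine's status page is backed by this same default context page
("get_seqno_sketch" is only an illustrative name, not a function in the
driver):

	static u32 get_seqno_sketch(struct intel_engine *ring,
				    bool lazy_coherency)
	{
		/* Reads back the HWS slot the gen8 add_request paths write */
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	}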

Signed-off-by: Oscar Mateo <oscar.mateo at intel.com>
---
 drivers/gpu/drm/i915/i915_reg.h         |  1 +
 drivers/gpu/drm/i915/intel_ringbuffer.c | 68 +++++++++++++++++++++++++++++++--
 2 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b36da4f..002b513 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -261,6 +261,7 @@
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN8	MI_INSTR(0x20, 2)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9a6775d..824c0859 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -738,6 +738,62 @@ gen6_add_request(struct intel_engine *ring,
 }
 
 static int
+gen8_nonrender_add_request_lrc(struct intel_engine *ring,
+			       struct i915_hw_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf;
+	u32 cmd;
+
+	ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+	if (IS_ERR_OR_NULL(ringbuf))
+		return PTR_ERR(ringbuf);
+
+	cmd = MI_FLUSH_DW + 1; /* +1 dword for the gen8 upper address bits */
+	cmd |= MI_INVALIDATE_TLB;
+	cmd |= MI_FLUSH_DW_OP_STOREDW;
+
+	intel_ringbuffer_emit(ringbuf, cmd);
+	intel_ringbuffer_emit(ringbuf,
+			((i915_gem_obj_ggtt_offset(ring->default_context->obj)) +
+			(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)) |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ringbuffer_emit(ringbuf, 0); /* upper addr */
+	intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_ringbuffer_emit(ringbuf, MI_NOOP);
+	intel_ringbuffer_advance_and_submit(ring, ctx);
+
+	return 0;
+}
+
+static int
+gen8_add_request_lrc(struct intel_engine *ring,
+		     struct i915_hw_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf;
+	u32 cmd;
+
+	ringbuf = intel_ringbuffer_begin(ring, ctx, 6);
+	if (IS_ERR_OR_NULL(ringbuf))
+		return PTR_ERR(ringbuf);
+
+	cmd = MI_STORE_DWORD_IMM_GEN8;
+	cmd |= (1 << 22); /* use global GTT */
+
+	intel_ringbuffer_emit(ringbuf, cmd);
+	intel_ringbuffer_emit(ringbuf,
+			((i915_gem_obj_ggtt_offset(ring->default_context->obj)) +
+			(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+	intel_ringbuffer_emit(ringbuf, 0); /* upper addr */
+	intel_ringbuffer_emit(ringbuf, ring->outstanding_lazy_seqno);
+	intel_ringbuffer_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_ringbuffer_emit(ringbuf, MI_NOOP);
+	intel_ringbuffer_advance_and_submit(ring, ctx);
+
+	return 0;
+}
+
+static int
 gen8_add_request(struct intel_engine *ring,
 		 struct i915_hw_context *ctx)
 {
@@ -2027,13 +2083,14 @@ int intel_init_render_ring(struct drm_device *dev)
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
 		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->add_request = gen8_add_request;
 			if (dev_priv->lrc_enabled) {
 				ring->write_tail = gen8_write_tail_lrc;
 				ring->init = init_render_ring_lrc;
 				ring->irq_keep_mask =
 				GEN8_GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+				ring->add_request = gen8_add_request_lrc;
 			}
-			ring->add_request = gen8_add_request;
 			ring->flush = gen8_render_ring_flush;
 			ring->irq_get = gen8_ring_get_irq;
 			ring->irq_put = gen8_ring_put_irq;
@@ -2202,14 +2259,15 @@ int intel_init_bsd_ring(struct drm_device *dev)
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->add_request = gen8_add_request;
 			if (dev_priv->lrc_enabled) {
 				ring->write_tail = gen8_write_tail_lrc;
 				ring->init = init_ring_common_lrc;
 				ring->irq_keep_mask =
 				GEN8_GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+				ring->add_request = gen8_nonrender_add_request_lrc;
 			}
 			ring->flush = gen8_ring_flush;
-			ring->add_request = gen8_add_request;
 			ring->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 			ring->irq_get = gen8_ring_get_irq;
@@ -2264,14 +2322,15 @@ int intel_init_blt_ring(struct drm_device *dev)
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->add_request = gen8_add_request;
 		if (dev_priv->lrc_enabled) {
 			ring->write_tail = gen8_write_tail_lrc;
 			ring->init = init_ring_common_lrc;
 			ring->irq_keep_mask =
 			GEN8_GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+			ring->add_request = gen8_nonrender_add_request_lrc;
 		}
 		ring->flush = gen8_ring_flush;
-		ring->add_request = gen8_add_request;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
@@ -2308,14 +2367,15 @@ int intel_init_vebox_ring(struct drm_device *dev)
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
 	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->add_request = gen8_add_request;
 		if (dev_priv->lrc_enabled) {
 			ring->write_tail = gen8_write_tail_lrc;
 			ring->init = init_ring_common_lrc;
 			ring->irq_keep_mask =
 			GEN8_GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+			ring->add_request = gen8_nonrender_add_request_lrc;
 		}
 		ring->flush = gen8_ring_flush;
-		ring->add_request = gen8_add_request;
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 		ring->irq_get = gen8_ring_get_irq;
-- 
1.9.0