[Intel-gfx] [RFC 03/13] drm/i915: Additional ring operations for TDR

Lister, Ian ian.lister at intel.com
Mon Dec 16 17:02:41 CET 2013


From 6efa3d541d49006564371377819974708dab58d0 Mon Sep 17 00:00:00 2001
Message-Id: <6efa3d541d49006564371377819974708dab58d0.1387201899.git.ian.lister at intel.com>
In-Reply-To: <cover.1387201899.git.ian.lister at intel.com>
References: <cover.1387201899.git.ian.lister at intel.com>
From: ian-lister <ian.lister at intel.com>
Date: Fri, 6 Dec 2013 16:09:58 +0000
Subject: [RFC 03/13] drm/i915: Additional ring operations for TDR

Added ring enable, disable, save and restore functions in preparation
for individual engine reset. They have been implemented as additional
function pointers in the ring structure so that they can be easily
updated for future generations. The ring context memory is allocated
dynamically in case future generations need to save more register data.

The save/restore functions will be used to save the ring state prior
to engine reset and reinstate it afterwards. The save function can
additionally modify the head/tail pointers to force the ring to
advance to the next QWORD or to restart from the beginning of the ring.

The intel_ring_resample function has been abstracted from init_ring_common
so that it can be used post reset to ensure that the driver state matches
the hardware.

This patch contains an implementation of save/restore for GEN7 only.

Signed-off-by: ian-lister <ian.lister at intel.com>
---
 drivers/gpu/drm/i915/i915_reg.h         |  11 +
 drivers/gpu/drm/i915/intel_ringbuffer.c | 420 +++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  38 +++
 3 files changed, 461 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1f43019..bda7562 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -35,6 +35,7 @@
 
 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 #define _MASKED_BIT_DISABLE(a) ((a) << 16)
+#define _MASKED_BIT_ENABLE_ALL(b) (0xFFFF0000 | (b))
 
 /* PCI config space */
 
@@ -731,6 +732,8 @@
 #define RING_SYNC_0(base)	((base)+0x40)
 #define RING_SYNC_1(base)	((base)+0x44)
 #define RING_SYNC_2(base)	((base)+0x48)
+#define RING_MI_MODE(base)      ((base)+0x9c)
+#define RING_UHPTR(base)        ((base)+0x134)
 #define GEN6_RVSYNC	(RING_SYNC_0(RENDER_RING_BASE))
 #define GEN6_RBSYNC	(RING_SYNC_1(RENDER_RING_BASE))
 #define GEN6_RVESYNC	(RING_SYNC_2(RENDER_RING_BASE))
@@ -878,6 +881,8 @@
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
+# define MODE_STOP                                      (1 << 8)
+# define MODE_IDLE                                      (1 << 9)
 # define MI_FLUSH_ENABLE				(1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE			(1 << 14)
 
@@ -888,6 +893,7 @@
 #define GFX_MODE	0x02520
 #define GFX_MODE_GEN7	0x0229c
 #define RING_MODE_GEN7(ring)	((ring)->mmio_base+0x29c)
+#define RING_EXCC_GEN7(ring)    ((ring)->mmio_base+0x028)
 #define   GFX_RUN_LIST_ENABLE		(1<<15)
 #define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
 #define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
@@ -1004,6 +1010,7 @@
 #define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
 
 #define CACHE_MODE_0	0x02120 /* 915+ only */
+#define GEN7_CACHE_MODE_0       0x07000 /* IVB+ */
 #define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
@@ -1021,6 +1028,10 @@
 #define   ECO_FLIP_DONE		(1<<0)
 
 #define CACHE_MODE_1		0x7004 /* IVB+ */
+/* CACHE_MODE_0 offset is different for pre-IVB and IVB+ systems */
+#define CACHE_MODE_0_OFFSET(d) ((INTEL_INFO(d)->gen >= 7) ? \
+                                        GEN7_CACHE_MODE_0 : CACHE_MODE_0)
+
 #define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 
 #define GEN6_BLITTER_ECOSKPD	0x221d0
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7614bef..42e9ee6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -434,6 +434,23 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
 	I915_WRITE(HWS_PGA, addr);
 }
 
+void intel_ring_resample(struct intel_ring_buffer *ring)
+{
+	/* Re-initialise driver ring state */
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
+	else {
+		ring->head = I915_READ_HEAD(ring);
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->space = ring_space(ring);
+		ring->last_retired_head = -1;
+	}
+}
+
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -449,6 +466,15 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	else
 		ring_setup_phys_status_page(ring);
 
+	if (!ring->ctx && ring->ctx_size) {
+		/* Allocate the context for per-ring save/restore */
+		ring->ctx = kmalloc(ring->ctx_size, GFP_KERNEL);
+		if (ring->ctx == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
@@ -503,14 +529,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		goto out;
 	}
 
-	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
-		i915_kernel_lost_context(ring->dev);
-	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
-	}
+	intel_ring_resample(ring);
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
@@ -1429,6 +1448,11 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 		ring->cleanup(ring);
 
 	cleanup_status_page(ring);
+
+	if (ring->ctx) {
+		kfree(ring->ctx);
+		ring->ctx = NULL;
+	}
 }
 
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
@@ -1801,6 +1825,364 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+int intel_ring_save(struct intel_ring_buffer *ring, u32 flags)
+{
+	if (ring && ring->save && ring->ctx)
+		return ring->save(ring, ring->ctx,
+			ring->ctx_size, flags);
+	else {
+		DRM_ERROR("ring save not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+gen7_ring_save(struct intel_ring_buffer *ring, uint32_t *data, uint32_t max,
+                u32 flags)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t idx = 0;
+	uint32_t gen = INTEL_INFO(dev)->gen;
+	uint32_t head;
+	uint32_t tail;
+	uint32_t head_addr;
+	uint32_t tail_addr;
+	int clamp_to_tail = 0;
+
+	/* Ring save only added for gen >= 7 */
+	WARN_ON(gen < 7);
+
+	/* Save common registers */
+	if (max < GEN7_COMMON_CTX_SIZE)
+		return -EINVAL;
+
+	head = I915_READ_HEAD(ring);
+	tail = I915_READ_TAIL(ring);
+
+	head_addr = head & HEAD_ADDR;
+	tail_addr = tail & TAIL_ADDR;
+
+	if (flags & FORCE_ADVANCE) {
+		/* The head must always chase the tail.
+		 * If the tail is beyond the head then do not allow
+		 * the head to overtake it. If the tail is less than
+		 * the head then the tail has already wrapped and
+		 * there is no problem in advancing the head or even
+		 * wrapping the head back to 0 as worst case it will
+		 * become equal to tail */
+		if (head_addr <= tail_addr)
+			clamp_to_tail = 1;
+
+		/* Force head to next QWORD boundary */
+		head_addr &= ~0x7;
+		head_addr += 8;
+
+		if (clamp_to_tail && (head_addr > tail_addr)) {
+			head_addr = tail_addr;
+		} else if (head_addr >= ring->size) {
+		/* Wrap head back to start if it exceeds ring size */
+			head_addr = 0;
+		}
+
+		head &= ~HEAD_ADDR;
+		head |= (head_addr & HEAD_ADDR);
+
+		DRM_DEBUG_TDR("Forced head to 0x%08x\n", head);
+	} else if (head & 0x7) {
+		/* Ensure head pointer is pointing to a QWORD boundary */
+		DRM_DEBUG_TDR("Rounding up head 0x%08x\n", head);
+		head += 0x7;
+		head &= ~0x7;
+	}
+
+	/* Saved with enable = 0 */
+	data[idx++] = I915_READ_CTL(ring) & (RING_NR_PAGES | RING_REPORT_MASK);
+
+	data[idx++] = (flags & RESET_HEAD_TAIL) ? 0 : tail;
+
+	if (flags & RESET_HEAD_TAIL) {
+		/* Save head as 0 so head is reset on restore */
+		data[idx++] = 0;
+	} else {
+		/* Head will already have advanced to next instruction location
+		 * even if the current instruction caused a hang, so we just
+		 * save the current value as the value to restart at
+		 */
+		data[idx++] = head;
+	}
+
+	data[idx++] = I915_READ_START(ring);
+
+	/* Workaround for reading DCLV registers for gen < 8 */
+	data[idx++] = (gen < 8) ?
+		I915_READ(RING_PP_DIR_DCLV(&dev_priv->ring[VCS]))
+		: I915_READ(RING_PP_DIR_DCLV(ring));
+
+	data[idx++] = (gen < 8) ?
+			I915_READ(RING_PP_DIR_BASE(&dev_priv->ring[VCS]))
+			: I915_READ(RING_PP_DIR_BASE(ring));
+
+	switch (ring->id) {
+	case RCS:
+		if (max < GEN7_RCS_CTX_SIZE)
+			return -EINVAL;
+
+		data[idx++] = I915_READ(RENDER_HWS_PGA_GEN7);
+		data[idx++] = I915_READ(RING_UHPTR(ring->mmio_base));
+		data[idx++] = I915_READ(RING_INSTPM(ring->mmio_base));
+		data[idx++] = I915_READ(RING_IMR(ring->mmio_base));
+		data[idx++] = I915_READ(CACHE_MODE_0_OFFSET(dev));
+		data[idx++] = I915_READ(CACHE_MODE_1);
+		data[idx++] = I915_READ(RING_MI_MODE(ring->mmio_base));
+		data[idx++] = I915_READ(_3D_CHICKEN2);
+		data[idx++] = I915_READ(_3D_CHICKEN3);
+		data[idx++] = I915_READ(GAM_ECOCHK);
+		data[idx++] = I915_READ(GFX_MODE_GEN7);
+		data[idx++] = I915_READ(GEN6_RBSYNC);
+		data[idx++] = I915_READ(GEN7_FF_THREAD_MODE);
+		data[idx++] = I915_READ(RING_MAX_IDLE(ring->mmio_base));
+		break;
+
+	case VCS:
+		if (max < GEN7_BSD_CTX_SIZE)
+			return -EINVAL;
+
+		data[idx++] = I915_READ(BSD_HWS_PGA_GEN7);
+		data[idx++] = I915_READ(RING_MI_MODE(ring->mmio_base));
+		data[idx++] = I915_READ(RING_IMR(ring->mmio_base));
+		data[idx++] = I915_READ(RING_UHPTR(ring->mmio_base));
+		data[idx++] = I915_READ(RING_INSTPM(ring->mmio_base));
+		data[idx++] = I915_READ(RING_EXCC_GEN7(ring));
+		data[idx++] = I915_READ(GAC_ECO_BITS);
+		data[idx++] = I915_READ(RING_MODE_GEN7(ring));
+		data[idx++] = I915_READ(GEN6_VRSYNC);
+		data[idx++] = I915_READ(RING_MAX_IDLE(ring->mmio_base));
+		break;
+
+	case BCS:
+		if (max < GEN7_BLT_CTX_SIZE)
+			return -EINVAL;
+
+		data[idx++] = I915_READ(BLT_HWS_PGA_GEN7);
+		data[idx++] = I915_READ(RING_MI_MODE(ring->mmio_base));
+		data[idx++] = I915_READ(RING_IMR(ring->mmio_base));
+		data[idx++] = I915_READ(RING_UHPTR(ring->mmio_base));
+		data[idx++] = I915_READ(RING_INSTPM(ring->mmio_base));
+		data[idx++] = I915_READ(RING_EXCC_GEN7(ring));
+		data[idx++] = I915_READ(GAB_CTL);
+		data[idx++] = I915_READ(RING_MODE_GEN7(ring));
+		data[idx++] = I915_READ(GEN6_BRSYNC);
+		data[idx++] = I915_READ(GEN6_BVSYNC);
+		data[idx++] = I915_READ(RING_MAX_IDLE(ring->mmio_base));
+		break;
+
+	default:
+		DRM_ERROR("Invalid ring ID %d\n", ring->id);
+		break;
+	}
+
+	return 0;
+}
+
+
+int intel_ring_restore(struct intel_ring_buffer *ring)
+{
+	if (ring && ring->restore && ring->ctx)
+		return ring->restore(ring, ring->ctx,
+			ring->ctx_size);
+	else {
+		DRM_ERROR("ring restore not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+gen7_ring_restore(struct intel_ring_buffer *ring, uint32_t *data,
+		uint32_t max)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t idx = 0;
+	uint32_t x;
+
+	/* NOTE: Registers are restored in reverse order from when
+	 *        they were saved.
+	 */
+	switch (ring->id) {
+	case RCS:
+		if (max < GEN7_RCS_CTX_SIZE)
+			return -EINVAL;
+
+		idx = GEN7_RCS_CTX_SIZE - 1;
+
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), data[idx--]);
+		I915_WRITE(GEN7_FF_THREAD_MODE, data[idx--]);
+		I915_WRITE(GEN6_RBSYNC, data[idx--]);
+		I915_WRITE(RING_MODE_GEN7(ring),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(GAM_ECOCHK, data[idx--]);
+		I915_WRITE(_3D_CHICKEN3,
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(_3D_CHICKEN2,
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_MI_MODE(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(CACHE_MODE_1,
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(CACHE_MODE_0_OFFSET(dev),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_IMR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RING_INSTPM(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_UHPTR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RENDER_HWS_PGA_GEN7, data[idx--]);
+		break;
+
+	case VCS:
+		if (max < GEN7_BSD_CTX_SIZE)
+			return -EINVAL;
+
+		idx = GEN7_BSD_CTX_SIZE - 1;
+
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), data[idx--]);
+		I915_WRITE(GEN6_VRSYNC, data[idx--]);
+		I915_WRITE(RING_MODE_GEN7(ring),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(GAC_ECO_BITS, data[idx--]);
+		I915_WRITE(RING_EXCC_GEN7(ring),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_INSTPM(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_UHPTR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RING_IMR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RING_MI_MODE(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(BSD_HWS_PGA_GEN7, data[idx--]);
+		break;
+
+	case BCS:
+		if (max < GEN7_BLT_CTX_SIZE)
+			return -EINVAL;
+
+		idx = GEN7_BLT_CTX_SIZE - 1;
+
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), data[idx--]);
+		I915_WRITE(GEN6_BVSYNC, data[idx--]);
+		I915_WRITE(GEN6_BRSYNC, data[idx--]);
+		I915_WRITE(RING_MODE_GEN7(ring),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(GAB_CTL, data[idx--]);
+		I915_WRITE(RING_EXCC_GEN7(ring),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_INSTPM(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(RING_UHPTR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RING_IMR(ring->mmio_base), data[idx--]);
+		I915_WRITE(RING_MI_MODE(ring->mmio_base),
+			_MASKED_BIT_ENABLE_ALL(data[idx--]));
+		I915_WRITE(BLT_HWS_PGA_GEN7, data[idx--]);
+		break;
+
+	default:
+		DRM_ERROR("Invalid ring ID %d\n", ring->id);
+		break;
+	}
+
+	/* Restore common registers */
+	if (max < GEN7_COMMON_CTX_SIZE)
+		return -EINVAL;
+
+	idx = GEN7_COMMON_CTX_SIZE - 1;
+
+	I915_WRITE(RING_PP_DIR_BASE(ring), data[idx--]);
+	I915_WRITE(RING_PP_DIR_DCLV(ring), data[idx--]);
+
+	/* Write ring base address before head/tail as it clears head to 0 */
+	I915_WRITE_START(ring, data[idx--]);
+	x = I915_READ_START(ring);
+	I915_WRITE_HEAD(ring, data[idx--]);
+	I915_WRITE_TAIL(ring, data[idx--]);
+	I915_WRITE_CTL(ring, data[idx--]);
+
+	return 0;
+}
+
+int intel_ring_disable(struct intel_ring_buffer *ring)
+{
+	if (ring && ring->disable)
+		return ring->disable(ring);
+	else {
+		DRM_ERROR("ring disable not supported\n");
+		return -EINVAL;
+	}
+}
+
+static int
+gen6_ring_disable(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t ring_ctl;
+
+	if (!(I915_READ_MODE(ring) & MODE_STOP)) {
+		/* Request the ring to stop */
+		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(MODE_STOP));
+
+		/* Wait up to 0.5 seconds for it to go idle */
+		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE), 500)
+		== -ETIMEDOUT) {
+			DRM_ERROR("timed out trying to disable %s\n",
+				ring->name);
+			/* Continue anyway as it may not go idle if hung... */
+		}
+	}
+
+	/* Disable the ring */
+	ring_ctl = I915_READ_CTL(ring);
+	ring_ctl &= (RING_NR_PAGES | RING_REPORT_MASK);
+	I915_WRITE_CTL(ring, ring_ctl);
+	ring_ctl = I915_READ_CTL(ring);  /* Barrier read */
+
+	return ((ring_ctl & RING_VALID) == 0) ? 0 : -EIO;
+}
+
+int intel_ring_enable(struct intel_ring_buffer *ring)
+{
+	if (ring && ring->enable)
+		return ring->enable(ring);
+	else {
+		DRM_ERROR("ring enable not supported\n");
+		return -EINVAL;
+	}
+}
+
+static int
+gen6_ring_enable(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t ring_ctl;
+	uint32_t mode;
+
+	/* Clear the MI_MODE stop bit */
+	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(MODE_STOP));
+	mode = I915_READ_MODE(ring);    /* Barrier read */
+
+	/* Enable the ring */
+	ring_ctl = I915_READ_CTL(ring);
+	ring_ctl &= (RING_NR_PAGES | RING_REPORT_MASK);
+	I915_WRITE_CTL(ring, ring_ctl | RING_VALID);
+	ring_ctl = I915_READ_CTL(ring);
+
+	return ((ring_ctl & RING_VALID) == 0) ? -EIO : 0;
+}
+
+
 /* Blitter support (SandyBridge+) */
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1865,10 +2247,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
 		}
+		if (INTEL_INFO(dev)->gen == 7) {
+			ring->ctx_size = GEN7_RCS_CTX_SIZE;
+			ring->save = gen7_ring_save;
+			ring->restore = gen7_ring_restore;
+		}
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
+		ring->enable = gen6_ring_enable;
+		ring->disable = gen6_ring_disable;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
 		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
@@ -2044,7 +2433,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 		}
+		if (INTEL_INFO(dev)->gen == 7) {
+			ring->ctx_size = GEN7_BSD_CTX_SIZE;
+			ring->save = gen7_ring_save;
+			ring->restore = gen7_ring_restore;
+		}
+
 		ring->sync_to = gen6_ring_sync;
+		ring->enable = gen6_ring_enable;
+		ring->disable = gen6_ring_disable;
 		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
 		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
@@ -2101,7 +2498,14 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	}
+	if (INTEL_INFO(dev)->gen == 7) {
+		ring->ctx_size = GEN7_BLT_CTX_SIZE;
+		ring->save = gen7_ring_save;
+		ring->restore = gen7_ring_restore;
+	}
 	ring->sync_to = gen6_ring_sync;
+	ring->enable = gen6_ring_enable;
+	ring->disable = gen6_ring_disable;
 	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
 	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
 	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f4744bf..c15b97b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -18,6 +18,15 @@ struct  intel_hw_status_page {
 	struct		drm_i915_gem_object *obj;
 };
 
+
+/* These values must match the requirements of the ring save/restore functions
+ * which will vary for each chip */
+
+#define GEN7_COMMON_CTX_SIZE 6
+#define GEN7_RCS_CTX_SIZE (GEN7_COMMON_CTX_SIZE + 14)
+#define GEN7_BSD_CTX_SIZE (GEN7_COMMON_CTX_SIZE + 16)
+#define GEN7_BLT_CTX_SIZE (GEN7_COMMON_CTX_SIZE + 17)
+
 #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
 #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
 
@@ -33,6 +42,14 @@ struct  intel_hw_status_page {
 #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
+#define I915_READ_MODE(ring) \
+        I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) \
+        I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+
+#define RESET_HEAD_TAIL   0x1
+#define FORCE_ADVANCE     0x2
+
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
@@ -115,6 +132,13 @@ struct  intel_ring_buffer {
 	int		(*sync_to)(struct intel_ring_buffer *ring,
 				   struct intel_ring_buffer *to,
 				   u32 seqno);
+	int             (*enable)(struct intel_ring_buffer *ring);
+	int             (*disable)(struct intel_ring_buffer *ring);
+	int             (*save)(struct intel_ring_buffer *ring,
+				uint32_t *data, uint32_t max,
+				u32 flags);
+	int             (*restore)(struct intel_ring_buffer *ring,
+				uint32_t *data, uint32_t max);
 
 	/* our mbox written by others */
 	u32		semaphore_register[I915_NUM_RINGS];
@@ -163,6 +187,12 @@ struct  intel_ring_buffer {
 		u32 gtt_offset;
 		volatile u32 *cpu_page;
 	} scratch;
+
+	/**
+	 * Ring context to store ring related registers during reset
+	 */
+	u32 *ctx;
+	u32 ctx_size;
 };
 
 static inline bool
@@ -279,4 +309,12 @@ static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
 /* DRI warts */
 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
 
+void intel_ring_resample(struct intel_ring_buffer *ring);
+int intel_ring_disable(struct intel_ring_buffer *ring);
+int intel_ring_enable(struct intel_ring_buffer *ring);
+int intel_ring_save(struct intel_ring_buffer *ring,
+		u32 flags);
+int intel_ring_restore(struct intel_ring_buffer *ring);
+
+
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
1.8.5.1




More information about the Intel-gfx mailing list