[FOR_CI 06/14] drm/i915/tdr: Identify hung request and drop it

Arun Siluvery arun.siluvery at linux.intel.com
Tue Jul 19 15:21:27 UTC 2016


The currently active request is the one that caused the hang, so it is
retrieved and removed from the ELSP queue; otherwise we cannot submit
further workloads to be processed by the GPU.

A consistency check between HW and driver state is performed to ensure
that we are dropping the correct request. Since this request will never
be executed, we also advance the seqno to mark it as complete.

The head pointer is advanced past the offending batch so that the HW
resumes execution of subsequent workloads.
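
As an aside, a minimal sketch of how a recovery path might use this
(both i915_engine_recover() and i915_engine_reset() below are
hypothetical helpers for illustration only, not part of this patch):

	static void i915_engine_recover(struct intel_engine_cs *engine)
	{
		/* Drop the hung request and advance HW past its batch */
		if (intel_execlists_reset_prepare(engine))
			return;	/* driver/HW mismatch, needs full reset */

		/* Hypothetical helper performing the actual engine reset */
		i915_engine_reset(engine);
	}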

Signed-off-by: Arun Siluvery <arun.siluvery at linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 126 +++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_lrc.h |   3 +
 2 files changed, 129 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2e670f1..d4bdb82 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1026,6 +1026,132 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	i915_gem_context_unreference(ctx);
 }
 
+static void intel_lr_context_resync(struct i915_gem_context *ctx,
+				    struct intel_engine_cs *engine)
+{
+	u32 head;
+	u32 head_addr, tail_addr;
+	u32 *reg_state;
+	struct intel_ringbuffer *ringbuf;
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	ringbuf = ctx->engine[engine->id].ringbuf;
+	reg_state = ctx->engine[engine->id].lrc_reg_state;
+
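+	/*
+	 * Read the current head from HW and the tail from the saved
+	 * context image; the hung batch lies between the two.
+	 */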
+	head = I915_READ_HEAD(engine);
+	head_addr = head & HEAD_ADDR;
+	tail_addr = reg_state[CTX_RING_TAIL+1] & TAIL_ADDR;
+
+	/*
+	 * Force the head to advance to the next QWORD. In most cases the
+	 * engine head pointer will automatically advance to the next
+	 * instruction as soon as it has read the current instruction,
+	 * without waiting for it to complete. This seems to be the default
+	 * behaviour; however, an MBOX wait inserted directly into the
+	 * VCS/BCS ring does not behave in the same way, instead the head
+	 * pointer will still be pointing at the MBOX instruction until it
+	 * completes.
+	 */
+	head_addr = roundup(head_addr, 8);
+
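+	/* Keep the adjusted head within the ring and never past the tail */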
+	if (head_addr > tail_addr)
+		head_addr = tail_addr;
+	else if (head_addr >= ringbuf->size)
+		head_addr = 0;
+
+	head &= ~HEAD_ADDR;
+	head |= (head_addr & HEAD_ADDR);
+
+	/* Restore the adjusted head to both the context image and HW */
+	reg_state[CTX_RING_HEAD+1] = head;
+	I915_WRITE_HEAD(engine, head);
+
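+	/* Keep the driver's ringbuffer bookkeeping in sync with HW */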
+	ringbuf->head = head;
+	ringbuf->last_retired_head = -1;
+	intel_ring_update_space(ringbuf);
+}
+
+/**
+ * intel_execlists_reset_prepare() - identify the hung request and drop it
+ * @engine: Engine that is currently hung
+ *
+ * The head is adjusted to the start of the next request so that the
+ * engine can resume execution after reset.
+ *
+ * Returns:
+ *   0 - on success
+ *   a nonzero error code otherwise
+ */
+int intel_execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *req;
+	int ret = -EINVAL;
+
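+	/* engine->execlist_lock protects the execlist_queue */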
+	spin_lock(&engine->execlist_lock);
+
+	req = list_first_entry_or_null(&engine->execlist_queue,
+				       struct drm_i915_gem_request,
+				       execlist_link);
+
+	/*
+	 * Only acknowledge the request in the execlist queue if it's actually
+	 * been submitted to hardware, otherwise there's the risk of
+	 * inconsistency between the (unsubmitted) request and the idle
+	 * hardware state.
+	 */
+	if (req && req->ctx && req->elsp_submitted) {
+		u32 execlist_status;
+		u32 hw_context;
+		u32 hw_active;
+		struct drm_i915_private *dev_priv = engine->i915;
+
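+		/*
+		 * Cross-check driver state against HW: at least one ELSP
+		 * element must be active and the context id reported in
+		 * EXECLIST_STATUS must match the request being dropped.
+		 */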
+		hw_context = I915_READ(RING_EXECLIST_STATUS_CTX_ID(engine));
+		execlist_status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+		hw_active = ((execlist_status & EXECLIST_STATUS_ELEMENT0_ACTIVE) ||
+			     (execlist_status & EXECLIST_STATUS_ELEMENT1_ACTIVE));
+
+		if (hw_active && hw_context == req->ctx->hw_id) {
+			/*
+			 * GPU is now hung and the request that caused it
+			 * will be dropped so mark it as completed
+			 */
+			intel_write_status_page(engine, I915_GEM_HWS_INDEX, req->seqno);
+
+			intel_lr_context_resync(req->ctx, engine);
+
+			/*
+			 * remove the request from the elsp queue so that
+			 * engine can resume execution after reset when new
+			 * requests are submitted
+			 */
+			if (!--req->elsp_submitted) {
+				list_del(&req->execlist_link);
+				i915_gem_request_unreference(req);
+			}
+			ret = 0;
+		} else {
+			DRM_ERROR("GPU hang reported but no matching active context on HW\n");
+		}
+	} else {
+		WARN(1, "No submitted request found in the ELSP queue\n");
+	}
+
+	spin_unlock(&engine->execlist_lock);
+	return ret;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 938e3ee..54bbb76 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -31,7 +31,10 @@
 /* Execlists regs */
 #define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
 #define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
+#define   EXECLIST_STATUS_ELEMENT0_ACTIVE       (1 << 14)
+#define   EXECLIST_STATUS_ELEMENT1_ACTIVE       (1 << 15)
 #define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
+#define RING_EXECLIST_STATUS_CTX_ID(ring)	RING_EXECLIST_STATUS_HI(ring)
 #define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
-- 
1.9.1
