[Intel-gfx] [PATCH] drm/i915: Force ordering on request submission and hangcheck

Mika Kuoppala mika.kuoppala at linux.intel.com
Wed Jan 13 09:04:53 PST 2016


Hangcheck runs in irq context and might be active on a
completely different CPU from the one submitting requests. And as
we have deliberately kept hangcheck free of locking, so that it can
still guard against driver failures, we need to be careful about coherency.

Update the ring's last submitted seqno and add the request to the
request list early, with a write memory barrier, before pushing the
request to the ring. On the hangcheck side, use a read memory barrier
before inspecting the ring's last submitted seqno. This ensures that
when hangcheck inspects the state, it will not see an active ring
without requests on it and then falsely report a missed irq situation.
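
For reference, the intended pairing follows the usual publish/observe
barrier pattern: the submitter publishes its bookkeeping before making
the ring look busy, and hangcheck orders its reads the other way round.
The sketch below is only an illustration, not driver code; it uses C11
fences in place of the kernel's wmb()/rmb(), and names such as
ring_busy and last_submitted_seqno are stand-ins:

	/* Standalone illustration of the wmb()/rmb() pairing. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_uint last_submitted_seqno; /* request bookkeeping  */
	static atomic_bool ring_busy;            /* "ring has work" flag */

	static void submit_request(unsigned int seqno)
	{
		/* Publish the bookkeeping first... */
		atomic_store_explicit(&last_submitted_seqno, seqno,
				      memory_order_relaxed);
		atomic_thread_fence(memory_order_release);   /* ~ wmb() */
		/* ...and only then make the ring look active. */
		atomic_store_explicit(&ring_busy, true, memory_order_relaxed);
	}

	static bool ring_idle(unsigned int hw_seqno)
	{
		if (!atomic_load_explicit(&ring_busy, memory_order_relaxed))
			return true;
		atomic_thread_fence(memory_order_acquire);   /* ~ rmb() */
		/* With the fences, a busy ring implies the seqno (and, in
		 * the real driver, the request list) is already visible.
		 * Plain >= ignores seqno wraparound; good enough here. */
		return hw_seqno >= atomic_load_explicit(&last_submitted_seqno,
							memory_order_relaxed);
	}

	int main(void)
	{
		submit_request(42);
		printf("idle at hw_seqno=41: %d\n", ring_idle(41)); /* 0 */
		printf("idle at hw_seqno=42: %d\n", ring_idle(42)); /* 1 */
		return 0;
	}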

References: https://bugs.freedesktop.org/show_bug.cgi?id=93693
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 14 +++++++++++---
 drivers/gpu/drm/i915/i915_irq.c |  7 ++++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4b388d..0ca3856ffbf1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2584,6 +2584,17 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = intel_ring_get_tail(ringbuf);
 
+	/* Hangcheck runs in irq context and might even run
+	 * on a different CPU. So we need to do the last seqno
+	 * update and list addition early, with a memory barrier.
+	 * Otherwise hangcheck might see an active ring without any
+	 * requests and falsely consider it a missed irq situation.
+	 */
+	request->previous_seqno = ring->last_submitted_seqno;
+	ring->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &ring->request_list);
+	wmb(); /* Flush last_submitted_seqno and request->list for hangcheck */
+
 	if (i915.enable_execlists)
 		ret = ring->emit_request(request);
 	else {
@@ -2605,9 +2616,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;
 
 	request->emitted_jiffies = jiffies;
-	request->previous_seqno = ring->last_submitted_seqno;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
 
 	trace_i915_gem_request_add(request);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 25a89373df63..4dd8e6f0c1de 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2801,8 +2801,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 static bool
 ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
-	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+	rmb(); /* Ordering against __i915_add_request() */
+	return (i915_seqno_passed(seqno, ring->last_submitted_seqno)
+		|| list_empty(&ring->request_list));
 }
 
 static bool
@@ -3101,8 +3102,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		seqno = ring->get_seqno(ring, false);
 		acthd = intel_ring_get_active_head(ring);
+		seqno = ring->get_seqno(ring, false);
 
 		if (ring->hangcheck.seqno == seqno) {
 			if (ring_idle(ring, seqno)) {
-- 
2.5.0


