[Intel-gfx] [PATCH] drm/i915/execlists: Consistent seqno reporting in GEM_TRACE

Tvrtko Ursulin tursulin at ursulin.net
Wed Mar 28 17:39:59 UTC 2018


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Some messages print the seqno with %d and some with %x, which creates
confusion when reading the traces.

I also added:

 1. Fence context/seqno to the ELSP traces, so it is easier to correlate
    events.

 2. New GEM_TRACE logging to port and request cancellation sites.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
Crystal ball says I'll be removing everything other than the seqno format
consolidation in v2. :)
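
For reviewers who want to see the effect on the trace output, here is a small
standalone sketch (not part of the patch; the engine name and values are made
up) contrasting the old mixed %x/%d reporting with the consolidated decimal
form plus fence context:seqno used in the hunks below:

  /* Standalone illustration of the trace format change; values are hypothetical. */
  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint32_t global_seqno = 42;
          uint64_t fence_ctx = 0x1e;
          uint32_t fence_seqno = 2;

          /* Before: some traces printed the seqno in hex, others in decimal. */
          printf("rcs0 in[0]:  ctx=1.1, seqno=%x, prio=0\n", global_seqno);

          /* After: decimal everywhere, plus fence ctx:seqno so submission,
           * completion and cancellation events can be matched up.
           */
          printf("rcs0 in[0]:  ctx=1.1, seqno=%d (fence %llx:%d), prio=0\n",
                 global_seqno, (unsigned long long)fence_ctx, fence_seqno);

          return 0;
  }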
---
 drivers/gpu/drm/i915/intel_lrc.c | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fe520c4dd999..c5e8526a2025 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -454,10 +454,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%x, prio=%d\n",
+			GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%d (fence %llx:%d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
+				  rq->fence.context,
+				  rq->fence.seqno,
 				  rq_prio(rq));
 		} else {
 			GEM_BUG_ON(!n);
@@ -727,6 +729,10 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
+		GEM_TRACE("%s:port%lu cancel %llx:%d [global %d]\n",
+			  rq->engine->name, port - execlists->port,
+			  rq->fence.context, rq->fence.seqno, rq->global_seqno);
+
 		GEM_BUG_ON(!execlists->active);
 		intel_engine_context_out(rq->engine);
 
@@ -802,7 +808,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s\n", engine->name);
+	GEM_TRACE("%s, hws global %d\n",
+		  engine->name, intel_engine_get_seqno(engine));
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -829,8 +836,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
-		if (!i915_request_completed(rq))
+		if (!i915_request_completed(rq)) {
+			GEM_TRACE("%s eio %llx:%d [global %d]\n",
+				  rq->engine->name, rq->fence.context,
+				  rq->fence.seqno, rq->global_seqno);
 			dma_fence_set_error(&rq->fence, -EIO);
+		}
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -839,6 +850,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		struct i915_priolist *p = to_priolist(rb);
 
 		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+			GEM_TRACE("%s submit-eio %llx:%d [global %d]\n",
+				  rq->engine->name, rq->fence.context,
+				  rq->fence.seqno, rq->global_seqno);
+
 			INIT_LIST_HEAD(&rq->priotree.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
@@ -999,10 +1014,12 @@ static void execlists_submission_tasklet(unsigned long data)
 							EXECLISTS_ACTIVE_USER));
 
 			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
+			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (fence %llx:%d), prio=%d\n",
 				  engine->name,
 				  port->context_id, count,
 				  rq ? rq->global_seqno : 0,
+				  rq ? rq->fence.context : 0,
+				  rq ? rq->fence.seqno : 0,
 				  rq ? rq_prio(rq) : 0);
 
 			/* Check the context/desc id for this event matches */
@@ -1706,7 +1723,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	struct intel_context *ce;
 	unsigned long flags;
 
-	GEM_TRACE("%s seqno=%x\n",
+	GEM_TRACE("%s seqno=%d\n",
 		  engine->name, request ? request->global_seqno : 0);
 
 	/* See execlists_cancel_requests() for the irq/spinlock split. */
-- 
2.14.1


