[Intel-gfx] [PATCH] drm/i915: Add in-flight request details to intel_engine_dump()

Chris Wilson <chris@chris-wilson.co.uk>
Sun Oct 15 20:43:10 UTC 2017


In the intel_engine_cs dumper, we were showing the request details for
the request queue but not for the requests already passed to the hw
(just a summary of their seqno). If we show those details as well, we
can eliminate the entirely redundant and forgotten
debugfs/i915_gem_request interface.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jeff McGee <jeff.mcgee@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c    | 49 ----------------------------------
 drivers/gpu/drm/i915/intel_engine_cs.c | 35 +++++++++++++-----------
 2 files changed, 19 insertions(+), 65 deletions(-)
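
As a rough illustration (the seqnos, context ids, priorities and
timeline names below are invented, not captured output), the engine
dump will now list the in-flight requests under an "E " prefix next to
the queued "Q " requests, with a '!' appended to the global seqno of
any request the hw has already completed:

		E 4ce! [5:1c] prio=0 @ 30ms: render
		E 4cf [5:1d] prio=0 @ 24ms: render
		Q 4d0 [6:2] prio=-2 @ 12ms: render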

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bb6e01121fc..bb7261266773 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -643,54 +643,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void print_request(struct seq_file *m,
-			  struct drm_i915_gem_request *rq,
-			  const char *prefix)
-{
-	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
-		   rq->priotree.priority,
-		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   rq->timeline->common->name);
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_request *req;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int ret, any;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	any = 0;
-	for_each_engine(engine, dev_priv, id) {
-		int count;
-
-		count = 0;
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			count++;
-		if (count == 0)
-			continue;
-
-		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			print_request(m, req, "    ");
-
-		any++;
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	if (any == 0)
-		seq_puts(m, "No requests\n");
-
-	return 0;
-}
-
 static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_engine_cs *engine)
 {
@@ -4753,7 +4705,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
 	{"i915_gem_stolen", i915_gem_stolen_list_info },
-	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a59b2a30ff5a..52340b56d898 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1622,8 +1622,10 @@ static void print_request(struct drm_printer *m,
 			  struct drm_i915_gem_request *rq,
 			  const char *prefix)
 {
-	drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+		   rq->global_seqno,
+		   i915_gem_request_completed(rq) ? "!" : "",
+		   rq->ctx->hw_id, rq->fence.seqno,
 		   rq->priotree.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   rq->timeline->common->name);
@@ -1631,8 +1633,9 @@ static void print_request(struct drm_printer *m,
 
 void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 {
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	struct i915_gpu_error *error = &engine->i915->gpu_error;
+	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *rq;
 	struct rb_node *rb;
@@ -1696,7 +1699,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 
 	if (i915_modparams.enable_execlists) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-		struct intel_engine_execlists * const execlists = &engine->execlists;
 		u32 ptr, read, write;
 		unsigned int idx;
 
@@ -1744,17 +1746,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			}
 		}
 		rcu_read_unlock();
-
-		spin_lock_irq(&engine->timeline->lock);
-		for (rb = execlists->first; rb; rb = rb_next(rb)) {
-			struct i915_priolist *p =
-				rb_entry(rb, typeof(*p), node);
-
-			list_for_each_entry(rq, &p->requests,
-					    priotree.link)
-				print_request(m, rq, "\t\tQ ");
-		}
-		spin_unlock_irq(&engine->timeline->lock);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   I915_READ(RING_PP_DIR_BASE(engine)));
@@ -1764,6 +1755,18 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 
+	spin_lock_irq(&engine->timeline->lock);
+	list_for_each_entry(rq, &engine->timeline->requests, link)
+		print_request(m, rq, "\t\tE ");
+	for (rb = execlists->first; rb; rb = rb_next(rb)) {
+		struct i915_priolist *p =
+			rb_entry(rb, typeof(*p), node);
+
+		list_for_each_entry(rq, &p->requests, priotree.link)
+			print_request(m, rq, "\t\tQ ");
+	}
+	spin_unlock_irq(&engine->timeline->lock);
+
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-- 
2.15.0.rc0