[igt-dev] [PATCH i-g-t 1/2] intel-gpu-overlay: Add engine queue stats

Tvrtko Ursulin tursulin at ursulin.net
Mon Feb 12 19:01:57 UTC 2018


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Use the new PMU engine queue stats (queued, runnable and running) and
display them per engine.
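
Each per-interval counter delta is converted into an average queue
depth by dividing by the sample divisor (1024) and scaling by the
elapsed time, see the gpu_top_update() change below. A minimal sketch
of that arithmetic, assuming d_time is in nanoseconds (the helper name
is illustrative only, not part of the patch):

    #include <stdint.h>

    /* Illustrative sketch: average queue depth over one sampling interval. */
    double sample_to_depth(uint64_t delta, uint64_t d_time)
    {
            /* Undo the fixed 1024x scaling and average over the elapsed ns. */
            return (double)delta / 1024 * 1e9 / (double)d_time;
    }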

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 lib/igt_perf.h    | 18 +++++++++++++++++-
 overlay/gpu-top.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 overlay/gpu-top.h | 11 +++++++++++
 overlay/overlay.c |  9 +++++++++
 4 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/lib/igt_perf.h b/lib/igt_perf.h
index 7b66fc582b88..68e208941ca9 100644
--- a/lib/igt_perf.h
+++ b/lib/igt_perf.h
@@ -35,9 +35,16 @@ enum drm_i915_pmu_engine_sample {
 	I915_SAMPLE_BUSY = 0,
 	I915_SAMPLE_WAIT = 1,
 	I915_SAMPLE_SEMA = 2,
-	I915_ENGINE_SAMPLE_MAX /* non-ABI */
+	I915_SAMPLE_QUEUED = 3,
+	I915_SAMPLE_RUNNABLE = 4,
+	I915_SAMPLE_RUNNING = 5,
 };
 
+ /* Divide counter value by divisor to get the real value. */
+#define I915_SAMPLE_QUEUED_DIVISOR (1024)
+#define I915_SAMPLE_RUNNABLE_DIVISOR (1024)
+#define I915_SAMPLE_RUNNING_DIVISOR (1024)
+
 #define I915_PMU_SAMPLE_BITS (4)
 #define I915_PMU_SAMPLE_MASK (0xf)
 #define I915_PMU_SAMPLE_INSTANCE_BITS (8)
@@ -58,6 +65,15 @@ enum drm_i915_pmu_engine_sample {
 #define I915_PMU_ENGINE_SEMA(class, instance) \
 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
 
+#define I915_PMU_ENGINE_QUEUED(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_QUEUED)
+
+#define I915_PMU_ENGINE_RUNNABLE(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_RUNNABLE)
+
+#define I915_PMU_ENGINE_RUNNING(class, instance) \
+	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_RUNNING)
+
 #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
 
 #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
diff --git a/overlay/gpu-top.c b/overlay/gpu-top.c
index 61b8f62fd78c..22e9badb22c1 100644
--- a/overlay/gpu-top.c
+++ b/overlay/gpu-top.c
@@ -72,6 +72,18 @@ static int perf_init(struct gpu_top *gt)
 				 gt->fd) >= 0)
 		gt->have_sema = 1;
 
+	if (perf_i915_open_group(I915_PMU_ENGINE_QUEUED(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_queued = 1;
+
+	if (perf_i915_open_group(I915_PMU_ENGINE_RUNNABLE(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_runnable = 1;
+
+	if (perf_i915_open_group(I915_PMU_ENGINE_RUNNING(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_running = 1;
+
 	gt->ring[0].name = d->name;
 	gt->num_rings = 1;
 
@@ -93,6 +105,24 @@ static int perf_init(struct gpu_top *gt)
 				   gt->fd) < 0)
 			return -1;
 
+		if (gt->have_queued &&
+		    perf_i915_open_group(I915_PMU_ENGINE_QUEUED(d->class,
+								d->inst),
+				   gt->fd) < 0)
+			return -1;
+
+		if (gt->have_runnable &&
+		    perf_i915_open_group(I915_PMU_ENGINE_RUNNABLE(d->class,
+								  d->inst),
+				   gt->fd) < 0)
+			return -1;
+
+		if (gt->have_running &&
+		    perf_i915_open_group(I915_PMU_ENGINE_RUNNING(d->class,
+								 d->inst),
+				   gt->fd) < 0)
+			return -1;
+
 		gt->ring[gt->num_rings++].name = d->name;
 	}
 
@@ -298,6 +328,12 @@ int gpu_top_update(struct gpu_top *gt)
 				s->wait[n] = sample[m++];
 			if (gt->have_sema)
 				s->sema[n] = sample[m++];
+			if (gt->have_queued)
+				s->queued[n] = sample[m++];
+			if (gt->have_runnable)
+				s->runnable[n] = sample[m++];
+			if (gt->have_running)
+				s->running[n] = sample[m++];
 		}
 
 		if (gt->count == 1)
@@ -310,6 +346,12 @@ int gpu_top_update(struct gpu_top *gt)
 				gt->ring[n].u.u.wait = (100 * (s->wait[n] - d->wait[n]) + d_time/2) / d_time;
 			if (gt->have_sema)
 				gt->ring[n].u.u.sema = (100 * (s->sema[n] - d->sema[n]) + d_time/2) / d_time;
+			if (gt->have_queued)
+				gt->ring[n].queued = (double)((s->queued[n] - d->queued[n])) / I915_SAMPLE_QUEUED_DIVISOR * 1e9 / d_time;
+			if (gt->have_runnable)
+				gt->ring[n].runnable = (double)((s->runnable[n] - d->runnable[n])) / I915_SAMPLE_RUNNABLE_DIVISOR * 1e9 / d_time;
+			if (gt->have_running)
+				gt->ring[n].running = (double)((s->running[n] - d->running[n])) / I915_SAMPLE_RUNNING_DIVISOR * 1e9 / d_time;
 
 			/* in case of rounding + sampling errors, fudge */
 			if (gt->ring[n].u.u.busy > 100)
diff --git a/overlay/gpu-top.h b/overlay/gpu-top.h
index d3cdd779760f..cb4310c82a94 100644
--- a/overlay/gpu-top.h
+++ b/overlay/gpu-top.h
@@ -36,6 +36,9 @@ struct gpu_top {
 	int num_rings;
 	int have_wait;
 	int have_sema;
+	int have_queued;
+	int have_runnable;
+	int have_running;
 
 	struct gpu_top_ring {
 		const char *name;
@@ -47,6 +50,10 @@ struct gpu_top {
 			} u;
 			uint32_t payload;
 		} u;
+
+		double queued;
+		double runnable;
+		double running;
 	} ring[MAX_RINGS];
 
 	struct gpu_top_stat {
@@ -54,7 +61,11 @@ struct gpu_top {
 		uint64_t busy[MAX_RINGS];
 		uint64_t wait[MAX_RINGS];
 		uint64_t sema[MAX_RINGS];
+		uint64_t queued[MAX_RINGS];
+		uint64_t runnable[MAX_RINGS];
+		uint64_t running[MAX_RINGS];
 	} stat[2];
+
 	int count;
 };
 
diff --git a/overlay/overlay.c b/overlay/overlay.c
index 545af7bcb2f5..a639703ebcec 100644
--- a/overlay/overlay.c
+++ b/overlay/overlay.c
@@ -255,6 +255,15 @@ static void show_gpu_top(struct overlay_context *ctx, struct overlay_gpu_top *gt
 		len = sprintf(txt, "%s: %3d%% busy",
 			      gt->gpu_top.ring[n].name,
 			      gt->gpu_top.ring[n].u.u.busy);
+		if (gt->gpu_top.have_queued)
+			len += sprintf(txt + len, ", qd %.2f",
+				       gt->gpu_top.ring[n].queued);
+		if (gt->gpu_top.have_runnable)
+			len += sprintf(txt + len, ", rd %.2f",
+				       gt->gpu_top.ring[n].runnable);
+		if (gt->gpu_top.have_running)
+			len += sprintf(txt + len, ", ed %.2f",
+				       gt->gpu_top.ring[n].running);
 		if (gt->gpu_top.ring[n].u.u.wait)
 			len += sprintf(txt + len, ", %d%% wait",
 				       gt->gpu_top.ring[n].u.u.wait);
-- 
2.14.1


