[Intel-gfx] [PATCH i-g-t 2/5] intel-gpu-overlay: Add engine queue stats
Tvrtko Ursulin
tursulin at ursulin.net
Thu Apr 5 12:40:25 UTC 2018
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Use new PMU engine queue stats (queued, runnable and running) and display
them per engine.
v2:
* Compact per engine stats. (Chris Wilson)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 overlay/gpu-top.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 overlay/gpu-top.h | 11 +++++++++++
 overlay/overlay.c |  7 +++++++
 3 files changed, 60 insertions(+)
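[Aside for review, not part of the patch: perf_i915_open_group() opens an i915 PMU counter into a perf event group, so the whole group can be read atomically with a single read(). A minimal sketch of the mechanism it wraps, assuming the standard perf_event_open(2) ABI and the i915 PMU's sysfs type registration; the helper names below are illustrative and error handling is elided:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* The i915 PMU registers a dynamic event source; its type id is
 * published in sysfs. */
static unsigned int i915_pmu_type(void)
{
	unsigned int type = 0;
	FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");

	if (f) {
		if (fscanf(f, "%u", &type) != 1)
			type = 0;
		fclose(f);
	}

	return type;
}

/* group_fd == -1 starts a new group; passing an existing fd joins it.
 * PERF_FORMAT_GROUP makes read() on the leader return all members. */
static int i915_open_counter(uint64_t config, int group_fd)
{
	struct perf_event_attr attr = {
		.type = i915_pmu_type(),
		.size = sizeof(attr),
		.config = config,
		.read_format = PERF_FORMAT_GROUP |
			       PERF_FORMAT_TOTAL_TIME_ENABLED,
	};

	/* pid == -1, cpu 0: system-wide counter, as for any uncore PMU */
	return syscall(__NR_perf_event_open, &attr, -1, 0, group_fd, 0);
}

Grouping matters below: gpu_top_update() consumes every counter from one read(), so the have_* flags must record exactly which events joined the group.]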
diff --git a/overlay/gpu-top.c b/overlay/gpu-top.c
index 61b8f62fd78c..22e9badb22c1 100644
--- a/overlay/gpu-top.c
+++ b/overlay/gpu-top.c
@@ -72,6 +72,18 @@ static int perf_init(struct gpu_top *gt)
 				 gt->fd) >= 0)
 		gt->have_sema = 1;
 
+	if (perf_i915_open_group(I915_PMU_ENGINE_QUEUED(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_queued = 1;
+
+	if (perf_i915_open_group(I915_PMU_ENGINE_RUNNABLE(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_runnable = 1;
+
+	if (perf_i915_open_group(I915_PMU_ENGINE_RUNNING(d->class, d->inst),
+				 gt->fd) >= 0)
+		gt->have_running = 1;
+
 	gt->ring[0].name = d->name;
 	gt->num_rings = 1;
@@ -93,6 +105,24 @@ static int perf_init(struct gpu_top *gt)
 					 gt->fd) < 0)
 			return -1;
 
+		if (gt->have_queued &&
+		    perf_i915_open_group(I915_PMU_ENGINE_QUEUED(d->class,
+								d->inst),
+					 gt->fd) < 0)
+			return -1;
+
+		if (gt->have_runnable &&
+		    perf_i915_open_group(I915_PMU_ENGINE_RUNNABLE(d->class,
+								  d->inst),
+					 gt->fd) < 0)
+			return -1;
+
+		if (gt->have_running &&
+		    perf_i915_open_group(I915_PMU_ENGINE_RUNNING(d->class,
+								 d->inst),
+					 gt->fd) < 0)
+			return -1;
+
 		gt->ring[gt->num_rings++].name = d->name;
 	}
@@ -298,6 +328,12 @@ int gpu_top_update(struct gpu_top *gt)
 				s->wait[n] = sample[m++];
 			if (gt->have_sema)
 				s->sema[n] = sample[m++];
+			if (gt->have_queued)
+				s->queued[n] = sample[m++];
+			if (gt->have_runnable)
+				s->runnable[n] = sample[m++];
+			if (gt->have_running)
+				s->running[n] = sample[m++];
 		}
 
 		if (gt->count == 1)
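[Aside, not part of the patch: the sample[m++] walk above depends on the perf group read layout. With PERF_FORMAT_GROUP, one read() of the group leader returns every counter at once, in the order the events were opened into the group, so the new values simply slot in after busy/wait/sema for each engine. Schematically, assuming the group was also opened with PERF_FORMAT_TOTAL_TIME_ENABLED for the interval timestamp:

/* Sketch of the buffer a single read() of the group leader fills in. */
struct i915_group_read {
	uint64_t nr;		/* number of counters in the group */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t values[];	/* one value per counter, in open order:
				 * busy[, wait][, sema][, queued]
				 * [, runnable][, running] per engine */
};]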
@@ -310,6 +346,12 @@ int gpu_top_update(struct gpu_top *gt)
 			gt->ring[n].u.u.wait = (100 * (s->wait[n] - d->wait[n]) + d_time/2) / d_time;
 		if (gt->have_sema)
 			gt->ring[n].u.u.sema = (100 * (s->sema[n] - d->sema[n]) + d_time/2) / d_time;
+		if (gt->have_queued)
+			gt->ring[n].queued = (double)((s->queued[n] - d->queued[n])) / I915_SAMPLE_QUEUED_DIVISOR * 1e9 / d_time;
+		if (gt->have_runnable)
+			gt->ring[n].runnable = (double)((s->runnable[n] - d->runnable[n])) / I915_SAMPLE_RUNNABLE_DIVISOR * 1e9 / d_time;
+		if (gt->have_running)
+			gt->ring[n].running = (double)((s->running[n] - d->running[n])) / I915_SAMPLE_RUNNING_DIVISOR * 1e9 / d_time;
 
 		/* in case of rounding + sampling errors, fudge */
 		if (gt->ring[n].u.u.busy > 100)
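[Aside on the arithmetic above, not part of the patch: unlike busy/wait/sema, which are nanosecond counters converted to percentages, the proposed QUEUED/RUNNABLE/RUNNING counters accumulate queue depth over time, scaled by the fixed-point I915_SAMPLE_*_DIVISOR constants. Dividing the counter delta by the divisor and normalising by the elapsed interval (d_time is in nanoseconds, hence the 1e9) therefore yields the mean number of requests in each state over the interval. An illustrative helper with hypothetical numbers:

/* Mirrors the averaging in gpu_top_update():
 *   delta     - raw counter difference over the sampling interval
 *   divisor   - fixed-point scale, e.g. I915_SAMPLE_QUEUED_DIVISOR
 *   d_time_ns - interval length in nanoseconds
 * E.g. delta = 3072 with divisor = 1024 over a 1.5s interval:
 * 3072 / 1024 * 1e9 / 1.5e9 = 2.0 requests queued on average. */
static double mean_requests(uint64_t delta, unsigned int divisor,
			    uint64_t d_time_ns)
{
	return (double)delta / divisor * 1e9 / d_time_ns;
}]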
diff --git a/overlay/gpu-top.h b/overlay/gpu-top.h
index d3cdd779760f..cb4310c82a94 100644
--- a/overlay/gpu-top.h
+++ b/overlay/gpu-top.h
@@ -36,6 +36,9 @@ struct gpu_top {
 	int num_rings;
 	int have_wait;
 	int have_sema;
+	int have_queued;
+	int have_runnable;
+	int have_running;
 
 	struct gpu_top_ring {
 		const char *name;
@@ -47,6 +50,10 @@ struct gpu_top {
 			} u;
 			uint32_t payload;
 		} u;
+
+		double queued;
+		double runnable;
+		double running;
 	} ring[MAX_RINGS];
 
 	struct gpu_top_stat {
@@ -54,7 +61,11 @@ struct gpu_top {
 		uint64_t busy[MAX_RINGS];
 		uint64_t wait[MAX_RINGS];
 		uint64_t sema[MAX_RINGS];
+		uint64_t queued[MAX_RINGS];
+		uint64_t runnable[MAX_RINGS];
+		uint64_t running[MAX_RINGS];
 	} stat[2];
+
 	int count;
 };
diff --git a/overlay/overlay.c b/overlay/overlay.c
index 545af7bcb2f5..d3755397061b 100644
--- a/overlay/overlay.c
+++ b/overlay/overlay.c
@@ -255,6 +255,13 @@ static void show_gpu_top(struct overlay_context *ctx, struct overlay_gpu_top *gt
 		len = sprintf(txt, "%s: %3d%% busy",
 			      gt->gpu_top.ring[n].name,
 			      gt->gpu_top.ring[n].u.u.busy);
+		if (gt->gpu_top.have_queued &&
+		    gt->gpu_top.have_runnable &&
+		    gt->gpu_top.have_running)
+			len += sprintf(txt + len, " (%.2f / %.2f / %.2f)",
+				       gt->gpu_top.ring[n].queued,
+				       gt->gpu_top.ring[n].runnable,
+				       gt->gpu_top.ring[n].running);
 		if (gt->gpu_top.ring[n].u.u.wait)
 			len += sprintf(txt + len, ", %d%% wait",
 				       gt->gpu_top.ring[n].u.u.wait);
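[With all three counters available, the rendered line gains the three averages after the busy percentage, e.g. (hypothetical engine name and values):

rcs0:  43% busy (2.25 / 1.50 / 1.00), 7% wait

i.e. mean queued / runnable / running requests over the sampling interval.]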
--
2.14.1