[PATCH 23/52] drm/i915: Show execlists queues when dumping state
Chris Wilson
chris@chris-wilson.co.uk
Sun Jan 31 05:43:20 UTC 2021
Move the scheduler pretty printer out of the execlists register state
dump and push it to the scheduler.
v2: It's not common to all, so shove it out of intel_engine_cs.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 69 +-----------
.../drm/i915/gt/intel_execlists_submission.c | 102 ++++++++++--------
drivers/gpu/drm/i915/i915_request.c | 6 ++
drivers/gpu/drm/i915/i915_scheduler.c | 77 +++++++++++++
drivers/gpu/drm/i915/i915_scheduler.h | 8 ++
5 files changed, 155 insertions(+), 107 deletions(-)
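For reference (not part of the diff), a minimal sketch of how the two
dumpers pair up after this change, mirroring the intel_engine_dump()
hunk below. Both call signatures and the engine->sched member are taken
from the hunks in this patch; the wrapper name itself is made up:

	/* Hypothetical wrapper, for illustration only: dump the
	 * execlists backend state, then the common scheduler queues.
	 */
	static void show_engine_queues(struct intel_engine_cs *engine,
				       struct drm_printer *m)
	{
		/* Backend-specific: ELSP ports, CSB entries, timers */
		intel_execlists_show_requests(engine, m,
					      i915_request_show, 8);

		/* Common: tasklet, in-flight, queued and held requests */
		i915_sched_show_engine(m, &engine->sched,
				       i915_request_show, 8);
	}

As the v2 note says, the backend half is not common to all submission
modes, so splitting it out lets a future GuC backend print its own
equivalent while still reusing the common scheduler half.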
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 504b43df10d9..130602600075 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1294,28 +1294,6 @@ static struct intel_timeline *get_timeline(struct i915_request *rq)
return tl;
}
-static int print_ring(char *buf, int sz, struct i915_request *rq)
-{
- int len = 0;
-
- if (!i915_request_signaled(rq)) {
- struct intel_timeline *tl = get_timeline(rq);
-
- len = scnprintf(buf, sz,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
-
- if (tl)
- intel_timeline_put(tl);
- }
-
- return len;
-}
-
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
@@ -1423,22 +1401,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (intel_engine_in_guc_submission_mode(engine)) {
/* nothing to print yet */
- } else if (HAS_EXECLISTS(dev_priv)) {
- struct i915_sched_engine *se =
- intel_engine_get_scheduler(engine);
- struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
- yesno(test_bit(TASKLET_STATE_SCHED,
- &se->tasklet.state)),
- enableddisabled(!atomic_read(&se->tasklet.count)),
- repr_timer(&engine->execlists.preempt),
- repr_timer(&engine->execlists.timer));
+ drm_printf(m, "\tExeclists preempt? %s, timeslice? %s\n",
+ repr_timer(&execlists->preempt),
+ repr_timer(&execlists->timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
@@ -1459,39 +1430,6 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
-
- i915_sched_lock_bh(se);
- rcu_read_lock();
- for (port = execlists->active; (rq = *port); port++) {
- char hdr[160];
- int len;
-
- len = scnprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ccid:%08x%s%s, ",
- (int)(port - execlists->active),
- rq->context->lrc.ccid,
- intel_context_is_closed(rq->context) ? "!" : "",
- intel_context_is_banned(rq->context) ? "*" : "");
- len += print_ring(hdr + len, sizeof(hdr) - len, rq);
- scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- i915_request_show(m, rq, hdr, 0);
- }
- for (port = execlists->pending; (rq = *port); port++) {
- char hdr[160];
- int len;
-
- len = scnprintf(hdr, sizeof(hdr),
- "\t\tPending[%d]: ccid:%08x%s%s, ",
- (int)(port - execlists->pending),
- rq->context->lrc.ccid,
- intel_context_is_closed(rq->context) ? "!" : "",
- intel_context_is_banned(rq->context) ? "*" : "");
- len += print_ring(hdr + len, sizeof(hdr) - len, rq);
- scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
- i915_request_show(m, rq, hdr, 0);
- }
- rcu_read_unlock();
- i915_sched_unlock_bh(se);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1672,6 +1610,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
intel_execlists_show_requests(engine, m, i915_request_show, 8);
+ i915_sched_show_engine(m, &engine->sched, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 7b1edf08264f..e8691ba1c5f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3505,6 +3505,27 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
return 0;
}
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+ int len = 0;
+
+ rcu_read_lock();
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = rcu_dereference(rq->timeline);
+
+ len = scnprintf(buf, sz,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
+ }
+ rcu_read_unlock();
+
+ return len;
+}
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3515,55 +3536,49 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
{
const struct intel_engine_execlists *execlists = &engine->execlists;
struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
+ struct i915_request * const *port;
struct i915_request *rq, *last;
unsigned long flags;
unsigned int count;
struct rb_node *rb;
+ if (!execlists->port_mask)
+ return;
+
+ i915_sched_lock_bh(se);
+ rcu_read_lock();
+
+ for (port = execlists->active; (rq = *port); port++) {
+ char hdr[160];
+ int len;
+
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ccid:%08x%s%s, ",
+ (int)(port - execlists->active),
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ i915_request_show(m, rq, hdr, 0);
+ }
+ for (port = execlists->pending; (rq = *port); port++) {
+ char hdr[160];
+ int len;
+
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d]: ccid:%08x%s%s, ",
+ (int)(port - execlists->pending),
+ rq->context->lrc.ccid,
+ intel_context_is_closed(rq->context) ? "!" : "",
+ intel_context_is_banned(rq->context) ? "*" : "");
+ len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ i915_request_show(m, rq, hdr, 0);
+ }
+
spin_lock_irqsave(&se->lock, flags);
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &se->requests, sched.link) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\t", 0);
- else
- last = rq;
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - max);
- }
- show_request(m, last, "\t\t", 0);
- }
-
- if (execlists->queue_priority_hint != INT_MIN)
- drm_printf(m, "\t\tQueue priority hint: %d\n",
- READ_ONCE(execlists->queue_priority_hint));
-
- last = NULL;
- count = 0;
- for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-
- priolist_for_each_request(rq, p) {
- if (count++ < max - 1)
- show_request(m, rq, "\t\t", 0);
- else
- last = rq;
- }
- }
- if (last) {
- if (count > max) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - max);
- }
- show_request(m, last, "\t\t", 0);
- }
-
last = NULL;
count = 0;
for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
@@ -3588,6 +3603,9 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
}
spin_unlock_irqrestore(&se->lock, flags);
+
+ rcu_read_unlock();
+ i915_sched_unlock_bh(se);
}
bool
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c4a0cb1bb1bb..7a6acb3a86a0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1896,6 +1896,9 @@ static char queue_status(const struct i915_request *rq)
if (i915_request_is_active(rq))
return 'E';
+ if (i915_request_on_hold(rq))
+ return 'S';
+
if (i915_request_is_ready(rq))
return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
@@ -1964,6 +1967,9 @@ void i915_request_show(struct drm_printer *m,
* - a completed request may still be regarded as executing, its
* status may not be updated until it is retired and removed
* from the lists
+ *
+ * S [Suspended]
+ * - the request has been temporarily suspended from execution
*/
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d2cd98404373..70263b7957ae 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1072,6 +1072,83 @@ void i915_request_show_with_schedule(struct drm_printer *m,
rcu_read_unlock();
}
+void i915_sched_show_engine(struct drm_printer *m,
+ struct i915_sched_engine *se,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max)
+{
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ drm_printf(m, "\tTasklet queued? %s (%s)\n",
+ yesno(test_bit(TASKLET_STATE_SCHED, &se->tasklet.state)),
+ enableddisabled(!atomic_read(&se->tasklet.count)));
+
+ spin_lock_irqsave(&se->lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &se->requests, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+
+ priolist_for_each_request(rq, p) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &se->hold, sched.link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\t", 0);
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d suspended requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\t", 0);
+ }
+
+ spin_unlock_irqrestore(&se->lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_scheduler.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index d590d743cb15..9b60dafbe047 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -129,4 +129,12 @@ void i915_request_show_with_schedule(struct drm_printer *m,
const char *prefix,
int indent);
+void i915_sched_show_engine(struct drm_printer *m,
+ struct i915_sched_engine *se,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent),
+ unsigned int max);
+
#endif /* _I915_SCHEDULER_H_ */
--
2.20.1