[Intel-gfx] [PATCH 11/15] drm/i915: Trim i915_guc_info() stack usage
Chris Wilson
chris@chris-wilson.co.uk
Fri Nov 25 09:30:53 UTC 2016

i915_guc_info() (part of the debugfs output) tries to avoid holding
struct_mutex for a long period by copying onto the stack. This causes a
warning that the stack frame is massive, so stop doing that. We can even
forgo holding struct_mutex here, as it does not serialise the values
being read (and the lists used exist for the device lifetime).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
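For context, here is a minimal userspace sketch of the pattern being
removed and the one replacing it (the struct name, fields and the 1KiB
size are illustrative stand-ins, not the real intel_guc layout). Built
with gcc -Wframe-larger-than=1024, dump_by_copy() draws the same kind of
frame-size warning this patch silences, while dump_by_pointer() keeps
the stack frame trivial:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a large, device-lifetime struct. */
struct guc_like {
	uint64_t action_count;
	uint32_t action_fail;
	unsigned char big_state[1024];	/* assume the real struct is large */
};

/* Old pattern: snapshot the whole struct into a local variable. */
static void dump_by_copy(const struct guc_like *src)
{
	struct guc_like copy = *src;	/* >1KiB lands in this stack frame */

	printf("actions: %llu (failed %u)\n",
	       (unsigned long long)copy.action_count, copy.action_fail);
}

/* New pattern: read through a const pointer; no large local copy. */
static void dump_by_pointer(const struct guc_like *src)
{
	printf("actions: %llu (failed %u)\n",
	       (unsigned long long)src->action_count, src->action_fail);
}

int main(void)
{
	struct guc_like g = { .action_count = 42, .action_fail = 1 };

	dump_by_copy(&g);
	dump_by_pointer(&g);
	return 0;
}

The same reasoning applies in the patch proper: the fields are only read
for display, and struct_mutex never serialised them anyway, so the
pointer dereference is no less safe than the copy was.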
drivers/gpu/drm/i915/i915_debugfs.c | 39 ++++++++++++++-------------------------
1 file changed, 14 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8eb8c29b7492..7676e88ae5f2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2434,47 +2434,36 @@ static void i915_guc_client_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_guc guc;
- struct i915_guc_client client = {};
+ const struct intel_guc *guc = &dev_priv->guc;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u64 total = 0;
+ u64 total;

if (!HAS_GUC_SCHED(dev_priv))
return 0;

- if (mutex_lock_interruptible(&dev->struct_mutex))
- return 0;
-
- /* Take a local copy of the GuC data, so we can dump it at leisure */
- guc = dev_priv->guc;
- if (guc.execbuf_client)
- client = *guc.execbuf_client;
-
- mutex_unlock(&dev->struct_mutex);
-
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+ seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
- seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
- seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
- seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
- seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
- seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+ seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
+ seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
+ seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
+ seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
+ seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
+ total = 0;
seq_printf(m, "\nGuC submissions:\n");
for_each_engine(engine, dev_priv, id) {
- u64 submissions = guc.submissions[id];
+ u64 submissions = guc->submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, submissions, guc.last_seqno[id]);
+ engine->name, submissions, guc->last_seqno[id]);
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
- i915_guc_client_info(m, dev_priv, &client);
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
+ i915_guc_client_info(m, dev_priv, guc->execbuf_client);

i915_guc_log_info(m, dev_priv);

-- 
2.10.2
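As a usage note, the dump this patch touches is exposed through the
i915_guc_info debugfs node; assuming debugfs is mounted at the usual
/sys/kernel/debug and the device is card 0, it can be read with:

	cat /sys/kernel/debug/dri/0/i915_guc_info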