[Intel-gfx] [PATCH v6 12/14] drm/i915: debugfs of GuC status
yu.dai at intel.com
Wed Apr 29 15:13:33 PDT 2015
From: Alex Dai <yu.dai at intel.com>
Now print out the Bootrom, uKernel and MIA Core status. Scratch
registers 0 and 15 are used for communication between the driver and
the firmware, so the contents of all 16 soft scratch registers are
dumped as well.
Issue: VIZ-4884
Signed-off-by: Alex Dai <yu.dai at intel.com>
---
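For illustration, reading the new load-status entry might look like
the following; the firmware path, status codes and register values
here are made-up examples, assuming debugfs is mounted at
/sys/kernel/debug and the GPU is DRM minor 0:

  $ cat /sys/kernel/debug/dri/0/i915_guc_load_status
  GuC firmware status:
        path: <i915/guc.bin>
        fetch: 2
        load: 2

  Response from GuC:
        Bootrom status = 0x76
        uKernel status = 0xf0
        MIA Core status = 0x3
  Scratch register values:
         0:     0x0
         1:     0xf000f000
        ...
        15:     0x800000c2
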
drivers/gpu/drm/i915/i915_debugfs.c | 79 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/intel_guc.h | 13 ++++++
drivers/gpu/drm/i915/intel_guc_client.c | 33 +++++++++++---
3 files changed, 118 insertions(+), 7 deletions(-)
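Similarly, the submission-statistics entry might read as below; the
counters and the client pointer are purely illustrative:

  $ cat /sys/kernel/debug/dri/0/i915_guc_info
  GuC total action count: 24
  GuC last action command: 0x3000
  GuC last action status: 0x1
  GuC action failure count: 0
  GuC last action error code: 0
  GuC execbuf client @ ffff880213c0a000:
        Total submissions: 18
        Failed to queue: 0
        Failed doorbell: 0
        Last submission result: 0
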
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c2b9e4..f12bbee 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2310,6 +2310,83 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
+static void i915_uc_load_status_info(struct seq_file *m, struct intel_uc_fw *uc_fw)
+{
+ seq_printf(m, "%s firmware status:\n\tpath: <%s>\n\tfetch: %d\n\tload: %d\n",
+ uc_fw->uc_name,
+ uc_fw->uc_fw_path,
+ uc_fw->uc_fw_fetch_status,
+ uc_fw->uc_fw_load_status);
+}
+
+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+ u32 tmp, i;
+
+ if (!HAS_GUC_UCODE(dev_priv->dev))
+ return 0;
+
+ i915_uc_load_status_info(m, &dev_priv->guc.guc_fw);
+
+ tmp = I915_READ(GUC_STATUS);
+
+ seq_puts(m, "\nResponse from GuC:\n");
+ seq_printf(m, "\tBootrom status = 0x%x\n",
+ (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ seq_printf(m, "\tuKernel status = 0x%x\n",
+ (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ seq_printf(m, "\tMIA Core status = 0x%x\n",
+ (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ seq_puts(m, "Scratch register values:\n");
+ for (i = 0; i < 16; i++)
+ seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+ return 0;
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_guc guc;
+ struct i915_guc_client client;
+
+ if (!i915.enable_guc_scheduling)
+ return 0;
+
+ memset(&client, 0, sizeof(struct i915_guc_client));
+
+ /* Take a local copy of the GuC data, so we can dump it at leisure */
+ spin_lock(&dev_priv->guc.host2guc_lock);
+ guc = dev_priv->guc;
+ if (guc.execbuf_client) {
+ spin_lock(&guc.execbuf_client->wq_lock);
+ client = *guc.execbuf_client;
+ spin_unlock(&guc.execbuf_client->wq_lock);
+ }
+ spin_unlock(&dev_priv->guc.host2guc_lock);
+
+ seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+ seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+ seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+
+ seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+ seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+ seq_printf(m, "GuC execbuf client @ %p:\n", guc.execbuf_client);
+ seq_printf(m, "\tTotal submissions: %llu\n", client.submissions);
+ seq_printf(m, "\tFailed to queue: %u\n", client.q_fail);
+ seq_printf(m, "\tFailed doorbell: %u\n", client.b_fail);
+ seq_printf(m, "\tLast submission result: %d\n", client.retcode);
+
+ /* Add more as required ... */
+
+ return 0;
+}
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -4776,6 +4853,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+ {"i915_guc_info", i915_guc_info, 0},
+ {"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index f8065cf..b096d1a 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -42,6 +42,12 @@ struct i915_guc_client {
uint32_t wq_size;
uint32_t wq_tail;
uint32_t cookie;
+
+ /* GuC submission statistics & status */
+ uint64_t submissions;
+ uint32_t q_fail;
+ uint32_t b_fail;
+ int retcode;
};
#define I915_MAX_DOORBELLS 256
@@ -66,6 +72,13 @@ struct intel_guc {
struct ida ctx_ids;
int db_cacheline;
DECLARE_BITMAP(doorbell_bitmap, I915_MAX_DOORBELLS);
+
+ /* Action status & statistics */
+ uint64_t action_count; /* Total commands issued */
+ uint32_t action_cmd; /* Last command word */
+ uint32_t action_status; /* Last return status */
+ uint32_t action_fail; /* Total number of failures */
+ int32_t action_err; /* Last error code */
};
#define GUC_STATUS 0xc000
diff --git a/drivers/gpu/drm/i915/intel_guc_client.c b/drivers/gpu/drm/i915/intel_guc_client.c
index 31934a3..adfff1c 100644
--- a/drivers/gpu/drm/i915/intel_guc_client.c
+++ b/drivers/gpu/drm/i915/intel_guc_client.c
@@ -87,6 +87,9 @@ static int intel_guc_action(struct intel_guc *guc, u32 *data, u32 len)
spin_lock(&dev_priv->guc.host2guc_lock);
+ dev_priv->guc.action_count += 1;
+ dev_priv->guc.action_cmd = data[0];
+
for (i = 0; i < len; i++)
I915_WRITE(SOFT_SCRATCH(i), data[i]);
@@ -105,7 +108,11 @@ static int intel_guc_action(struct intel_guc *guc, u32 *data, u32 len)
"status=0x%08X response=0x%08X\n",
data[0], ret, status,
I915_READ(SOFT_SCRATCH(15)));
+
+ dev_priv->guc.action_fail += 1;
+ dev_priv->guc.action_err = ret;
}
+ dev_priv->guc.action_status = status;
spin_unlock(&dev_priv->guc.host2guc_lock);
@@ -614,13 +621,25 @@ int i915_guc_client_submit(struct i915_guc_client *client,
struct intel_context *ctx,
struct intel_engine_cs *ring)
{
- int ret;
-
- ret = add_workqueue_item(client, ctx, ring);
- if (ret)
- return ret;
+ int q_ret, b_ret = 0;	/* b_ret is only read when q_ret == 0 */
+ unsigned long flags;
- ret = ring_doorbell(client);
+ q_ret = add_workqueue_item(client, ctx, ring);
+ if (q_ret == 0)
+ b_ret = ring_doorbell(client);
+
+ spin_lock_irqsave(&client->wq_lock, flags);
+ client->submissions += 1;
+ if (q_ret) {
+ client->q_fail += 1;
+ client->retcode = q_ret;
+ } else if (b_ret) {
+ client->b_fail += 1;
+ client->retcode = q_ret = b_ret;
+ } else {
+ client->retcode = 0;
+ }
+ spin_unlock_irqrestore(&client->wq_lock, flags);
- return ret;
+ return q_ret;
}
--
1.9.1