[Intel-gfx] [PATCH 65/66] drm/i915: Add debugfs for vma info per vm
Ben Widawsky
ben at bwidawsk.net
Fri Jun 28 01:31:06 CEST 2013
Add a new debugfs file, i915_vm_info, which walks every address space on
the device and dumps the VMAs bound into it, sorted by offset, along with
a short description of the backing object.

Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 81 +++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)

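(Not part of the patch: a quick sketch of how the new entry could be read
once this lands. It assumes the standard drm debugfs layout under
/sys/kernel/debug/dri/<minor>/ and uses minor 0; both the path and the
minor number are assumptions on my part, not something this patch defines.)

/* Hypothetical userspace reader for the new debugfs file. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed location; adjust the minor number for your device. */
	const char *path = "/sys/kernel/debug/dri/0/i915_vm_info";
	char buf[4096];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}

	/* The seq_file output is plain text; stream it straight to stdout. */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}
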
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 20d6265..6bbb602 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/list_sort.h>
 #include <generated/utsrelease.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
@@ -145,6 +146,42 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void
+describe_less_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%pK: %s%s %02x %02x %d %d %d%s%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
+		   obj->last_fenced_seqno,
+		   cache_level_str(obj->cache_level),
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->pin_count)
+		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->stolen)
+		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1922,6 +1959,49 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int vma_compare(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct i915_vma *vma1, *vma2;
+
+	vma1 = list_entry(a, struct i915_vma, per_vm_link);
+	vma2 = list_entry(b, struct i915_vma, per_vm_link);
+
+	return vma1->node.start - vma2->node.start;
+}
+
+static int i915_vm_info(struct seq_file *m, void *data)
+{
+	LIST_HEAD(sorted_vmas);
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+		list_sort(NULL, &vm->vma_list, vma_compare);
+		if (is_i915_ggtt(vm))
+			seq_puts(m, "Global ");
+		seq_printf(m, "VM: %p\n", vm);
+		list_for_each_entry(vma, &vm->vma_list, per_vm_link) {
+			seq_printf(m, " VMA: 0x%08lx-0x%08lx (obj = ",
+				   vma->node.start,
+				   vma->node.start + vma->node.size);
+			describe_less_obj(m, vma->obj);
+			seq_puts(m, ")\n");
+		}
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2358,6 +2438,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_dpio", i915_dpio_info, 0},
+ {"i915_vm_info", i915_vm_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
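
(Aside, not part of the patch: list_sort() only looks at the sign of the
comparator's return value, and node.start is wider than int, so the
subtraction in vma_compare() can in principle truncate for widely spaced
nodes. Below is a minimal userspace illustration of the same
sort-by-start-offset rule using qsort(); the struct and sample values are
made up for the example.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the piece of drm_mm_node the comparator looks at. */
struct fake_node {
	uint64_t start;
};

static int node_compare(const void *a, const void *b)
{
	const struct fake_node *n1 = a, *n2 = b;

	/* Explicit comparison avoids truncating a 64-bit difference to int. */
	if (n1->start < n2->start)
		return -1;
	if (n1->start > n2->start)
		return 1;
	return 0;
}

int main(void)
{
	struct fake_node nodes[] = { { 0x10000 }, { 0x100000000ull }, { 0 } };
	size_t i;

	qsort(nodes, sizeof(nodes) / sizeof(nodes[0]), sizeof(nodes[0]),
	      node_compare);
	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
		printf("0x%llx\n", (unsigned long long)nodes[i].start);
	return 0;
}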
--
1.8.3.1