[Intel-gfx] [PATCH 2/3] Implement batch and ring buffer dumping

Carl Worth cworth at cworth.org
Fri Mar 20 19:54:26 CET 2009


From: Ben Gamari <bgamari at gmail.com>

We create a debugfs node (i915_ringbuffer_data) to expose a hex dump
of the ring buffer contents.  We also expose a second debugfs node
(i915_ringbuffer_info) with information on the state of the ring
buffer (e.g. its head and tail addresses).
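
As a usage illustration (not part of the patch), the dump can be read
from userspace like any other seq_file.  The sketch below assumes
debugfs is mounted at /sys/kernel/debug and that the device is DRM
minor 0; both may differ on a given system.

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ringbuffer_data", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* each line has the form "offset :  dword" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}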

For batchbuffer dumping, we walk the device's active_list and dump
each object that has I915_GEM_DOMAIN_COMMAND in its read
domains.  This is all exposed through the dri/i915_batchbuffers
debugfs file, with a header for each object (giving the object's
gtt_offset so that it can be matched against the offset given in the
BATCH_BUFFER_START command).
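
As an illustrative sketch (not part of the patch), piping the ring
dump through something like the following locates batch buffer start
commands and extracts the batch address from the dword that follows,
which can then be matched against the gtt_offset headers in
i915_batchbuffers.  The opcode encoding (0x31 << 23, per MI_INSTR in
i915_reg.h) and the masking of the low flag bits (e.g.
MI_BATCH_NON_SECURE) are assumptions to check against the chipset
documentation.

#include <stdio.h>

#define MI_BATCH_BUFFER_START	(0x31u << 23)	/* MI_INSTR(0x31, 0), assumed */
#define MI_CMD_MASK		(0x1ffu << 23)	/* command type + opcode bits */

int main(void)
{
	unsigned int off, val, prev = 0;
	int have_prev = 0;

	/* parse "offset :  dword" lines as printed by i915_ringbuffer_data */
	while (scanf("%x : %x", &off, &val) == 2) {
		if (have_prev && (prev & MI_CMD_MASK) == MI_BATCH_BUFFER_START)
			printf("batch at ring offset 0x%08x, gtt_offset 0x%08x\n",
			       off - 4, val & ~0x7u);
		prev = val;
		have_prev = 1;
	}
	return 0;
}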

Signed-off-by: Carl Worth <cworth at cworth.org>
---
 drivers/gpu/drm/i915/i915_drv.h         |    5 ++
 drivers/gpu/drm/i915/i915_gem.c         |   12 +---
 drivers/gpu/drm/i915/i915_gem_debugfs.c |   99 +++++++++++++++++++++++++++++++
 3 files changed, 108 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a7e2b74..c928844 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -632,6 +632,11 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f9e4a5b..bbbc2e1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,14 +37,10 @@
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
-					     int write);
 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -815,7 +811,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void
+void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1413,7 +1409,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	return ret;
 }
 
-static int
+int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1875,7 +1871,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
  */
-static int
+int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
 	int ret;
@@ -2641,7 +2637,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-	i915_gem_dump_object(object_list[args->buffer_count - 1],
+	i915_gem_dump_object(batch_obj,
 			      args->batch_len,
 			      __func__,
 			      ~0);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 81ebe21..3a2cd34 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,102 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **page_list, int page_count)
+{
+	int page, i;
+	uint32_t *mem;
+
+	for (page = 0; page < page_count; page++) {
+		mem = kmap(page_list[page]);
+		for (i = 0; i < PAGE_SIZE; i += 4)
+			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
+		kunmap(page_list[page]);
+	}
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int free_page_list;
+	int ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		obj = obj_priv->obj;
+		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+			if (obj_priv->page_list == NULL) {
+				free_page_list = 1;
+				ret = i915_gem_object_get_page_list(obj);
+				if (ret) {
+					DRM_ERROR("Failed to get page list: %d\n", ret);
+					spin_unlock(&dev_priv->mm.active_list_lock);
+					return ret;
+				}
+			} else
+				free_page_list = 0;
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+			i915_dump_pages(m, obj_priv->page_list, obj->size / PAGE_SIZE);
+
+			if (free_page_list)
+				i915_gem_object_free_page_list(obj);
+		}
+	}
+
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u8 *virt;
+	uint32_t *ptr, off;
+
+	if (!dev_priv->ring.ring_obj) {
+		seq_printf(m, "No ringbuffer setup\n");
+		return 0;
+	}
+
+	virt = dev_priv->ring.virtual_start;
+
+	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+		ptr = (uint32_t *)(virt + off);
+		seq_printf(m, "%08x :  %08x\n", off, *ptr);
+	}
+
+	return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int head, tail, mask;
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	mask = dev_priv->ring.tail_mask;
+
+	seq_printf(m, "RingHead :  %08x\n", head);
+	seq_printf(m, "RingTail :  %08x\n", tail);
+	seq_printf(m, "RingMask :  %08x\n", mask);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "Acthd :  %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+	return 0;
+}
+
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +339,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
 	{"i915_gem_hws", i915_hws_info, 0},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 };
 #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
 
-- 
1.6.2