[Intel-gfx] [PATCH] Implement batch and ring buffer dumping

Ben Gamari bgamari at gmail.com
Tue Feb 3 23:06:56 CET 2009


---
 drivers/gpu/drm/i915/i915_drv.h         |   14 ++++
 drivers/gpu/drm/i915/i915_gem.c         |   17 +++---
 drivers/gpu/drm/i915/i915_gem_debugfs.c |  102 +++++++++++++++++++++++++++++++
 3 files changed, 125 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f8abc79..53e169c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -374,6 +374,15 @@ typedef struct drm_i915_private {
 
 		/* storage for physical objects */
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+		/**
+		 * Ring buffer for recently submitted batch buffer objects
+		 * This is for keeping track of batch buffers referenced while
+		 * dumping the ring buffer
+		 */
+#define I915_GEM_RECENT_BATCH_LEN 10
+		struct drm_gem_object *recent_batch_list[I915_GEM_RECENT_BATCH_LEN];	/* NOTE(review): raw pointers, no GEM reference held — entries may dangle once the object is freed; confirm lifetime */
+		int recent_batch_head;	/* next slot to overwrite (circular index) */
 	} mm;
 } drm_i915_private_t;
 
 
@@ -621,6 +630,11 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+					     int write);
+int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6a9e3a8..b143a48 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,14 +41,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
-					     int write);
 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 						     uint64_t offset,
 						     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
@@ -792,7 +788,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void
+void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1382,7 +1378,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	return ret;
 }
 
-static int
+int
 i915_gem_object_get_page_list(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1839,7 +1835,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
  */
-static int
+int
 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 {
 	struct drm_device *dev = obj->dev;
@@ -2585,12 +2581,17 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_offset = exec_list[args->buffer_count - 1].offset;
 
 #if WATCH_EXEC
-	i915_gem_dump_object(object_list[args->buffer_count - 1],
+	i915_gem_dump_object(batch_obj,
 			      args->batch_len,
 			      __func__,
 			      ~0);
 #endif
 
+	/* Record batchbuffer in recent batches list */
+	dev_priv->mm.recent_batch_list[dev_priv->mm.recent_batch_head] = batch_obj;	/* NOTE(review): no drm_gem_object_reference taken here — the debugfs reader can race with the object being freed; confirm */
+	dev_priv->mm.recent_batch_head = (dev_priv->mm.recent_batch_head+1) %
+		I915_GEM_RECENT_BATCH_LEN;	/* advance circular index, wrapping at the list length */
+
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 7ad49d7..c5379f0 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -176,6 +176,93 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static void i915_dump_pages(struct seq_file *m, struct page **page_list, int page_count)	/* hex-dump page_count pages, one dword per line */
+{
+	int page, i;
+	int offset = 0;	/* running dword index across the whole object */
+	unsigned char *page_vma;
+	int *ptr;
+
+	for (page = 0; page < page_count; page++) {
+		page_vma = kmap(page_list[page]);	/* temporary CPU mapping of this page */
+		for (i = 0; i < PAGE_SIZE; i += 4) {	/* one dword per step; i++ printed each dword 4x and read 3 bytes past the page */
+			ptr = (int *) (page_vma + i);
+			seq_printf(m, "%08x :  %08x\n", offset++, *ptr);
+		}
+		kunmap(page_list[page]);
+	}
+}
+
+static int i915_batch_buffer_info(struct seq_file *m, void *data)	/* debugfs show: hex-dump one recently submitted batch buffer */
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	uintptr_t n = (uintptr_t) node->info_ent->data;	/* slot index, stashed in ->data by i915_gem_debugfs_init */
+	int free_page_list = 0;	/* nonzero when we populated the page list ourselves and must drop it */
+	int ret;
+
+	obj = dev_priv->mm.recent_batch_list[n];	/* NOTE(review): read without struct_mutex and without owning a reference — confirm locking */
+	if (obj == NULL)	/* slot never filled: nothing to dump */
+		return 0;
+
+	obj_priv = obj->driver_private;
+	if (obj_priv->page_list == NULL) {
+		/* Backing pages not currently pinned in; pull them in just for the dump. */
+		free_page_list = 1;
+		ret = i915_gem_object_get_page_list(obj);
+		if (ret)
+			return ret;
+	}
+
+	seq_printf(m, "GttOffset=%08x\n", obj_priv->gtt_offset);
+	i915_dump_pages(m, obj_priv->page_list, obj->size / PAGE_SIZE);
+
+	if (free_page_list)	/* release only what we paged in above */
+		i915_gem_object_free_page_list(obj);
+	return 0;
+}
+
+static int i915_ringbuf_info(struct seq_file *m, void *data)	/* debugfs show: hex-dump the entire ring buffer */
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u8 *virt;
+	uint32_t *ptr, off;
+
+	virt = dev_priv->ring.virtual_start;	/* CPU mapping of the ring; NOTE(review): dumped without locking against concurrent emission — confirm */
+
+	for (off = 0; off < dev_priv->ring.Size; off += 4) {	/* one dword per output line, byte offset as the label */
+		ptr = (uint32_t *)(virt + off);
+		seq_printf(m, "%08x :  %08x\n", off, *ptr);
+	}
+
+	return 0;
+}
+
+static int i915_execinfo_info(struct seq_file *m, void *data)	/* debugfs show: ring head/tail/mask/size plus current GPU head */
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int head, tail, mask;
+
+	head = dev_priv->ring.head & HEAD_ADDR;	/* strip status bits, keep the byte offset */
+	tail = dev_priv->ring.tail & TAIL_ADDR;
+	mask = dev_priv->ring.tail_mask;
+
+	seq_printf(m, "RingHead :  %08x\n", head);
+	seq_printf(m, "RingTail :  %08x\n", tail);
+	seq_printf(m, "RingMask :  %08x\n", mask);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "Acthd :  %08x\n", I915_READ(ACTHD_I965));	/* NOTE(review): ACTHD_I965 is the gen4+ register; pre-965 parts use a different ACTHD offset — confirm gen check */
+
+	return 0;
+}
+
+
 static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_active_info, 0},
 	{"i915_gem_flushing", i915_gem_flushing_info, 0},
@@ -184,11 +271,40 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
 	{"i915_gem_hws", i915_hws_info, 0},
+	{"i915_gem_ringbuf", i915_ringbuf_info, 0},
+	{"i915_execinfo", i915_execinfo_info, 0},
 };
 #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
 
 int i915_gem_debugfs_init(struct drm_minor *minor)
 {
+	/* Create one "i915_batchbufferN" file per recent-batch slot, then
+	 * the fixed entry list.  The names and the info_list array live for
+	 * the driver lifetime; on OOM we bail with -ENOMEM, which aborts
+	 * driver load (partial allocations are not reclaimed here).
+	 */
+	struct drm_info_list *batch_buffer_files;
+	char *name;
+	uintptr_t i;
+	int ret;
+
+	batch_buffer_files = drm_calloc(I915_GEM_RECENT_BATCH_LEN,
+				       sizeof(struct drm_info_list), 0);
+	if (batch_buffer_files == NULL)
+		return -ENOMEM;
+	for (i = 0; i < I915_GEM_RECENT_BATCH_LEN; i++) {
+		name = drm_alloc(25, 0);
+		if (name == NULL)
+			return -ENOMEM;
+		/* snprintf bounds the write into the 25-byte name buffer */
+		snprintf(name, 25, "i915_batchbuffer%d", (int) i);
+		batch_buffer_files[i].name = name;
+		batch_buffer_files[i].show = i915_batch_buffer_info;
+		/* slot index rides along as the ->data cookie */
+		batch_buffer_files[i].data = (void *) i;
+	}
+	ret = drm_debugfs_create_files(batch_buffer_files,
+				       I915_GEM_RECENT_BATCH_LEN,
+				       minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_gem_debugfs_list, I915_GEM_DEBUGFS_ENTRIES,
 			minor->debugfs_root, minor);
 }
-- 
1.6.0.6




More information about the Intel-gfx mailing list