[Intel-gfx] [PATCH 4/5] Implement batch and ring buffer dumping
Ben Gamari
bgamari at gmail.com
Sat Feb 14 04:16:30 CET 2009
We create a debugfs node (i915_ringbuffer_data) to expose a hex dump of
the ring buffer itself, and a second node (i915_ringbuffer_info) to
expose the ring buffer state (head and tail addresses, mask, and size).
For batchbuffer dumping, we look at the device's active_list, dumping
each object that has I915_GEM_DOMAIN_COMMAND in its read domains. This
is all exposed through the dri/i915_batchbuffers debugfs file with a
header for each object, giving the object's gtt_offset so that it can be
matched against the offset given in the BATCH_BUFFER_START command.
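The new nodes can be read like any other debugfs file; as a minimal
userspace sketch (the debugfs mount point and the dri/0 minor directory
are assumptions and vary between systems):

#include <stdio.h>

/* Dump one of the new debugfs nodes to stdout. The path below is an
 * assumption; the minor number under dri/ depends on the system. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ringbuffer_info", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

The same applies to i915_ringbuffer_data and i915_batchbuffers, which
simply produce (much) more output.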
It might also be useful to expose the values of the device's debug
registers through debugfs, at least until anholt's error-recovery branch
is ready. Especially useful would be the following (a rough sketch of
such a node follows the list):
- IPEIR: So we can identify exactly which instruction the fault occurred in
- IPEHR: For reference/completeness?
- INSTDONE: So we know which unit failed
- INSTDONE_1: ???
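An untested sketch of what such a node could look like, following the
pattern of the handlers added below (the register defines are assumed to
match i915_reg.h, and the name used for the second INSTDONE register is
a guess):

/* Sketch only: expose the debug registers listed above through debugfs.
 * Register defines (IPEIR, IPEHR, INSTDONE, their _I965 variants and
 * INSTDONE1) are assumed to come from i915_reg.h. */
static int i915_debug_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_I965G(dev)) {
		seq_printf(m, "IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
		seq_printf(m, "IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
		seq_printf(m, "INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
		seq_printf(m, "INSTDONE_1: 0x%08x\n", I915_READ(INSTDONE1));
	} else {
		seq_printf(m, "IPEIR: 0x%08x\n", I915_READ(IPEIR));
		seq_printf(m, "IPEHR: 0x%08x\n", I915_READ(IPEHR));
		seq_printf(m, "INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
	}

	return 0;
}

It would then only need an entry along the lines of
{"i915_debug_regs", i915_debug_regs_info, 0} in i915_gem_debugfs_list.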
---
drivers/gpu/drm/i915/i915_drv.h | 5 ++
drivers/gpu/drm/i915/i915_gem.c | 12 +---
drivers/gpu/drm/i915/i915_gem_debugfs.c | 98 +++++++++++++++++++++++++++++++
3 files changed, 107 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5f4bd38..03556d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -622,6 +622,11 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write);
+int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0788581..03d6646 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,14 +41,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
@@ -797,7 +793,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void
+void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1387,7 +1383,7 @@ i915_gem_evict_everything(struct drm_device *dev)
return ret;
}
-static int
+int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1844,7 +1840,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
-static int
+int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
struct drm_device *dev = obj->dev;
@@ -2594,7 +2590,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_offset = exec_list[args->buffer_count - 1].offset;
#if WATCH_EXEC
- i915_gem_dump_object(object_list[args->buffer_count - 1],
+ i915_gem_dump_object(batch_obj,
args->batch_len,
__func__,
~0);
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 9928df4..7954ad6 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -144,6 +144,101 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_dump_pages(struct seq_file *m, struct page **page_list, int page_count)
+{
+ int page, i;
+ uint32_t *mem;
+
+ for (page = 0; page < page_count; page++) {
+ mem = kmap(page_list[page]);
+ for (i = 0; i < PAGE_SIZE; i += 4)
+ seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+ kunmap(page_list[page]);
+ }
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int free_page_list;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ obj = obj_priv->obj;
+ if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+ if (obj_priv->page_list == NULL) {
+ free_page_list = 1;
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ DRM_ERROR("Failed to get page list: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ } else
+ free_page_list = 0;
+
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+ i915_dump_pages(m, obj_priv->page_list, obj->size / PAGE_SIZE);
+
+ if (free_page_list)
+ i915_gem_object_free_page_list(obj);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u8 *virt;
+ uint32_t *ptr, off;
+
+ if (!dev_priv->ring.ring_obj) {
+ seq_printf(m, "No ringbuffer setup\n");
+ return 0;
+ }
+
+ virt = dev_priv->ring.virtual_start;
+
+ for (off = 0; off < dev_priv->ring.Size; off += 4) {
+ ptr = (uint32_t *)(virt + off);
+ seq_printf(m, "%08x : %08x\n", off, *ptr);
+ }
+
+ return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned int head, tail, mask;
+
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ mask = dev_priv->ring.tail_mask;
+
+ seq_printf(m, "RingHead : %08x\n", head);
+ seq_printf(m, "RingTail : %08x\n", tail);
+ seq_printf(m, "RingMask : %08x\n", mask);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+ seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+ return 0;
+}
+
+
static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -152,6 +247,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_hws", i915_hws_info, 0},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_batchbuffers", i915_batchbuffer_info, 0},
};
#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
--
1.6.1.3