[Intel-gfx] [PATCH] intel: buffer object leak tracing debug code.

Keith Packard keithp at keithp.com
Tue Dec 16 00:08:15 CET 2008


This adds some debug code, guarded by a DEBUG_LEAK define (currently
hardwired to 1), to track what kinds of buffer objects are in use and what
is sitting in the re-use cache.  Each live BO is kept on a list and
allocations/frees are counted; a new drm_intel_bo_count() function dumps
per-name totals (count, bytes, and how many are mapped) for both the
in-use buffers and the cache buckets.  The dump is also triggered
automatically the first time a GEM mmap fails.

Signed-off-by: Keith Packard <keithp at keithp.com>
---
 libdrm/intel/intel_bufmgr_gem.c |  176 +++++++++++++++++++++++++++++++++++++++
 1 files changed, 176 insertions(+), 0 deletions(-)
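
Not part of the patch, just a note on exercising it: drm_intel_bo_count()
gets an explicit prototype and external linkage, and the drm_intel_bo_inuse,
drm_intel_bo_alloc_count and drm_intel_bo_free_count counters are plain
globals, so the quickest way to poke at the state is "call
drm_intel_bo_count()" from gdb.  A test or driver built against a
libdrm_intel with DEBUG_LEAK enabled can also declare the symbols itself
(assuming the build doesn't hide them) and dump statistics on demand.  A
minimal sketch (only the dump_bo_stats() wrapper below is made up; the
symbols it references come from this patch):

#include <stdio.h>

/* Added by this patch; no header declares them yet, so declare by hand. */
extern void drm_intel_bo_count(void);
extern int drm_intel_bo_inuse;
extern int drm_intel_bo_alloc_count;
extern int drm_intel_bo_free_count;

/*
 * Hypothetical helper: call from the driver's own debug path, e.g. once
 * per frame, to print the global counters and the per-name BO breakdown
 * to stderr.
 */
void
dump_bo_stats(void)
{
    fprintf(stderr, "BOs: %d in use, %d allocated, %d freed\n",
            drm_intel_bo_inuse, drm_intel_bo_alloc_count,
            drm_intel_bo_free_count);
    drm_intel_bo_count();
}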

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 9ba377b..4d1f7d1 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -101,6 +101,8 @@ typedef struct _drm_intel_bufmgr_gem {
     uint64_t gtt_size;
 } drm_intel_bufmgr_gem;
 
+#define DEBUG_LEAK 1
+
 struct _drm_intel_bo_gem {
     drm_intel_bo bo;
 
@@ -146,6 +148,9 @@ struct _drm_intel_bo_gem {
     /** free list */
     drm_intel_bo_gem *next;
 
+#if DEBUG_LEAK
+    drm_intel_bo_gem *leak_prev, *leak_next;
+#endif
     /**
      * Boolean of whether this BO and its children have been included in
      * the current drm_intel_bufmgr_check_aperture_space() total.
@@ -168,6 +173,168 @@ struct _drm_intel_bo_gem {
     int reloc_tree_size;
 };
 
+#if DEBUG_LEAK
+static drm_intel_bo_gem *leak_head;
+int drm_intel_bo_inuse;
+int drm_intel_bo_alloc_count, drm_intel_bo_free_count;
+static drm_intel_bufmgr_gem *leak_mgr;
+
+static void
+leak_insert(drm_intel_bo_gem *bo)
+{
+    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bo.bufmgr;
+    assert(bo->leak_prev == NULL);
+    assert(bo->leak_next == NULL);
+
+    pthread_mutex_lock(&bufmgr_gem->lock);
+    bo->leak_prev = NULL;
+    if (leak_head)
+	leak_head->leak_prev = bo;
+    bo->leak_next = leak_head;
+    leak_head = bo;
+    drm_intel_bo_inuse++;
+    drm_intel_bo_alloc_count++;
+    leak_mgr = bufmgr_gem;
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
+static void
+leak_remove_locked(drm_intel_bo_gem *bo)
+{
+    drm_intel_bo_gem	**next;
+
+    if (bo->leak_prev == NULL) {
+	assert(leak_head == bo);
+	next = &leak_head;
+    } else {
+	assert(bo->leak_prev != NULL);
+	assert(bo->leak_prev->leak_next == bo);
+	next = &bo->leak_prev->leak_next;
+    }
+    *next = bo->leak_next;
+
+    if (bo->leak_next != NULL) {
+	assert(bo->leak_next->leak_prev == bo);
+	bo->leak_next->leak_prev = bo->leak_prev;
+    }
+    bo->leak_next = bo->leak_prev = NULL;
+    drm_intel_bo_inuse--;
+    drm_intel_bo_free_count++;
+}
+
+struct bo_count {
+    const char	*name;
+    int		count;
+    int		bytes;
+    int		mapped_count;
+    int		mapped_bytes;
+};
+
+#define MAX_COUNTS  1024
+
+static void
+bo_count_add(struct bo_count bo_counts[MAX_COUNTS], 
+	     drm_intel_bo_gem *bo)
+{
+    int	i;
+
+    for (i = 0; i < MAX_COUNTS; i++) {
+	if (bo_counts[i].name == NULL) {
+	    bo_counts[i].name = bo->name;
+	    break;
+	}
+	if (bo_counts[i].name == bo->name ||
+	    strcmp(bo_counts[i].name, bo->name) == 0)
+	    break;
+    }
+    if (i == MAX_COUNTS)
+	return;
+    bo_counts[i].count++;
+    bo_counts[i].bytes += bo->bo.size;
+    if (bo->mapped) {
+	bo_counts[i].mapped_count++;
+	bo_counts[i].mapped_bytes += bo->bo.size;
+    }
+}
+
+static void
+bo_count_reset(struct bo_count bo_counts[MAX_COUNTS])
+{
+    memset(bo_counts, 0, MAX_COUNTS * sizeof (struct bo_count));
+}
+
+static int bo_count_compar(const void *av, const void *bv)
+{
+    const struct bo_count *a = av;
+    const struct bo_count *b = bv;
+    int	v;
+
+    v = b->count - a->count;
+    if (v == 0)
+	v = b->bytes - a->bytes;
+    return v;
+}
+
+static void
+bo_count_dump(struct bo_count bo_counts[MAX_COUNTS])
+{
+    int	ncount, i;
+
+    for (i = 0; i < MAX_COUNTS; i++)
+	if (bo_counts[i].name == NULL)
+	    break;
+    ncount = i;
+    qsort(bo_counts, ncount, sizeof(struct bo_count), bo_count_compar);
+    for (i = 0; i < ncount; i++)
+	fprintf(stderr, "\t%-25.25s %8d (%10d bytes) alloc %8d (%10d bytes) map\n",
+		bo_counts[i].name,
+		bo_counts[i].count, bo_counts[i].bytes,
+		bo_counts[i].mapped_count, bo_counts[i].mapped_bytes);
+}
+
+void
+drm_intel_bo_count(void);
+
+void
+drm_intel_bo_count(void)
+{
+    struct bo_count bo_inuse[MAX_COUNTS];
+    struct bo_count bo_idle[MAX_COUNTS];
+    drm_intel_bo_gem *bo;
+    char bucket_name[DRM_INTEL_GEM_BO_BUCKETS][64];
+    int	b;
+    int num_idle = 0;
+
+    fprintf(stderr, "%s: %10d in use %10d alloc %10d free\n",
+	    __func__, drm_intel_bo_inuse, drm_intel_bo_alloc_count, drm_intel_bo_free_count);
+    bo_count_reset(bo_inuse);
+    for (bo = leak_head; bo; bo = bo->leak_next)
+	bo_count_add(bo_inuse, bo);
+    bo_count_dump(bo_inuse);
+
+    if (leak_mgr) {
+	bo_count_reset(bo_idle);
+
+	for (b = 0; b < DRM_INTEL_GEM_BO_BUCKETS; b++) {
+	    num_idle += leak_mgr->cache_bucket[b].num_entries;
+	    sprintf(bucket_name[b], "bucket %d", (1 << b) * 4096);
+	    for (bo = leak_mgr->cache_bucket[b].head; bo; bo = bo->next) {
+		assert(bo->name == NULL);
+		bo->name = bucket_name[b];
+		bo_count_add(bo_idle, bo);
+		bo->name = NULL;
+	    }
+	}
+	fprintf(stderr, "%s: %d idle\n", __func__, num_idle);
+	bo_count_dump(bo_idle);
+    }
+}
+#else
+#define leak_insert(bo)
+#define leak_remove_locked(bo)
+#define drm_intel_bo_count()
+#endif
+
 static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
 
 static unsigned int
@@ -397,6 +563,9 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
     DBG("bo_create: buf %d (%s) %ldb\n",
 	bo_gem->gem_handle, bo_gem->name, size);
 
+
+    leak_insert(bo_gem);
+
     return &bo_gem->bo;
 }
 
@@ -504,6 +673,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
     if (--bo_gem->refcount == 0) {
 	struct drm_intel_gem_bo_bucket *bucket;
 
+	leak_remove_locked(bo_gem);
 	if (bo_gem->relocs != NULL) {
 	    int i;
 
@@ -582,9 +752,14 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	    mmap_arg.size = bo->size;
 	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 	    if (ret != 0) {
+		static int been_here;
 		fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
 			__FILE__, __LINE__,
 			bo_gem->gem_handle, bo_gem->name, strerror(errno));
+		if (!been_here) {
+		    been_here = 1;
+		    drm_intel_bo_count();
+		}
 		pthread_mutex_unlock(&bufmgr_gem->lock);
 		return ret;
 	    }
-- 
1.5.6.5