[Intel-gfx] [PATCH 2/2] intel: Free old buffers from the cache bucket after allocating a new bo.

Eric Anholt eric at anholt.net
Tue Mar 31 04:04:06 CEST 2009


This still leaves the problem that an idle bucket won't end up getting its
members freed until a new buffer is allocated out of it.  Still, it
should help rein in BO allocations by the X Server after it frees a bunch
of pixmaps and then continues running.
---
 libdrm/intel/intel_bufmgr_gem.c |   42 ++++++++++++++++++++++++++++++++++++--
 1 files changed, 39 insertions(+), 3 deletions(-)
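
For reference, the aging policy that the new drm_intel_gem_bucket_free_old()
implements boils down to the sketch below.  It is illustrative only: the
list type and helper names are made up rather than libdrm's DRMLIST macros,
and the cutoff is a parameter instead of the hard-coded 5 seconds.

#include <stdlib.h>
#include <time.h>

struct cache_entry {
    struct cache_entry *next;		/* singly linked, oldest first */
    double last_active_time;		/* seconds, from CLOCK_MONOTONIC */
};

static double
now_seconds(void)
{
    struct timespec tv;

    clock_gettime(CLOCK_MONOTONIC, &tv);
    return tv.tv_sec + tv.tv_nsec / 1000000000.0;
}

/* Free entries that have sat unused for longer than max_age seconds.
 * Because the list is ordered oldest to newest, the walk can stop at
 * the first entry that is still young enough to keep. */
static void
free_old_entries(struct cache_entry **head, double max_age)
{
    double time = now_seconds();

    while (*head != NULL && time - (*head)->last_active_time > max_age) {
	struct cache_entry *victim = *head;

	*head = victim->next;
	free(victim);
    }
}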

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 79f9dea..ffe309f 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -178,8 +178,12 @@ struct _drm_intel_bo_gem {
      * relocations.
      */
     int reloc_tree_fences;
+
+    float last_active_time;
 };
 
+static void drm_intel_gem_bo_free(drm_intel_bo *bo);
+
 static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
 
 static unsigned int
@@ -239,12 +243,17 @@ drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
  * moving buffers that are now unbusy (no longer being rendered with) to the
  * inactive list.
  */
-static void
+static float
 drm_intel_gem_update_bucket_inactive(drm_intel_bufmgr_gem *bufmgr_gem,
 				     struct drm_intel_gem_bo_bucket *bucket)
 {
     struct drm_i915_gem_busy busy;
     drm_intel_bo_gem *bo_gem;
+    struct timespec tv;
+    float time;
+
+    clock_gettime(CLOCK_MONOTONIC, &tv);
+    time = (float)tv.tv_sec + (float)tv.tv_nsec / 1000000000.0;
 
     while (!DRMLISTEMPTY(&bucket->active_head)) {
 	int ret;
@@ -258,10 +267,33 @@ drm_intel_gem_update_bucket_inactive(drm_intel_bufmgr_gem *bufmgr_gem,
 	if (ret != 0 || busy.busy == 0) {
 	    DRMLISTDEL(&bo_gem->head);
 	    DRMLISTADDTAIL(&bo_gem->head, &bucket->inactive_head);
+	    bo_gem->last_active_time = time;
 	} else {
 	    break;
 	}
     }
+
+    return time;
+}
+
+static void
+drm_intel_gem_bucket_free_old(drm_intel_bufmgr_gem *bufmgr_gem,
+			      struct drm_intel_gem_bo_bucket *bucket,
+			      float time)
+{
+    drm_intel_bo_gem *bo_gem;
+
+    while (!DRMLISTEMPTY(&bucket->inactive_head)) {
+	bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+			      bucket->inactive_head.next, head);
+
+	if (time - bo_gem->last_active_time <= 5)
+	    break;
+
+	DRMLISTDEL(&bo_gem->head);
+	bucket->num_entries--;
+	drm_intel_gem_bo_free(&bo_gem->bo);
+    }
 }
 
 static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
@@ -386,6 +418,10 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
     pthread_mutex_lock(&bufmgr_gem->lock);
     /* Get a buffer out of the cache if available */
     if (bucket != NULL) {
+	float time;
+
+	time = drm_intel_gem_update_bucket_inactive(bufmgr_gem, bucket);
+
 	if (for_render) {
 	    /* Allocate new render-target BOs from the tail (MRU)
 	     * of the lists, as it will likely be hot in the GPU cache
@@ -409,8 +445,6 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 	     * the aperture if possible, but most important is to not block
 	     * on the GPU finishing.
 	     */
-	    drm_intel_gem_update_bucket_inactive(bufmgr_gem, bucket);
-
 	    if (!DRMLISTEMPTY(&bucket->inactive_head)) {
 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
 				      bucket->inactive_head.prev, head);
@@ -418,6 +452,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 		bucket->num_entries--;
 	    }
 	}
+
+	drm_intel_gem_bucket_free_old(bufmgr_gem, bucket, time);
     }
     pthread_mutex_unlock(&bufmgr_gem->lock);
 
-- 
1.6.2.1
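
As a usage-side illustration (not part of the patch), the churn this targets
looks roughly like the code below, written against libdrm's public
intel_bufmgr API.  The device node path and the need to call
drm_intel_bufmgr_gem_enable_reuse() are assumptions here, and error handling
is omitted for brevity.

#include <fcntl.h>
#include <unistd.h>
#include "intel_bufmgr.h"	/* from libdrm */

int main(void)
{
    drm_intel_bufmgr *bufmgr;
    drm_intel_bo *bo[64];
    int fd, i;

    fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
    if (fd < 0)
	return 1;

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);	/* let freed BOs populate buckets */

    /* Allocate and release a batch of same-sized buffers; each
     * unreference parks the BO in its size bucket instead of freeing it. */
    for (i = 0; i < 64; i++)
	bo[i] = drm_intel_bo_alloc(bufmgr, "pixmap", 1024 * 1024, 4096);
    for (i = 0; i < 64; i++)
	drm_intel_bo_unreference(bo[i]);

    sleep(6);

    /* With this patch, the next allocation from the same bucket also frees
     * any cached entries that have been inactive for more than 5 seconds. */
    drm_intel_bo_unreference(drm_intel_bo_alloc(bufmgr, "pixmap",
						1024 * 1024, 4096));

    drm_intel_bufmgr_destroy(bufmgr);
    close(fd);
    return 0;
}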