[Intel-gfx] [PATCH] intel: Mark cached bo as purgeable

Chris Wilson chris at chris-wilson.co.uk
Sat Jun 6 10:49:12 CEST 2009


Wrap the new DRM_I915_GEM_MADVISE ioctl that controls the purgeability of
the backing pages, i.e. we can use the ioctl to reclaim memory under swap
pressure.
---
 libdrm/intel/intel_bufmgr_gem.c |   49 +++++++++++++++++++++++++++++++++++++++
 shared-core/i915_drm.h          |   16 ++++++++++++
 2 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index c25fc4c..d6d2a78 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -203,6 +203,9 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
 static void
 drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo);
+
 static int
 logbase2(int n)
 {
@@ -331,6 +334,41 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
     return 0;
 }
 
+static void
+drm_intel_gem_bo_cache_purge(drm_intel_bufmgr *bufmgr)
+{
+    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+    int i;
+
+    pthread_mutex_lock(&bufmgr_gem->lock);
+
+    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+	struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+	drm_intel_bo_gem *bo_gem;
+
+	while (!DRMLISTEMPTY(&bucket->head)) {
+	    struct drm_i915_gem_madvise madv;
+
+	    bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
+
+	    madv.handle = bo_gem->gem_handle;
+	    madv.madv = I915_MADV_WILLNEED;
+	    madv.retained = 1;
+	    ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+	    if (madv.retained)
+		break;
+
+	    DRMLISTDEL(&bo_gem->head);
+	    bucket->num_entries--;
+
+	    drm_intel_gem_bo_free(&bo_gem->bo);
+	}
+    }
+
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
 static drm_intel_bo *
 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 				unsigned long size, unsigned int alignment,
@@ -396,6 +434,17 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
     }
     pthread_mutex_unlock(&bufmgr_gem->lock);
 
+    if (alloc_from_cache) {
+	struct drm_i915_gem_madvise madv;
+
+	madv.handle = bo_gem->gem_handle;
+	madv.madv = I915_MADV_WILLNEED;
+	madv.retained = 1;
+	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+	if (!madv.retained)
+	    drm_intel_gem_bo_cache_purge(bufmgr);
+    }
+
     if (!alloc_from_cache) {
 	struct drm_i915_gem_create create;
 
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index c8fec5f..e2643ad 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -206,6 +206,7 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_GET_APERTURE 0x23
 #define DRM_I915_GEM_MMAP_GTT	0x24
 #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
+#define DRM_I915_GEM_MADVISE	0x26
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -244,6 +245,7 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
 
 /* Asynchronous page flipping:
  */
@@ -779,4 +781,18 @@ struct drm_i915_get_pipe_from_crtc_id {
 	uint32_t pipe;
 };
 
+#define I915_MADV_WILLNEED	0
+#define I915_MADV_DONTNEED	1
+
+struct drm_i915_gem_madvise {
+	/** Handle of the buffer to change the backing store advice. */
+	uint32_t handle;
+
+	/** Advice. */
+	uint32_t madv;
+
+	/** Whether or not the backing store still exists */
+	uint32_t retained;
+};
+
 #endif				/* _I915_DRM_H_ */
-- 
1.6.3.1




More information about the Intel-gfx mailing list