[Intel-gfx] [PATCH 1/2] intel: Mark cached bo as purgeable
Chris Wilson
chris at chris-wilson.co.uk
Fri Oct 2 05:31:34 CEST 2009
Set the DONTNEED flag on cached buffers so that the kernel is free to
discard those when under memory pressure.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
libdrm/intel/intel_bufmgr_gem.c | 49 +++++++++++++++++++++++++++++++++++++-
shared-core/i915_drm.h | 16 ++++++++++++
2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 6424c54..6125f6b 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -191,6 +191,9 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
static void
drm_intel_gem_bo_unreference(drm_intel_bo *bo);
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo);
+
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long size)
@@ -315,6 +318,38 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
return (ret == 0 && busy.busy);
}
+/* Advise the kernel about the backing store of a cached buffer object.
+ *
+ * state is I915_MADV_DONTNEED (pages may be discarded under memory
+ * pressure) or I915_MADV_WILLNEED (pages are needed again).
+ *
+ * Returns madv.retained: non-zero if the backing store still exists,
+ * 0 if the kernel has already purged the pages (bo contents are lost).
+ *
+ * NOTE(review): retained is pre-set to 1 and the ioctl return value is
+ * ignored, so on a kernel without MADVISE support the bo is treated as
+ * retained — presumably intentional for backwards compatibility; confirm.
+ */
+static int
+drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem,
+ int state)
+{
+ struct drm_i915_gem_madvise madv;
+
+ madv.handle = bo_gem->gem_handle;
+ madv.madv = state;
+ madv.retained = 1;
+ ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+ return madv.retained;
+}
+
+/* drop the oldest entries that have been purged by the kernel
+ *
+ * Buffers in the cache were already marked DONTNEED when they were
+ * placed in the bucket; re-issuing DONTNEED here reports via the
+ * retained flag whether the kernel has since discarded the pages.
+ * The bucket is LRU-ordered (oldest at head), so we free purged
+ * entries from the head and stop at the first still-retained bo.
+ */
+static void
+drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
+ struct drm_intel_gem_bo_bucket *bucket)
+{
+ while (!DRMLISTEMPTY(&bucket->head)) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
+ /* Still retained: this and all newer entries survive — stop. */
+ if (drm_intel_gem_bo_madvise (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
+ break;
+
+ /* Purged by the kernel: unlink from the cache and free the bo. */
+ DRMLISTDEL(&bo_gem->head);
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ }
+}
+
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment,
@@ -325,7 +360,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
unsigned int page_size = getpagesize();
int ret;
struct drm_intel_gem_bo_bucket *bucket;
- int alloc_from_cache = 0;
+ int alloc_from_cache;
unsigned long bo_size;
/* Round the allocated size up to a power of two number of pages. */
@@ -344,6 +379,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a buffer out of the cache if available */
+retry:
+ alloc_from_cache = 0;
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
if (for_render) {
/* Allocate new render-target BOs from the tail (MRU)
@@ -361,12 +398,19 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
* waiting for the GPU to finish.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
-
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
alloc_from_cache = 1;
DRMLISTDEL(&bo_gem->head);
}
}
+
+ if (alloc_from_cache) {
+ if(!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
+ drm_intel_gem_bo_free(&bo_gem->bo);
+ drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, bucket);
+ goto retry;
+ }
+ }
}
pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -592,6 +636,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+ drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
} else {
drm_intel_gem_bo_free(bo);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 2539966..b214600 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -206,6 +206,7 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -244,6 +245,7 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
/* Asynchronous page flipping:
*/
@@ -727,4 +729,18 @@ struct drm_i915_get_pipe_from_crtc_id {
uint32_t pipe;
};
+/* Backing-store advice values for DRM_IOCTL_I915_GEM_MADVISE. */
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+
+/* Argument block for DRM_IOCTL_I915_GEM_MADVISE (userspace -> kernel
+ * for handle/madv; kernel -> userspace for retained). */
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice. */
+ uint32_t handle;
+
+ /** Advice: I915_MADV_WILLNEED or I915_MADV_DONTNEED. */
+ uint32_t madv;
+
+ /** Whether or not the backing store still exists */
+ uint32_t retained;
+};
+
#endif /* _I915_DRM_H_ */
--
1.6.4.3
More information about the Intel-gfx
mailing list