[Intel-gfx] [PATCH 3/6] intel: Export raw GEM mmap interfaces

Chris Wilson <chris@chris-wilson.co.uk>
Tue May 5 01:53:57 PDT 2015


Export a set of interfaces that give the caller precise control over
how a buffer is mapped (CPU, GTT or WC), while still caching the
resulting mmaps so they can be shared between callers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 intel/intel_bufmgr.h     |   4 ++
 intel/intel_bufmgr_gem.c | 148 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 152 insertions(+)
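
Usage note (not part of the patch): a caller might pick whichever raw
mapping suits its access pattern and fall back when one is unavailable.
The sketch below is purely illustrative; fill_bo() and its fallback
order are hypothetical, and synchronisation with the GPU is deliberately
left to the caller, as these raw interfaces do no domain tracking.

#include <string.h>
#include "intel_bufmgr.h"

/* Illustrative helper (hypothetical, not part of libdrm): upload data
 * through whichever raw mapping is available. The map__* calls return
 * NULL on failure and cache the mapping for later callers. */
static void fill_bo(drm_intel_bo *bo, const void *data, size_t len)
{
	void *ptr;

	/* Prefer WC for streaming CPU writes; fall back to the GTT map,
	 * then to the cacheable CPU map as a last resort. */
	ptr = drm_intel_gem_bo_map__wc(bo);
	if (ptr == NULL)
		ptr = drm_intel_gem_bo_map__gtt(bo);
	if (ptr == NULL)
		ptr = drm_intel_gem_bo_map__cpu(bo);
	if (ptr == NULL)
		return;

	/* Raw maps do no implicit synchronisation; wait for the GPU
	 * before touching the pages. */
	drm_intel_bo_wait_rendering(bo);
	memcpy(ptr, data, len);
}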

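The WC path in the diff below relies on the kernel accepting the flags
field of DRM_IOCTL_I915_GEM_MMAP (I915_MMAP_WC). A caller that wants to
degrade gracefully on older kernels could probe for that first; a
minimal sketch, assuming I915_PARAM_MMAP_VERSION is available in the
installed i915_drm.h:

#include <string.h>
#include <stdbool.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns true when the kernel's mmap ioctl understands the flags
 * field (version >= 1), i.e. when I915_MMAP_WC can be used. */
static bool kernel_has_mmap_wc(int fd)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_MMAP_VERSION;
	gp.value = &value;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return value >= 1;
}
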
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index 285919e..f35c59b 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -178,6 +178,10 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
 
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
+
 int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
 void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
 void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 43cbae5..61a248f 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -191,6 +191,8 @@ struct _drm_intel_bo_gem {
 	void *mem_virtual;
 	/** GTT virtual address for the buffer, saved across map/unmap cycles */
 	void *gtt_virtual;
+	/** WC CPU address for the buffer, saved across map/unmap cycles */
+	void *wc_virtual;
 	/**
 	 * Virtual address of the buffer allocated by user, used for userptr
 	 * objects only.
@@ -1129,6 +1131,11 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
 	}
+	if (bo_gem->wc_virtual) {
+		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
+		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
+		bufmgr_gem->vma_count--;
+	}
 	if (bo_gem->gtt_virtual) {
 		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 		bufmgr_gem->vma_count--;
@@ -1155,6 +1162,9 @@ drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
 	if (bo_gem->mem_virtual)
 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
 
+	if (bo_gem->wc_virtual)
+		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
+
 	if (bo_gem->gtt_virtual)
 		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
 #endif
@@ -1228,6 +1238,11 @@ static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
 			bo_gem->mem_virtual = NULL;
 			bufmgr_gem->vma_count--;
 		}
+		if (bo_gem->wc_virtual) {
+			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
+			bo_gem->wc_virtual = NULL;
+			bufmgr_gem->vma_count--;
+		}
 		if (bo_gem->gtt_virtual) {
 			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
 			bo_gem->gtt_virtual = NULL;
@@ -1243,6 +1258,8 @@ static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
 	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count++;
+	if (bo_gem->wc_virtual)
+		bufmgr_gem->vma_count++;
 	if (bo_gem->gtt_virtual)
 		bufmgr_gem->vma_count++;
 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -1255,6 +1272,8 @@ static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
 	DRMLISTDEL(&bo_gem->vma_list);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count--;
+	if (bo_gem->wc_virtual)
+		bufmgr_gem->vma_count--;
 	if (bo_gem->gtt_virtual)
 		bufmgr_gem->vma_count--;
 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -3516,6 +3535,135 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
 	}
 }
 
+void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->is_userptr)
+		return NULL;
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+	if (bo_gem->map_count++ == 0)
+		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+	if (bo_gem->gtt_virtual == NULL) {
+		struct drm_i915_gem_mmap_gtt mmap_arg;
+
+		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+
+		/* Get the fake offset back... */
+		bo_gem->gtt_virtual = MAP_FAILED;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP_GTT,
+			     &mmap_arg) == 0) {
+			/* and mmap it */
+			bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+						       MAP_SHARED, bufmgr_gem->fd,
+						       mmap_arg.offset);
+		}
+		if (bo_gem->gtt_virtual == MAP_FAILED) {
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+			bo_gem->gtt_virtual = NULL;
+		}
+	}
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->gtt_virtual;
+}
+
+void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->is_userptr) {
+		/* Return the same user ptr */
+		return bo_gem->user_virtual;
+	}
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
+	if (bo_gem->map_count++ == 0)
+		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+	if (!bo_gem->mem_virtual) {
+		struct drm_i915_gem_mmap mmap_arg;
+
+		DBG("bo_map: %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+		mmap_arg.size = bo->size;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP,
+			     &mmap_arg)) {
+			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+			    __FILE__, __LINE__, bo_gem->gem_handle,
+			    bo_gem->name, strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+		} else {
+			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+		}
+	}
+	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+	    bo_gem->mem_virtual);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->mem_virtual;
+}
+
+void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->is_userptr)
+		return NULL;
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
+	if (bo_gem->map_count++ == 0)
+		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+	if (!bo_gem->wc_virtual) {
+		struct drm_i915_gem_mmap mmap_arg;
+
+		DBG("bo_map: %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+		memclear(mmap_arg);
+		mmap_arg.handle = bo_gem->gem_handle;
+		mmap_arg.size = bo->size;
+		mmap_arg.flags = I915_MMAP_WC;
+		if (drmIoctl(bufmgr_gem->fd,
+			     DRM_IOCTL_I915_GEM_MMAP,
+			     &mmap_arg)) {
+			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+			    __FILE__, __LINE__, bo_gem->gem_handle,
+			    bo_gem->name, strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+		} else {
+			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
+			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+		}
+	}
+	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+	    bo_gem->wc_virtual);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
+	return bo_gem->wc_virtual;
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
-- 
2.1.4