[PATCH] intel: make bufmgr_gem shareable across different APIs

Lionel Landwerlin lionel.g.landwerlin at intel.com
Thu Sep 11 08:36:20 PDT 2014


When using Mesa and LibVA in the same process, one would like to be
able to bind buffers from the output of the decoder to a GL texture
through an EGLImage.

LibVA can reuse buffers allocated by GBM through a file descriptor. It
will then wrap them into drm_intel_bo objects with
drm_intel_bo_gem_create_from_prime().
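
For reference, the import path looks roughly like this (bufmgr,
prime_fd and size are placeholders for values obtained elsewhere; this
is only a sketch):

    /* Wrap a dma-buf fd exported by another API into a drm_intel_bo. */
    drm_intel_bo *bo =
        drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, size);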

The problem at the moment is that both libraries get a different
drm_intel_bufmgr object when they call drm_intel_bufmgr_gem_init(),
even though they are using the same DRM file descriptor. As a result,
instead of manipulating the same buffer object for a given file
descriptor, they get two different drm_intel_bo objects and two
different refcounts, and one of the libraries ends up getting
invalid-BO errors from the kernel once the other library is done with
a shared buffer.

This patch modifies drm_intel_bufmgr_gem_init() so that, given a file
descriptor, it looks for an already existing drm_intel_bufmgr using
the same file descriptor and returns that object.
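
With this change, both components get the same manager back for a
given file descriptor where they previously got two. Illustrative
sketch (fd and batch_size are placeholders):

    drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, batch_size);
    drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, batch_size);
    /* a == b: both callers now share one drm_intel_bufmgr_gem */
    drm_intel_bufmgr_destroy(b); /* drops a reference; a stays valid */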

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
 intel/intel_bufmgr_gem.c | 82 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 70 insertions(+), 12 deletions(-)

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 0e1cb0d..ce43bc6 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
 	drm_intel_bufmgr bufmgr;
 
+	atomic_t refcount;
+
 	int fd;
 
 	int max_relocs;
@@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	int num_buckets;
 	time_t time;
 
+	drmMMListHead managers;
+
 	drmMMListHead named;
 	drmMMListHead vma_cache;
 	int vma_count, vma_open, vma_max;
@@ -3186,6 +3190,65 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 	bo_gem->aub_annotation_count = count;
 }
 
+/* All drm_intel_bufmgr_gem created in this process, one per DRM file
+ * descriptor, protected by bufmgr_list_mutex. */
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { NULL, NULL };
+
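+/* Look up an existing manager for fd and take a reference on it, or
+ * allocate a new one.  *found tells the caller which case occurred;
+ * a newly created manager is returned with its lock held until
+ * drm_intel_bufmgr_gem_init() has finished setting it up. */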
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find_or_create_for_fd(int fd, int *found)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	if (bufmgr_list.next == NULL) {
+		DRMINITLISTHEAD(&bufmgr_list);
+	} else {
+		DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+			if (bufmgr_gem->fd == fd) {
+				atomic_inc(&bufmgr_gem->refcount);
+				*found = 1;
+				goto exit;
+			}
+		}
+	}
+
+	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+	if (bufmgr_gem == NULL)
+		goto exit;
+
+	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
+
+	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+		free(bufmgr_gem);
+		bufmgr_gem = NULL;
+		goto exit;
+	}
+
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+	pthread_mutex_lock(&bufmgr_gem->lock);
+
+	*found = 0;
+
+exit:
+	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem;
+}
+
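+/* Drop one reference on the manager; the last unref unlinks it from
+ * bufmgr_list and destroys it. */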
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+		pthread_mutex_lock(&bufmgr_list_mutex);
+
+		DRMLISTDEL(&bufmgr_gem->managers);
+
+		pthread_mutex_unlock(&bufmgr_list_mutex);
+
+		drm_intel_bufmgr_gem_destroy(bufmgr);
+	}
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -3201,16 +3264,9 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	int ret, tmp;
 	bool exec2 = false;
 
-	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
-	if (bufmgr_gem == NULL)
-		return NULL;
-
-	bufmgr_gem->fd = fd;
-
-	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
-		free(bufmgr_gem);
-		return NULL;
-	}
+	bufmgr_gem = drm_intel_bufmgr_gem_find_or_create_for_fd(fd, &ret);
+	if (bufmgr_gem == NULL)
+		return NULL;
+	if (ret)
+		return &bufmgr_gem->bufmgr;
 
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_APERTURE,
@@ -3245,7 +3301,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	else if (IS_GEN8(bufmgr_gem->pci_device))
 		bufmgr_gem->gen = 8;
 	else {
-		free(bufmgr_gem);
+		drm_intel_bufmgr_gem_unref(&bufmgr_gem->bufmgr);
 		return NULL;
 	}
 
@@ -3357,7 +3413,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
 	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3373,5 +3429,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+
 	return &bufmgr_gem->bufmgr;
 }
-- 
2.0.1


