[PATCH libdrm] libdrm/amdgpu: add interface for kernel semaphores

Dave Airlie airlied at gmail.com
Tue Mar 14 00:50:50 UTC 2017


From: Dave Airlie <airlied at redhat.com>

This adds the corresponding libdrm code for using the new
kernel semaphore interfaces.

This will be used by radv to implement shared semaphores.
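
A minimal sketch of the intended cross-process lifecycle (assuming an
initialized amdgpu_device_handle "dev"; error handling elided):

  amdgpu_sem_handle sem, imported_sem;
  int shared_fd;

  /* create a semaphore and export it as a shareable fd */
  amdgpu_cs_create_sem(dev, &sem);
  amdgpu_cs_export_sem(dev, sem, &shared_fd);

  /* the fd is passed to the importing process (sketched in-process
   * here for brevity), which turns it back into a handle of its own */
  amdgpu_cs_import_sem(dev, shared_fd, &imported_sem);

  /* each side destroys its handle when done */
  amdgpu_cs_destroy_sem(dev, imported_sem);
  amdgpu_cs_destroy_sem(dev, sem);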

TODO: Version checks.

Signed-off-by: Dave Airlie <airlied at redhat.com>
---
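Reviewer note: a minimal sketch of a submission that waits on one of
these semaphores and signals another through the new entry point;
"ctx", "req", "wait_sem" and "signal_sem" are assumed to have been
set up elsewhere:

  amdgpu_sem_handle wait_sems[] = { wait_sem };
  amdgpu_sem_handle signal_sems[] = { signal_sem };
  struct amdgpu_cs_request_sem sem_req = {
          .number_of_wait_sem = 1,
          .wait_sems = wait_sems,
          .number_of_signal_sem = 1,
          .signal_sems = signal_sems,
  };

  /* waits on wait_sem before executing req, signals signal_sem after */
  int r = amdgpu_cs_submit_sem(ctx, 0, &req, &sem_req, 1);
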
 amdgpu/amdgpu.h          |  28 +++++++++
 amdgpu/amdgpu_cs.c       | 161 ++++++++++++++++++++++++++++++++++++++++++++---
 include/drm/amdgpu_drm.h |  28 +++++++++
 3 files changed, 208 insertions(+), 9 deletions(-)

diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 7b26a04..747e248 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -129,6 +129,8 @@ typedef struct amdgpu_va *amdgpu_va_handle;
  */
 typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
 
+typedef uint32_t amdgpu_sem_handle;
+
 /*--------------------------------------------------------------------------*/
 /* -------------------------- Structures ---------------------------------- */
 /*--------------------------------------------------------------------------*/
@@ -365,6 +367,16 @@ struct amdgpu_cs_request {
 	struct amdgpu_cs_fence_info fence_info;
 };
 
+struct amdgpu_cs_request_sem {
+	/*
+	 * semaphores to wait on before, and signal after, this submission
+	 */
+	uint32_t number_of_wait_sem;
+	amdgpu_sem_handle *wait_sems;
+	uint32_t number_of_signal_sem;
+	amdgpu_sem_handle *signal_sems;
+};
+
 /**
  * Structure which provide information about GPU VM MC Address space
  * alignments requirements
@@ -882,6 +894,12 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
 		     struct amdgpu_cs_request *ibs_request,
 		     uint32_t number_of_requests);
 
+int amdgpu_cs_submit_sem(amdgpu_context_handle context,
+			 uint64_t flags,
+			 struct amdgpu_cs_request *ibs_request,
+			 struct amdgpu_cs_request_sem *ibs_sem,
+			 uint32_t number_of_requests);
+
 /**
  *  Query status of Command Buffer Submission
  *
@@ -1255,4 +1273,14 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
 */
 const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
 
+int amdgpu_cs_create_sem(amdgpu_device_handle dev,
+			 amdgpu_sem_handle *sem);
+int amdgpu_cs_export_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem,
+			 int *shared_handle);
+int amdgpu_cs_import_sem(amdgpu_device_handle dev,
+			  int shared_handle,
+			 amdgpu_sem_handle *sem);
+int amdgpu_cs_destroy_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem);
 #endif /* #ifdef _AMDGPU_H_ */
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fb5b3a8..7283327 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -170,7 +170,8 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
  * \sa amdgpu_cs_submit()
 */
 static int amdgpu_cs_submit_one(amdgpu_context_handle context,
-				struct amdgpu_cs_request *ibs_request)
+				struct amdgpu_cs_request *ibs_request,
+				struct amdgpu_cs_request_sem *sem_request)
 {
 	union drm_amdgpu_cs cs;
 	uint64_t *chunk_array;
@@ -178,9 +179,11 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	struct drm_amdgpu_cs_chunk_data *chunk_data;
 	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
 	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+	struct drm_amdgpu_cs_chunk_sem *wait_sem_dependencies = NULL;
+	struct drm_amdgpu_cs_chunk_sem *signal_sem_dependencies = NULL;
 	struct list_head *sem_list;
 	amdgpu_semaphore_handle sem, tmp;
-	uint32_t i, size, sem_count = 0;
+	uint32_t i, j, size, sem_count = 0;
 	bool user_fence;
 	int r = 0;
 
@@ -196,7 +199,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 	}
 	user_fence = (ibs_request->fence_info.handle != NULL);
 
-	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1 + (sem_request ? 2 : 0);
 
 	chunk_array = alloca(sizeof(uint64_t) * size);
 	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
@@ -308,6 +311,45 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
 	}
 
+	if (sem_request) {
+		if (sem_request->number_of_wait_sem) {
+			wait_sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_request->number_of_wait_sem);
+			if (!wait_sem_dependencies) {
+				r = -ENOMEM;
+				goto error_unlock;
+			}
+			for (j = 0; j < sem_request->number_of_wait_sem; j++) {
+				struct drm_amdgpu_cs_chunk_sem *dep = &wait_sem_dependencies[j];
+				dep->handle = sem_request->wait_sems[j];
+			}
+			i = cs.in.num_chunks++;
+
+			/* wait semaphores chunk */
+			chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+			chunks[i].chunk_id = AMDGPU_CHUNK_ID_SEM_WAIT;
+			chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_request->number_of_wait_sem;
+			chunks[i].chunk_data = (uint64_t)(uintptr_t)wait_sem_dependencies;
+		}
+		if (sem_request->number_of_signal_sem) {
+			signal_sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * sem_request->number_of_signal_sem);
+			if (!signal_sem_dependencies) {
+				r = -ENOMEM;
+				goto error_unlock;
+			}
+			for (j = 0; j < sem_request->number_of_signal_sem; j++) {
+				struct drm_amdgpu_cs_chunk_sem *dep = &signal_sem_dependencies[j];
+				dep->handle = sem_request->signal_sems[j];
+			}
+			i = cs.in.num_chunks++;
+
+			/* signal semaphores chunk */
+			chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+			chunks[i].chunk_id = AMDGPU_CHUNK_ID_SEM_SIGNAL;
+			chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * sem_request->number_of_signal_sem;
+			chunks[i].chunk_data = (uint64_t)(uintptr_t)signal_sem_dependencies;
+		}
+	}
+
 	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
 				&cs, sizeof(cs));
 	if (r)
@@ -319,17 +361,20 @@ error_unlock:
 	pthread_mutex_unlock(&context->sequence_mutex);
 	free(dependencies);
 	free(sem_dependencies);
+	free(wait_sem_dependencies);
+	free(signal_sem_dependencies);
 	return r;
 }
 
-int amdgpu_cs_submit(amdgpu_context_handle context,
-		     uint64_t flags,
-		     struct amdgpu_cs_request *ibs_request,
-		     uint32_t number_of_requests)
+int amdgpu_cs_submit_sem(amdgpu_context_handle context,
+			 uint64_t flags,
+			 struct amdgpu_cs_request *ibs_request,
+			 struct amdgpu_cs_request_sem *ibs_sem,
+			 uint32_t number_of_requests)
 {
 	uint32_t i;
 	int r;
-
+	bool has_sems = ibs_sem != NULL;
 	if (NULL == context)
 		return -EINVAL;
 	if (NULL == ibs_request)
@@ -337,15 +382,28 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
 
 	r = 0;
 	for (i = 0; i < number_of_requests; i++) {
-		r = amdgpu_cs_submit_one(context, ibs_request);
+		r = amdgpu_cs_submit_one(context, ibs_request, has_sems ? ibs_sem : NULL);
 		if (r)
 			break;
 		ibs_request++;
+		if (has_sems)
+			ibs_sem++;
 	}
 
 	return r;
 }
 
+int amdgpu_cs_submit(amdgpu_context_handle context,
+		     uint64_t flags,
+		     struct amdgpu_cs_request *ibs_request,
+		     uint32_t number_of_requests)
+{
+	return amdgpu_cs_submit_sem(context, flags,
+				    ibs_request, NULL,
+				    number_of_requests);
+}
+
+
 /**
  * Calculate absolute timeout.
  *
@@ -542,3 +600,88 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
 {
 	return amdgpu_cs_unreference_sem(sem);
 }
+
+
+int amdgpu_cs_create_sem(amdgpu_device_handle dev,
+			 amdgpu_sem_handle *sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Create the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_CREATE_SEM;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+
+	*sem = args.out.handle;
+
+	return 0;
+}
+
+int amdgpu_cs_export_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem,
+			  int *shared_handle)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Export the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_EXPORT_SEM;
+	args.in.handle = sem;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+	*shared_handle = args.out.fd;
+	return 0;
+}
+
+int amdgpu_cs_import_sem(amdgpu_device_handle dev,
+			  int shared_handle,
+			  amdgpu_sem_handle *sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Import the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_IMPORT_SEM;
+	args.in.handle = shared_handle;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+	*sem = args.out.handle;
+	return 0;
+}
+
+
+int amdgpu_cs_destroy_sem(amdgpu_device_handle dev,
+			  amdgpu_sem_handle sem)
+{
+	union drm_amdgpu_sem args;
+	int r;
+
+	if (NULL == dev)
+		return -EINVAL;
+
+	/* Destroy the semaphore */
+	memset(&args, 0, sizeof(args));
+	args.in.op = AMDGPU_SEM_OP_DESTROY_SEM;
+	args.in.handle = sem;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_SEM, &args, sizeof(args));
+	if (r)
+		return r;
+
+	return 0;
+}
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index d8f2497..fa0bfe2 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_CS		0x09
 #define DRM_AMDGPU_GEM_OP		0x10
 #define DRM_AMDGPU_GEM_USERPTR		0x11
+#define DRM_AMDGPU_SEM                  0x13
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_SEM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SEM, union drm_amdgpu_sem)
 
 #define AMDGPU_GEM_DOMAIN_CPU		0x1
 #define AMDGPU_GEM_DOMAIN_GTT		0x2
@@ -303,6 +305,26 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };
 
+#define AMDGPU_SEM_OP_CREATE_SEM 0
+#define AMDGPU_SEM_OP_IMPORT_SEM 1
+#define AMDGPU_SEM_OP_EXPORT_SEM 2
+#define AMDGPU_SEM_OP_DESTROY_SEM 3
+
+struct drm_amdgpu_sem_in {
+	__u32 op;
+	__u32 handle;
+};
+
+struct drm_amdgpu_sem_out {
+	__u32 fd;
+	__u32 handle;
+};
+
+union drm_amdgpu_sem {
+	struct drm_amdgpu_sem_in in;
+	struct drm_amdgpu_sem_out out;
+};
+
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
 
@@ -358,6 +380,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_IB		0x01
 #define AMDGPU_CHUNK_ID_FENCE		0x02
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
+#define AMDGPU_CHUNK_ID_SEM_WAIT        0x04
+#define AMDGPU_CHUNK_ID_SEM_SIGNAL      0x05
 
 struct drm_amdgpu_cs_chunk {
 	uint32_t		chunk_id;
@@ -422,6 +446,10 @@ struct drm_amdgpu_cs_chunk_fence {
 	uint32_t offset;
 };
 
+struct drm_amdgpu_cs_chunk_sem {
+	uint32_t handle;
+};
+
 struct drm_amdgpu_cs_chunk_data {
 	union {
 		struct drm_amdgpu_cs_chunk_ib		ib_data;
-- 
2.7.4