[PATCH 2/5] drm/syncobj: add sync obj wait interface.

Dave Airlie airlied at gmail.com
Wed Apr 26 03:28:30 UTC 2017


From: Dave Airlie <airlied at redhat.com>

This interface will allow sync objects to be used to back
Vulkan fences. The API closely mirrors the Vulkan fence waiting
API, and the code is ported from amdgpu.
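
A rough userspace sketch of the new ioctl, for illustration only (the
handle values, the helper name and the "drm.h" include path are made
up, and a real caller would restart the ioctl on EINTR):

  #include <stdint.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include "drm.h"

  /* Wait until both syncobjs carry a signaled fence.  A timeout_ns with
   * the top bit set is treated as "wait forever" by the kernel side. */
  static int wait_both(int drm_fd, uint32_t h0, uint32_t h1)
  {
          uint32_t handles[2] = { h0, h1 };
          struct drm_syncobj_wait wait;

          memset(&wait, 0, sizeof(wait));
          wait.handles = (uintptr_t)handles;
          wait.count_handles = 2;
          wait.timeout_ns = ~0ull;
          wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

          if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
                  return -1;

          /* out_status is 1 when the fences signaled before the timeout;
           * first_signaled is only meaningful for the wait-any case. */
          return wait.out_status ? 0 : -1;
  }

Passing flags = 0 instead waits for any one of the fences and reports
the index of the first signaled handle in first_signaled.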

Signed-off-by: Dave Airlie <airlied at redhat.com>
---
 drivers/gpu/drm/drm_internal.h |   2 +
 drivers/gpu/drm/drm_ioctl.c    |   2 +
 drivers/gpu/drm/drm_syncobj.c  | 162 ++++++++++++++++++++++++++++++++++++++++-
 include/uapi/drm/drm.h         |  11 +++
 4 files changed, 176 insertions(+), 1 deletion(-)
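
One note on the timeout: timeout_ns is an absolute deadline, compared
against ktime_get() (i.e. CLOCK_MONOTONIC) in calc_wait_timeout(), not
a relative duration.  A hypothetical helper userspace could use to
build that value from a relative timeout (the name is made up):

  #include <stdint.h>
  #include <time.h>

  static uint64_t abs_timeout_ns(uint64_t relative_ns)
  {
          struct timespec ts;

          /* same clock the kernel samples via ktime_get() */
          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec +
                 relative_ns;
  }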

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 44ef903..a508ad9 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
 int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_private);
+int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 6da7adc..b142466 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -653,6 +653,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
 		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e6a99d4..c24fea0 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1,5 +1,7 @@
 /*
  * Copyright 2017 Red Hat
+ * Parts ported from amdgpu (fence wait code).
+ * Copyright 2016 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -31,10 +33,13 @@
  * that contain an optional fence. The fence can be updated with a new
  * fence, or be NULL.
  *
+ * syncobj's can be waited upon, in which case the wait blocks until the
+ * underlying fence signals.
+ *
  * syncobj's can be export to fd's and back, these fd's are opaque and
  * have no other use case, except passing the syncobj between processes.
  *
- * TODO: sync_file interactions, waiting
+ * TODO: sync_file interactions.
  *
  * Their primary use-case is to implement Vulkan fences and semaphores.
  *
@@ -372,3 +377,158 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 	return drm_syncobj_fd_to_handle(file_private, args->fd,
 					&args->handle);
 }
+
+/**
+ * calc_wait_timeout - calculate jiffies timeout from absolute value
+ *
+ * @timeout_ns: timeout in ns
+ *
+ * Calculate the timeout in jiffies from an absolute timeout in ns.
+ */
+static unsigned long calc_wait_timeout(uint64_t timeout_ns)
+{
+	unsigned long timeout_jiffies;
+	ktime_t timeout;
+
+	/* clamp timeout if it's too large */
+	if (((int64_t)timeout_ns) < 0)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
+	if (ktime_to_ns(timeout) < 0)
+		return 0;
+
+	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
+	/* clamp timeout to avoid unsigned -> signed overflow */
+	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
+		return MAX_SCHEDULE_TIMEOUT - 1;
+
+	return timeout_jiffies;
+}
+
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
+				       struct drm_file *file_private,
+				       struct drm_syncobj_wait *wait,
+				       uint32_t *handles)
+{
+	uint32_t i;
+	int ret;
+
+	for (i = 0; i < wait->count_handles; i++) {
+		unsigned long timeout = calc_wait_timeout(wait->timeout_ns);
+		struct dma_fence *fence;
+
+		ret = drm_syncobj_fence_get(file_private, handles[i],
+					    &fence);
+		if (ret)
+			return ret;
+
+		if (!fence)
+			continue;
+
+		ret = dma_fence_wait_timeout(fence, true, timeout);
+
+		dma_fence_put(fence);
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			break;
+	}
+
+	wait->out_status = (ret > 0);
+	wait->first_signaled = 0;
+	return 0;
+}
+
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
+				      struct drm_file *file_private,
+				      struct drm_syncobj_wait *wait,
+				      uint32_t *handles)
+{
+	unsigned long timeout = calc_wait_timeout(wait->timeout_ns);
+	struct dma_fence **array;
+	uint32_t i;
+	int ret;
+	uint32_t first = ~0;
+
+	/* Prepare the fence array */
+	array = kcalloc(wait->count_handles,
+			sizeof(struct dma_fence *), GFP_KERNEL);
+
+	if (array == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < wait->count_handles; i++) {
+		struct dma_fence *fence;
+
+		ret = drm_syncobj_fence_get(file_private, handles[i],
+					    &fence);
+		if (ret)
+			goto err_free_fence_array;
+		else if (fence)
+			array[i] = fence;
+		else { /* NULL, the fence has been already signaled */
+			ret = 1;
+			goto out;
+		}
+	}
+
+	ret = dma_fence_wait_any_timeout(array, wait->count_handles, true, timeout,
+					 &first);
+	if (ret < 0)
+		goto err_free_fence_array;
+out:
+	wait->out_status = (ret > 0);
+	wait->first_signaled = first;
+	/* set return value 0 to indicate success */
+	ret = 0;
+
+err_free_fence_array:
+	for (i = 0; i < wait->count_handles; i++)
+		dma_fence_put(array[i]);
+	kfree(array);
+
+	return ret;
+}
+
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_private)
+{
+	struct drm_syncobj_wait *args = data;
+	uint32_t *handles;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+		return -ENODEV;
+
+	if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
+		return -EINVAL;
+
+	if (args->count_handles == 0)
+		return 0;
+
+	/* Get the handles from userspace */
+	handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
+				GFP_KERNEL);
+	if (handles == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(handles,
+			   (void __user *)(unsigned long)(args->handles),
+			   sizeof(uint32_t) * args->count_handles)) {
+		ret = -EFAULT;
+		goto err_free_handles;
+	}
+
+	if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
+		ret = drm_syncobj_wait_all_fences(dev, file_private,
+						  args, handles);
+	else
+		ret = drm_syncobj_wait_any_fence(dev, file_private,
+						 args, handles);
+err_free_handles:
+	kfree(handles);
+
+	return ret;
+}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index dd0b99c..7c508d0 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -716,6 +716,16 @@ struct drm_syncobj_handle {
 	__u32 pad;
 };
 
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+struct drm_syncobj_wait {
+	__u64 handles;
+	__u64 timeout_ns;
+	__u32 count_handles;
+	__u32 flags;
+	__u32 out_status;
+	__u32 first_signaled; /* only valid when not waiting all */
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -838,6 +848,7 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
 #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
 #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
 
 /**
  * Device specific ioctls should only be in their respective headers
-- 
2.9.3


