[Intel-gfx] [PATCH v11 01/10] drm/syncobj: add sideband payload

Lionel Landwerlin lionel.g.landwerlin at intel.com
Thu Aug 29 06:40:03 UTC 2019


On 29/08/2019 08:26, Zhou, David(ChunMing) wrote:
> v6 is fine with me as well, RB on it to go ahead.
>
> Out of curiosity, why is "[PATCH v11 01/10]" in the subject?


v11 is the series on the intel-gfx mailing list which depends on the 
timeline semaphore feature in our driver.

That's why I included this patch in it.


-Lionel


>
> -David
>
> -----Original Message-----
> From: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> Sent: Wednesday, August 28, 2019 10:33 PM
> To: intel-gfx at lists.freedesktop.org
> Cc: Lionel Landwerlin <lionel.g.landwerlin at intel.com>; Zhou, David(ChunMing) <David1.Zhou at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>; Jason Ekstrand <jason at jlekstrand.net>
> Subject: [PATCH v11 01/10] drm/syncobj: add sideband payload
>
> The Vulkan timeline semaphores allow signaling of a point on the timeline to be submitted before all of its dependencies have been created.
>
> The two current implementations (AMD/Intel) of the Vulkan spec on top of the Linux kernel use a thread to wait for the dependencies of a given point to materialize, delaying the actual submission to the kernel driver until that wait completes.
>
> If a binary semaphore is submitted for signaling alongside a timeline semaphore wait, the drm syncobj associated with that binary semaphore will not have a DMA fence associated with it by the time vkQueueSubmit() returns. This, together with the fact that a binary semaphore can be signaled and unsignaled before its DMA fences materialize, means that we cannot rely solely on the fence within the syncobj; we also need a sideband payload verifying that the fence in the syncobj matches the last submission from the Vulkan API point of view.
>
> This change adds a sideband payload that is incremented for each signaled syncobj when vkQueueSubmit() is called. The next vkQueueSubmit() waiting on the syncobj reads the sideband payload, waits for a fence chain element with a seqno greater than or equal to the sideband payload value to be added to the fence chain, and uses that fence to trigger the submission on the kernel driver.
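>
> For illustration only (not part of the patch), a rough userspace sketch of how
> a submission thread might read and bump the sideband payload through the new
> ioctl, assuming libdrm's drmIoctl() and the uapi additions below; the helper
> name is hypothetical:
>
>     #include <stdint.h>
>     #include <xf86drm.h>
>     #include <drm/drm.h>
>
>     /* Read the current sideband payload of one binary syncobj and bump it in
>      * the same call; the kernel writes back the pre-increment value. */
>     static int syncobj_binary_read_inc(int fd, uint32_t handle, uint64_t *value)
>     {
>             uint32_t flags = DRM_SYNCOBJ_BINARY_VALUE_READ |
>                              DRM_SYNCOBJ_BINARY_VALUE_INC;
>             struct drm_syncobj_binary_array args = {
>                     .handles = (uintptr_t)&handle,
>                     .access_flags = (uintptr_t)&flags,
>                     .values = (uintptr_t)value,
>                     .count_handles = 1,
>             };
>
>             return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_BINARY, &args);
>     }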
>
> v2: Use a separate ioctl to get/set the sideband value (Christian)
>
> v3: Use 2 ioctls for get/set (Christian)
>
> v4: Use a single new ioctl
>
> v5: Fix a bunch of blatant mistakes
>      Store payload atomically (Chris)
>
> v6: Only touch atomic value once (Jason)
>
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> Reviewed-by: David Zhou <David1.Zhou at amd.com> (v5)
> Cc: Christian Koenig <Christian.Koenig at amd.com>
> Cc: Jason Ekstrand <jason at jlekstrand.net>
> Cc: David(ChunMing) Zhou <David1.Zhou at amd.com>
> ---
>   drivers/gpu/drm/drm_internal.h |  2 ++
>   drivers/gpu/drm/drm_ioctl.c    |  3 ++
>   drivers/gpu/drm/drm_syncobj.c  | 59 +++++++++++++++++++++++++++++++++-
>   include/drm/drm_syncobj.h      |  9 ++++++
>   include/uapi/drm/drm.h         | 17 ++++++++++
>   5 files changed, 89 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
> index 51a2055c8f18..e297dfd85019 100644
> --- a/drivers/gpu/drm/drm_internal.h
> +++ b/drivers/gpu/drm/drm_internal.h
> @@ -208,6 +208,8 @@ int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
>   				      struct drm_file *file_private);
>   int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
>   			    struct drm_file *file_private);
> +int drm_syncobj_binary_ioctl(struct drm_device *dev, void *data,
> +			     struct drm_file *file_private);
>   
>   /* drm_framebuffer.c */
>   void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
> diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
> index f675a3bb2c88..644d0bc800a4 100644
> --- a/drivers/gpu/drm/drm_ioctl.c
> +++ b/drivers/gpu/drm/drm_ioctl.c
> @@ -703,6 +703,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
>   		      DRM_RENDER_ALLOW),
>   	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
>   		      DRM_RENDER_ALLOW),
> +	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_BINARY, drm_syncobj_binary_ioctl,
> +		      DRM_RENDER_ALLOW),
> +
>   	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0),
>   	DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0),
>   	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER),
> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
> index 4b5c7b0ed714..732310b2b367 100644
> --- a/drivers/gpu/drm/drm_syncobj.c
> +++ b/drivers/gpu/drm/drm_syncobj.c
> @@ -1224,8 +1224,10 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
>   	if (ret < 0)
>   		return ret;
>   
> -	for (i = 0; i < args->count_handles; i++)
> +	for (i = 0; i < args->count_handles; i++) {
>   		drm_syncobj_replace_fence(syncobjs[i], NULL);
> +		atomic64_set(&syncobjs[i]->binary_payload, 0);
> +	}
>   
>   	drm_syncobj_array_free(syncobjs, args->count_handles);
>   
> @@ -1395,6 +1397,61 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
>   		if (ret)
>   			break;
>   	}
> +
> +	drm_syncobj_array_free(syncobjs, args->count_handles);
> +
> +	return ret;
> +}
> +
> +int drm_syncobj_binary_ioctl(struct drm_device *dev, void *data,
> +			     struct drm_file *file_private)
> +{
> +	struct drm_syncobj_binary_array *args = data;
> +	struct drm_syncobj **syncobjs;
> +	u32 __user *access_flags = u64_to_user_ptr(args->access_flags);
> +	u64 __user *values = u64_to_user_ptr(args->values);
> +	u32 i;
> +	int ret;
> +
> +	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
> +		return -EOPNOTSUPP;
> +
> +	if (args->pad != 0)
> +		return -EINVAL;
> +
> +	if (args->count_handles == 0)
> +		return -EINVAL;
> +
> +	ret = drm_syncobj_array_find(file_private,
> +				     u64_to_user_ptr(args->handles),
> +				     args->count_handles,
> +				     &syncobjs);
> +	if (ret < 0)
> +		return ret;
> +
> +	for (i = 0; i < args->count_handles; i++) {
> +		u32 flags;
> +		u64 value;
> +
> +		if (get_user(flags, &access_flags[i])) {
> +			ret = -EFAULT;
> +			break;
> +		}
> +
> +		if (flags & DRM_SYNCOBJ_BINARY_VALUE_INC)
> +			value = atomic64_inc_return(&syncobjs[i]->binary_payload) - 1;
> +		else if (flags & DRM_SYNCOBJ_BINARY_VALUE_READ)
> +			value = atomic64_read(&syncobjs[i]->binary_payload);
> +
> +		if (flags & DRM_SYNCOBJ_BINARY_VALUE_READ) {
> +			if (put_user(value, &values[i])) {
> +				ret = -EFAULT;
> +				break;
> +			}
> +		}
> +
> +	}
> +
>   	drm_syncobj_array_free(syncobjs, args->count_handles);
>   
>   	return ret;
> diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
> index 6cf7243a1dc5..aa76cb3f9107 100644
> --- a/include/drm/drm_syncobj.h
> +++ b/include/drm/drm_syncobj.h
> @@ -61,6 +61,15 @@ struct drm_syncobj {
>   	 * @file: A file backing for this syncobj.
>   	 */
>   	struct file *file;
> +	/**
> +	 * @binary_payload: A 64bit payload for binary syncobjs.
> +	 *
> +	 * We use the payload value to wait on binary syncobj fences to
> +	 * materialize. It is a reservation mechanism for the signaler to
> +	 * express that at some point in the future a dma fence with the same
> +	 * seqno will be put into the syncobj.
> +	 */
> +	atomic64_t binary_payload;
>   };
>   
>   void drm_syncobj_free(struct kref *kref);
> diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
> index 8a5b2f8f8eb9..78a0a413b788 100644
> --- a/include/uapi/drm/drm.h
> +++ b/include/uapi/drm/drm.h
> @@ -785,6 +785,22 @@ struct drm_syncobj_timeline_array {
>   	__u32 pad;
>   };
>   
> +struct drm_syncobj_binary_array {
> +	/* A pointer to an array of u32 syncobj handles. */
> +	__u64 handles;
> +	/* A pointer to an array of u32 access flags for each handle. */
> +	__u64 access_flags;
> +	/* The binary value of a syncobj is read before it is incremented. */
> +#define DRM_SYNCOBJ_BINARY_VALUE_READ (1u << 0)
> +#define DRM_SYNCOBJ_BINARY_VALUE_INC  (1u << 1)
> +	/* A pointer to an array of u64 values written to by the kernel if the
> +	 * handle is flagged for reading.
> +	 */
> +	__u64 values;
> +	/* The length of the 3 arrays above. */
> +	__u32 count_handles;
> +	__u32 pad;
> +};
>   
>   /* Query current scanout sequence number */
>   struct drm_crtc_get_sequence {
> @@ -946,6 +962,7 @@ extern "C" {
>   #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
>   #define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
>   #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
> +#define DRM_IOCTL_SYNCOBJ_BINARY	DRM_IOWR(0xCE, struct drm_syncobj_binary_array)
>   
>   /**
>    * Device specific ioctls should only be in their respective headers
> --
> 2.23.0
>
>


