[PATCH v7 3/6] iio: core: Add new DMABUF interface infrastructure

Christian König christian.koenig at amd.com
Mon Mar 4 14:07:07 UTC 2024


On 04.03.24 at 14:59, Paul Cercueil wrote:
> [SNIP]
>>> +	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
>>> +
>>> +	if (dma_to_ram) {
>>> +		/*
>>> +		 * If we're writing to the DMABUF, make sure we don't have
>>> +		 * readers
>>> +		 */
>>> +		retl = dma_resv_wait_timeout(dmabuf->resv,
>>> +					     DMA_RESV_USAGE_READ, true,
>>> +					     timeout);
>>> +		if (retl == 0)
>>> +			retl = -EBUSY;
>>> +		if (retl < 0) {
>>> +			ret = (int)retl;
>>> +			goto err_resv_unlock;
>>> +		}
>>> +	}
>>> +
>>> +	if (buffer->access->lock_queue)
>>> +		buffer->access->lock_queue(buffer);
>>> +
>>> +	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
>>> +	if (ret)
>>> +		goto err_queue_unlock;
>>> +
>>> +	dma_resv_add_fence(dmabuf->resv, &fence->base,
>>> +			   dma_resv_usage_rw(dma_to_ram));
>> That is incorrect use of the function dma_resv_usage_rw(). That
>> function is for retrieving fences and not adding them.
>>
>> See the function implementation and comments; when you use it like
>> this, you get exactly what you don't want.
> No, I get exactly what I want. If "dma_to_ram", I get
> DMA_RESV_USAGE_READ, otherwise I get DMA_RESV_USAGE_WRITE.

Ah, so basically !dma_to_ram means that you have write access to the
buffer?

> If you really don't like the macro, I can inline things here.

Yeah, that would still be incorrect use.

dma_resv_usage_rw() is for retrieving the fences to sync to for read
and write operations, and should never be used together with
dma_resv_add_fence().
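
In other words, the intended split looks roughly like this (just a
sketch; "writes_buffer" is a made-up name for whatever your direction
flag is):

	/*
	 * To WAIT for other users, dma_resv_usage_rw() picks what to
	 * sync against: a writer must wait for readers and writers,
	 * a reader only for existing writers.
	 */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(writes_buffer),
				    true, timeout);

	/*
	 * To PUBLISH your own fence, the usage describes what *you* do
	 * to the buffer, so spell it out explicitly instead:
	 */
	dma_resv_add_fence(resv, fence, writes_buffer ?
			   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);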

Regards,
Christian.

>
> Cheers,
> -Paul
>
>> Regards,
>> Christian.
>>
>>> +	dma_resv_unlock(dmabuf->resv);
>>> +
>>> +	cookie = dma_fence_begin_signalling();
>>> +
>>> +	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base,
>>> +					     priv->sgt, iio_dmabuf.bytes_used,
>>> +					     cyclic);
>>> +	if (ret) {
>>> +		/*
>>> +		 * DMABUF enqueue failed, but we already added the fence.
>>> +		 * Signal the error through the fence completion mechanism.
>>> +		 */
>>> +		iio_buffer_signal_dmabuf_done(&fence->base, ret);
>>> +	}
>>> +
>>> +	if (buffer->access->unlock_queue)
>>> +		buffer->access->unlock_queue(buffer);
>>> +
>>> +	dma_fence_end_signalling(cookie);
>>> +	dma_buf_put(dmabuf);
>>> +
>>> +	return ret;
>>> +
>>> +err_queue_unlock:
>>> +	if (buffer->access->unlock_queue)
>>> +		buffer->access->unlock_queue(buffer);
>>> +err_resv_unlock:
>>> +	dma_resv_unlock(dmabuf->resv);
>>> +err_fence_put:
>>> +	dma_fence_put(&fence->base);
>>> +err_attachment_put:
>>> +	iio_buffer_dmabuf_put(attach);
>>> +err_dmabuf_put:
>>> +	dma_buf_put(dmabuf);
>>> +
>>> +	return ret;
>>> +}
>>> +
>>> +static void iio_buffer_cleanup(struct work_struct *work)
>>> +{
>>> +	struct iio_dma_fence *fence =
>>> +		container_of(work, struct iio_dma_fence, work);
>>> +	struct iio_dmabuf_priv *priv = fence->priv;
>>> +	struct dma_buf_attachment *attach = priv->attach;
>>> +
>>> +	dma_fence_put(&fence->base);
>>> +	iio_buffer_dmabuf_put(attach);
>>> +}
>>> +
>>> +void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret)
>>> +{
>>> +	struct iio_dma_fence *iio_fence =
>>> +		container_of(fence, struct iio_dma_fence, base);
>>> +	bool cookie = dma_fence_begin_signalling();
>>> +
>>> +	/*
>>> +	 * Get a reference to the fence, so that it's not freed as soon as
>>> +	 * it's signaled.
>>> +	 */
>>> +	dma_fence_get(fence);
>>> +
>>> +	fence->error = ret;
>>> +	dma_fence_signal(fence);
>>> +	dma_fence_end_signalling(cookie);
>>> +
>>> +	/*
>>> +	 * The fence will be unref'd in iio_buffer_cleanup.
>>> +	 * It can't be done here, as the unref functions might try to lock the
>>> +	 * resv object, which can deadlock.
>>> +	 */
>>> +	INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
>>> +	schedule_work(&iio_fence->work);
>>> +}
>>> +EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
>>> +
>>> +static long iio_buffer_chrdev_ioctl(struct file *filp,
>>> +				    unsigned int cmd, unsigned long arg)
>>> +{
>>> +	struct iio_dev_buffer_pair *ib = filp->private_data;
>>> +	void __user *_arg = (void __user *)arg;
>>> +	bool nonblock = filp->f_flags & O_NONBLOCK;
>>> +
>>> +	switch (cmd) {
>>> +	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
>>> +		return iio_buffer_attach_dmabuf(ib, _arg, nonblock);
>>> +	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
>>> +		return iio_buffer_detach_dmabuf(ib, _arg, nonblock);
>>> +	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
>>> +		return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock);
>>> +	default:
>>> +		return -EINVAL;
>>> +	}
>>> +}
>>> +
>>>    static const struct file_operations iio_buffer_chrdev_fileops = {
>>>    	.owner = THIS_MODULE,
>>>    	.llseek = noop_llseek,
>>>    	.read = iio_buffer_read,
>>>    	.write = iio_buffer_write,
>>> +	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
>>> +	.compat_ioctl = compat_ptr_ioctl,
>>>    	.poll = iio_buffer_poll,
>>>    	.release = iio_buffer_chrdev_release,
>>>    };
>>> @@ -1953,6 +2414,7 @@ static void iio_buffer_release(struct kref *ref)
>>>    {
>>>    	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
>>>    
>>> +	mutex_destroy(&buffer->dmabufs_mutex);
>>>    	buffer->access->release(buffer);
>>>    }
>>>    
>>> diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
>>> index 89c3fd7c29ca..f4b1147291e5 100644
>>> --- a/include/linux/iio/buffer_impl.h
>>> +++ b/include/linux/iio/buffer_impl.h
>>> @@ -9,8 +9,12 @@
>>>    #include <uapi/linux/iio/buffer.h>
>>>    #include <linux/iio/buffer.h>
>>>    
>>> +struct dma_buf_attachment;
>>> +struct dma_fence;
>>>    struct iio_dev;
>>> +struct iio_dma_buffer_block;
>>>    struct iio_buffer;
>>> +struct sg_table;
>>>    
>>>    /**
>>>     * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
>>> @@ -39,6 +43,13 @@ struct iio_buffer;
>>>     *                      device stops sampling. Calls are balanced with @enable.
>>>     * @release:		called when the last reference to the buffer is dropped,
>>>     *			should free all resources allocated by the buffer.
>>> + * @attach_dmabuf:	called from userspace via ioctl to attach one external
>>> + *			DMABUF.
>>> + * @detach_dmabuf:	called from userspace via ioctl to detach one previously
>>> + *			attached DMABUF.
>>> + * @enqueue_dmabuf:	called from userspace via ioctl to queue this DMABUF
>>> + *			object to this buffer. Requires a valid DMABUF fd that
>>> + *			was previously attached to this buffer.
>>>     * @modes:		Supported operating modes by this buffer type
>>>     * @flags:		A bitmask combination of INDIO_BUFFER_FLAG_*
>>>     *
>>> @@ -68,6 +79,17 @@ struct iio_buffer_access_funcs {
>>>    
>>>    	void (*release)(struct iio_buffer *buffer);
>>>    
>>> +	struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
>>> +						       struct dma_buf_attachment *attach);
>>> +	void (*detach_dmabuf)(struct iio_buffer *buffer,
>>> +			      struct iio_dma_buffer_block *block);
>>> +	int (*enqueue_dmabuf)(struct iio_buffer *buffer,
>>> +			      struct iio_dma_buffer_block *block,
>>> +			      struct dma_fence *fence, struct sg_table *sgt,
>>> +			      size_t size, bool cyclic);
>>> +	void (*lock_queue)(struct iio_buffer *buffer);
>>> +	void (*unlock_queue)(struct iio_buffer *buffer);
>>> +
>>>    	unsigned int modes;
>>>    	unsigned int flags;
>>>    };
>>> @@ -136,6 +158,12 @@ struct iio_buffer {
>>>    
>>>    	/* @ref: Reference count of the buffer. */
>>>    	struct kref ref;
>>> +
>>> +	/* @dmabufs: List of DMABUF attachments */
>>> +	struct list_head dmabufs; /* P: dmabufs_mutex */
>>> +
>>> +	/* @dmabufs_mutex: Protects dmabufs */
>>> +	struct mutex dmabufs_mutex;
>>>    };
>>>    
>>>    /**
>>> @@ -156,9 +184,14 @@ int iio_update_buffers(struct iio_dev *indio_dev,
>>>     **/
>>>    void iio_buffer_init(struct iio_buffer *buffer);
>>>    
>>> +void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach);
>>> +void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach);
>>> +
>>>    struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
>>>    void iio_buffer_put(struct iio_buffer *buffer);
>>>    
>>> +void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret);
>>> +
>>>    #else /* CONFIG_IIO_BUFFER */
>>>    
>>>    static inline void iio_buffer_get(struct iio_buffer *buffer) {}
>>> diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h
>>> index 13939032b3f6..c666aa95e532 100644
>>> --- a/include/uapi/linux/iio/buffer.h
>>> +++ b/include/uapi/linux/iio/buffer.h
>>> @@ -5,6 +5,28 @@
>>>    #ifndef _UAPI_IIO_BUFFER_H_
>>>    #define _UAPI_IIO_BUFFER_H_
>>>    
>>> +#include <linux/types.h>
>>> +
>>> +/* Flags for iio_dmabuf.flags */
>>> +#define IIO_BUFFER_DMABUF_CYCLIC		(1 << 0)
>>> +#define IIO_BUFFER_DMABUF_SUPPORTED_FLAGS	0x00000001
>>> +
>>> +/**
>>> + * struct iio_dmabuf - Descriptor for a single IIO DMABUF object
>>> + * @fd:		file descriptor of the DMABUF object
>>> + * @flags:	one or more IIO_BUFFER_DMABUF_* flags
>>> + * @bytes_used:	number of bytes used in this DMABUF for the data transfer.
>>> + *		Should generally be set to the DMABUF's size.
>>> + */
>>> +struct iio_dmabuf {
>>> +	__u32 fd;
>>> +	__u32 flags;
>>> +	__u64 bytes_used;
>>> +};
>>> +
>>>    #define IIO_BUFFER_GET_FD_IOCTL			_IOWR('i', 0x91, int)
>>> +#define IIO_BUFFER_DMABUF_ATTACH_IOCTL		_IOW('i', 0x92, int)
>>> +#define IIO_BUFFER_DMABUF_DETACH_IOCTL		_IOW('i', 0x93, int)
>>> +#define IIO_BUFFER_DMABUF_ENQUEUE_IOCTL		_IOW('i', 0x94, struct iio_dmabuf)
>>>    
>>>    #endif /* _UAPI_IIO_BUFFER_H_ */
>>>
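
As a side note, a minimal userspace sequence for the new ioctls would
look roughly like this (sketch only, error handling omitted;
"buffer_fd" is the fd returned by IIO_BUFFER_GET_FD_IOCTL, and
"dmabuf_fd" a DMABUF obtained elsewhere, e.g. from a DMA heap):

	#include <sys/ioctl.h>
	#include <linux/iio/buffer.h>

	/* Attach the DMABUF to the IIO buffer */
	ioctl(buffer_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd);

	/* Queue the whole DMABUF for one transfer */
	struct iio_dmabuf req = {
		.fd = dmabuf_fd,
		.flags = 0,
		.bytes_used = dmabuf_size,
	};
	ioctl(buffer_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);

	/* Detach once all transfers are done */
	ioctl(buffer_fd, IIO_BUFFER_DMABUF_DETACH_IOCTL, &dmabuf_fd);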