[PATCH v3 07/11] iio: core: Add new DMABUF interface infrastructure

Paul Cercueil <paul@crapouillou.net>
Tue Apr 4 07:55:09 UTC 2023


Hi Nuno,

On Tuesday, April 4, 2023 at 09:32 +0200, Nuno Sá wrote:
> On Mon, 2023-04-03 at 17:47 +0200, Paul Cercueil wrote:
> > Add the necessary infrastructure to the IIO core to support a new
> > optional DMABUF based interface.
> > 
> > With this new interface, DMABUF objects (externally created) can be
> > attached to an IIO buffer, and subsequently used for data transfer.
> > 
> > A userspace application can then use this interface to share DMABUF
> > objects between several interfaces, allowing it to transfer data in a
> > zero-copy fashion, for instance between IIO and the USB stack.
> > 
> > The userspace application can also memory-map the DMABUF objects and
> > access the sample data directly. The advantage of doing this vs. the
> > read() interface is that it avoids an extra copy of the data between
> > the kernel and userspace. This is particularly useful for high-speed
> > devices which produce several megabytes or even gigabytes of data per
> > second.
> > 
> > As part of the interface, 3 new IOCTLs have been added:
> > 
> > IIO_BUFFER_DMABUF_ATTACH_IOCTL(int fd):
> >  Attach the DMABUF object identified by the given file descriptor to
> >  the buffer.
> > 
> > IIO_BUFFER_DMABUF_DETACH_IOCTL(int fd):
> >  Detach the DMABUF object identified by the given file descriptor from
> >  the buffer. Note that closing the IIO buffer's file descriptor will
> >  automatically detach all previously attached DMABUF objects.
> > 
> > IIO_BUFFER_DMABUF_ENQUEUE_IOCTL(struct iio_dmabuf *):
> >  Request a data transfer to/from the given DMABUF object. Its file
> >  descriptor, as well as the transfer size and flags, are provided in
> >  the "iio_dmabuf" structure.
> > 
> > These three IOCTLs have to be performed on the IIO buffer's file
> > descriptor, obtained using the IIO_BUFFER_GET_FD_IOCTL() ioctl.
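
For illustration, a userspace sequence using these ioctls could look
roughly like this (hypothetical sketch, error handling omitted; it
assumes the attach/detach ioctls take a pointer to the DMABUF's fd):

    int dmabuf_fd; /* DMABUF exported by another subsystem, e.g. USB */

    /* buf_fd was obtained with IIO_BUFFER_GET_FD_IOCTL() */
    ioctl(buf_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd);

    struct iio_dmabuf req = {
            .fd = dmabuf_fd,
            .flags = 0, /* or IIO_BUFFER_DMABUF_CYCLIC on output buffers */
            .bytes_used = transfer_size,
    };
    ioctl(buf_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);

    /* ... wait for the transfer to complete ... */

    ioctl(buf_fd, IIO_BUFFER_DMABUF_DETACH_IOCTL, &dmabuf_fd);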
> > 
> > Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> > 
> > ---
> > v2: Only allow the new IOCTLs on the buffer FD created with
> >     IIO_BUFFER_GET_FD_IOCTL().
> > 
> > v3: - Get rid of the old IOCTLs. The IIO subsystem does not create or
> >     manage DMABUFs anymore, and only attaches/detaches externally
> >     created DMABUFs.
> >     - Add IIO_BUFFER_DMABUF_CYCLIC to the supported flags.
> > ---
> >  drivers/iio/industrialio-buffer.c | 402 ++++++++++++++++++++++++++++++
> >  include/linux/iio/buffer_impl.h   |  22 ++
> >  include/uapi/linux/iio/buffer.h   |  22 ++
> >  3 files changed, 446 insertions(+)
> > 
> > diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
> > index 80c78bd6bbef..5d88e098b3e7 100644
> > --- a/drivers/iio/industrialio-buffer.c
> > +++ b/drivers/iio/industrialio-buffer.c
> > @@ -13,10 +13,14 @@
> >  #include <linux/kernel.h>
> >  #include <linux/export.h>
> >  #include <linux/device.h>
> > +#include <linux/dma-buf.h>
> > +#include <linux/dma-fence.h>
> > +#include <linux/dma-resv.h>
> >  #include <linux/file.h>
> >  #include <linux/fs.h>
> >  #include <linux/cdev.h>
> >  #include <linux/slab.h>
> > +#include <linux/mm.h>
> >  #include <linux/poll.h>
> >  #include <linux/sched/signal.h>
> >  
> > @@ -28,11 +32,41 @@
> >  #include <linux/iio/buffer.h>
> >  #include <linux/iio/buffer_impl.h>
> >  
> > +#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
> > +
> > +struct iio_dma_fence;
> > +
> > +struct iio_dmabuf_priv {
> > +       struct list_head entry;
> > +       struct kref ref;
> > +
> > +       struct iio_buffer *buffer;
> > +       struct iio_dma_buffer_block *block;
> > +
> > +       u64 context;
> > +       spinlock_t lock;
> > +
> > +       struct dma_buf_attachment *attach;
> > +       struct iio_dma_fence *fence;
> > +};
> > +
> > +struct iio_dma_fence {
> > +       struct dma_fence base;
> > +       struct iio_dmabuf_priv *priv;
> > +       struct sg_table *sgt;
> > +       enum dma_data_direction dir;
> > +};
> > +
> >  static const char * const iio_endian_prefix[] = {
> >         [IIO_BE] = "be",
> >         [IIO_LE] = "le",
> >  };
> >  
> > +static inline struct iio_dma_fence *to_iio_dma_fence(struct dma_fence *fence)
> > +{
> > +       return container_of(fence, struct iio_dma_fence, base);
> > +}
> > +
> 
> Kind of a nitpick, but I only see this being used once, so I would
> maybe use plain 'container_of()' as you are already doing for:
> 
> ... = container_of(ref, struct iio_dmabuf_priv, ref);
> 
> So I would at least advocate for consistency. I would also probably
> ditch the inline, but I guess that is more a matter of
> style/preference.

Yep, at least it should be consistent.
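
FWIW, the single call site would then just read (same pattern as the
kref case above):

    struct iio_dma_fence *iio_fence =
            container_of(fence, struct iio_dma_fence, base);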

> 
> >  static bool iio_buffer_is_active(struct iio_buffer *buf)
> >  {
> >         return !list_empty(&buf->buffer_list);
> > @@ -329,6 +363,7 @@ void iio_buffer_init(struct iio_buffer *buffer)
> >  {
> > 
> 
> ...
> 
> > +       priv = attach->importer_priv;
> > +       list_del_init(&priv->entry);
> > +
> > +       iio_buffer_dmabuf_put(attach);
> > +       iio_buffer_dmabuf_put(attach);
> > +
> 
> Is this intended? Looks suspicious...

It is intended, yes. You want to release the dma_buf_attachment that's
created in iio_buffer_attach_dmabuf(), and you need to call
iio_buffer_find_attachment() to get a pointer to it, which also gets a
second reference - so it needs to unref twice.
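
In other words, the detach path is (simplified):

    attach = iio_buffer_find_attachment(indio_dev, dmabuf); /* takes a ref */
    priv = attach->importer_priv;
    list_del_init(&priv->entry);

    iio_buffer_dmabuf_put(attach); /* drop find_attachment()'s ref */
    iio_buffer_dmabuf_put(attach); /* drop the ref taken at attach time */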

> 
> > +out_dmabuf_put:
> > +       dma_buf_put(dmabuf);
> > +
> > +       return ret;
> > +}
> > +
> > +static const char *
> > +iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
> > +{
> > +       return "iio";
> > +}
> > +
> > +static void iio_buffer_dma_fence_release(struct dma_fence *fence)
> > +{
> > +       struct iio_dma_fence *iio_fence = to_iio_dma_fence(fence);
> > +
> > +       kfree(iio_fence);
> > +}
> > +
> > +static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
> > +       .get_driver_name        = iio_buffer_dma_fence_get_driver_name,
> > +       .get_timeline_name      = iio_buffer_dma_fence_get_driver_name,
> > +       .release                = iio_buffer_dma_fence_release,
> > +};
> > +
> > +static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
> > +                                    struct iio_dmabuf __user *iio_dmabuf_req,
> > +                                    bool nonblock)
> > +{
> > +       struct iio_buffer *buffer = ib->buffer;
> > +       struct iio_dmabuf iio_dmabuf;
> > +       struct dma_buf_attachment *attach;
> > +       struct iio_dmabuf_priv *priv;
> > +       enum dma_data_direction dir;
> > +       struct iio_dma_fence *fence;
> > +       struct dma_buf *dmabuf;
> > +       struct sg_table *sgt;
> > +       unsigned long timeout;
> > +       bool dma_to_ram;
> > +       bool cyclic;
> > +       int ret;
> > +
> > +       if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
> > +               return -EFAULT;
> > +
> > +       if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
> > +               return -EINVAL;
> > +
> > +       cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
> > +
> > +       /* Cyclic flag is only supported on output buffers */
> > +       if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
> > +               return -EINVAL;
> > +
> > +       dmabuf = dma_buf_get(iio_dmabuf.fd);
> > +       if (IS_ERR(dmabuf))
> > +               return PTR_ERR(dmabuf);
> > +
> > +       if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
> > +               ret = -EINVAL;
> > +               goto err_dmabuf_put;
> > +       }
> > +
> > +       attach = iio_buffer_find_attachment(ib->indio_dev, dmabuf);
> > +       if (IS_ERR(attach)) {
> > +               ret = PTR_ERR(attach);
> > +               goto err_dmabuf_put;
> > +       }
> > +
> > +       priv = attach->importer_priv;
> > +
> > +       dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
> > +       dir = dma_to_ram ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> > +
> > +       sgt = dma_buf_map_attachment(attach, dir);
> > +       if (IS_ERR(sgt)) {
> > +               ret = PTR_ERR(sgt);
> > +               pr_err("Unable to map attachment: %d\n", ret);
> 
> dev_err()? We should be able to reach the iio_dev

Should work with (&ib->indio_dev->dev), yes.
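
i.e.:

    dev_err(&ib->indio_dev->dev, "Unable to map attachment: %d\n", ret);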

> 
> > +               goto err_attachment_put;
> > +       }
> > +
> > +       fence = kmalloc(sizeof(*fence), GFP_KERNEL);
> > +       if (!fence) {
> > +               ret = -ENOMEM;
> > +               goto err_unmap_attachment;
> > +       }
> > +
> > 
> 
> ...
> 
> >  static const struct file_operations iio_buffer_chrdev_fileops = {
> >         .owner = THIS_MODULE,
> >         .llseek = noop_llseek,
> >         .read = iio_buffer_read,
> >         .write = iio_buffer_write,
> > +       .unlocked_ioctl = iio_buffer_chrdev_ioctl,
> > +       .compat_ioctl = compat_ptr_ioctl,
> >         .poll = iio_buffer_poll,
> >         .release = iio_buffer_chrdev_release,
> >  };
> 
> Hmmm, what about the legacy buffer? We should also support this
> interface using it, right? Otherwise, using one of the new IOCTLs in
> iio_device_buffer_ioctl() (or on /dev/iio:device0) will error out.

According to Jonathan, the old chardev route is deprecated, and it's
fine not to support these IOCTLs there.

Cheers,
-Paul

