[PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks

Chia-I Wu <olvaffe at gmail.com>
Mon Dec 9 22:44:01 UTC 2019


On Mon, Dec 2, 2019 at 5:36 PM Gurchetan Singh
<gurchetansingh at chromium.org> wrote:
>
> With the misc device, we should end up using the result of
> get_arch_dma_ops(..) or dma-direct ops.
>
> This can allow us to have WC mappings in the guest after
> synchronization.
>
> Signed-off-by: Gurchetan Singh <gurchetansingh at chromium.org>
> ---
>  drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
>
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index 0a610e09ae23..61b0a2cff874 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
>  struct udmabuf {
>         pgoff_t pagecount;
>         struct page **pages;
> +       struct sg_table *sg;
>         struct miscdevice *device;
>  };
>
> @@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
>  static void release_udmabuf(struct dma_buf *buf)
>  {
>         struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
>         pgoff_t pg;
>
> +       if (ubuf->sg)
> +               put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
> +
>         for (pg = 0; pg < ubuf->pagecount; pg++)
>                 put_page(ubuf->pages[pg]);
>         kfree(ubuf->pages);
>         kfree(ubuf);
>  }
>
> +static int begin_cpu_udmabuf(struct dma_buf *buf,
> +                            enum dma_data_direction direction)
> +{
> +       struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
> +
> +       if (!ubuf->sg) {
> +               ubuf->sg = get_sg_table(dev, buf, direction);
> +               if (IS_ERR(ubuf->sg))
> +                       return PTR_ERR(ubuf->sg);
> +       } else {
> +               dma_sync_sg_for_device(dev, ubuf->sg->sgl,
> +                                      ubuf->sg->nents,
> +                                      direction);
I know this solves the issue (flushing the CPU cache before WC access),
but it looks like an abuse of the interface?  It is counter-intuitive
that the buffer is synced for the device when one wants CPU access.
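
For context, this is roughly how userspace drives these two hooks
through the DMA_BUF_IOCTL_SYNC uapi (a minimal sketch only; "dmabuf_fd"
and "touch" are placeholders, not anything from this patch):

  #include <linux/dma-buf.h>
  #include <string.h>
  #include <sys/ioctl.h>

  /* Bracket one window of CPU access to an mmap'ed dma-buf. */
  static int cpu_access(int dmabuf_fd, void (*touch)(void))
  {
          struct dma_buf_sync sync;

          memset(&sync, 0, sizeof(sync));

          /* SYNC_START lands in the exporter's .begin_cpu_access hook;
           * the natural expectation is that the buffer is made
           * coherent for the *CPU* here. */
          sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
          if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
                  return -1;

          touch();        /* CPU reads/writes through the mmap */

          /* SYNC_END lands in .end_cpu_access; this is where one
           * expects the flush back toward the device. */
          sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
          return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
  }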

> +       }
> +
> +       return 0;
> +}
> +
> +static int end_cpu_udmabuf(struct dma_buf *buf,
> +                          enum dma_data_direction direction)
> +{
> +       struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
> +
> +       if (!ubuf->sg)
> +               return -EINVAL;
> +
> +       dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
> +       return 0;
> +}
> +
>  static const struct dma_buf_ops udmabuf_ops = {
>         .cache_sgt_mapping = true,
>         .map_dma_buf       = map_udmabuf,
>         .unmap_dma_buf     = unmap_udmabuf,
>         .release           = release_udmabuf,
>         .mmap              = mmap_udmabuf,
> +       .begin_cpu_access  = begin_cpu_udmabuf,
> +       .end_cpu_access    = end_cpu_udmabuf,
>  };
>
>  #define SEALS_WANTED (F_SEAL_SHRINK)
> --
> 2.24.0.393.g34dc348eaf-goog
>
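
To make the point concrete: following the usual exporter pattern, I
would have expected the two hooks to map the other way around.  A
sketch against the same helpers this patch already uses (untested;
the "ubuf->sg = NULL" reset is only there so a failed first call does
not leave an ERR_PTR behind for later calls):

  static int begin_cpu_udmabuf(struct dma_buf *buf,
                               enum dma_data_direction direction)
  {
          struct udmabuf *ubuf = buf->priv;
          struct device *dev = ubuf->device->this_device;

          if (!ubuf->sg) {
                  ubuf->sg = get_sg_table(dev, buf, direction);
                  if (IS_ERR(ubuf->sg)) {
                          int ret = PTR_ERR(ubuf->sg);

                          ubuf->sg = NULL;
                          return ret;
                  }
          } else {
                  /* make the buffer coherent for the CPU */
                  dma_sync_sg_for_cpu(dev, ubuf->sg->sgl,
                                      ubuf->sg->nents, direction);
          }

          return 0;
  }

  static int end_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
  {
          struct udmabuf *ubuf = buf->priv;
          struct device *dev = ubuf->device->this_device;

          if (!ubuf->sg)
                  return -EINVAL;

          /* flush CPU writes back out for the device */
          dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents,
                                 direction);
          return 0;
  }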