[PATCH v2 5/5] drm: GEM CMA: Add DRM PRIME support

Rob Clark robdclark at gmail.com
Tue Jun 4 14:56:36 PDT 2013


Couple of small comments, but other than those it looks ok.

BR,
-R

On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart
<laurent.pinchart+renesas at ideasonboard.com> wrote:
> Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas at ideasonboard.com>
> ---
>  drivers/gpu/drm/drm_gem_cma_helper.c | 321 ++++++++++++++++++++++++++++++++++-
>  include/drm/drm_gem_cma_helper.h     |   9 +
>  2 files changed, 327 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
> index 7a4db4e..1dc2157 100644
> --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> @@ -21,6 +21,9 @@
>  #include <linux/slab.h>
>  #include <linux/mutex.h>
>  #include <linux/export.h>
> +#if CONFIG_DMA_SHARED_BUFFER
> +#include <linux/dma-buf.h>
> +#endif

I don't think we need the #if, since drm selects DMA_SHARED_BUFFER

and same for other spot below
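
(For reference, 'config DRM' in drivers/gpu/drm/Kconfig already has
roughly the following, trimmed down here, so DMA_SHARED_BUFFER is
always enabled whenever the CMA helpers can be built:

    menuconfig DRM
            tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
            ...
            select DMA_SHARED_BUFFER
)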

>  #include <linux/dma-mapping.h>
>
>  #include <drm/drmP.h>
> @@ -82,6 +85,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
>                 unsigned int size)
>  {
>         struct drm_gem_cma_object *cma_obj;
> +       struct sg_table *sgt = NULL;
> +       int ret;
>
>         size = round_up(size, PAGE_SIZE);
>
> @@ -94,11 +99,29 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
>         if (!cma_obj->vaddr) {
>                 dev_err(drm->dev, "failed to allocate buffer with size %d\n",
>                         size);
> -               drm_gem_cma_free_object(&cma_obj->base);
> -               return ERR_PTR(-ENOMEM);
> +               ret = -ENOMEM;
> +               goto error;
>         }
>
> +       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
> +       if (sgt == NULL) {
> +               ret = -ENOMEM;
> +               goto error;
> +       }
> +
> +       ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
> +                             cma_obj->paddr, size);
> +       if (ret < 0)
> +               goto error;
> +
> +       cma_obj->sgt = sgt;
> +
>         return cma_obj;
> +
> +error:
> +       kfree(sgt);
> +       drm_gem_cma_free_object(&cma_obj->base);
> +       return ERR_PTR(ret);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_cma_create);
>
> @@ -156,9 +179,16 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
>
>         cma_obj = to_drm_gem_cma_obj(gem_obj);
>
> -       if (cma_obj->vaddr)
> +       if (cma_obj->vaddr) {
>                 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
>                                       cma_obj->vaddr, cma_obj->paddr);
> +               if (cma_obj->sgt) {
> +                       sg_free_table(cma_obj->sgt);
> +                       kfree(cma_obj->sgt);
> +               }
> +       } else if (gem_obj->import_attach) {
> +               drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
> +       }
>
>         drm_gem_object_release(gem_obj);
>
> @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
>  #endif
> +
> +/* -----------------------------------------------------------------------------
> + * DMA-BUF
> + */
> +
> +#if CONFIG_DMA_SHARED_BUFFER
> +struct drm_gem_cma_dmabuf_attachment {
> +       struct sg_table sgt;
> +       enum dma_data_direction dir;
> +};
> +
> +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
> +                                    struct dma_buf_attachment *attach)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> +
> +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> +       if (!cma_attach)
> +               return -ENOMEM;
> +
> +       cma_attach->dir = DMA_NONE;
> +       attach->priv = cma_attach;
> +
> +       return 0;
> +}
> +
> +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> +                                     struct dma_buf_attachment *attach)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> +       struct sg_table *sgt;
> +
> +       if (cma_attach == NULL)
> +               return;
> +
> +       sgt = &cma_attach->sgt;
> +
> +       if (cma_attach->dir != DMA_NONE)
> +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> +                               cma_attach->dir);
> +
> +       sg_free_table(sgt);
> +       kfree(cma_attach);
> +       attach->priv = NULL;
> +}
> +
> +static struct sg_table *
> +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> +                      enum dma_data_direction dir)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> +       struct drm_device *drm = cma_obj->base.dev;
> +       struct scatterlist *rd, *wr;
> +       struct sg_table *sgt;
> +       unsigned int i;
> +       int nents, ret;
> +
> +       DRM_DEBUG_PRIME("\n");
> +
> +       if (WARN_ON(dir == DMA_NONE))
> +               return ERR_PTR(-EINVAL);
> +
> +       /* Return the cached mapping when possible. */
> +       if (cma_attach->dir == dir)
> +               return &cma_attach->sgt;
> +
> +       /* Two mappings with different directions for the same attachment are
> +        * not allowed.
> +        */
> +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> +               return ERR_PTR(-EBUSY);
> +
> +       sgt = &cma_attach->sgt;
> +
> +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
> +       if (ret) {
> +               DRM_ERROR("failed to alloc sgt.\n");
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +       mutex_lock(&drm->struct_mutex);
> +
> +       rd = cma_obj->sgt->sgl;
> +       wr = sgt->sgl;
> +       for (i = 0; i < sgt->orig_nents; ++i) {
> +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> +               rd = sg_next(rd);
> +               wr = sg_next(wr);
> +       }
> +
> +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
> +       if (!nents) {
> +               DRM_ERROR("failed to map sgl with iommu.\n");
> +               sg_free_table(sgt);
> +               sgt = ERR_PTR(-EIO);
> +               goto done;
> +       }
> +
> +       cma_attach->dir = dir;
> +       attach->priv = cma_attach;
> +
> +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> +
> +done:
> +       mutex_unlock(&drm->struct_mutex);
> +       return sgt;
> +}
> +
> +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
> +                                    struct sg_table *sgt,
> +                                    enum dma_data_direction dir)
> +{
> +       /* Nothing to do. */

I kinda think that if you don't support multiple mappings with
different directions, you should at least support unmap, and then
let someone else map with the other direction.
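
ie. something roughly like this (untested sketch, in the context of
this file), so an importer can unmap and then re-map with a different
direction:

static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
                                     struct sg_table *sgt,
                                     enum dma_data_direction dir)
{
        struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;

        if (cma_attach->dir == DMA_NONE)
                return;

        /* Drop the cached mapping so a later map_dma_buf call can pick
         * a different direction. map_dma_buf re-allocates the table
         * with sg_alloc_table(), so free it here as well.
         */
        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, cma_attach->dir);
        sg_free_table(sgt);
        cma_attach->dir = DMA_NONE;
}

(detach should still be safe with this, since sg_free_table() is a
no-op once table->sgl is NULL, and the dir check skips the
dma_unmap_sg())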

> +}
> +
> +static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +
> +       DRM_DEBUG_PRIME("%s\n", __FILE__);
> +
> +       /*
> +        * A drm_gem_cma_dmabuf_release() call means that the file
> +        * object's f_count has dropped to zero, so drop the GEM
> +        * object reference that was taken when the buffer was
> +        * exported in drm_prime_handle_to_fd().
> +        */
> +       if (cma_obj->base.export_dma_buf == dmabuf) {
> +               cma_obj->base.export_dma_buf = NULL;
> +
> +               /*
> +                * drop this gem object refcount to release allocated buffer
> +                * and resources.
> +                */
> +               drm_gem_object_unreference_unlocked(&cma_obj->base);
> +       }
> +}
> +
> +static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
> +                                           unsigned long page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
> +                                            unsigned long page_num, void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
> +                                    unsigned long page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
> +                                     unsigned long page_num, void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
> +                                  struct vm_area_struct *vma)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +       struct drm_gem_object *gem_obj = &cma_obj->base;
> +       int ret;
> +
> +       ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
> +       if (ret < 0)
> +               return ret;
> +
> +       return drm_gem_cma_mmap_obj(cma_obj, vma);
> +}
> +
> +static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +
> +       return cma_obj->vaddr;
> +}
> +
> +static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
> +       .attach                 = drm_gem_cma_dmabuf_attach,
> +       .detach                 = drm_gem_cma_dmabuf_detach,
> +       .map_dma_buf            = drm_gem_cma_dmabuf_map,
> +       .unmap_dma_buf          = drm_gem_cma_dmabuf_unmap,
> +       .kmap                   = drm_gem_cma_dmabuf_kmap,
> +       .kmap_atomic            = drm_gem_cma_dmabuf_kmap_atomic,
> +       .kunmap                 = drm_gem_cma_dmabuf_kunmap,
> +       .kunmap_atomic          = drm_gem_cma_dmabuf_kunmap_atomic,
> +       .mmap                   = drm_gem_cma_dmabuf_mmap,
> +       .vmap                   = drm_gem_cma_dmabuf_vmap,
> +       .release                = drm_gem_cma_dmabuf_release,
> +};
> +
> +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
> +                                         struct drm_gem_object *obj, int flags)
> +{
> +       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
> +
> +       return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
> +                             cma_obj->base.size, flags);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
> +
> +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
> +                                                struct dma_buf *dma_buf)
> +{
> +       struct drm_gem_cma_object *cma_obj;
> +       struct dma_buf_attachment *attach;
> +       struct sg_table *sgt;
> +       int ret;
> +
> +       DRM_DEBUG_PRIME("%s\n", __FILE__);
> +
> +       /* is this one of own objects? */
> +       if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
> +               struct drm_gem_object *obj;
> +
> +               cma_obj = dma_buf->priv;
> +               obj = &cma_obj->base;
> +
> +               /* is it from our device? */
> +               if (obj->dev == drm) {
> +                       /*
> +                        * Importing a dmabuf exported from our own gem increases
> +                        * the refcount on the gem itself instead of the dmabuf's f_count.
> +                        */
> +                       drm_gem_object_reference(obj);
> +                       dma_buf_put(dma_buf);
> +                       return obj;
> +               }
> +       }
> +
> +       /* Create a CMA GEM buffer. */
> +       cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
> +       if (IS_ERR(cma_obj))
> +               return ERR_CAST(cma_obj);
> +
> +       /* Attach to the buffer and map it. Make sure the mapping is contiguous
> +        * on the device memory bus, as that's all we support.
> +        */
> +       attach = dma_buf_attach(dma_buf, drm->dev);
> +       if (IS_ERR(attach)) {
> +               ret = -EINVAL;
> +               goto error_gem_free;
> +       }
> +
> +       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
> +       if (IS_ERR_OR_NULL(sgt)) {
> +               ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
> +               goto error_buf_detach;
> +       }
> +
> +       if (sgt->nents != 1) {
> +               ret = -EINVAL;
> +               goto error_buf_unmap;
> +       }
> +
> +       cma_obj->base.import_attach = attach;
> +       cma_obj->paddr = sg_dma_address(sgt->sgl);
> +       cma_obj->sgt = sgt;
> +
> +       DRM_DEBUG_PRIME("dma_addr = 0x%llx, size = %zu\n",
> +                       (unsigned long long)cma_obj->paddr, dma_buf->size);
> +
> +       return &cma_obj->base;
> +
> +error_buf_unmap:
> +       dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
> +error_buf_detach:
> +       dma_buf_detach(dma_buf, attach);
> +error_gem_free:
> +       drm_gem_cma_free_object(&cma_obj->base);
> +       return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
> +#endif
> diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
> index 63397ce..6e17251 100644
> --- a/include/drm/drm_gem_cma_helper.h
> +++ b/include/drm/drm_gem_cma_helper.h
> @@ -4,6 +4,9 @@
>  struct drm_gem_cma_object {
>         struct drm_gem_object base;
>         dma_addr_t paddr;
> +       struct sg_table *sgt;
> +
> +       /* For objects with DMA memory allocated by GEM CMA */
>         void *vaddr;
>  };
>
> @@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
>  void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
>  #endif
>
> +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
> +                                         struct drm_gem_object *obj,
> +                                         int flags);
> +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
> +                                                struct dma_buf *dma_buf);
> +
>  #endif /* __DRM_GEM_CMA_HELPER_H__ */
> --
> 1.8.1.5
>
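
btw, not part of this patch, but for anyone wiring this up in a driver
I'd expect the hookup to look roughly like this ('foo_drm_driver' is a
made-up example; the fd<->handle helpers are the stock ones from
drm_prime.c):

static struct drm_driver foo_drm_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        /* ... fops, gem_free_object, dumb_create, etc. ... */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = drm_gem_cma_dmabuf_export,
        .gem_prime_import       = drm_gem_cma_dmabuf_import,
};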

