[PATCH v2 5/5] drm: GEM CMA: Add DRM PRIME support

Laurent Pinchart laurent.pinchart at ideasonboard.com
Fri Jun 7 12:25:42 PDT 2013


On Wednesday 05 June 2013 07:00:45 Rob Clark wrote:
> On Wed, Jun 5, 2013 at 4:44 AM, Daniel Vetter <daniel at ffwll.ch> wrote:
> > On Tue, Jun 04, 2013 at 11:05:36PM -0400, Rob Clark wrote:
> >> On Tue, Jun 4, 2013 at 9:22 PM, Laurent Pinchart wrote:
> >> > On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
> >> >> couple of small comments; other than those it looks ok
> >> > 
> >> > Thanks for the review.
> >> > 
> >> >> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
> >> >> > Signed-off-by: Laurent Pinchart
> >> >> > <laurent.pinchart+renesas at ideasonboard.com>
> >> >> > ---
> >> >> > 
> >> >> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321 +++++++++++++++++++++++++++++-
> >> >> >  include/drm/drm_gem_cma_helper.h     |   9 +
> >> >> >  2 files changed, 327 insertions(+), 3 deletions(-)
> >> >> > 
> >> >> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > index 7a4db4e..1dc2157 100644
> >> >> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > @@ -21,6 +21,9 @@
> >> >> > 
> >> >> >  #include <linux/slab.h>
> >> >> >  #include <linux/mutex.h>
> >> >> >  #include <linux/export.h>
> >> >> > 
> >> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> >> > +#include <linux/dma-buf.h>
> >> >> > +#endif
> >> >> 
> >> >> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
> >> >> 
> >> >> and the same for the other spot below
> >> > 
> >> > Indeed. Will be fixed in the next version.
> >> > 
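For the record, v3 simply drops the #if around the include. A quick
sketch of the resulting include block, assuming nothing else changes
there (the header is always available since DRM selects
DMA_SHARED_BUFFER in Kconfig):

	#include <linux/slab.h>
	#include <linux/mutex.h>
	#include <linux/export.h>
	#include <linux/dma-buf.h>
	#include <linux/dma-mapping.h>
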
> >> >> >  #include <linux/dma-mapping.h>
> >> >> >  
> >> >> >  #include <drm/drmP.h>
> >> > 
> >> > [snip]
> >> > 
> >> >> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
> >> >> > 
> >> >> >  }
> >> >> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
> >> >> >  #endif
> >> >> > 
> >> >> > +
> >> >> > +/* -----------------------------------------------------------------------------
> >> >> > + * DMA-BUF
> >> >> > + */
> >> >> > +
> >> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> >> > +struct drm_gem_cma_dmabuf_attachment {
> >> >> > +       struct sg_table sgt;
> >> >> > +       enum dma_data_direction dir;
> >> >> > +};
> >> >> > +
> >> >> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
> >> >> > +                                     struct dma_buf_attachment *attach)
> >> >> > +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> >> >> > +
> >> >> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> >> >> > +       if (!cma_attach)
> >> >> > +               return -ENOMEM;
> >> >> > +
> >> >> > +       cma_attach->dir = DMA_NONE;
> >> >> > +       attach->priv = cma_attach;
> >> >> > +
> >> >> > +       return 0;
> >> >> > +}
> >> >> > +
> >> >> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> >> >> > +                                      struct dma_buf_attachment *attach)
> >> >> > +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> >> >> > +       struct sg_table *sgt;
> >> >> > +
> >> >> > +       if (cma_attach == NULL)
> >> >> > +               return;
> >> >> > +
> >> >> > +       sgt = &cma_attach->sgt;
> >> >> > +
> >> >> > +       if (cma_attach->dir != DMA_NONE)
> >> >> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> >> >> > +                               cma_attach->dir);
> >> >> > +
> >> >> > +       sg_free_table(sgt);
> >> >> > +       kfree(cma_attach);
> >> >> > +       attach->priv = NULL;
> >> >> > +}
> >> >> > +
> >> >> > +static struct sg_table *
> >> >> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> >> >> > +                      enum dma_data_direction dir)
> >> >> > +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> >> >> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> >> >> > +       struct drm_device *drm = cma_obj->base.dev;
> >> >> > +       struct scatterlist *rd, *wr;
> >> >> > +       struct sg_table *sgt;
> >> >> > +       unsigned int i;
> >> >> > +       int nents, ret;
> >> >> > +
> >> >> > +       DRM_DEBUG_PRIME("\n");
> >> >> > +
> >> >> > +       if (WARN_ON(dir == DMA_NONE))
> >> >> > +               return ERR_PTR(-EINVAL);
> >> >> > +
> >> >> > +       /* Return the cached mapping when possible. */
> >> >> > +       if (cma_attach->dir == dir)
> >> >> > +               return &cma_attach->sgt;
> >> >> > +
> >> >> > +       /* Two mappings with different directions for the same attachment are
> >> >> > +        * not allowed.
> >> >> > +        */
> >> >> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> >> >> > +               return ERR_PTR(-EBUSY);
> >> >> > +
> >> >> > +       sgt = &cma_attach->sgt;
> >> >> > +
> >> >> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
> >> >> > +       if (ret) {
> >> >> > +               DRM_ERROR("failed to alloc sgt.\n");
> >> >> > +               return ERR_PTR(-ENOMEM);
> >> >> > +       }
> >> >> > +
> >> >> > +       mutex_lock(&drm->struct_mutex);
> >> >> > +
> >> >> > +       rd = cma_obj->sgt->sgl;
> >> >> > +       wr = sgt->sgl;
> >> >> > +       for (i = 0; i < sgt->orig_nents; ++i) {
> >> >> > +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> >> >> > +               rd = sg_next(rd);
> >> >> > +               wr = sg_next(wr);
> >> >> > +       }
> >> >> > +
> >> >> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
> >> >> > +       if (!nents) {
> >> >> > +               DRM_ERROR("failed to map sgl with iommu.\n");
> >> >> > +               sg_free_table(sgt);
> >> >> > +               sgt = ERR_PTR(-EIO);
> >> >> > +               goto done;
> >> >> > +       }
> >> >> > +
> >> >> > +       cma_attach->dir = dir;
> >> >> > +       attach->priv = cma_attach;
> >> >> > +
> >> >> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> >> >> > +
> >> >> > +done:
> >> >> > +       mutex_unlock(&drm->struct_mutex);
> >> >> > +       return sgt;
> >> >> > +}
> >> >> > +
> >> >> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
> >> >> > +                                    struct sg_table *sgt,
> >> >> > +                                    enum dma_data_direction dir)
> >> >> > +{
> >> >> > +       /* Nothing to do. */
> >> >> 
> >> >> I kinda think that if you don't support multiple mappings with
> >> >> different directions, you should at least support unmapping and then
> >> >> let someone else map with the other direction
> >> > 
> >> > That would make sense, but in that case I wouldn't be able to cache the
> >> > mapping. It would probably be better to add proper support for multiple
> >> > mappings with different directions.
> >> 
> >> well, you could still cache it; you'd just have to invalidate that
> >> cache on a direction transition
> >> 
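A rough sketch of what that invalidate-on-transition variant could look
like, reusing the names from the patch above (untested, just to
illustrate the idea): the map callback would drop a stale cached mapping
instead of returning -EBUSY, and fall through to the existing remap
path:

	/* Return the cached mapping when the direction matches. */
	if (cma_attach->dir == dir)
		return &cma_attach->sgt;

	/* The direction changed: unmap and free the cached sg table,
	 * then fall through to rebuild and remap it below.
	 */
	if (cma_attach->dir != DMA_NONE) {
		dma_unmap_sg(attach->dev, cma_attach->sgt.sgl,
			     cma_attach->sgt.nents, cma_attach->dir);
		sg_free_table(&cma_attach->sgt);
		cma_attach->dir = DMA_NONE;
	}
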
> >> > Given that the mapping is cached in the attachment, this will only be
> >> > an issue if a driver tries to map the same attachment twice with
> >> > different directions. Isn't that an uncommon use case that could be
> >> > fixed later? :-) I'd like to get this series into v3.11 if possible.
> >> 
> >> I don't feel strongly that this should block merging, vs. fixing it at
> >> a (not too much later) date
> > 
> > I'm not sure whether we should even allow this at all. If an importer
> > wants to switch DMA directions he should probably map it bidirectionally,
> > and we should finally bite the bullet and add an attachment_sync callback
> > to flush caches without dropping the mapping on the floor ...
> > 
> > Tbh I'm not even sure whether multiple mappings make any sense at all for
> > one attachment. Imo that sounds like the importer seriously lost track of
> > things ;-)
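
For illustration, such a callback could look roughly like this. Purely
hypothetical: nothing like it exists in the dma-buf API at this point:

	/* Hypothetical dma_buf_ops callback: flush or invalidate CPU
	 * caches for an existing mapping without tearing it down.
	 */
	int (*attachment_sync)(struct dma_buf_attachment *attach,
			       struct sg_table *sgt,
			       enum dma_data_direction dir);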
> 
> oh, good point... I was thinking more of the case of multiple importers,
> but you are right: there would be multiple attachments in that case, so
> this wouldn't be a problem
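
Right, and since each importer attaches separately, each attachment gets
its own cached mapping with its own direction. A minimal sketch of that
case, where buf, dev1 and dev2 are hypothetical placeholders:

	struct dma_buf_attachment *a1 = dma_buf_attach(buf, dev1);
	struct dma_buf_attachment *a2 = dma_buf_attach(buf, dev2);

	/* Independent attachments, hence independent cached mappings. */
	struct sg_table *s1 = dma_buf_map_attachment(a1, DMA_TO_DEVICE);
	struct sg_table *s2 = dma_buf_map_attachment(a2, DMA_FROM_DEVICE);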

Problem solved, great :-) I've posted v3.

-- 
Regards,

Laurent Pinchart

