[Intel-gfx] [PATCH] dma-buf: Add debug option
Daniel Vetter
daniel at ffwll.ch
Mon Jan 18 13:27:56 UTC 2021
On Fri, Jan 15, 2021 at 07:52:53PM +0100, Christian König wrote:
> Am 15.01.21 um 17:47 schrieb Daniel Vetter:
> > We have too many importers abusing the struct page they can get at
> > even though they really shouldn't. Aside from the fact that the
> > backing page might simply not exist (for dynamic p2p mappings),
> > looking at it and using it e.g. for mmap can also completely wreck
> > the page handling of the exporter. Importers really must go through
> > the proper interface like dma_buf_mmap for everything.
> >
> > I'm semi-tempted to enforce this for dynamic importers since those
> > really have no excuse at all to break the rules.
> >
> > Unfortunately we can't store the right pointers somewhere safe to
> > make sure we oops on something recognizable, so the best we can do is
> > wrangle them a bit by flipping all the bits. At least on x86 kernel
> > addresses have all their high bits set and the struct page array is
> > fairly low in the kernel mapping, so flipping all the bits gives us a
> > very high pointer in userspace and hence excellent chances for an
> > invalid dereference.
> >
> > v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
> > fancy caching tricks, which would blow up with this address scrambling
> > trick here (Chris)
> >
> > Enable by default when CONFIG_DMA_API_DEBUG is enabled.
> >
> > v3: Only one copy of the mangle/unmangle code (Christian)
> >
> > v4: #ifdef, not #if (0day)
> >
> > v5: sg_table can also be an ERR_PTR (Chris, Christian)
> >
> > Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk> (v2)
> > Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
> > Cc: Chris Wilson <chris at chris-wilson.co.uk>
> > Cc: Sumit Semwal <sumit.semwal at linaro.org>
> > Cc: "Christian König" <christian.koenig at amd.com>
> > Cc: David Stevens <stevensd at chromium.org>
> > Cc: linux-media at vger.kernel.org
> > Cc: linaro-mm-sig at lists.linaro.org
>
> Reviewed-by: Christian König <christian.koenig at amd.com>
Stuffed into drm-misc-next, thanks to both of you for reviewing.
-Daniel
>
> > ---
> > drivers/dma-buf/Kconfig | 8 +++++++
> > drivers/dma-buf/dma-buf.c | 46 +++++++++++++++++++++++++++++++++++----
> > include/linux/dma-buf.h | 6 +++++
> > 3 files changed, 56 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> > index 4f8224a6ac95..4e16c71c24b7 100644
> > --- a/drivers/dma-buf/Kconfig
> > +++ b/drivers/dma-buf/Kconfig
> > @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
> > This is marked experimental because we don't yet have a consistent
> > execution context and memory management between drivers.
> > +config DMABUF_DEBUG
> > + bool "DMA-BUF debug checks"
> > + default y if DMA_API_DEBUG
> > + help
> > + This option enables additional checks for DMA-BUF importers and
> > + exporters. Specifically it validates that importers do not peek at the
> > + underlying struct page when they import a buffer.
> > +
> > config DMABUF_SELFTESTS
> > tristate "Selftests for the dma-buf interfaces"
> > default n
> > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > index 1c9bd51db110..f264b70c383e 100644
> > --- a/drivers/dma-buf/dma-buf.c
> > +++ b/drivers/dma-buf/dma-buf.c
> > @@ -666,6 +666,34 @@ void dma_buf_put(struct dma_buf *dmabuf)
> > }
> > EXPORT_SYMBOL_GPL(dma_buf_put);
> > +static void mangle_sg_table(struct sg_table *sg_table)
> > +{
> > +#ifdef CONFIG_DMABUF_DEBUG
> > + int i;
> > + struct scatterlist *sg;
> > +
> > + /* To catch abuse of the underlying struct page by importers mix
> > + * up the bits, but take care to preserve the low SG_ bits to
> > + * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
> > + * before passing the sgt back to the exporter. */
> > + for_each_sgtable_sg(sg_table, sg, i)
> > + sg->page_link ^= ~0xffUL;
> > +#endif
> > +
> > +}
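
For anyone reading along: the easiest way to see what this does is outside the kernel. Below is a minimal userspace sketch (the vmemmap-style address is just a made-up example value), showing that the XOR is its own inverse, keeps the low SG_CHAIN/SG_END bits intact and turns a typical struct page pointer into a very high userspace address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same transform as mangle_sg_table() above: flip every bit except the
 * low byte, so the SG_CHAIN/SG_END markers in page_link survive. */
static uintptr_t mangle(uintptr_t page_link)
{
        return page_link ^ ~(uintptr_t)0xff;
}

int main(void)
{
        /* Made-up x86-64 vmemmap-style struct page address, with the
         * SG_END bit (0x02) set in the low bits of page_link. */
        uintptr_t page_link = 0xffffea0001234000UL | 0x02;
        uintptr_t scrambled = mangle(page_link);

        printf("original:  %#lx\n", (unsigned long)page_link);
        printf("scrambled: %#lx\n", (unsigned long)scrambled);

        assert(mangle(scrambled) == page_link);         /* XOR undoes itself */
        assert((scrambled & 0x3) == (page_link & 0x3)); /* low bits preserved */
        return 0;
}

The scrambled value here works out to 0x15fffedcbf02, deep in the userspace half of the address space, so an importer that sneaks a look at sg_page() should fault immediately instead of silently appearing to work.
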
> > +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
> > + enum dma_data_direction direction)
> > +{
> > + struct sg_table *sg_table;
> > +
> > + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> > +
> > + if (!IS_ERR_OR_NULL(sg_table))
> > + mangle_sg_table(sg_table);
> > +
> > + return sg_table;
> > +}
> > +
> > /**
> > * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
> > * @dmabuf: [in] buffer to attach device to.
> > @@ -737,7 +765,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> > goto err_unlock;
> > }
> > - sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> > + sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
> > if (!sgt)
> > sgt = ERR_PTR(-ENOMEM);
> > if (IS_ERR(sgt)) {
> > @@ -784,6 +812,16 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> > }
> > EXPORT_SYMBOL_GPL(dma_buf_attach);
> > +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
> > + struct sg_table *sg_table,
> > + enum dma_data_direction direction)
> > +{
> > + /* uses XOR, hence this unmangles */
> > + mangle_sg_table(sg_table);
> > +
> > + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> > +}
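
Since the question comes up regularly: with the scrambling in place, the only thing an importer can legitimately consume from the returned table is the DMA side. A rough importer-side sketch (my_dev_program_dma() and my_importer_use_buffer() are made up here, and the locking rules for dynamic attachments are left out for brevity):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical device hook, standing in for whatever the driver does
 * with the bus addresses (filling hw descriptors, etc.). */
static void my_dev_program_dma(dma_addr_t addr, unsigned int len)
{
}

static int my_importer_use_buffer(struct dma_buf_attachment *attach)
{
        struct sg_table *sgt;
        struct scatterlist *sg;
        int i;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* Only sg_dma_address()/sg_dma_len() are fair game here;
         * sg_page() would return a scrambled pointer with
         * CONFIG_DMABUF_DEBUG=y and oops on first dereference. */
        for_each_sgtable_dma_sg(sgt, sg, i)
                my_dev_program_dma(sg_dma_address(sg), sg_dma_len(sg));

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        return 0;
}

CPU access has to go through dma_buf_mmap()/dma_buf_vmap() or the dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() pair, never through the pages backing the sgt.
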
> > +
> > /**
> > * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
> > * @dmabuf: [in] buffer to detach from.
> > @@ -802,7 +840,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
> > if (dma_buf_is_dynamic(attach->dmabuf))
> > dma_resv_lock(attach->dmabuf->resv, NULL);
> > - dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> > + __unmap_dma_buf(attach, attach->sgt, attach->dir);
> > if (dma_buf_is_dynamic(attach->dmabuf)) {
> > dma_buf_unpin(attach);
> > @@ -924,7 +962,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> > }
> > }
> > - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> > + sg_table = __map_dma_buf(attach, direction);
> > if (!sg_table)
> > sg_table = ERR_PTR(-ENOMEM);
> > @@ -987,7 +1025,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
> > if (dma_buf_is_dynamic(attach->dmabuf))
> > dma_resv_assert_held(attach->dmabuf->resv);
> > - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> > + __unmap_dma_buf(attach, sg_table, direction);
> > if (dma_buf_is_dynamic(attach->dmabuf) &&
> > !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> > diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> > index 628681bf6c99..efdc56b9d95f 100644
> > --- a/include/linux/dma-buf.h
> > +++ b/include/linux/dma-buf.h
> > @@ -154,6 +154,12 @@ struct dma_buf_ops {
> > * On failure, returns a negative error value wrapped into a pointer.
> > * May also return -EINTR when a signal was received while being
> > * blocked.
> > + *
> > + * Note that exporters should not try to cache the scatter list, or
> > + * return the same one for multiple calls. Caching is done either by the
> > + * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
> > + * of the scatter list is transferred to the caller, and returned by
> > + * @unmap_dma_buf.
> > */
> > struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
> > enum dma_data_direction);
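
And to make the ownership/no-caching rule above concrete from the exporter side: hand out a freshly allocated, freshly mapped table on every call and tear it down again in @unmap_dma_buf. A rough sketch of one way to comply (struct my_buffer and the my_* names are made up, pinning and locking details omitted):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical exporter-private buffer: just an array of pages. */
struct my_buffer {
        struct page **pages;
        unsigned int nr_pages;
};

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
                                       enum dma_data_direction dir)
{
        struct my_buffer *buf = attach->dmabuf->priv;
        struct sg_table *sgt;
        int ret;

        /* Fresh table per call: ownership moves to the dma-buf core,
         * which scrambles page_link and only hands the table back in
         * @unmap_dma_buf. */
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->nr_pages, 0,
                                        (unsigned long)buf->nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto err_free;

        ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
        if (ret)
                goto err_free_table;

        return sgt;

err_free_table:
        sg_free_table(sgt);
err_free:
        kfree(sgt);
        return ERR_PTR(ret);
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
                             struct sg_table *sgt,
                             enum dma_data_direction dir)
{
        dma_unmap_sgtable(attach->dev, sgt, dir, 0);
        sg_free_table(sgt);
        kfree(sgt);
}
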
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch