[Intel-gfx] [Linaro-mm-sig] [PATCH] dma-buf: Add debug option
Christian König
christian.koenig at amd.com
Fri Jan 15 13:24:57 UTC 2021
Am 15.01.21 um 14:22 schrieb Daniel Vetter:
> On Fri, Jan 15, 2021 at 2:09 PM Christian König
> <ckoenig.leichtzumerken at gmail.com> wrote:
>> Am 15.01.21 um 14:02 schrieb Daniel Vetter:
>>> We have too many people abusing the struct page they can get at but
>>> really shouldn't in importers. Aside from that the backing page might
>>> simply not exist (for dynamic p2p mappings) looking at it and using it
>>> e.g. for mmap can also wreck the page handling of the exporter
>>> completely. Importers really must go through the proper interface like
>>> dma_buf_mmap for everything.
>>>
>>> I'm semi-tempted to enforce this for dynamic importers since those
>>> really have no excuse at all to break the rules.
>>>
>>> Unfortunately we can't store the right pointers somewhere safe to make
>>> sure we oops on something recognizable, so best is to just wrangle
>>> them a bit by flipping all the bits. At least on x86 kernel addresses
>>> have all their high bits sets and the struct page array is fairly low
>>> in the kernel mapping, so flipping all the bits gives us a very high
>>> pointer in userspace and hence excellent chances for an invalid
>>> dereference.
>>>
>>> v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
>>> fancy caching tricks, which would blow up with this address scrambling
>>> trick here (Chris)
>>>
>>> Enable by default when CONFIG_DMA_API_DEBUG is enabled.
>>>
>>> Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
>>> Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
>>> Cc: Chris Wilson <chris at chris-wilson.co.uk>
>>> Cc: Sumit Semwal <sumit.semwal at linaro.org>
>>> Cc: "Christian König" <christian.koenig at amd.com>
>>> Cc: David Stevens <stevensd at chromium.org>
>>> Cc: linux-media at vger.kernel.org
>>> Cc: linaro-mm-sig at lists.linaro.org
>>> ---
>>> drivers/dma-buf/Kconfig | 8 +++++++
>>> drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++++++----
>>> include/linux/dma-buf.h | 6 +++++
>>> 3 files changed, 59 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
>>> index 4f8224a6ac95..4e16c71c24b7 100644
>>> --- a/drivers/dma-buf/Kconfig
>>> +++ b/drivers/dma-buf/Kconfig
>>> @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
>>> This is marked experimental because we don't yet have a consistent
>>> execution context and memory management between drivers.
>>>
>>> +config DMABUF_DEBUG
>>> + bool "DMA-BUF debug checks"
>>> + default y if DMA_API_DEBUG
>>> + help
>>> + This option enables additional checks for DMA-BUF importers and
>>> + exporters. Specifically it validates that importers do not peek at the
>>> + underlying struct page when they import a buffer.
>>> +
>>> config DMABUF_SELFTESTS
>>> tristate "Selftests for the dma-buf interfaces"
>>> default n
>>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>>> index 1c9bd51db110..6e4725f7dfde 100644
>>> --- a/drivers/dma-buf/dma-buf.c
>>> +++ b/drivers/dma-buf/dma-buf.c
>>> @@ -666,6 +666,30 @@ void dma_buf_put(struct dma_buf *dmabuf)
>>> }
>>> EXPORT_SYMBOL_GPL(dma_buf_put);
>>>
>>> +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
>>> + enum dma_data_direction direction)
>>> +{
>>> + struct sg_table *sg_table;
>>> +
>>> + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>>> +
>>> +#ifdef CONFIG_DMABUF_DEBUG
>>> + if (sg_table) {
>>> + int i;
>>> + struct scatterlist *sg;
>>> +
>>> + /* To catch abuse of the underlying struct page by importers mix
>>> + * up the bits, but take care to preserve the low SG_ bits to
>>> + * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
>>> + * before passing the sgt back to the exporter. */
>>> + for_each_sgtable_sg(sg_table, sg, i)
>>> + sg->page_link ^= ~0xffUL;
>>> + }
>>> +#endif
>>> +
>>> + return sg_table;
>>> +}
>>> +
>>> /**
>>> * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
>>> * @dmabuf: [in] buffer to attach device to.
>>> @@ -737,7 +761,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>>> goto err_unlock;
>>> }
>>>
>>> - sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
>>> + sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
>>> if (!sgt)
>>> sgt = ERR_PTR(-ENOMEM);
>>> if (IS_ERR(sgt)) {
>>> @@ -784,6 +808,23 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>>> }
>>> EXPORT_SYMBOL_GPL(dma_buf_attach);
>>>
>>> +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
>>> + struct sg_table *sg_table,
>>> + enum dma_data_direction direction)
>>> +{
>>> +
>>> +#ifdef CONFIG_DMABUF_DEBUG
>>> + if (sg_table) {
>>> + int i;
>>> + struct scatterlist *sg;
>>> +
>>> + for_each_sgtable_sg(sg_table, sg, i)
>>> + sg->page_link ^= ~0xffUL;
>>> + }
>>> +#endif
>> Instead of duplicating this I would rather structure the code so that we
>> have a helper to mangle the sgt when necessary.
>>
>> This can then be called from both the map() as well as the unmap() path.
> Well that's why I extracted the helper functions (it would be 4 copies
> otherwise). It's true that it's still 2x the same operation, but
> conceptually one of them mangles, the other unmangles the pointers.
> It's just that with XOR mangling, that's both the same code.
> Readability feels better that way to me, but I guess I can do another
> tiny helper function extraction if you insist?
I think it would be better to have only one.
And I insist that the mangle value is only once somewhere, either use
just one function or a define/constant.
Christian.
> -Daniel
>
>> Apart from that looks good to me,
>> Christian.
>>
>>> + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>>> +}
>>> +
>>> /**
>>> * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
>>> * @dmabuf: [in] buffer to detach from.
>>> @@ -802,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>>> if (dma_buf_is_dynamic(attach->dmabuf))
>>> dma_resv_lock(attach->dmabuf->resv, NULL);
>>>
>>> - dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
>>> + __unmap_dma_buf(attach, attach->sgt, attach->dir);
>>>
>>> if (dma_buf_is_dynamic(attach->dmabuf)) {
>>> dma_buf_unpin(attach);
>>> @@ -924,7 +965,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>>> }
>>> }
>>>
>>> - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>>> + sg_table = __map_dma_buf(attach, direction);
>>> if (!sg_table)
>>> sg_table = ERR_PTR(-ENOMEM);
>>>
>>> @@ -987,7 +1028,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>>> if (dma_buf_is_dynamic(attach->dmabuf))
>>> dma_resv_assert_held(attach->dmabuf->resv);
>>>
>>> - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>>> + __unmap_dma_buf(attach, sg_table, direction);
>>>
>>> if (dma_buf_is_dynamic(attach->dmabuf) &&
>>> !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
>>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>>> index 628681bf6c99..efdc56b9d95f 100644
>>> --- a/include/linux/dma-buf.h
>>> +++ b/include/linux/dma-buf.h
>>> @@ -154,6 +154,12 @@ struct dma_buf_ops {
>>> * On failure, returns a negative error value wrapped into a pointer.
>>> * May also return -EINTR when a signal was received while being
>>> * blocked.
>>> + *
>>> + * Note that exporters should not try to cache the scatter list, or
>>> + * return the same one for multiple calls. Caching is done either by the
>>> + * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
>>> + * of the scatter list is transferred to the caller, and returned by
>>> + * @unmap_dma_buf.
>>> */
>>> struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
>>> enum dma_data_direction);
>
More information about the Intel-gfx
mailing list