[PATCH 1/2] dma-buf: start caching of sg_table objects v2

Daniel Vetter daniel at ffwll.ch
Wed May 15 08:58:47 UTC 2019


On Tue, May 14, 2019 at 04:05:14PM +0200, Christian König wrote:
> To allow a smooth transition from pinning buffer objects to dynamic
> invalidation, we first start to cache the sg_table for an attachment.
> 
> v2: keep closer to the DRM implementation
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>

i915 doesn't use the prime helpers, so this would be a change for us. Not
sure that matters. Since we can't use this as an easy way out of the
locking problem around reservation_object, I'm not sure how useful it is
to move this ahead, at least until we have a clearer picture of how we
plan to roll out the dynamic dma-buf stuff. Once we have a bit more
clarity there I'm all for merging prep work, but as-is I'd be cautious
... this stuff is a lot trickier than it seems at first :-/
-Daniel
> ---
>  drivers/dma-buf/dma-buf.c | 27 +++++++++++++++++++++++++--
>  include/linux/dma-buf.h   | 13 +++++++++++++
>  2 files changed, 38 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 7c858020d14b..a2d34c6b80a5 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -573,6 +573,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>  	list_add(&attach->node, &dmabuf->attachments);
>  
>  	mutex_unlock(&dmabuf->lock);
> +
>  	return attach;
>  
>  err_attach:
> @@ -595,6 +596,9 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>  	if (WARN_ON(!dmabuf || !attach))
>  		return;
>  
> +	if (attach->sgt)
> +		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> +
>  	mutex_lock(&dmabuf->lock);
>  	list_del(&attach->node);
>  	if (dmabuf->ops->detach)
> @@ -630,10 +634,27 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>  	if (WARN_ON(!attach || !attach->dmabuf))
>  		return ERR_PTR(-EINVAL);
>  
> +	if (attach->sgt) {
> +		/*
> +		 * Two mappings with different directions for the same
> +		 * attachment are not allowed.
> +		 */
> +		if (attach->dir != direction &&
> +		    attach->dir != DMA_BIDIRECTIONAL)
> +			return ERR_PTR(-EBUSY);
> +
> +		return attach->sgt;
> +	}
> +
>  	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>  	if (!sg_table)
>  		sg_table = ERR_PTR(-ENOMEM);
>  
> +	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
> +		attach->sgt = sg_table;
> +		attach->dir = direction;
> +	}
> +
>  	return sg_table;
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
> @@ -657,8 +678,10 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>  	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
>  		return;
>  
> -	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> -						direction);
> +	if (attach->sgt == sg_table)
> +		return;
> +
> +	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
>  
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index 58725f890b5b..45b9767e67ec 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -51,6 +51,15 @@ struct dma_buf_attachment;
>   * @vunmap: [optional] unmaps a vmap from the buffer
>   */
>  struct dma_buf_ops {
> +	/**
> +	 * @cache_sgt_mapping:
> +	 *
> +	 * If true the framework will cache the first mapping made for each
> +	 * attachment. This avoids creating mappings for attachments multiple
> +	 * times.
> +	 */
> +	bool cache_sgt_mapping;
> +
>  	/**
>  	 * @attach:
>  	 *
> @@ -307,6 +316,8 @@ struct dma_buf {
>   * @dmabuf: buffer for this attachment.
>   * @dev: device attached to the buffer.
>   * @node: list of dma_buf_attachment.
> + * @sgt: cached mapping.
> + * @dir: direction of cached mapping.
>   * @priv: exporter specific attachment data.
>   *
>   * This structure holds the attachment information between the dma_buf buffer
> @@ -322,6 +333,8 @@ struct dma_buf_attachment {
>  	struct dma_buf *dmabuf;
>  	struct device *dev;
>  	struct list_head node;
> +	struct sg_table *sgt;
> +	enum dma_data_direction dir;
>  	void *priv;
>  };
>  
> -- 
> 2.17.1
> 
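For completeness, a minimal sketch of how an exporter would opt into the
caching above; the my_* callbacks and the ops struct name are placeholders
for illustration, not taken from any real driver:

/* Hypothetical exporter callbacks, signatures as in struct dma_buf_ops. */
static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir);
static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir);
static void my_release(struct dma_buf *dmabuf);
static int my_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma);

static const struct dma_buf_ops my_dmabuf_ops = {
	/* Opt in: the first mapping per attachment is cached and reused. */
	.cache_sgt_mapping = true,
	/* With caching, called at most once per attachment. */
	.map_dma_buf	= my_map_dma_buf,
	/* For the cached sgt this ends up being called from dma_buf_detach()
	 * instead of dma_buf_unmap_attachment(). */
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
	.mmap		= my_mmap,
};

On the importer side nothing changes: dma_buf_map_attachment() simply
returns the cached sg_table on repeated calls, and a second mapping with
an incompatible direction is rejected with -EBUSY.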

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

