[PATCH 06/12] dma-buf: add optional invalidate_mappings callback v5

Christian König ckoenig.leichtzumerken at gmail.com
Fri Apr 26 12:36:32 UTC 2019


Each importer can now provide an invalidate_mappings callback.

This allows the exporter to provide the mappings without the need to pin
the backing store.

v2: don't try to invalidate mappings when the callback is NULL,
    lock the reservation obj while using the attachments,
    add helper to set the callback
v3: move flag for invalidation support into the DMA-buf,
    use new attach_info structure to set the callback
v4: use importer_priv field instead of mangling exporter priv.
v5: drop invalidation_supported flag

Signed-off-by: Christian König <christian.koenig at amd.com>
---
 drivers/dma-buf/dma-buf.c | 51 +++++++++++++++++++++++++++++++++------
 include/linux/dma-buf.h   | 30 +++++++++++++++++++++--
 2 files changed, 71 insertions(+), 10 deletions(-)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 95bcd2ed795b..1b5cdae68cd6 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -607,7 +607,9 @@ dma_buf_attach(const struct dma_buf_attach_info *info)
 		if (ret)
 			goto err_attach;
 	}
+	reservation_object_lock(dmabuf->resv, NULL);
 	list_add(&attach->node, &dmabuf->attachments);
+	reservation_object_unlock(dmabuf->resv);
 
 	mutex_unlock(&dmabuf->lock);
 
@@ -651,7 +653,9 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 					   DMA_BIDIRECTIONAL);
 
 	mutex_lock(&dmabuf->lock);
+	reservation_object_lock(dmabuf->resv, NULL);
 	list_del(&attach->node);
+	reservation_object_unlock(dmabuf->resv);
 	if (dmabuf->ops->detach)
 		dmabuf->ops->detach(dmabuf, attach);
 
@@ -672,10 +676,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
  * on error. May return -EINTR if it is interrupted by a signal.
  *
- * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
- * the underlying backing storage is pinned for as long as a mapping exists,
- * therefore users/importers should not hold onto a mapping for undue amounts of
- * time.
+ * A mapping must be unmapped by using dma_buf_unmap_attachment().
  */
 struct sg_table *
 dma_buf_map_attachment_locked(struct dma_buf_attachment *attach,
@@ -690,11 +691,22 @@ dma_buf_map_attachment_locked(struct dma_buf_attachment *attach,
 	if (attach->sgt)
 		return attach->sgt;
 
-	r = dma_buf_pin(attach->dmabuf);
-	if (r)
-		return ERR_PTR(r);
+	if (attach->invalidate) {
+		/*
+		 * Mapping a DMA-buf can trigger its invalidation, prevent
+		 * sending this event to the caller by temporarily removing
+		 * this attachment from the list.
+		 */
+		list_del(&attach->node);
+	} else {
+		r = dma_buf_pin(attach->dmabuf);
+		if (r)
+			return ERR_PTR(r);
+	}
 
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	if (attach->invalidate)
+		list_add(&attach->node, &attach->dmabuf->attachments);
 	if (!sg_table)
 		return ERR_PTR(-ENOMEM);
 
@@ -761,7 +773,8 @@ void dma_buf_unmap_attachment_locked(struct dma_buf_attachment *attach,
 
 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
 						direction);
-	dma_buf_unpin(attach->dmabuf);
+	if (!attach->invalidate)
+		dma_buf_unpin(attach->dmabuf);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment_locked);
 
@@ -793,6 +806,26 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+/**
+ * dma_buf_invalidate_mappings - invalidate all mappings of this dma_buf
+ *
+ * @dmabuf:	[in]	buffer whose mappings should be invalidated
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_invalidate_mappings(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	reservation_object_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->invalidate)
+			attach->invalidate(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_invalidate_mappings);
+
 /**
  * DOC: cpu access
  *
@@ -1205,10 +1238,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
 
+		reservation_object_lock(buf_obj->resv, NULL);
 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
 			attach_count++;
 		}
+		reservation_object_unlock(buf_obj->resv);
 
 		seq_printf(s, "Total %d devices attached\n\n",
 				attach_count);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f7d6fc049b39..d27e91bbd796 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -334,6 +334,9 @@ struct dma_buf {
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment.
  * @priv: exporter specific attachment data.
+ * @importer_priv: importer specific attachment data.
+ * @invalidate: invalidate callback, see &dma_buf_attach_info.invalidate for
+ *		more information.
  *
  * This structure holds the attachment information between the dma_buf buffer
  * and its user device(s). The list contains one attachment struct per device
@@ -350,6 +353,8 @@ struct dma_buf_attachment {
 	struct list_head node;
 	struct sg_table *sgt;
 	void *priv;
+	void *importer_priv;
+	void (*invalidate)(struct dma_buf_attachment *attach);
 };
 
 /**
@@ -388,15 +393,35 @@ struct dma_buf_export_info {
 
 /**
  * struct dma_buf_attach_info - holds information needed to attach to a dma_buf
- * @dmabuf:	the exported dma_buf
- * @dev:	the device which wants to import the attachment
+ * @dmabuf:		the exported dma_buf
+ * @dev:		the device which wants to import the attachment
+ * @importer_priv:	private data of importer to this attachment
+ * @invalidate:		optional callback provided by the importer
  *
  * This structure holds the information required to attach to a buffer. Used
  * with dma_buf_attach() only.
+ *
+ * Only if the invalidate callback is provided can the framework avoid pinning
+ * the backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing invalidation.
+ *
+ * After the callback all existing mappings are still valid until all
+ * fences in the dma_buf's reservation object are signaled. After getting an
+ * invalidation callback all mappings should be destroyed by the importer using
+ * the normal dma_buf_unmap_attachment() function as soon as possible.
+ *
+ * New mappings can be created immediately, but can't be used before the
+ * exclusive fence in the dma_buf's reservation object is signaled.
  */
 struct dma_buf_attach_info {
 	struct dma_buf *dmabuf;
 	struct device *dev;
+	void *importer_priv;
+	void (*invalidate)(struct dma_buf_attachment *attach);
 };
 
 /**
@@ -436,6 +461,7 @@ void dma_buf_unmap_attachment_locked(struct dma_buf_attachment *,
 				     enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 				enum dma_data_direction);
+void dma_buf_invalidate_mappings(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-- 
2.17.1



More information about the dri-devel mailing list