[PATCH 4/5] udmabuf: add get_sg_table helper function

Huan Yang link at vivo.com
Thu Aug 1 10:45:08 UTC 2024


Currently, there are three duplicate pieces of code that retrieve the
sg_table and update ubuf->sg.

Since the sg_table is used to populate pages in both mmap and vmap, it
is necessary to ensure that ubuf->sg is set correctly.

This patch adds a helper function: if ubuf->sg already exists, it is
returned directly. Otherwise a new sg_table is allocated and installed
into ubuf->sg with cmpxchg().

If the cmpxchg() fails, another context has already set ubuf->sg
correctly, so the freshly allocated table is freed and the existing one
is reused. When the helper is invoked on behalf of a device, the reused
table still needs to be mapped with dma_map_sgtable().

Signed-off-by: Huan Yang <link at vivo.com>
---
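For reference, the publish-once pattern that udmabuf_get_sg_table()
relies on can be sketched with plain C11 atomics. Everything below
(get_table_once(), alloc_table(), free_table(), struct table) is an
illustrative stand-in rather than kernel API, with
atomic_compare_exchange_strong() standing in for cmpxchg():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Dummy stand-in for struct sg_table. */
struct table { int dummy; };

static struct table *alloc_table(void)		/* stand-in for get_sg_table() */
{
	return calloc(1, sizeof(struct table));
}

static void free_table(struct table *t)		/* stand-in for sg_free_table() + kfree() */
{
	free(t);
}

/*
 * Return the already-installed table if present, otherwise allocate one
 * and try to install it atomically.  The loser of a race frees its
 * private copy and reuses the winner's table.
 */
static struct table *get_table_once(_Atomic(struct table *) *slot)
{
	struct table *expected = NULL;
	struct table *cur = atomic_load(slot);

	if (cur)				/* fast path: already published */
		return cur;

	cur = alloc_table();			/* build a private candidate */
	if (!cur)
		return NULL;

	/* Publish it; on failure 'expected' is updated to the winner's pointer. */
	if (atomic_compare_exchange_strong(slot, &expected, cur))
		return cur;

	free_table(cur);			/* lost the race: drop ours */
	return expected;			/* reuse the winner's table */
}

int main(void)
{
	_Atomic(struct table *) slot = NULL;
	struct table *a = get_table_once(&slot);
	struct table *b = get_table_once(&slot);

	printf("same table: %s\n", a == b ? "yes" : "no");
	free_table(a);
	return 0;
}

The kernel helper follows the same shape, with the extra step of calling
dma_map_sgtable() on the reused table when a device is passed in.
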
 drivers/dma-buf/udmabuf.c | 60 ++++++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 17 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 7ed532342d7f..677ebb2d462f 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -38,8 +38,9 @@ struct udmabuf_folio {
 	struct list_head list;
 };
 
-static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
-				     enum dma_data_direction direction);
+static struct sg_table *udmabuf_get_sg_table(struct device *dev,
+					     struct dma_buf *buf,
+					     enum dma_data_direction direction);
 
 static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 {
@@ -52,12 +53,9 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
 		return -EINVAL;
 
-	if (!table) {
-		table = get_sg_table(NULL, buf, 0);
-		if (IS_ERR(table))
-			return PTR_ERR(table);
-		ubuf->sg = table;
-	}
+	table = udmabuf_get_sg_table(NULL, buf, 0);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
 
 	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
 		struct page *page = sg_page_iter_page(&piter);
@@ -84,12 +82,9 @@ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 
 	dma_resv_assert_held(buf->resv);
 
-	if (!sg) {
-		sg = get_sg_table(NULL, buf, 0);
-		if (IS_ERR(sg))
-			return PTR_ERR(sg);
-		ubuf->sg = sg;
-	}
+	sg = udmabuf_get_sg_table(NULL, buf, 0);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
 
 	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
 	if (!pages)
@@ -154,6 +149,39 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
 	return ERR_PTR(ret);
 }
 
+static struct sg_table *udmabuf_get_sg_table(struct device *dev,
+					     struct dma_buf *buf,
+					     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct sg_table *sg = READ_ONCE(ubuf->sg);
+	int ret = 0;
+
+	if (sg)
+		return sg;
+
+	sg = get_sg_table(dev, buf, direction);
+	if (IS_ERR(sg))
+		return sg;
+
+	/* We installed ubuf->sg; return the newly built table. */
+	if (!cmpxchg(&ubuf->sg, NULL, sg))
+		return sg;
+
+	/* Lost the race: drop our table and reuse the installed one. */
+	sg_free_table(sg);
+	kfree(sg);
+	sg = READ_ONCE(ubuf->sg);
+
+	if (dev)
+		ret = dma_map_sgtable(dev, sg, direction, 0);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return sg;
+}
+
 static void put_sg_table(struct device *dev, struct sg_table *sg,
 			 enum dma_data_direction direction)
 {
@@ -230,12 +258,10 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
 		return 0;
 	}
 
-	sg = get_sg_table(dev, buf, direction);
+	sg = udmabuf_get_sg_table(dev, buf, direction);
 	if (IS_ERR(sg))
 		return PTR_ERR(sg);
 
-	ubuf->sg = sg;
-
 	return 0;
 }
 
-- 
2.45.2
