[PATCH v2 4/5] dma-buf: heaps: system_heap alloc support async read

Huan Yang <link@vivo.com>
Tue Jul 30 07:57:48 UTC 2024


The system heap allocates pages in a loop and places them into a
scatterlist, which the dma-buf then manages.
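
For context, the existing allocation path in system_heap.c looks
roughly like this (a simplified excerpt of the upstream driver, error
handling trimmed):

	static const unsigned int orders[] = {8, 4, 0};

	/* Fill the buffer from the largest chunks that still fit. */
	while (size_remaining > 0) {
		/* Stop allocating if the process was killed by SIGKILL. */
		if (fatal_signal_pending(current))
			goto free_buffer;

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
	}
	/* The pages are then stitched into an sg_table and exported. */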

This process lets memory allocation and file I/O proceed in parallel:

Each allocated page is gathered into a read task, and a read is
submitted as soon as the batch limit is reached. Once the allocation
loop finishes, there is no need to wait for the outstanding reads
immediately. Instead, preparation of the dma-buf continues, and only at
the point where the dma-buf must be returned do we wait for the file
content to be fully read.
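
Schematically, the flow this patch implements looks like the following
simplified sketch, using the helpers introduced earlier in this series
(dma_heap_declare_file_read(), dma_heap_gather_file_page(),
dma_heap_end_file_read(); the batch-and-submit logic lives inside the
gather helper):

	heap_ftask = dma_heap_declare_file_read(heap_file);
	if (!heap_ftask)
		return ERR_PTR(-ENOMEM);

	while (size_remaining > 0) {
		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		/*
		 * Hand the page to the read task. Once enough pages have
		 * been gathered, the helper triggers an async read into
		 * them while allocation of the rest continues.
		 */
		if (dma_heap_gather_file_page(heap_ftask, page))
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
	}

	/* ... build the sg_table and export the dma-buf as usual ... */

	/* Only now wait for all outstanding reads to complete. */
	if (dma_heap_end_file_read(heap_ftask)) {
		dma_buf_put(dmabuf);
		return ERR_PTR(-EIO);
	}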

Note that a page's contents must not be modified after it has been
handed to the read task, since the async read may still be writing file
data into it. The system_heap currently never touches page contents
after allocation, so no such conflict exists there.
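
For reference, system_heap already asks the page allocator for zeroed
pages, so clearing happens inside the allocation itself, before a page
is ever handed to the read task (GFP flags as in the upstream driver):

	/* Pages arrive pre-zeroed; the heap never writes to them later. */
	#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
	#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) | __GFP_COMP)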

With async_read, the total time for system_heap buffer allocation plus
file reading is:

  T(total) = T(first gathered batch) + max(T(remaining alloc), T(I/O))

Compared to the synchronous read:
  T(total) = T(alloc) + T(I/O)

Whichever of the remaining allocation time and the I/O time is shorter
is hidden behind the longer one: only the maximum of the pair
contributes to the total.

Therefore, the larger the file being read, the greater the benefit.
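
As an illustrative example (numbers made up for the sake of the
arithmetic), suppose allocation takes 50ms in total, of which 5ms is
spent gathering the first batch, and the file read takes 300ms:

  async: T(total) = 5ms + max(45ms, 300ms) = 305ms
  sync:  T(total) = 50ms + 300ms           = 350ms

The 45ms of remaining allocation work is fully hidden behind the I/O.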

Signed-off-by: Huan Yang <link@vivo.com>
---
 drivers/dma-buf/heaps/system_heap.c | 70 +++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index d78cdb9d01e5..ba0c3d8ce090 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -331,10 +331,10 @@ static struct page *alloc_largest_available(unsigned long size,
 	return NULL;
 }
 
-static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
-					    unsigned long len,
-					    u32 fd_flags,
-					    u64 heap_flags)
+static struct dma_buf *__system_heap_allocate(struct dma_heap *heap,
+					      struct dma_heap_file *heap_file,
+					      unsigned long len, u32 fd_flags,
+					      u64 heap_flags)
 {
 	struct system_heap_buffer *buffer;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -346,6 +346,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 	struct list_head pages;
 	struct page *page, *tmp_page;
 	int i, ret = -ENOMEM;
+	struct dma_heap_file_task *heap_ftask;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
@@ -357,6 +358,15 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 	buffer->len = len;
 
 	INIT_LIST_HEAD(&pages);
+
+	if (heap_file) {
+		heap_ftask = dma_heap_declare_file_read(heap_file);
+		if (!heap_ftask) {
+			kfree(buffer);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
 	i = 0;
 	while (size_remaining > 0) {
 		/*
@@ -372,6 +382,13 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 		if (!page)
 			goto free_buffer;
 
+		/*
+		 * When allocating for a file read, hand each page to the
+		 * read task. On error, free the buffer and return an error.
+		 */
+		if (heap_file && dma_heap_gather_file_page(heap_ftask, page))
+			goto free_buffer;
+
 		list_add_tail(&page->lru, &pages);
 		size_remaining -= page_size(page);
 		max_order = compound_order(page);
@@ -400,9 +417,29 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 		ret = PTR_ERR(dmabuf);
 		goto free_pages;
 	}
+
+	/*
+	 * Allocation, dma-buf export and everything else we need are done,
+	 * but the file read may still be in flight, so wait for it here.
+	 * Once it completes, all file read tasks are destroyed.
+	 * If something went wrong while reading the file, abandon the
+	 * dma-buf and return an error.
+	 */
+	if (heap_file && dma_heap_end_file_read(heap_ftask)) {
+		dma_buf_put(dmabuf);
+		dmabuf = ERR_PTR(-EIO);
+	}
+
 	return dmabuf;
 
 free_pages:
+	/*
+	 * A file read may already have been triggered, so wait for all
+	 * running read work to finish before releasing the pages.
+	 */
+	if (heap_file)
+		dma_heap_wait_for_file_read(heap_ftask);
+
 	for_each_sgtable_sg(table, sg, i) {
 		struct page *p = sg_page(sg);
 
@@ -410,6 +447,13 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 	}
 	sg_free_table(table);
 free_buffer:
+	/*
+	 * A file read may already have been triggered, so destroy the file
+	 * task, including any running work, before releasing the pages.
+	 */
+	if (heap_file)
+		dma_heap_end_file_read(heap_ftask);
+
 	list_for_each_entry_safe(page, tmp_page, &pages, lru)
 		__free_pages(page, compound_order(page));
 	kfree(buffer);
@@ -417,8 +461,26 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
 	return ERR_PTR(ret);
 }
 
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+					    unsigned long len, u32 fd_flags,
+					    u64 heap_flags)
+{
+	return __system_heap_allocate(heap, NULL, len, fd_flags, heap_flags);
+}
+
+static struct dma_buf *
+system_heap_allocate_async_read_file(struct dma_heap *heap,
+				     struct dma_heap_file *heap_file,
+				     u32 fd_flags, u64 heap_flags)
+{
+	return __system_heap_allocate(heap, heap_file,
+				      PAGE_ALIGN(dma_heap_file_size(heap_file)),
+				      fd_flags, heap_flags);
+}
+
 static const struct dma_heap_ops system_heap_ops = {
 	.allocate = system_heap_allocate,
+	.allocate_async_read = system_heap_allocate_async_read_file,
 };
 
 static int system_heap_create(void)
-- 
2.45.2


