[PATCH v3 2/5] udmabuf: change folios array from kmalloc to kvmalloc
Huan Yang
link at vivo.com
Tue Aug 13 09:05:08 UTC 2024
On a 64-bit machine with PAGE_SIZE 4096 and MAX_PAGE_ORDER 10, the page
allocator can serve at most 4MB in a single allocation. Anything larger
triggers the warning below and returns NULL.
udmabuf's size limit is configurable; if it is raised to 3072 (3GB) and
a 3GB udmabuf is then allocated, creation fails:
[ 4080.876581] ------------[ cut here ]------------
[ 4080.876843] WARNING: CPU: 3 PID: 2015 at mm/page_alloc.c:4556 __alloc_pages+0x2c8/0x350
[ 4080.878839] RIP: 0010:__alloc_pages+0x2c8/0x350
[ 4080.879470] Call Trace:
[ 4080.879473] <TASK>
[ 4080.879473] ? __alloc_pages+0x2c8/0x350
[ 4080.879475] ? __warn.cold+0x8e/0xe8
[ 4080.880647] ? __alloc_pages+0x2c8/0x350
[ 4080.880909] ? report_bug+0xff/0x140
[ 4080.881175] ? handle_bug+0x3c/0x80
[ 4080.881556] ? exc_invalid_op+0x17/0x70
[ 4080.881559] ? asm_exc_invalid_op+0x1a/0x20
[ 4080.882077] ? udmabuf_create+0x131/0x400
Because of MAX_PAGE_ORDER, kmalloc can allocate at most
4096 * (1 << 10) = 4MB. Each array entry is an 8-byte pointer, so the
folios array can describe at most 524288 pages (2GB of buffer).
Furthermore, allocations above the costly order (order 3) are not
guaranteed to succeed, due to fragmentation.
Switch the udmabuf arrays to kvmalloc_array, which falls back to vmalloc
when kmalloc cannot satisfy the request. This guarantees the allocation
can succeed for any size while not affecting the performance of the
kmalloc path for smaller allocations.
Signed-off-by: Huan Yang <link at vivo.com>
Acked-by: Christian König <christian.koenig at amd.com>
Acked-by: Vivek Kasireddy <vivek.kasireddy at intel.com>
---
drivers/dma-buf/udmabuf.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index d39f9e1cd532..3ec72d47bb1c 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -73,7 +73,7 @@ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
dma_resv_assert_held(buf->resv);
- pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -81,7 +81,7 @@ static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
pages[pg] = &ubuf->folios[pg]->page;
vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
- kfree(pages);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
@@ -189,8 +189,8 @@ static void release_udmabuf(struct dma_buf *buf)
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ kvfree(ubuf->offsets);
+ kvfree(ubuf->folios);
kfree(ubuf);
}
@@ -315,14 +315,14 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf->pagecount)
goto err;
- ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
- GFP_KERNEL);
+ ubuf->folios = kvmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
+ GFP_KERNEL);
if (!ubuf->folios) {
ret = -ENOMEM;
goto err;
}
- ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
- GFP_KERNEL);
+ ubuf->offsets = kvcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
+ GFP_KERNEL);
if (!ubuf->offsets) {
ret = -ENOMEM;
goto err;
@@ -336,7 +336,7 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
pgcnt = list[i].size >> PAGE_SHIFT;
- folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
+ folios = kvmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
if (!folios) {
ret = -ENOMEM;
goto err;
@@ -346,7 +346,7 @@ static long udmabuf_create(struct miscdevice *device,
ret = memfd_pin_folios(memfd, list[i].offset, end,
folios, pgcnt, &pgoff);
if (ret <= 0) {
- kfree(folios);
+ kvfree(folios);
if (!ret)
ret = -EINVAL;
goto err;
@@ -375,7 +375,7 @@ static long udmabuf_create(struct miscdevice *device,
}
}
- kfree(folios);
+ kvfree(folios);
fput(memfd);
memfd = NULL;
}
@@ -391,8 +391,8 @@ static long udmabuf_create(struct miscdevice *device,
if (memfd)
fput(memfd);
unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ kvfree(ubuf->offsets);
+ kvfree(ubuf->folios);
kfree(ubuf);
return ret;
}
--
2.45.2