[PATCH 06/11] lib/scatterlist: Avoid large scatterlist allocations from sgl_alloc_order
Tvrtko Ursulin
tursulin at ursulin.net
Fri Feb 16 14:46:36 UTC 2018
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
For large but low-order allocation requests the number of struct
scatterlist elements needed can be high, and so the single kmalloc_array
call is at risk of failure when memory is fragmented.
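As a rough illustration (a userspace sketch; it assumes 4 KiB pages and
a 32 byte struct scatterlist on 64-bit, which are typical but not
guaranteed values), a 1 GiB order-0 request already implies an 8 MiB
contiguous table allocation:

  #include <stdio.h>

  int main(void)
  {
      unsigned long length = 1UL << 30;          /* 1 GiB request */
      unsigned int order = 0;                    /* order-0 pages */
      unsigned long chunk_len = 4096UL << order; /* bytes per entry */
      unsigned long nent = (length + chunk_len - 1) / chunk_len;

      /* 262144 entries * 32 bytes = 8 MiB in one kmalloc_array call */
      printf("nent = %lu, table = %lu bytes\n", nent, nent * 32);
      return 0;
  }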
Switch sgl_alloc_order to the previously factored out scatterlist table
allocation and freeing helpers, so it benefits from their shared ability
to build a chained table out of chunks of at most page size.
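For reference, chaining in the scatterlist API works along these lines
(a minimal sketch built on the public sg_init_table()/sg_chain() helpers
rather than on the factored out internals; the block sizes are
arbitrary):

  #include <linux/kernel.h>
  #include <linux/scatterlist.h>

  static struct scatterlist first[SG_MAX_SINGLE_ALLOC];
  static struct scatterlist second[16];

  static void chain_blocks(void)
  {
      sg_init_table(first, SG_MAX_SINGLE_ALLOC);
      sg_init_table(second, ARRAY_SIZE(second));

      /*
       * The last slot of the first block becomes a link entry pointing
       * at the second block, so sg_next() crosses the boundary
       * transparently and iteration sees one logical table.
       */
      sg_chain(first, SG_MAX_SINGLE_ALLOC, second);
  }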
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Bart Van Assche <bart.vanassche at wdc.com>
Cc: Hannes Reinecke <hare at suse.com>
Cc: Johannes Thumshirn <jthumshirn at suse.de>
Cc: Jens Axboe <axboe at kernel.dk>
---
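For context, a caller of the touched entry points looks roughly like
this (a hedged sketch, not lifted from an in-tree user; SZ_8M from
linux/sizes.h is just an example size):

  #include <linux/scatterlist.h>
  #include <linux/sizes.h>

  static int sgl_example(void)
  {
      struct scatterlist *sgl;
      unsigned int nents;

      sgl = sgl_alloc_order(SZ_8M, 0, false, GFP_KERNEL, &nents);
      if (!sgl)
          return -ENOMEM;

      /* ... map the table or walk it with for_each_sg() ... */

      sgl_free_n_order(sgl, nents, 0);
      return 0;
  }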
lib/scatterlist.c | 59 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e11421142cc5..469fa4b1f66b 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -503,6 +503,23 @@ EXPORT_SYMBOL(sg_alloc_table_from_pages);
#ifdef CONFIG_SGL_ALLOC
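+/* Free the pages backing the first @nents entries of @sgl. */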
+static void
+__sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
+ unsigned int order)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (!sg)
+ break;
+ page = sg_page(sg);
+ if (page)
+ __free_pages(page, order);
+ }
+}
+
/**
* sgl_alloc_order - allocate a scatterlist and its pages
* @length: Length in bytes of the scatterlist. Must be at least one
@@ -519,10 +536,11 @@ struct scatterlist *sgl_alloc_order(unsigned long length, unsigned int order,
unsigned int *nent_p)
{
unsigned int chunk_len = PAGE_SIZE << order;
+ unsigned int nent, orig_nents, tmp, i;
struct scatterlist *sgl, *sg;
- unsigned int nent, i;
+ int ret;
- nent = round_up(length, chunk_len) >> (PAGE_SHIFT + order);
+ orig_nents = nent = round_up(length, chunk_len) >> (PAGE_SHIFT + order);
/* Check for nent integer overflow. */
if (length > ((unsigned long)nent << (PAGE_SHIFT + order)))
@@ -532,24 +550,26 @@ struct scatterlist *sgl_alloc_order(unsigned long length, unsigned int order,
*nent_p = nent;
if (chainable) {
+ orig_nents++;
/* Check for integer overflow */
- if (nent == UINT_MAX)
+ if (orig_nents < nent)
return NULL;
- nent++;
}
- sgl = kmalloc_array(nent, sizeof(struct scatterlist), (gfp & ~GFP_DMA));
- if (!sgl)
+ ret = __sg_alloc_scatterlist(orig_nents, SG_MAX_SINGLE_ALLOC, NULL,
+ gfp & ~GFP_DMA, sg_kmalloc, &tmp,
+ &orig_nents, &sgl);
+ if (ret)
return NULL;
- sg_init_table(sgl, nent);
- sg = sgl;
- i = 0;
- while (length) {
+ for_each_sg(sgl, sg, nent, i) {
struct page *page = alloc_pages(gfp, order);
if (!page) {
- sgl_free_n_order(sgl, i, order);
+ __sgl_free_n_order(sgl, i, order);
+ __sg_free_scatterlist(sgl, orig_nents,
+ SG_MAX_SINGLE_ALLOC, false,
+ sg_kfree);
return NULL;
}
@@ -557,7 +577,6 @@ struct scatterlist *sgl_alloc_order(unsigned long length, unsigned int order,
sg_set_page(sg, page, chunk_len, 0);
length -= chunk_len;
sg = sg_next(sg);
- i++;
}
WARN_ONCE(length, "length = %ld\n", length);
return sgl;
@@ -595,18 +614,14 @@ EXPORT_SYMBOL(sgl_alloc);
void sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
unsigned int order)
{
+ unsigned int orig_nents;
struct scatterlist *sg;
- struct page *page;
- unsigned int i;
- for_each_sg(sgl, sg, nents, i) {
- if (!sg)
- break;
- page = sg_page(sg);
- if (page)
- __free_pages(page, order);
- }
- kfree(sgl);
+ /* Walk the chained table to find the index of its last entry. */
+ for_each_sg(sgl, sg, UINT_MAX, orig_nents)
+ if (sg_is_last(sg))
+ break;
+
+ __sgl_free_n_order(sgl, nents, order);
+ __sg_free_scatterlist(sgl, ++orig_nents, SG_MAX_SINGLE_ALLOC, false,
+ sg_kfree);
}
EXPORT_SYMBOL(sgl_free_n_order);
--
2.14.1