[PATCH 08/11] sgl_alloc_order_min_max

Tvrtko Ursulin tursulin at ursulin.net
Fri Feb 16 16:56:18 UTC 2018


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Add sgl_alloc_order_min_max() which allocates the backing pages within
a range of allocation orders, starting from the largest order usable
for the remaining length and falling back towards the minimum order
when higher order allocations fail. The scatterlist table is trimmed
to the number of entries actually used.

Since the allocation order can now vary per entry, drop the order
argument from the freeing helpers (sgl_free_n_order() becomes
sgl_free_n()), derive it from each entry's length with get_order(),
and adjust target_free_sgl() accordingly. sgl_alloc_order() becomes a
static inline wrapper around the new function.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
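A minimal usage sketch for reviewers (illustrative only, not part of
the patch; the buffer size, the 2MiB preferred order and the GFP flags
are made up for the example, which assumes CONFIG_SGL_ALLOC and
<linux/scatterlist.h>):

	unsigned long size = SZ_4M;	/* example buffer size */
	unsigned int nents;
	struct scatterlist *sgl;

	/* Prefer 2MiB chunks, fall back to single pages on failure. */
	sgl = sgl_alloc_order_min_max(size, 0, get_order(SZ_2M),
				      false, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... dma_map_sg() and use the nents entries ... */

	sgl_free(sgl);
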
 drivers/target/target_core_transport.c |   2 +-
 include/linux/scatterlist.h            |  42 ++++++----
 lib/scatterlist.c                      | 148 +++++++++++++++++++++------------
 3 files changed, 123 insertions(+), 69 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4558f2e1fe1b..19f4c7d8b70a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2303,7 +2303,7 @@ static void target_complete_ok_work(struct work_struct *work)
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
 {
-	sgl_free_n_order(sgl, nents, 0);
+	sgl_free_n_order(sgl, nents);
 }
 EXPORT_SYMBOL(target_free_sgl);
 
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index f665a278011a..983a8391d469 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -277,34 +277,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 			      unsigned long size, gfp_t gfp_mask);
 
 #ifdef CONFIG_SGL_ALLOC
-struct scatterlist *sgl_alloc_order(unsigned long length, unsigned int order,
-				    bool chainable, gfp_t gfp,
-				    unsigned int *nent_p);
-void sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
-		      unsigned int order);
+struct scatterlist *
+sgl_alloc_order_min_max(unsigned long length,
+			unsigned int min_order, unsigned int max_order,
+			bool chainable, gfp_t gfp, unsigned int *nent_p);
 
 /**
- * sgl_alloc - allocate a scatterlist and its pages
- * @length: Length in bytes of the scatterlist
+ * sgl_alloc_order - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist. Must be at least one
+ * @order: Second argument for alloc_pages()
+ * @chainable: Whether or not to allocate an extra element in the scatterlist
+ *	for scatterlist chaining purposes
  * @gfp: Memory allocation flags
- * @nent_p: [out] Number of entries in the scatterlist
+ * @nent_p: [out] Number of entries in the scatterlist that have pages
  *
  * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
  */
 static inline struct scatterlist *
-sgl_alloc(unsigned long length, gfp_t gfp, unsigned int *nent_p)
+sgl_alloc_order(unsigned long length, unsigned int order, bool chainable,
+		gfp_t gfp, unsigned int *nent_p)
 {
-	return sgl_alloc_order(length, 0, false, gfp, nent_p);
+	return sgl_alloc_order_min_max(length, order, order, chainable,
+				       gfp, nent_p);
 }
 
+void sgl_free_n(struct scatterlist *sgl, unsigned int nents);
+
 /**
- * sgl_free_order - free a scatterlist and its pages
- * @sgl: Scatterlist with one or more elements
- * @order: Second argument for __free_pages()
+ * sgl_alloc - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
  */
-static inline void sgl_free_order(struct scatterlist *sgl, unsigned int order)
+static inline struct scatterlist *
+sgl_alloc(unsigned long length, gfp_t gfp, unsigned int *nent_p)
 {
-	sgl_free_n_order(sgl, UINT_MAX, order);
+	return sgl_alloc_order(length, 0, false, gfp, nent_p);
 }
 
 /**
@@ -313,7 +323,7 @@ static inline void sgl_free_order(struct scatterlist *sgl, unsigned int order)
  */
 static inline void sgl_free(struct scatterlist *sgl)
 {
-	sgl_free_order(sgl, 0);
+	sgl_free_n(sgl, UINT_MAX);
 }
 
 #endif /* CONFIG_SGL_ALLOC */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 056852746285..4aff52caf93e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -504,8 +504,7 @@ EXPORT_SYMBOL(sg_alloc_table_from_pages);
 #ifdef CONFIG_SGL_ALLOC
 
 static void
-__sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
-		   unsigned int order)
+__sgl_free_n(struct scatterlist *sgl, unsigned int nents)
 {
 	struct scatterlist *sg;
 	struct page *page;
@@ -516,75 +515,121 @@ __sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
 			break;
 		page = sg_page(sg);
 		if (page)
-			__free_pages(page, order);
+			__free_pages(page, get_order(sg->length));
 	}
 }
 
-/**
- * sgl_alloc_order - allocate a scatterlist and its pages
- * @length: Length in bytes of the scatterlist. Must be at least one
- * @order: Second argument for alloc_pages()
- * @chainable: Whether or not to allocate an extra element in the scatterlist
- *	for scatterlist chaining purposes
- * @gfp: Memory allocation flags
- * @nent_p: [out] Number of entries in the scatterlist that have pages
- *
- * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
- */
-struct scatterlist *sgl_alloc_order(unsigned long length, unsigned int order,
-				    bool chainable, gfp_t gfp,
-				    unsigned int *nent_p)
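+/*
+ * Move the used entries of @sg into a new table sized for @nents entries and
+ * free the original, oversized table. Returns the new table, @sg itself when
+ * no trimming is needed, or NULL if the smaller table could not be allocated
+ * (in which case @sg is left untouched).
+ */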
+static struct scatterlist *
+__sg_trim_scatterlist(struct scatterlist *sg,
+		      unsigned int nents,
+		      unsigned int orig_nents,
+		      gfp_t gfp)
 {
-	unsigned int chunk_len = PAGE_SIZE << order;
-	unsigned int nent, orig_nents, tmp, i;
-	struct scatterlist *sgl, *sg;
+	struct scatterlist *new, *dst, *s;
+	unsigned int tmp, i;
 	int ret;
 
-	orig_nents = nent = round_up(length, chunk_len) >> (PAGE_SHIFT + order);
+	if (nents == orig_nents)
+		return sg;
 
-	/* Check for nent integer overflow. */
-	if (length > ((unsigned long)nent << (PAGE_SHIFT + order)))
+	ret = __sg_alloc_scatterlist(nents, SG_MAX_SINGLE_ALLOC, NULL,
+				     gfp & ~GFP_DMA, sg_kmalloc, &tmp, &tmp,
+				     &new);
+	if (ret)
 		return NULL;
 
-	if (nent_p)
-		*nent_p = nent;
-
-	if (chainable) {
-		orig_nents++;
-		/* Check for integer overflow */
-		if (orig_nents < nent)
-			return NULL;
+	dst = new;
+	for_each_sg(sg, s, nents, i) {
+		if (!s)
+			break;
+		sg_set_page(dst, sg_page(s), s->length, 0);
+		dst = sg_next(dst);
 	}
 
-	ret = __sg_alloc_scatterlist(orig_nents, SG_MAX_SINGLE_ALLOC, NULL,
-				     gfp & ~GFP_DMA, sg_kmalloc, &tmp,
+	__sg_free_scatterlist(sg, orig_nents, SG_MAX_SINGLE_ALLOC, false,
+			      sg_kfree);
+	return new;
+}
+
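+/**
+ * sgl_alloc_order_min_max - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist. Must be a multiple of
+ *	PAGE_SIZE
+ * @min_order: Smallest page allocation order to fall back to
+ * @max_order: Largest page allocation order to attempt
+ * @chainable: Whether or not to allocate an extra element in the scatterlist
+ *	for scatterlist chaining purposes
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist that have pages
+ *
+ * Pages are allocated at the largest order not exceeding @max_order, falling
+ * back towards @min_order when higher order allocations fail.
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */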
+struct scatterlist *
+sgl_alloc_order_min_max(unsigned long length,
+			unsigned int min_order, unsigned int max_order,
+			bool chainable, gfp_t gfp, unsigned int *nent_p)
+{
+	unsigned int max_nents, nents, orig_nents, i;
+	struct scatterlist *sgl, *sg;
+	int ret;
+
+	if (WARN_ON_ONCE(!IS_ALIGNED(length, PAGE_SIZE)))
+		return NULL;
+
+	max_nents = round_up(length, PAGE_SIZE << min_order) >>
+		    (PAGE_SHIFT + min_order);
+
+	if (chainable)
+		max_nents++;
+
+	/* Check for max_nents integer overflow. */
+	if (length > ((unsigned long)max_nents << (PAGE_SHIFT + min_order)))
+		return NULL;
+
+	ret = __sg_alloc_scatterlist(max_nents, SG_MAX_SINGLE_ALLOC, NULL,
+				     gfp & ~GFP_DMA, sg_kmalloc, &nents,
 				     &orig_nents, &sgl);
 	if (ret)
 		return NULL;
 
-	for_each_sg(sgl, sg, nent, i) {
-		struct page *page = alloc_pages(gfp, order);
+	for (sg = sgl, i = 0, nents = 0; ;) {
+		unsigned int order = min_t(unsigned int, get_order(length),
+					   max_order);
+		struct page *page;
+
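+		/*
+		 * Try the largest order we are allowed to use and step
+		 * down towards min_order if the allocation fails.
+		 */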
+		do {
+			gfp_t gfp_order = gfp;
+
+			if (order > min_order)
+				gfp_order |= __GFP_NORETRY | __GFP_NOWARN;
+
+			page = alloc_pages(gfp_order, order);
+			if (page) {
+				break;
+			} else if (order-- <= min_order) {
+				__sgl_free_n(sgl, i);
+				__sg_free_scatterlist(sgl, orig_nents,
+						      SG_MAX_SINGLE_ALLOC,
+						      false, sg_kfree);
+				return NULL;
+			}
+
+			/* Limit subsequent allocations on first failure. */
+			max_order = order;
+		} while (1);
+
+		sg_set_page(sg, page,
+			    min_t(unsigned long, length, PAGE_SIZE << order), 0);
+		nents++;
+		length -= sg->length;
 
-		if (!page) {
-			__sgl_free_n_order(sgl, i, order);
-			__sg_free_scatterlist(sgl, orig_nents,
-					      SG_MAX_SINGLE_ALLOC, false,
-					      sg_kfree);
-			return NULL;
+		if (!length) {
+			sg_mark_end(sg);
+			break;
 		}
 
-		chunk_len = min_t(unsigned long, length, chunk_len);
-		sg_set_page(sg, page, chunk_len, 0);
-		length -= chunk_len;
 		sg = sg_next(sg);
-	}
-	WARN_ONCE(length, "length = %ld\n", length);
+		i++;
+	}
+
+	if (nent_p)
+		*nent_p = nents;
+
+	/* Keep the spare element reserved for chaining when trimming. */
+	if (chainable)
+		nents++;
+
+	if (nents < orig_nents) {
+		sg = __sg_trim_scatterlist(sgl, nents, orig_nents, gfp);
+		if (sg)
+			sgl = sg;
+	}
+
+	return sgl;
 }
-EXPORT_SYMBOL(sgl_alloc_order);
+EXPORT_SYMBOL(sgl_alloc_order_min_max);
 
 /**
- * sgl_free_n_order - free a scatterlist and its pages
+ * sgl_free_n - free a scatterlist and its pages
  * @sgl: Scatterlist with one or more elements
  * @nents: Maximum number of elements to free
- * @order: Second argument for __free_pages()
@@ -596,19 +641,18 @@ EXPORT_SYMBOL(sgl_alloc_order);
  * - All pages in a chained scatterlist can be freed at once by setting @nents
  *   to a high number.
  */
-void sgl_free_n_order(struct scatterlist *sgl, unsigned int nents,
-		      unsigned int order)
+void sgl_free_n(struct scatterlist *sgl, unsigned int nents)
 {
 	unsigned int orig_nents;
 	struct scatterlist *sg;
 
 	for_each_sg(sgl, sg, UINT_MAX, orig_nents);
 
-	__sgl_free_n_order(sgl, nents, order);
+	__sgl_free_n(sgl, nents);
 	__sg_free_scatterlist(sgl, ++orig_nents, SG_MAX_SINGLE_ALLOC, false,
 			      sg_kfree);
 }
-EXPORT_SYMBOL(sgl_free_n_order);
+EXPORT_SYMBOL(sgl_free_n);
 
 #endif /* CONFIG_SGL_ALLOC */
 
-- 
2.14.1


