[PATCH 09/11] drm/i915: Use sgl_alloc_order_min_max for internal objects

Tvrtko Ursulin tursulin at ursulin.net
Fri Feb 16 14:46:39 UTC 2018


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 drivers/gpu/drm/i915/Kconfig             |  1 +
 drivers/gpu/drm/i915/i915_gem_internal.c | 82 +++++++-------------------------
 lib/scatterlist.c                        |  7 +--
 3 files changed, 23 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index dfd95889f4b7..c0245d15d7d6 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,7 @@ config DRM_I915
 	select SYNC_FILE
 	select IOSF_MBI
 	select CRC32
+	select SGL_ALLOC
 	help
 	  Choose this option if you have a system that has "Intel Graphics
 	  Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 8301c06c952f..6b78e2fd62c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -26,32 +26,17 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
-#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 /* convert swiotlb segment size into sensible units (pages)! */
 #define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
 
-static void internal_free_pages(struct sg_table *st)
-{
-	struct scatterlist *sg;
-
-	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
-		if (sg_page(sg))
-			__free_pages(sg_page(sg), get_order(sg->length));
-	}
-
-	sg_free_table(st);
-	kfree(st);
-}
-
 static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct sg_table *st;
-	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
-	unsigned int npages;
+	struct scatterlist *sg;
+	struct sg_table *st;
+	unsigned int i;
 	int max_order;
 	gfp_t gfp;
 
@@ -76,58 +61,30 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		gfp |= __GFP_DMA32;
 	}
 
-create_st:
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
 		return -ENOMEM;
 
-	npages = obj->base.size / PAGE_SIZE;
-	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+create_sg:
+	st->sgl = sgl_alloc_order_min_max(obj->base.size, 0, max_order, false,
+					  gfp, &st->nents);
+	if (!st->sgl) {
 		kfree(st);
 		return -ENOMEM;
 	}
 
-	sg = st->sgl;
-	st->nents = 0;
-	sg_page_sizes = 0;
-
-	do {
-		int order = min(fls(npages) - 1, max_order);
-		struct page *page;
-
-		do {
-			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
-					   order);
-			if (page)
-				break;
-			if (!order--)
-				goto err;
-
-			/* Limit subsequent allocations as well */
-			max_order = order;
-		} while (1);
-
-		sg_set_page(sg, page, PAGE_SIZE << order, 0);
-		sg_page_sizes |= PAGE_SIZE << order;
-		st->nents++;
-
-		npages -= 1 << order;
-		if (!npages) {
-			sg_mark_end(sg);
-			break;
-		}
-
-		sg = __sg_next(sg);
-	} while (1);
+	st->orig_nents = st->nents;
 
 	if (i915_gem_gtt_prepare_pages(obj, st)) {
 		/* Failed to dma-map try again with single page sg segments */
 		if (get_order(st->sgl->length)) {
-			internal_free_pages(st);
+			sgl_free_n(st->sgl, st->nents);
 			max_order = 0;
-			goto create_st;
+			goto create_sg;
 		}
-		goto err;
+		sgl_free_n(st->sgl, st->nents);
+		kfree(st);
+		return -ENOMEM;
 	}
 
 	/* Mark the pages as dontneed whilst they are still pinned. As soon
@@ -137,23 +94,20 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	 */
 	obj->mm.madv = I915_MADV_DONTNEED;
 
+	sg_page_sizes = 0;
+	for_each_sg(st->sgl, sg, st->nents, i)
+		sg_page_sizes |= PAGE_SIZE << get_order(sg->length);
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
-
-err:
-	sg_set_page(sg, NULL, 0, 0);
-	sg_mark_end(sg);
-	internal_free_pages(st);
-
-	return -ENOMEM;
 }
 
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
 					       struct sg_table *pages)
 {
 	i915_gem_gtt_finish_pages(obj, pages);
-	internal_free_pages(pages);
+	sgl_free_n(pages->sgl, pages->nents);
+	kfree(pages);
 
 	obj->mm.dirty = false;
 	obj->mm.madv = I915_MADV_WILLNEED;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 4aff52caf93e..9595c8feca1e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -525,7 +525,7 @@ __sg_trim_scatterlist(struct scatterlist *sg,
 		      unsigned int orig_nents,
 		      gfp_t gfp)
 {
-	struct scatterlist *new, *s;
+	struct scatterlist *new, *s, *d;
 	unsigned int tmp, i;
 	int ret;
 
@@ -538,9 +538,10 @@ __sg_trim_scatterlist(struct scatterlist *sg,
 	if (ret)
 		return NULL;
 
+	d = new;
 	for_each_sg(sg, s, nents, i) {
-		sg_set_page(new, sg_page(s), s->length, 0);
-		new = sg_next(new);
+		sg_set_page(d, sg_page(s), s->length, 0);
+		d = sg_next(d);
 	}
 
 	__sg_free_scatterlist(sg, orig_nents, SG_MAX_SINGLE_ALLOC, false,
-- 
2.14.1



More information about the Intel-gfx-trybot mailing list