[Intel-gfx] [PATCH 4/4] drm/i915: Use __sg_alloc_table_from_pages for userptr allocations

Tvrtko Ursulin tursulin at ursulin.net
Fri Nov 11 08:50:20 UTC 2016


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

With the addition of __sg_alloc_table_from_pages we can control
the maximum coalescing size and eliminate the separate path for
allocating the backing store here.

Similar to 871dfbd67d4e ("drm/i915: Allow compaction upto
SWIOTLB max segment size"), this enables more compact sg lists
to be created, which benefits workloads with many and/or large
objects of this class.
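
For illustration, here is a minimal standalone sketch of the coalescing
behaviour that the max segment size caps: physically contiguous pages are
merged into one segment, but never past the cap.  Everything in it
(count_segments, PAGE_SZ, the PFN values) is made up for the example and
is not kernel API.

#include <stdio.h>

#define PAGE_SZ 4096u

static unsigned int count_segments(const unsigned long *pfns,
				   unsigned int n_pages,
				   unsigned int max_segment)
{
	unsigned int segments = 0, cur_len = 0, i;
	unsigned long last_pfn = 0;

	for (i = 0; i < n_pages; i++) {
		/* Start a new segment on a discontinuity or when the cap is hit. */
		if (!cur_len || pfns[i] != last_pfn + 1 ||
		    cur_len + PAGE_SZ > max_segment) {
			segments++;
			cur_len = 0;
		}
		cur_len += PAGE_SZ;
		last_pfn = pfns[i];
	}

	return segments;
}

int main(void)
{
	/* Eight physically contiguous pages. */
	unsigned long pfns[] = { 100, 101, 102, 103, 104, 105, 106, 107 };

	/* No practical cap: all eight pages coalesce into one segment. */
	printf("%u\n", count_segments(pfns, 8, ~0u));		/* prints 1 */

	/* 16 KiB cap (e.g. a small swiotlb limit): two segments of four pages. */
	printf("%u\n", count_segments(pfns, 8, 4 * PAGE_SZ));	/* prints 2 */

	return 0;
}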

v2:
 * Rename helper to i915_sg_segment_size and fix swiotlb override.
 * Commit message update.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |  9 +++++++++
 drivers/gpu/drm/i915/i915_gem.c         | 15 +--------------
 drivers/gpu/drm/i915/i915_gem_userptr.c | 28 ++++++----------------------
 3 files changed, 16 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 30777dee3f9c..319f8def0f86 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -4175,4 +4175,13 @@ int remap_io_mapping(struct vm_area_struct *vma,
 	__T;								\
 })
 
+static inline unsigned int i915_sg_segment_size(void)
+{
+#if IS_ENABLED(CONFIG_SWIOTLB)
+	return swiotlb_nr_tbl() << IO_TLB_SHIFT;
+#else
+	return UINT_MAX;
+#endif
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1c20edba7f2a..cb4c188a395c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2223,15 +2223,6 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-	return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2267,7 +2258,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
-	unsigned int max_segment;
+	unsigned int max_segment = rounddown(i915_sg_segment_size(), PAGE_SIZE);
 	int ret;
 	gfp_t gfp;
 
@@ -2278,10 +2269,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_size();
-	if (!max_segment)
-		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 64261639f547..b4461f1832a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -390,36 +390,20 @@ struct get_pages_work {
 	struct task_struct *task;
 };
 
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active() swiotlb_nr_tbl()
-#else
-#define swiotlb_active() 0
-#endif
-
 static int
 st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
 {
-	struct scatterlist *sg;
-	int ret, n;
+	int ret;
 
 	*st = kmalloc(sizeof(**st), GFP_KERNEL);
 	if (*st == NULL)
 		return -ENOMEM;
 
-	if (swiotlb_active()) {
-		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
-		if (ret)
-			goto err;
-
-		for_each_sg((*st)->sgl, sg, num_pages, n)
-			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
-	} else {
-		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
-						0, num_pages << PAGE_SHIFT,
-						GFP_KERNEL);
-		if (ret)
-			goto err;
-	}
+	ret = __sg_alloc_table_from_pages(*st, pvec, num_pages, 0,
+					  num_pages << PAGE_SHIFT,
+					  GFP_KERNEL, i915_sg_segment_size());
+	if (ret)
+		goto err;
 
 	return 0;
 
-- 
2.7.4


