[Intel-gfx] [PATCH 06/21] drm/i915: introduce vm set_pages/clear_pages

Matthew Auld matthew.auld at intel.com
Tue Jul 25 19:21:18 UTC 2017


Move the setting/clearing of the vma->pages to a vm operation. Doing so
neatens things up a little, but more importantly gives us a sane place
to also set/clear the vma->page_sizes, which we introduce later in
preparation for supporting huge pages.

Suggested-by: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c       | 70 +++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_gtt.h       |  2 +
 drivers/gpu/drm/i915/i915_vma.c           | 29 +++++++------
 drivers/gpu/drm/i915/selftests/mock_gtt.c | 11 ++---
 4 files changed, 66 insertions(+), 46 deletions(-)
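
The net effect on the vma paths, as a trimmed sketch of the i915_vma.c
hunks below (error handling and unrelated lines elided):

	/* i915_vma_insert(): populate vma->pages through the vm op
	 * instead of inside each bind_vma() implementation.
	 */
	if (!vma->pages) {
		ret = vma->vm->set_pages(vma);
		if (ret)
			goto err_unpin;
	}

	/* i915_vma_remove() and the err_clear path: release through the
	 * matching op, which also frees a view-specific sg_table when
	 * vma->pages != obj->mm.pages.
	 */
	vma->vm->clear_pages(vma);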

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 10aa7762d9a6..0d0b529c77c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -200,8 +200,6 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 			return ret;
 	}
 
-	vma->pages = vma->obj->mm.pages;
-
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
 	if (vma->obj->gt_ro)
@@ -217,6 +215,26 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
 
+static int ppgtt_set_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->pages);
+
+	vma->pages = vma->obj->mm.pages;
+
+	return 0;
+}
+
+static void clear_pages(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+
+	if (vma->pages != vma->obj->mm.pages) {
+		sg_free_table(vma->pages);
+		kfree(vma->pages);
+	}
+	vma->pages = NULL;
+}
+
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
 				  enum i915_cache_level level)
 {
@@ -1380,6 +1398,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
 	ppgtt->base.bind_vma = ppgtt_bind_vma;
+	ppgtt->base.set_pages = ppgtt_set_pages;
+	ppgtt->base.clear_pages = clear_pages;
 	ppgtt->debug_dump = gen8_dump_ppgtt;
 
 	return 0;
@@ -1822,6 +1842,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
 	ppgtt->base.bind_vma = ppgtt_bind_vma;
+	ppgtt->base.set_pages = ppgtt_set_pages;
+	ppgtt->base.clear_pages = clear_pages;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
@@ -2333,12 +2355,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags;
 
-	if (unlikely(!vma->pages)) {
-		int ret = i915_get_ggtt_vma_pages(vma);
-		if (ret)
-			return ret;
-	}
-
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
 	if (obj->gt_ro)
@@ -2375,12 +2391,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	u32 pte_flags;
 	int ret;
 
-	if (unlikely(!vma->pages)) {
-		ret = i915_get_ggtt_vma_pages(vma);
-		if (ret)
-			return ret;
-	}
-
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
 	if (vma->obj->gt_ro)
@@ -2395,7 +2405,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 							     vma->node.start,
 							     vma->size);
 			if (ret)
-				goto err_pages;
+				return ret;
 		}
 
 		appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
@@ -2409,17 +2419,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	}
 
 	return 0;
-
-err_pages:
-	if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
-		if (vma->pages != vma->obj->mm.pages) {
-			GEM_BUG_ON(!vma->pages);
-			sg_free_table(vma->pages);
-			kfree(vma->pages);
-		}
-		vma->pages = NULL;
-	}
-	return ret;
 }
 
 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
@@ -2457,6 +2456,19 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
+static int ggtt_set_pages(struct i915_vma *vma)
+{
+	int ret;
+
+	GEM_BUG_ON(vma->pages);
+
+	ret = i915_get_ggtt_vma_pages(vma);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
 				  unsigned long color,
 				  u64 *start,
@@ -2859,6 +2871,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->base.cleanup = gen6_gmch_remove;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.set_pages = ggtt_set_pages;
+	ggtt->base.clear_pages = clear_pages;
 	ggtt->base.insert_page = gen8_ggtt_insert_page;
 	ggtt->base.clear_range = nop_clear_range;
 	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -2915,6 +2929,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.set_pages = ggtt_set_pages;
+	ggtt->base.clear_pages = clear_pages;
 	ggtt->base.cleanup = gen6_gmch_remove;
 
 	ggtt->invalidate = gen6_ggtt_invalidate;
@@ -2960,6 +2976,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->base.clear_range = i915_ggtt_clear_range;
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;
+	ggtt->base.set_pages = ggtt_set_pages;
+	ggtt->base.clear_pages = clear_pages;
 	ggtt->base.cleanup = i915_gmch_remove;
 
 	ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4c2f7d7c1e7d..57738a61ea6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -330,6 +330,8 @@ struct i915_address_space {
 	int (*bind_vma)(struct i915_vma *vma,
 			enum i915_cache_level cache_level,
 			u32 flags);
+	int (*set_pages)(struct i915_vma *vma);
+	void (*clear_pages)(struct i915_vma *vma);
 
 	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
 };
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 958be0a95960..c8d1b98cc260 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -266,6 +266,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (bind_flags == 0)
 		return 0;
 
+	GEM_BUG_ON(!vma->pages);
+
 	trace_i915_vma_bind(vma, bind_flags);
 	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
@@ -471,25 +473,31 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (ret)
 		return ret;
 
+	if (!vma->pages) {
+		ret = vma->vm->set_pages(vma);
+		if (ret)
+			goto err_unpin;
+	}
+
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
 		    range_overflows(offset, size, end)) {
 			ret = -EINVAL;
-			goto err_unpin;
+			goto err_clear;
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, obj->cache_level,
 					   flags);
 		if (ret)
-			goto err_unpin;
+			goto err_clear;
 	} else {
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
 					  size, alignment, obj->cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_unpin;
+			goto err_clear;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -504,6 +512,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	return 0;
 
+err_clear:
+	vma->vm->clear_pages(vma);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
 	return ret;
@@ -517,6 +527,8 @@ i915_vma_remove(struct i915_vma *vma)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
+	vma->vm->clear_pages(vma);
+
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
@@ -568,10 +580,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	return 0;
 
 err_remove:
-	if ((bound & I915_VMA_BIND_MASK) == 0) {
-		GEM_BUG_ON(vma->pages);
+	if ((bound & I915_VMA_BIND_MASK) == 0)
 		i915_vma_remove(vma);
-	}
 err_unpin:
 	__i915_vma_unpin(vma);
 	return ret;
@@ -717,13 +727,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	if (vma->pages != obj->mm.pages) {
-		GEM_BUG_ON(!vma->pages);
-		sg_free_table(vma->pages);
-		kfree(vma->pages);
-	}
-	vma->pages = NULL;
-
 	i915_vma_remove(vma);
 
 destroy:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f2118cf535a0..336e1afb250f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,6 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
 			   u32 flags)
 {
 	GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-	vma->pages = vma->obj->mm.pages;
 	vma->flags |= I915_VMA_LOCAL_BIND;
 	return 0;
 }
@@ -84,6 +83,8 @@ mock_ppgtt(struct drm_i915_private *i915,
 	ppgtt->base.insert_entries = mock_insert_entries;
 	ppgtt->base.bind_vma = mock_bind_ppgtt;
 	ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+	ppgtt->base.set_pages = ppgtt_set_pages;
+	ppgtt->base.clear_pages = clear_pages;
 	ppgtt->base.cleanup = mock_cleanup;
 
 	return ppgtt;
@@ -93,12 +94,6 @@ static int mock_bind_ggtt(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
 			  u32 flags)
 {
-	int err;
-
-	err = i915_get_ggtt_vma_pages(vma);
-	if (err)
-		return err;
-
 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	return 0;
 }
@@ -124,6 +119,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 	ggtt->base.insert_entries = mock_insert_entries;
 	ggtt->base.bind_vma = mock_bind_ggtt;
 	ggtt->base.unbind_vma = mock_unbind_ggtt;
+	ggtt->base.set_pages = ggtt_set_pages;
+	ggtt->base.clear_pages = clear_pages;
 	ggtt->base.cleanup = mock_cleanup;
 
 	i915_address_space_init(&ggtt->base, i915, "global");
-- 
2.13.3


