[PATCH 07/14] drm/i915: pass page_size to insert_entries

Matthew Auld matthew.auld at intel.com
Tue Apr 25 11:12:01 UTC 2017


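Pass the page size of the backing pages through to the insert_entries
vfunc. For the ppgtt bind path this is the object's gtt_page_size, while
the ggtt and aliasing-ppgtt paths simply pass I915_GTT_PAGE_SIZE.
gen8_ppgtt_insert_4lvl then selects the PTE insertion routine based on
the page size; for now only 4K pages are handled and anything else hits
MISSING_CASE.
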
Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 33 ++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  1 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  3 ++-
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  1 +
 4 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8bab4aea63e6..0c8350f709da 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -205,7 +205,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				cache_level, pte_flags);
+				vma->obj->gtt_page_size, cache_level, pte_flags);
 
 	return 0;
 }
@@ -906,6 +906,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
+				   unsigned int page_size,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -924,6 +925,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
+				   unsigned int page_size,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -935,9 +937,24 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 	};
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
 	struct gen8_insert_pte idx = gen8_insert_pte(start);
+	bool (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct i915_page_directory_pointer *pdp,
+			       struct sgt_dma *iter,
+			       struct gen8_insert_pte *idx,
+			       enum i915_cache_level cache_level);
+
+	/* TODO: turn this into vfunc */
+	switch (page_size) {
+	case I915_GTT_PAGE_SIZE_4K:
+		insert_entries = gen8_ppgtt_insert_pte_entries;
+		break;
+	default:
+		MISSING_CASE(page_size);
+		return;
+	}
 
-	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
-					     &idx, cache_level))
+	while (insert_entries(ppgtt, pdps[idx.pml4e++], &iter, &idx,
+			      cache_level))
 		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
 }
 
@@ -1620,6 +1637,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      struct sg_table *pages,
 				      u64 start,
+				      unsigned int page_size,
 				      enum i915_cache_level cache_level,
 				      u32 flags)
 {
@@ -2093,6 +2111,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
+				     unsigned int page_size,
 				     enum i915_cache_level level,
 				     u32 unused)
 {
@@ -2140,6 +2159,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
+				     unsigned int page_size,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
@@ -2224,6 +2244,7 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *pages,
 				     u64 start,
+				     unsigned int page_size,
 				     enum i915_cache_level cache_level,
 				     u32 unused)
 {
@@ -2260,7 +2281,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				cache_level, pte_flags);
+				I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 	intel_runtime_pm_put(i915);
 
 	/*
@@ -2314,14 +2335,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->pages, vma->node.start,
-					    cache_level, pte_flags);
+					    I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 	}
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
 		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm,
 					vma->pages, vma->node.start,
-					cache_level, pte_flags);
+					I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 		intel_runtime_pm_put(i915);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f8db231c28aa..5a2a3907d266 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -320,6 +320,7 @@ struct i915_address_space {
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       u64 start,
+			       unsigned int page_size,
 			       enum i915_cache_level cache_level,
 			       u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 50710e3f1caa..259b5e139df1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -256,7 +256,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 				break;
 
 			vm->insert_entries(vm, obj->mm.pages, addr,
-					   I915_CACHE_NONE, 0);
+					   I915_GTT_PAGE_SIZE, I915_CACHE_NONE,
+					   0);
 		}
 		count = n;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..38532a008387 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -35,6 +35,7 @@ static void mock_insert_page(struct i915_address_space *vm,
 static void mock_insert_entries(struct i915_address_space *vm,
 				struct sg_table *st,
 				u64 start,
+				unsigned int page_size,
 				enum i915_cache_level level, u32 flags)
 {
 }
-- 
2.9.3
