[Intel-gfx] [PATCH 06/15] drm/i915: pass page_size to insert_entries

Matthew Auld matthew.auld at intel.com
Mon Mar 6 23:54:05 UTC 2017


Pass the object's page size down to insert_entries() so that each backend
can select the insertion path appropriate for that page size. For now only
I915_GTT_PAGE_SIZE is handled; anything else hits MISSING_CASE().

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 32 ++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  1 +
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  3 ++-
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  1 +
 4 files changed, 30 insertions(+), 7 deletions(-)
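
Note for reviewers: the gen8_ppgtt_insert_4lvl() change below boils down to
selecting an insertion helper from the passed-in page size and treating any
unhandled size as a MISSING_CASE(). A minimal standalone sketch of that
dispatch pattern, using hypothetical names rather than the actual i915
helpers, looks roughly like this:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE_4K 4096UL

/*
 * Hypothetical 4K insertion helper: consumes one page worth of the
 * remaining length and returns true while there is more work to do.
 * Assumes the length is a multiple of the 4K page size.
 */
static bool sketch_insert_4k(unsigned long *remaining)
{
	*remaining -= SKETCH_PAGE_SIZE_4K;
	printf("inserted one 4K page, %lu bytes remaining\n", *remaining);
	return *remaining != 0;
}

static void sketch_insert_entries(unsigned long length, unsigned long page_size)
{
	bool (*insert)(unsigned long *remaining);

	/* Pick the helper for this page size; unhandled sizes are a bug. */
	switch (page_size) {
	case SKETCH_PAGE_SIZE_4K:
		insert = sketch_insert_4k;
		break;
	default:
		/* Mirrors MISSING_CASE() in the patch. */
		fprintf(stderr, "missing case: page_size=%lu\n", page_size);
		return;
	}

	while (insert(&length))
		;
}

int main(void)
{
	sketch_insert_entries(4 * SKETCH_PAGE_SIZE_4K, SKETCH_PAGE_SIZE_4K);
	return 0;
}

The function-pointer indirection keeps the insertion loop identical whatever
the page size; only the helper selection changes, which is where future page
sizes would slot in.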

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cee9c4fec52a..9274974956b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -205,7 +205,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				cache_level, pte_flags);
+				vma->obj->page_size, cache_level, pte_flags);
 
 	return 0;
 }
@@ -906,6 +906,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
+				   unsigned long page_size,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -924,6 +925,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
+				   unsigned long page_size,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -935,9 +937,23 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 	};
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
 	struct gen8_insert_pte idx = gen8_insert_pte(start);
+	bool (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct i915_page_directory_pointer *pdp,
+			       struct sgt_dma *iter,
+			       struct gen8_insert_pte *idx,
+			       enum i915_cache_level cache_level);
+
+	switch (page_size) {
+	case I915_GTT_PAGE_SIZE:
+		insert_entries = gen8_ppgtt_insert_pte_entries;
+		break;
+	default:
+		MISSING_CASE(page_size);
+		return;
+	}
 
-	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
-					     &idx, cache_level))
+	while (insert_entries(ppgtt, pdps[idx.pml4e++], &iter, &idx,
+			      cache_level))
 		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
 }
 
@@ -1620,6 +1636,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      struct sg_table *pages,
 				      u64 start,
+				      unsigned long page_size,
 				      enum i915_cache_level cache_level,
 				      u32 flags)
 {
@@ -2093,6 +2110,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
+				     unsigned long page_size,
 				     enum i915_cache_level level,
 				     u32 unused)
 {
@@ -2140,6 +2158,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
+				     unsigned long page_size,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
@@ -2224,6 +2243,7 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *pages,
 				     u64 start,
+				     unsigned long page_size,
 				     enum i915_cache_level cache_level,
 				     u32 unused)
 {
@@ -2260,7 +2280,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				cache_level, pte_flags);
+				I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 	intel_runtime_pm_put(i915);
 
 	/*
@@ -2314,14 +2334,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->pages, vma->node.start,
-					    cache_level, pte_flags);
+					    I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 	}
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
 		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm,
 					vma->pages, vma->node.start,
-					cache_level, pte_flags);
+					I915_GTT_PAGE_SIZE, cache_level, pte_flags);
 		intel_runtime_pm_put(i915);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 6c90a2ffd0e1..1f51402cf816 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -328,6 +328,7 @@ struct i915_address_space {
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       u64 start,
+			       unsigned long page_size,
 			       enum i915_cache_level cache_level,
 			       u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c4a39b7d7dd0..c7963efe46ba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -256,7 +256,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 				break;
 
 			vm->insert_entries(vm, obj->mm.pages, addr,
-					   I915_CACHE_NONE, 0);
+					   I915_GTT_PAGE_SIZE, I915_CACHE_NONE,
+					   0);
 		}
 		count = n;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..a2137100d2f5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -35,6 +35,7 @@ static void mock_insert_page(struct i915_address_space *vm,
 static void mock_insert_entries(struct i915_address_space *vm,
 				struct sg_table *st,
 				u64 start,
+				unsigned long page_size,
 				enum i915_cache_level level, u32 flags)
 {
 }
-- 
2.9.3


