[PATCH 15/15] Accurate gtt page tracking for the ppgtt

Matthew Auld matthew.auld at intel.com
Tue May 30 16:28:47 UTC 2017

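The object's gtt_page_sizes currently only tells us which page sizes
the backing store *could* support, not what was actually used. Split
the tracking into a dedicated struct i915_page_sizes:

  phys - the page sizes physically present in the backing store
  sg   - the gtt page sizes which would fit the sg layout
  gtt  - the gtt page sizes actually inserted into the page tables

and plumb a pointer to it through vm->insert_entries(), so that each
ppgtt backend can record the page size it used when encoding each
entry. The ggtt backends only ever insert 4K entries and ignore the
argument, so their callers simply pass NULL; the ppgtt backends do
dereference the pointer, so every ppgtt path (including the aliasing
ppgtt and the gtt selftests) must pass a valid struct. The whole
struct is zeroed again when the backing pages are released.

As an illustration (not part of this patch), a selftest could use the
new tracking to verify that a suitably sized and aligned object was
really mapped with 2M entries, along the lines of:

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* Did the ppgtt really use a 2M page for the mapping? */
	if (!(vma->obj->mm.page_sizes.gtt & I915_GTT_PAGE_SIZE_2M))
		pr_err("2M gtt entries were not inserted\n");

	i915_vma_unpin(vma);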

---
 drivers/gpu/drm/i915/i915_debugfs.c           |  6 ++--
 drivers/gpu/drm/i915/i915_gem.c               | 18 +++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 51 ++++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  4 ++-
 drivers/gpu/drm/i915/i915_gem_object.h        | 10 ++++++--
 drivers/gpu/drm/i915/i915_vma.c               | 10 +++---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  6 +++---
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  2 +-
 8 files changed, 59 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fa8dcdcf04bc..bdcbbd9635c5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -156,7 +156,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   get_global_flag(obj),
 		   get_pin_mapped_flag(obj),
 		   obj->base.size / 1024,
-		   stringify_page_sizes(obj->mm.gtt_page_sizes),
+		   stringify_page_sizes(obj->mm.page_sizes.gtt),
 		   obj->base.read_domains,
 		   obj->base.write_domain,
 		   i915_cache_level_str(dev_priv, obj->cache_level),
@@ -452,7 +452,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			mapped_size += obj->base.size;
 		}
 
-		if (obj->mm.gtt_page_sizes > I915_GTT_PAGE_SIZE) {
+		if (obj->mm.page_sizes.gtt > I915_GTT_PAGE_SIZE) {
 			huge_count++;
 			huge_size += obj->base.size;
 		}
@@ -479,7 +479,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			mapped_size += obj->base.size;
 		}
 
-		if (obj->mm.gtt_page_sizes > I915_GTT_PAGE_SIZE) {
+		if (obj->mm.page_sizes.gtt > I915_GTT_PAGE_SIZE) {
 			huge_count++;
 			huge_size += obj->base.size;
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f0aac90cf382..cdf4f8c0990a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2294,7 +2294,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
-	obj->mm.phys_page_sizes = obj->mm.gtt_page_sizes = 0;
+	memset(&obj->mm.page_sizes, 0, sizeof(obj->mm.page_sizes));
 
 unlock:
 	mutex_unlock(&obj->mm.lock);
@@ -2552,25 +2552,25 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(!sg_mask);
 
-	obj->mm.phys_page_sizes = sg_mask;
+	obj->mm.page_sizes.phys = sg_mask;
 
-	obj->mm.gtt_page_sizes = 0;
+	obj->mm.page_sizes.sg = 0;
 
 	/* Select all the gtt page sizes which fit the sg layout */
 	for_each_set_bit(bit, &supported_page_sizes, BITS_PER_LONG) {
-		if (obj->mm.phys_page_sizes & ~0u << bit)
-			obj->mm.gtt_page_sizes |= BIT(bit);
+		if (obj->mm.page_sizes.phys & ~0u << bit)
+			obj->mm.page_sizes.sg |= BIT(bit);
 	}
 
-	largest_size = BIT(fls64(obj->mm.gtt_page_sizes)-1);
+	largest_size = BIT(fls64(obj->mm.page_sizes.sg) - 1);
 
 	/* For simplicity we don't support 64K in mixed-mode */
 	if (largest_size == I915_GTT_PAGE_SIZE_64K &&
-	    IS_ALIGNED(obj->mm.phys_page_sizes, I915_GTT_PAGE_SIZE_64K)) {
-		obj->mm.gtt_page_sizes = I915_GTT_PAGE_SIZE_64K;
+	    IS_ALIGNED(obj->mm.page_sizes.phys, I915_GTT_PAGE_SIZE_64K)) {
+		obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;
 	}
 
-	GEM_BUG_ON(!obj->mm.gtt_page_sizes);
+	GEM_BUG_ON(!obj->mm.page_sizes.sg);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 98071c31ac47..3d9393f388cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -208,7 +208,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				vma->obj->mm.gtt_page_sizes, cache_level,
+				&vma->obj->mm.page_sizes, cache_level,
 				pte_flags);
 
 	return 0;
@@ -910,7 +910,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
-				   unsigned int page_sizes,
+				   struct i915_page_sizes *page_sizes,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -924,12 +924,14 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 
 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
 				      cache_level);
+
+	page_sizes->gtt |= I915_GTT_PAGE_SIZE;
 }
 
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct sg_table *pages,
 				   u64 start,
-				   unsigned int page_sizes,
+				   struct i915_page_sizes *page_sizes,
 				   enum i915_cache_level cache_level,
 				   u32 unused)
 {
@@ -955,23 +957,25 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 		pt_vaddr[idx.pte] = pte_encode | iter.dma;
 		page_size = I915_GTT_PAGE_SIZE;
 
-		if (!idx.pte && page_sizes > I915_GTT_PAGE_SIZE) {
+		if (!idx.pte && page_sizes->sg > I915_GTT_PAGE_SIZE) {
 			dma_addr_t remaining = iter.max - iter.dma;
 
-			if (unlikely(page_sizes & I915_GTT_PAGE_SIZE_1G) &&
+			if (unlikely(page_sizes->sg & I915_GTT_PAGE_SIZE_1G) &&
 			    remaining >= I915_GTT_PAGE_SIZE_1G && !idx.pde) {
 				pdp_vaddr[idx.pdpe] = pte_encode | GEN8_PDPE_PS_1G | iter.dma;
 				page_size = I915_GTT_PAGE_SIZE_1G;
-			} else if (page_sizes & I915_GTT_PAGE_SIZE_2M &&
+			} else if (page_sizes->sg & I915_GTT_PAGE_SIZE_2M &&
 				   remaining >= I915_GTT_PAGE_SIZE_2M) {
 				pd_vaddr[idx.pde] = pte_encode | GEN8_PDE_PS_2M | iter.dma;
 				page_size = I915_GTT_PAGE_SIZE_2M;
 			/* We don't support 64K in mixed mode for now */
-			} else if (page_sizes == I915_GTT_PAGE_SIZE_64K) {
+			} else if (page_sizes->sg == I915_GTT_PAGE_SIZE_64K) {
 				pd_vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
 			}
 		}
 
+		page_sizes->gtt |= page_size;
+
 		start += page_size;
 		iter.dma += page_size;
 		if (iter.dma >= iter.max) {
@@ -1695,7 +1699,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      struct sg_table *pages,
 				      u64 start,
-				      unsigned int page_sizes,
+				      struct i915_page_sizes *page_sizes,
 				      enum i915_cache_level cache_level,
 				      u32 flags)
 {
@@ -1731,6 +1735,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		}
 	} while (1);
 	kunmap_atomic(vaddr);
+
+	page_sizes->gtt |= I915_GTT_PAGE_SIZE;
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -2165,7 +2171,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
-				     unsigned int page_sizes,
+				     struct i915_page_sizes *page_sizes,
 				     enum i915_cache_level level,
 				     u32 unused)
 {
@@ -2213,7 +2219,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     u64 start,
-				     unsigned int page_sizes,
+				     struct i915_page_sizes *page_sizes,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
@@ -2313,8 +2319,7 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 {
 	struct insert_entries *arg = _arg;
 
-	gen8_ggtt_insert_entries(arg->vm, arg->st, I915_GTT_PAGE_SIZE,
-				 arg->start, arg->level, 0);
+	gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, NULL, arg->level, 0);
 	bxt_vtd_ggtt_wa(arg->vm);
 
 	return 0;
@@ -2323,7 +2328,7 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
 					     struct sg_table *st,
 					     u64 start,
-					     unsigned int page_sizes,
+					     struct i915_page_sizes *page_sizes,
 					     enum i915_cache_level level,
 					     u32 unused)
 {
@@ -2395,7 +2400,7 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *pages,
 				     u64 start,
-				     unsigned int page_sizes,
+				     struct i915_page_sizes *page_sizes,
 				     enum i915_cache_level cache_level,
 				     u32 unused)
 {
@@ -2431,8 +2436,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 	intel_runtime_pm_get(i915);
-	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
-				I915_GTT_PAGE_SIZE, cache_level, pte_flags);
+	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, NULL,
+				cache_level, pte_flags);
 	intel_runtime_pm_put(i915);
 
 	/*
@@ -2485,18 +2490,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				goto err_pages;
 		}
 
-		appgtt->base.insert_entries(&appgtt->base,
-					    vma->pages, vma->node.start,
-					    I915_GTT_PAGE_SIZE,
-					    cache_level, pte_flags);
+		appgtt->base.insert_entries(&appgtt->base, vma->pages,
+					    vma->node.start,
+					    &vma->obj->mm.page_sizes,
+					    cache_level, pte_flags);
 	}
 
 	if (flags & I915_VMA_GLOBAL_BIND) {
 		intel_runtime_pm_get(i915);
-		vma->vm->insert_entries(vma->vm,
-					vma->pages, vma->node.start,
-					I915_GTT_PAGE_SIZE,
-					cache_level, pte_flags);
+		vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
+					NULL, cache_level, pte_flags);
 		intel_runtime_pm_put(i915);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 9d8a2a44f6fb..0811859b3a55 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -211,6 +211,8 @@ enum i915_cache_level;
 
 struct i915_vma;
 
+struct i915_page_sizes;
+
 struct i915_page_dma {
 	struct page *page;
 	union {
@@ -325,7 +327,7 @@ struct i915_address_space {
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       u64 start,
-			       unsigned int page_sizes,
+			       struct i915_page_sizes *page_sizes,
 			       enum i915_cache_level cache_level,
 			       u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 9cbe57453bcf..79a99682cb8f 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -129,8 +129,14 @@ struct drm_i915_gem_object {
 		struct sg_table *pages;
 		void *mapping;
 
-		unsigned int phys_page_sizes;
-		unsigned int gtt_page_sizes;
+		struct i915_page_sizes {
+			/* Page sizes present in the backing store */
+			unsigned int phys;
+			/* gtt page sizes which fit the sg layout */
+			unsigned int sg;
+			/* gtt page sizes actually used by the ptes */
+			unsigned int gtt;
+		} page_sizes;
 
 		struct i915_gem_object_page_iter {
 			struct scatterlist *sg_pos;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 879560c42021..fb3364de0513 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -484,9 +484,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		 * enforce this for soft pinning, we need to fallback to normal
 		 * pages if don't meet this restriction.
 		 */
-		if (obj->mm.gtt_page_sizes == I915_GTT_PAGE_SIZE_64K &&
+		if (obj->mm.page_sizes.sg == I915_GTT_PAGE_SIZE_64K &&
 		    !IS_ALIGNED(offset | size, I915_GTT_PAGE_SIZE_2M))
-			obj->mm.gtt_page_sizes = I915_GTT_PAGE_SIZE;
+			obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE;
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, obj->cache_level,
@@ -495,15 +495,15 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 			goto err_unpin;
 	} else {
 		if (i915_vm_is_48bit(vma->vm) &&
-		    obj->mm.gtt_page_sizes > I915_GTT_PAGE_SIZE) {
+		    obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
 			unsigned int page_alignment =
-				BIT(fls64(obj->mm.gtt_page_sizes)-1);
+				BIT(fls64(obj->mm.page_sizes.sg) - 1);
 
 			/* We can't mix 64K and 4K pte's in the same page-table (2M
 			 * block), and so to avoid the ugliness and complexity of
 			 * coloring we opt for just aligning 64K objects to 2M.
 			 */
-			if (obj->mm.gtt_page_sizes == I915_GTT_PAGE_SIZE_64K) {
+			if (obj->mm.page_sizes.sg == I915_GTT_PAGE_SIZE_64K) {
 				page_alignment = I915_GTT_PAGE_SIZE_2M;
 				size = roundup(size, page_alignment);
 			}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 259b5e139df1..4ddf7a81387d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -255,9 +255,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
 				break;
 
-			vm->insert_entries(vm, obj->mm.pages, addr,
-					   I915_GTT_PAGE_SIZE, I915_CACHE_NONE,
-					   0);
+			vm->insert_entries(vm, obj->mm.pages, addr,
+					   &obj->mm.page_sizes,
+					   I915_CACHE_NONE, 0);
 		}
 		count = n;
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 8036ca41dc4c..4d594eaf0b53 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -35,7 +35,7 @@ static void mock_insert_page(struct i915_address_space *vm,
 static void mock_insert_entries(struct i915_address_space *vm,
 				struct sg_table *st,
 				u64 start,
-				unsigned int page_sizes,
+				struct i915_page_sizes *page_sizes,
 				enum i915_cache_level level, u32 flags)
 {
 }
-- 
2.9.4


