[PATCH 10/15] drm/i915: support huge gtt pages for the 48b PPGTT

Matthew Auld <matthew.auld at intel.com>
Thu Jun 8 12:36:16 UTC 2017


Support inserting huge gtt pages into the 48b PPGTT, including
mixed-mode where we allow a mixture of gtt page sizes.
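
In gen8_ppgtt_insert_4lvl() we now branch on the sg page-size mask and
only fall back to the existing 4K pte walk when no larger page sizes
are present (this mirrors the hunk further down):

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE)
		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
	else
		/* existing gen8_ppgtt_insert_pte_entries() walk */

The huge path then writes the largest entry it can at the current GTT
offset: a 1G PDPE, a 2M PDE, or 4K ptes, possibly with the PDE marked
as 64K afterwards.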

v2: Check dma alignment for huge pages. Either we have a bug somewhere,
or the huge region needs to be aligned to the respective page size
boundary. This sucks, and becomes more apparent now that we don't force
alignment for objects that need to be packed into the lower 32 bits,
which seems to be most of them...
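
For reference, the condition gating e.g. a 2M PDE in
gen8_ppgtt_insert_huge_entries() below is:

	if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
	    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
	    rem >= SZ_2M && !idx.pte)

i.e. the dma address must be 2M aligned, at least 2M must remain in the
current sg chunk, and the GTT offset itself must sit on a 2M boundary
(idx.pte == 0). The 1G and 64K cases follow the same pattern with their
respective alignments.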

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Daniel Vetter <daniel at ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 95 +++++++++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_gtt.h |  5 ++
 2 files changed, 96 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e6813786b1e9..27c80a2adc29 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -922,6 +922,88 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 				      cache_level);
 }
 
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+					   struct i915_page_directory_pointer **pdps,
+					   struct sgt_dma *iter,
+					   enum i915_cache_level cache_level)
+{
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	dma_addr_t rem = iter->max - iter->dma;
+	u64 start = vma->node.start;
+
+	do {
+		struct gen8_insert_pte idx = gen8_insert_pte(start);
+		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+		struct i915_page_table *pt = pd->page_table[idx.pde];
+		unsigned int page_size;
+		bool maybe_64K = false;
+		gen8_pte_t encode = pte_encode;
+		gen8_pte_t *vaddr;
+		u16 index, max;
+
+		if (unlikely(vma->page_sizes.sg & I915_GTT_PAGE_SIZE_1G) &&
+		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_1G) &&
+		    rem >= SZ_1G && !(idx.pte | idx.pde)) {
+			vaddr = kmap_atomic_px(pdp);
+			index = idx.pdpe;
+			max = GEN8_PML4ES_PER_PML4;
+			page_size = I915_GTT_PAGE_SIZE_1G;
+			encode |= GEN8_PDPE_PS_1G;
+		} else if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+			   IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+			   rem >= SZ_2M && !idx.pte) {
+			vaddr = kmap_atomic_px(pd);
+			index = idx.pde;
+			max = I915_PDES;
+			page_size = I915_GTT_PAGE_SIZE_2M;
+			encode |= GEN8_PDE_PS_2M;
+		} else {
+			vaddr = kmap_atomic_px(pt);
+			index = idx.pte;
+			max = GEN8_PTES;
+			page_size = I915_GTT_PAGE_SIZE;
+
+			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+			    rem >= SZ_64K && !idx.pte)
+				maybe_64K = true;
+		}
+
+		do {
+			vaddr[index++] = encode | iter->dma;
+
+			start += page_size;
+			iter->dma += page_size;
+			if (iter->dma >= iter->max) {
+				iter->sg = __sg_next(iter->sg);
+				if (!iter->sg)
+					break;
+
+				iter->dma = sg_dma_address(iter->sg);
+				iter->max = iter->dma + iter->sg->length;
+			}
+			rem = iter->max - iter->dma;
+
+			if (maybe_64K && rem < I915_GTT_PAGE_SIZE_64K)
+				maybe_64K = false;
+		} while (rem >= page_size && index < max);
+
+		kunmap_atomic(vaddr);
+
+		/* Is it safe to mark the 2M block as 64K? */
+		if (maybe_64K) {
+			if (index == max ||
+			    (!iter->sg && IS_ALIGNED(vma->node.size,
+						    I915_GTT_PAGE_SIZE_2M))) {
+				vaddr = kmap_atomic_px(pd);
+				vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+				kunmap_atomic(vaddr);
+			}
+		}
+	} while (iter->sg);
+}
+
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 				   struct i915_vma *vma,
 				   enum i915_cache_level cache_level,
@@ -934,11 +1016,16 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 		.max = iter.dma + iter.sg->length,
 	};
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
-	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
-	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
-					     &idx, cache_level))
-		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+	} else {
+		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+						     &iter, &idx, cache_level))
+			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+	}
 }
 
 static void gen8_free_page_tables(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aceb554b86ee..204fd30d1d31 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -149,6 +149,11 @@ typedef u64 gen8_ppgtt_pml4e_t;
 #define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
 #define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
 
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M   BIT(7)
+
+#define GEN8_PDPE_PS_1G  BIT(7)
+
 struct sg_table;
 
 struct intel_rotation_info {
-- 
2.9.4


