[PATCH i-g-t v4 06/40] SQUASH: lib/intel_allocator_simple: Allow creating vm up to its gtt limit

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Mon Feb 15 19:43:06 UTC 2021


Patch to be squashed.

By default we prefer allocating from the top of the vm address space
(so addressing issues can be caught proactively). To avoid hangs, the
default vm range excludes the last page; this works around a hang on
the render engine when the full 3D pipeline is executed from the last
page.

With this patch the user can call intel_allocator_simple_create_full()
and define the start and end of the vm range (the range must stay
within the gtt size, which cannot be exceeded).
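
For illustration only, a minimal sketch of how a caller might request a
custom range together with an explicit strategy. It assumes an already
open drm fd and context id; the 0x1000 start and 4 GiB end are arbitrary
example values, not something required by the allocator:

	struct intel_allocator *ial;

	/* Example: allocate bottom-up within [0x1000, 4 GiB); the end
	 * must not exceed the gtt size of the device.
	 */
	ial = intel_allocator_simple_create_full(fd, ctx,
						 0x1000, 1ULL << 32,
						 ALLOC_STRATEGY_LOW_TO_HIGH);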

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
 lib/intel_allocator_simple.c | 90 +++++++++++++++++++++++++++---------
 1 file changed, 67 insertions(+), 23 deletions(-)

diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 9725222f5..6e02d7c47 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -11,8 +11,18 @@
 #include "intel_bufops.h"
 #include "igt_map.h"
 
+/*
+ * We limit allocator space to avoid hang when batch would be
+ * pinned in the last page.
+ */
+#define RESERVED 4096
+
 /* Avoid compilation warning */
 struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+struct intel_allocator *
+intel_allocator_simple_create_full(int fd, uint32_t ctx,
+				   uint64_t start, uint64_t end,
+				   enum allocator_strategy strategy);
 
 struct simple_vma_heap {
 	struct igt_list_head holes;
@@ -35,8 +45,6 @@ struct intel_allocator_simple {
 	struct igt_map reserved;
 	struct simple_vma_heap heap;
 
-	uint64_t gtt_size;
-	uint64_t bias;
 	uint64_t start;
 	uint64_t end;
 
@@ -95,7 +103,7 @@ static void simple_vma_heap_validate(struct simple_vma_heap *heap)
 
 
 static void simple_vma_heap_free(struct simple_vma_heap *heap,
-				uint64_t offset, uint64_t size)
+				 uint64_t offset, uint64_t size)
 {
 	struct simple_vma_hole *high_hole = NULL, *low_hole = NULL, *hole;
 	bool high_adjacent, low_adjacent;
@@ -161,13 +169,20 @@ static void simple_vma_heap_free(struct simple_vma_heap *heap,
 }
 
 static void simple_vma_heap_init(struct simple_vma_heap *heap,
-		     uint64_t start, uint64_t size)
+				 uint64_t start, uint64_t size,
+				 enum allocator_strategy strategy)
 {
 	IGT_INIT_LIST_HEAD(&heap->holes);
 	simple_vma_heap_free(heap, start, size);
 
-	/* Default to using high addresses */
-	heap->alloc_high = true;
+	switch (strategy) {
+	case ALLOC_STRATEGY_LOW_TO_HIGH:
+		heap->alloc_high = false;
+		break;
+	case ALLOC_STRATEGY_HIGH_TO_LOW:
+	default:
+		heap->alloc_high = true;
+	}
 }
 
 static void simple_vma_heap_finish(struct simple_vma_heap *heap)
@@ -179,7 +194,7 @@ static void simple_vma_heap_finish(struct simple_vma_heap *heap)
 }
 
 static void simple_vma_hole_alloc(struct simple_vma_hole *hole,
-				uint64_t offset, uint64_t size)
+				  uint64_t offset, uint64_t size)
 {
 	struct simple_vma_hole *high_hole;
 	uint64_t waste;
@@ -568,8 +583,10 @@ static bool intel_allocator_simple_is_empty(struct intel_allocator *ial)
 {
 	struct intel_allocator_simple *ials = ial->priv;
 
-	igt_debug("<fd: %d, ctx: %u> objects: %" PRId64 ", reserved_areas: %" PRId64 "\n",
-		 ial->fd, ial->ctx, ials->allocated_objects, ials->reserved_areas);
+	igt_debug("<fd: %d, ctx: %u> objects: %" PRId64
+		  ", reserved_areas: %" PRId64 "\n",
+		  ial->fd, ial->ctx,
+		  ials->allocated_objects, ials->reserved_areas);
 
 	return !ials->allocated_objects && !ials->reserved_areas;
 }
@@ -592,7 +609,7 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
 
 	igt_info("intel_allocator_simple <fd:%d ctx:%d> on "
 		 "[0x%"PRIx64" : 0x%"PRIx64"]:\n", ial->fd, ial->ctx,
-		 ials->bias, ials->gtt_size);
+		 ials->start, ials->end);
 
 	if (full) {
 		igt_info("holes:\n");
@@ -642,10 +659,10 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
 		}
 		igt_assert(ials->reserved_areas == reserved_areas);
 		igt_assert(ials->reserved_size == reserved_size);
-	} else
+	} else {
 		simple_vma_foreach_hole(hole, heap)
 			total_free += hole->size;
-
+	}
 
 	igt_info("free space: %"PRIu64"B (0x%"PRIx64") (%.2f%% full)\n"
 		 "allocated objects: %"PRIu64", reserved areas: %"PRIu64"\n",
@@ -655,7 +672,10 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
 		 ials->allocated_objects, ials->reserved_areas);
 }
 
-struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx)
+static struct intel_allocator *
+__intel_allocator_simple_create(int fd, uint32_t ctx,
+				uint64_t start, uint64_t end,
+				enum allocator_strategy strategy)
 {
 	struct intel_allocator *ial;
 	struct intel_allocator_simple *ials;
@@ -684,16 +704,11 @@ struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx)
 	/* Reserved addresses hashtable is indexed by an offset */
 	__igt_map_init(&ials->reserved, equal_8bytes, NULL, 3);
 
-	ials->gtt_size = gem_aperture_size(fd);
-	igt_debug("Gtt size: %" PRId64 "\n", ials->gtt_size);
-	if (!gem_uses_full_ppgtt(fd))
-		ials->gtt_size /= 2;
-
-	ials->bias = 0;
-	ials->total_size = ials->gtt_size - ials->bias;
-	ials->start = ials->bias;
-	ials->end = ials->gtt_size;
-	simple_vma_heap_init(&ials->heap, ials->bias, ials->total_size);
+	ials->start = start;
+	ials->end = end;
+	ials->total_size = end - start;
+	simple_vma_heap_init(&ials->heap, ials->start, ials->total_size,
+			     strategy);
 
 	ials->allocated_size = 0;
 	ials->allocated_objects = 0;
@@ -702,3 +717,32 @@ struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx)
 
 	return ial;
 }
+
+struct intel_allocator *
+intel_allocator_simple_create(int fd, uint32_t ctx)
+{
+	uint64_t gtt_size = gem_aperture_size(fd);
+
+	if (!gem_uses_full_ppgtt(fd))
+		gtt_size /= 2;
+	else
+		gtt_size -= RESERVED;
+
+	return __intel_allocator_simple_create(fd, ctx, 0, gtt_size,
+					       ALLOC_STRATEGY_HIGH_TO_LOW);
+}
+
+struct intel_allocator *
+intel_allocator_simple_create_full(int fd, uint32_t ctx,
+				   uint64_t start, uint64_t end,
+				   enum allocator_strategy strategy)
+{
+	uint64_t gtt_size = gem_aperture_size(fd);
+
+	igt_assert(end <= gtt_size);
+	if (!gem_uses_full_ppgtt(fd))
+		gtt_size /= 2;
+	igt_assert(end - start <= gtt_size);
+
+	return __intel_allocator_simple_create(fd, ctx, start, end, strategy);
+}
-- 
2.26.0


