[PATCH i-g-t 8/8] Use the BST allocator

Andrzej Turko andrzej.turko at linux.intel.com
Fri Jul 23 09:23:59 UTC 2021


Verify that the BST allocator works correctly by using it in
place of the simple allocator in intel_batchbuffer and in
several tests, so that existing workloads exercise the new
backend.

Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
---
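Note for reviewers (not part of the commit message): the allocator
backend is chosen once, when a test opens its allocator handle, so
switching from INTEL_ALLOCATOR_SIMPLE to INTEL_ALLOCATOR_BST is a
one-argument change at each call site. A minimal sketch of the usage
pattern this patch touches, assuming an already open i915 fd and a
valid GEM handle (values are illustrative only):

	uint64_t ahnd, offset;

	/* The backend is selected here; everything else is unchanged. */
	ahnd = intel_allocator_open(fd, 0 /* ctx */, INTEL_ALLOCATOR_BST);

	/* Ask the allocator for an offset; 4096 assumed for size and alignment. */
	offset = intel_allocator_alloc(ahnd, handle, 4096, 4096);

	/* ... use offset in an execbuf, as the tests in this patch do ... */

	intel_allocator_close(ahnd);
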
 lib/intel_batchbuffer.c       | 12 ++++++------
 tests/i915/api_intel_bb.c     | 20 ++++++++++----------
 tests/i915/gem_exec_capture.c | 10 +++++-----
 tests/i915/gem_exec_store.c   |  6 +++---
 tests/i915/gem_linear_blits.c |  2 +-
 tests/i915/gem_softpin.c      | 10 +++++-----
 6 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 2b8b903e2..f2b4e8956 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1410,7 +1410,7 @@ struct intel_bb *intel_bb_create(int i915, uint32_t size)
 
 	return __intel_bb_create(i915, 0, size,
 				 relocs && !aux_needs_softpin(i915), 0, 0,
-				 INTEL_ALLOCATOR_SIMPLE,
+				 INTEL_ALLOCATOR_BST,
 				 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
@@ -1433,7 +1433,7 @@ intel_bb_create_with_context(int i915, uint32_t ctx, uint32_t size)
 
 	return __intel_bb_create(i915, ctx, size,
 				 relocs && !aux_needs_softpin(i915), 0, 0,
-				 INTEL_ALLOCATOR_SIMPLE,
+				 INTEL_ALLOCATOR_BST,
 				 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
@@ -1606,7 +1606,7 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
 	ibb->handle = gem_create(ibb->i915, ibb->size);
 
 	/* Keep address for bb in reloc mode and RANDOM allocator */
-	if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+	if (ibb->allocator_type == INTEL_ALLOCATOR_BST)
 		ibb->batch_offset = __intel_bb_get_offset(ibb,
 							  ibb->handle,
 							  ibb->size,
@@ -1867,7 +1867,7 @@ intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint64_t size,
 			 * For simple allocator check entry consistency
 			 * - reserve if it is not already allocated.
 			 */
-			if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE) {
+			if (ibb->allocator_type == INTEL_ALLOCATOR_BST) {
 				bool allocated, reserved;
 
 				reserved = intel_allocator_reserve_if_not_allocated(ibb->allocator_handle,
@@ -1885,7 +1885,7 @@ intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint64_t size,
 		 * we can expect addresses passed by the user can be moved
 		 * within the driver.
 		 */
-		if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+		if (ibb->allocator_type == INTEL_ALLOCATOR_BST)
 			igt_assert_f(object->offset == offset,
 				     "(pid: %ld) handle: %u, offset not match: %" PRIx64 " <> %" PRIx64 "\n",
 				     (long) getpid(), handle,
@@ -2427,7 +2427,7 @@ static void update_offsets(struct intel_bb *ibb,
 		object = intel_bb_find_object(ibb, entry->handle);
 		igt_assert(object);
 
-		if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+		if (ibb->allocator_type == INTEL_ALLOCATOR_BST)
 			igt_assert(object->offset == entry->addr.offset);
 		else
 			entry->addr.offset = object->offset;
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index 74cb18417..c1659c0ea 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -181,7 +181,7 @@ static void simple_bb(struct buf_ops *bops, bool use_context)
 		gem_require_contexts(i915);
 
 	ibb = intel_bb_create_with_allocator(i915, ctx, PAGE_SIZE,
-					     INTEL_ALLOCATOR_SIMPLE);
+					     INTEL_ALLOCATOR_BST);
 	if (debug_bb)
 		intel_bb_set_debug(ibb, true);
 
@@ -221,7 +221,7 @@ static void bb_with_allocator(struct buf_ops *bops)
 	igt_require(gem_uses_full_ppgtt(i915));
 
 	ibb = intel_bb_create_with_allocator(i915, ctx, PAGE_SIZE,
-					     INTEL_ALLOCATOR_SIMPLE);
+					     INTEL_ALLOCATOR_BST);
 	if (debug_bb)
 		intel_bb_set_debug(ibb, true);
 
@@ -627,7 +627,7 @@ static void object_noreloc(struct buf_ops *bops, enum obj_cache_ops cache_op,
 	igt_debug("[4] h2 presumed offset: 0x%"PRIx64"\n", poff_h2);
 
 	/* For simple allocator and purge=cache we must have same addresses */
-	if (allocator_type == INTEL_ALLOCATOR_SIMPLE || !purge_cache) {
+	if (allocator_type == INTEL_ALLOCATOR_BST || !purge_cache) {
 		igt_assert(poff_h1 == poff2_h1);
 		igt_assert(poff_h2 == poff2_h2);
 	}
@@ -1129,7 +1129,7 @@ static void delta_check(struct buf_ops *bops)
 	bool supports_48bit;
 
 	ibb = intel_bb_create_with_allocator(i915, 0, PAGE_SIZE,
-					     INTEL_ALLOCATOR_SIMPLE);
+					     INTEL_ALLOCATOR_BST);
 	supports_48bit = ibb->supports_48b_address;
 	if (!supports_48bit)
 		intel_bb_destroy(ibb);
@@ -1476,10 +1476,10 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
 		object_reloc(bops, KEEP_CACHE);
 
 	igt_subtest("object-noreloc-purge-cache-simple")
-		object_noreloc(bops, PURGE_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		object_noreloc(bops, PURGE_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("object-noreloc-keep-cache-simple")
-		object_noreloc(bops, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		object_noreloc(bops, KEEP_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("object-noreloc-purge-cache-random")
 		object_noreloc(bops, PURGE_CACHE, INTEL_ALLOCATOR_RANDOM);
@@ -1488,10 +1488,10 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
 		object_noreloc(bops, KEEP_CACHE, INTEL_ALLOCATOR_RANDOM);
 
 	igt_subtest("blit-reloc-purge-cache")
-		blit(bops, RELOC, PURGE_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		blit(bops, RELOC, PURGE_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("blit-reloc-keep-cache")
-		blit(bops, RELOC, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		blit(bops, RELOC, KEEP_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("blit-noreloc-keep-cache-random")
 		blit(bops, NORELOC, KEEP_CACHE, INTEL_ALLOCATOR_RANDOM);
@@ -1500,10 +1500,10 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
 		blit(bops, NORELOC, PURGE_CACHE, INTEL_ALLOCATOR_RANDOM);
 
 	igt_subtest("blit-noreloc-keep-cache")
-		blit(bops, NORELOC, KEEP_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		blit(bops, NORELOC, KEEP_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("blit-noreloc-purge-cache")
-		blit(bops, NORELOC, PURGE_CACHE, INTEL_ALLOCATOR_SIMPLE);
+		blit(bops, NORELOC, PURGE_CACHE, INTEL_ALLOCATOR_BST);
 
 	igt_subtest("intel-bb-blit-none")
 		do_intel_bb_blit(bops, 10, I915_TILING_NONE);
diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index 6e817c46c..a77ac4767 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -195,7 +195,7 @@ static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
 	uint64_t ahnd;
 
 	handle = gem_create(fd, 4096);
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	__capture1(fd, dir, ahnd, ctx, ring, handle, 4096);
 
@@ -478,7 +478,7 @@ static void many(int fd, int dir, uint64_t size, unsigned int flags)
 	igt_require(count > 1);
 
 	intel_require_memory(count, size, CHECK_RAM);
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	offsets = __captureN(fd, dir, ahnd, 0, size, count, flags);
 
@@ -569,7 +569,7 @@ static void prioinv(int fd, int dir, const intel_ctx_t *ctx,
 	int link[2], dummy;
 
 	intel_allocator_multiprocess_start();
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 	obj.offset = intel_allocator_alloc(ahnd, obj.handle, 4096, ALIGNMENT);
 	obj.offset = CANONICAL(obj.offset);
 	obj.flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
@@ -604,7 +604,7 @@ static void prioinv(int fd, int dir, const intel_ctx_t *ctx,
 			  count, (int)(size >> 20));
 
 		/* Reopen the allocator in the new process. */
-		ahnd = intel_allocator_open(fd, child + 1, INTEL_ALLOCATOR_SIMPLE);
+		ahnd = intel_allocator_open(fd, child + 1, INTEL_ALLOCATOR_BST);
 
 		free(__captureN(fd, dir, ahnd, ring, size, count, ASYNC));
 		intel_allocator_close(ahnd);
@@ -644,7 +644,7 @@ static void userptr(int fd, int dir)
 
 	igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
 	igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	__capture1(fd, dir, ahnd, intel_ctx_0(fd), 0, handle, 4096);
 
diff --git a/tests/i915/gem_exec_store.c b/tests/i915/gem_exec_store.c
index 38c595e34..85fc4b747 100644
--- a/tests/i915/gem_exec_store.c
+++ b/tests/i915/gem_exec_store.c
@@ -60,7 +60,7 @@ static void store_dword(int fd, const intel_ctx_t *ctx,
 		execbuf.flags |= I915_EXEC_SECURE;
 	execbuf.rsvd1 = ctx->id;
 
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = gem_create(fd, 4096);
@@ -145,7 +145,7 @@ static void store_cachelines(int fd, const intel_ctx_t *ctx,
 		execbuf.flags |= I915_EXEC_SECURE;
 	execbuf.rsvd1 = ctx->id;
 
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 	obj = calloc(execbuf.buffer_count, sizeof(*obj));
 	igt_assert(obj);
 	for (i = 0; i < execbuf.buffer_count; i++) {
@@ -257,7 +257,7 @@ static void store_all(int fd, const intel_ctx_t *ctx)
 		execbuf.flags |= I915_EXEC_SECURE;
 	execbuf.rsvd1 = ctx->id;
 
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = gem_create(fd, nengine*sizeof(uint32_t));
diff --git a/tests/i915/gem_linear_blits.c b/tests/i915/gem_linear_blits.c
index 6504c9f27..f86e647e3 100644
--- a/tests/i915/gem_linear_blits.c
+++ b/tests/i915/gem_linear_blits.c
@@ -188,7 +188,7 @@ static void run_test(int fd, int count, bool do_relocs)
 
 	ahnd = intel_allocator_open(fd, 0, do_relocs ?
 					    INTEL_ALLOCATOR_RELOC :
-					    INTEL_ALLOCATOR_SIMPLE);
+					    INTEL_ALLOCATOR_BST);
 
 	handle = malloc(sizeof(uint32_t) * count * 2);
 	offset = calloc(1, sizeof(uint64_t) * count);
diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
index 82d8a2861..85f7f2650 100644
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -806,7 +806,7 @@ static void test_allocator_basic(int fd, bool reserve)
 	 * Check that we can place objects at start/end
 	 * of the GTT using the allocator.
 	 */
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	if (reserve)
 		__reserve(ahnd, fd, true, objects, num_reserved, ressize);
@@ -831,7 +831,7 @@ static void test_allocator_nopin(int fd, bool reserve)
 	 * in execobj[] the kernel does not reject the placement due
 	 * to overlaps or invalid addresses.
 	 */
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 
 	if (reserve)
 		__reserve(ahnd, fd, false, objects, num_reserved, ressize);
@@ -857,11 +857,11 @@ static void test_allocator_fork(int fd)
 	 */
 	intel_allocator_multiprocess_start();
 
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 	__reserve(ahnd, fd, true, objects, num_reserved, ressize);
 
 	igt_fork(child, 8) {
-		ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+		ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 		igt_until_timeout(2)
 			__exec_using_allocator(ahnd, fd, num_obj, true);
 		intel_allocator_close(ahnd);
@@ -872,7 +872,7 @@ static void test_allocator_fork(int fd)
 	__unreserve(ahnd, fd, objects, num_reserved, ressize);
 	igt_assert(intel_allocator_close(ahnd) == true);
 
-	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_BST);
 	igt_assert(intel_allocator_close(ahnd) == true);
 
 	intel_allocator_multiprocess_stop();
-- 
2.25.1


