[PATCH v2] tests/intel/xe_vm: Add test for 64k page corners

Matthew Brost matthew.brost at intel.com
Thu Jul 25 17:34:39 UTC 2024


Add sections which split 64k pages into 4k pages or split compact 64k
pages into 64k pages.

v2:
 - Use SZ_* (Zbigniew)
 - Remove code block (Zbigniew)
 - Update commit message / comments

Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
 tests/intel/xe_vm.c | 95 ++++++++++++++++++++++-----------------------
 1 file changed, 46 insertions(+), 49 deletions(-)

diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index d507e7cb16..f41c6aca53 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -397,80 +397,77 @@ static void compact_64k_pages(int fd, struct drm_xe_engine_class_instance *eci)
 		.num_syncs = 2,
 		.syncs = to_user_pointer(sync),
 	};
-	int i, b;
-
-#define EIGHT_MB	0x800000
-#define SIXTY_FOUR_KB	0x10000
+	uint64_t batch_offset;
+	uint64_t batch_addr;
+	uint64_t sdi_offset;
+	uint64_t sdi_addr;
+	int b = 0;
 
 	vm = xe_vm_create(fd, 0, 0);
 	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
 
-	bo0 = xe_bo_create(fd, vm, EIGHT_MB,
+	bo0 = xe_bo_create(fd, vm, SZ_8M,
 			   vram_if_possible(fd, eci->gt_id),
 			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
-	ptr0 = xe_bo_map(fd, bo0, EIGHT_MB);
+	ptr0 = xe_bo_map(fd, bo0, SZ_8M);
 
-	bo1 = xe_bo_create(fd, vm, EIGHT_MB / 2,
+	bo1 = xe_bo_create(fd, vm, SZ_8M / 2,
 			   vram_if_possible(fd, eci->gt_id),
 			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
-	ptr1 = xe_bo_map(fd, bo1, EIGHT_MB / 2);
+	ptr1 = xe_bo_map(fd, bo1, SZ_8M / 2);
 
 	sync[0].handle = syncobj_create(fd, 0);
-	if (page_size == 0x1000) {
+	if (page_size == SZ_4K) {
 		/* Setup mapping to split a 64k PTE in cache */
-		xe_vm_bind_async(fd, vm, 0, bo0, 0, addr0, SIXTY_FOUR_KB, 0, 0);
+		xe_vm_bind_async(fd, vm, 0, bo0, 0, addr0, SZ_64K, 0, 0);
 
-		addr1 = addr0 + (SIXTY_FOUR_KB / 2);
-		xe_vm_bind_async(fd, vm, 0, bo1, 0, addr1, SIXTY_FOUR_KB / 2,
+		addr1 = addr0 + (SZ_64K / 2);
+		xe_vm_bind_async(fd, vm, 0, bo1, 0, addr1, SZ_64K / 2,
 				 sync, 1);
-	} else if (page_size == SIXTY_FOUR_KB) {
+	} else if (page_size == SZ_64K) {
 		addr0 += page_size;
 
 		/* Setup mapping to split compact 64k pages */
-		xe_vm_bind_async(fd, vm, 0, bo0, 0, addr0, EIGHT_MB, 0, 0);
+		xe_vm_bind_async(fd, vm, 0, bo0, 0, addr0, SZ_8M, 0, 0);
 
-		addr1 = addr0 + (EIGHT_MB / 4);
-		xe_vm_bind_async(fd, vm, 0, bo1, 0, addr1, EIGHT_MB / 2,
+		addr1 = addr0 + (SZ_8M / 4);
+		xe_vm_bind_async(fd, vm, 0, bo1, 0, addr1, SZ_8M / 2,
 				 sync, 1);
 	}
 	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
 
-	/* Verify 1st mapping working */
-	i = 0;
-	{
-		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
-		uint64_t batch_addr = addr0 + batch_offset;
-		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
-		uint64_t sdi_addr = addr0 + sdi_offset;
-		data = ptr0;
+	/* Verify 1st and 2nd mappings working */
+	batch_offset = (char *)&data[0].batch - (char *)data;
+	batch_addr = addr0 + batch_offset;
+	sdi_offset = (char *)&data[0].data - (char *)data;
+	sdi_addr = addr0 + sdi_offset;
+	data = ptr0;
 
-		b = 0;
-		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
-		data[i].batch[b++] = sdi_addr;
-		data[i].batch[b++] = sdi_addr >> 32;
-		data[i].batch[b++] = 0xc0ffee;
+	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+	data[0].batch[b++] = sdi_addr;
+	data[0].batch[b++] = sdi_addr >> 32;
+	data[0].batch[b++] = 0xc0ffee;
 
-		sdi_addr = addr1 + sdi_offset;
-		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
-		data[i].batch[b++] = sdi_addr;
-		data[i].batch[b++] = sdi_addr >> 32;
-		data[i].batch[b++] = 0xc0ffee;
+	sdi_addr = addr1 + sdi_offset;
+	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
+	data[0].batch[b++] = sdi_addr;
+	data[0].batch[b++] = sdi_addr >> 32;
+	data[0].batch[b++] = 0xc0ffee;
 
-		data[i].batch[b++] = MI_BATCH_BUFFER_END;
-		igt_assert(b <= ARRAY_SIZE(data[i].batch));
+	data[0].batch[b++] = MI_BATCH_BUFFER_END;
+	igt_assert(b <= ARRAY_SIZE(data[0].batch));
 
-		sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
-		sync[1].handle = syncobj_create(fd, 0);
-		exec.exec_queue_id = exec_queue;
-		exec.address = batch_addr;
-		xe_exec(fd, &exec);
+	sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
+	sync[1].handle = syncobj_create(fd, 0);
+	exec.exec_queue_id = exec_queue;
+	exec.address = batch_addr;
+	xe_exec(fd, &exec);
 
-		igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0,
-					NULL));
-		igt_assert_eq(data[i].data, 0xc0ffee);
-		data = ptr1;
-		igt_assert_eq(data[i].data, 0xc0ffee);
-	}
+	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0,
+				NULL));
+	igt_assert_eq(data[0].data, 0xc0ffee);
+	data = ptr1;
+	igt_assert_eq(data[0].data, 0xc0ffee);
 
 	sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
 	syncobj_reset(fd, &sync[0].handle, 1);
@@ -481,8 +478,8 @@ static void compact_64k_pages(int fd, struct drm_xe_engine_class_instance *eci)
 	xe_exec_queue_destroy(fd, exec_queue);
 	syncobj_destroy(fd, sync[0].handle);
 	syncobj_destroy(fd, sync[1].handle);
-	munmap(ptr0, EIGHT_MB);
-	munmap(ptr1, EIGHT_MB / 2);
+	munmap(ptr0, SZ_8M);
+	munmap(ptr1, SZ_8M / 2);
 	gem_close(fd, bo0);
 	gem_close(fd, bo1);
 	xe_vm_destroy(fd, vm);
-- 
2.34.1



More information about the igt-dev mailing list