[igt-dev] [PATCH i-g-t 3/5] tests/i915/gem_exec_capture: Support gens without relocations

Andrzej Turko andrzej.turko at linux.intel.com
Wed Jul 7 11:26:42 UTC 2021


With relocations disabled on newer generations, tests must
assign addresses to objects themselves instead of relying
on the driver to do so.
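
Concretely, each test now opens an offset allocator, reserves an
address for every object, softpins the object there, and releases
the address once the object is closed. A minimal sketch of the
pattern applied throughout the diff below (gem_execbuf() stands in
for the test's actual submission; error handling omitted):

	uint64_t ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);

	/* Reserve a GPU address and pin the object at it. */
	obj.offset = intel_allocator_alloc(ahnd, obj.handle, size, ALIGNMENT);
	obj.offset = CANONICAL(obj.offset);
	obj.flags |= EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS;

	gem_execbuf(fd, &execbuf);

	/* Release the address and the allocator handle when done. */
	intel_allocator_free(ahnd, obj.handle);
	intel_allocator_close(ahnd);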

Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
 tests/i915/gem_exec_capture.c | 133 ++++++++++++++++++++++++++--------
 1 file changed, 102 insertions(+), 31 deletions(-)

diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index a6b3d987f..19f5a127f 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -33,6 +33,8 @@
 
 IGT_TEST_DESCRIPTION("Check that we capture the user specified objects on a hang");
 
+#define ALIGNMENT (1 << 12)
+
 static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
 {
 	char *error, *str;
@@ -53,7 +55,7 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
 		addr = hi;
 		addr <<= 32;
 		addr |= lo;
-		igt_assert_eq_u64(addr, obj->offset);
+		igt_assert_eq_u64(addr, DECANONICAL(obj->offset));
 		found = true;
 	}
 
@@ -61,7 +63,8 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
 	igt_assert(found);
 }
 
-static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
+static void __capture1(int fd, int dir, unsigned ring, uint64_t ahnd,
+		       uint32_t target, uint64_t target_size)
 {
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
 	struct drm_i915_gem_exec_object2 obj[4];
@@ -73,32 +76,45 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
 	struct drm_i915_gem_execbuffer2 execbuf;
 	uint32_t *batch, *seqno;
 	int i;
+	bool do_relocs = gem_has_relocations(fd);
 
 	memset(obj, 0, sizeof(obj));
 	obj[SCRATCH].handle = gem_create(fd, 4096);
+	obj[SCRATCH].flags = EXEC_OBJECT_WRITE;
 	obj[CAPTURE].handle = target;
 	obj[CAPTURE].flags = EXEC_OBJECT_CAPTURE;
 	obj[NOCAPTURE].handle = gem_create(fd, 4096);
-
 	obj[BATCH].handle = gem_create(fd, 4096);
-	obj[BATCH].relocs_ptr = (uintptr_t)reloc;
-	obj[BATCH].relocation_count = ARRAY_SIZE(reloc);
+
+	for (i = 0; i < 4; i++) {
+		obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+						      i == CAPTURE ? target_size : 4096,
+						      ALIGNMENT);
+		obj[i].offset = CANONICAL(obj[i].offset);
+		obj[i].flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
+				(do_relocs ? 0 : EXEC_OBJECT_PINNED);
+	}
 
 	memset(reloc, 0, sizeof(reloc));
 	reloc[0].target_handle = obj[BATCH].handle; /* recurse */
-	reloc[0].presumed_offset = 0;
+	reloc[0].presumed_offset = obj[BATCH].offset;
 	reloc[0].offset = 5*sizeof(uint32_t);
 	reloc[0].delta = 0;
 	reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
 	reloc[0].write_domain = 0;
 
 	reloc[1].target_handle = obj[SCRATCH].handle; /* breadcrumb */
-	reloc[1].presumed_offset = 0;
+	reloc[1].presumed_offset = obj[SCRATCH].offset;
 	reloc[1].offset = sizeof(uint32_t);
 	reloc[1].delta = 0;
 	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
 	reloc[1].write_domain = I915_GEM_DOMAIN_RENDER;
 
+	if (do_relocs) {
+		obj[BATCH].relocs_ptr = (uintptr_t)reloc;
+		obj[BATCH].relocation_count = ARRAY_SIZE(reloc);
+	}
+
 	seqno = gem_mmap__wc(fd, obj[SCRATCH].handle, 0, 4096, PROT_READ);
 	gem_set_domain(fd, obj[SCRATCH].handle,
 			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -110,8 +126,8 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
 	i = 0;
 	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
 	if (gen >= 8) {
-		batch[++i] = 0;
-		batch[++i] = 0;
+		batch[++i] = obj[SCRATCH].offset;
+		batch[++i] = obj[SCRATCH].offset >> 32;
 	} else if (gen >= 4) {
 		batch[++i] = 0;
 		batch[++i] = 0;
@@ -127,8 +143,8 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
 	batch[++i] = MI_BATCH_BUFFER_START; /* not crashed? try again! */
 	if (gen >= 8) {
 		batch[i] |= 1 << 8 | 1;
-		batch[++i] = 0;
-		batch[++i] = 0;
+		batch[++i] = obj[BATCH].offset;
+		batch[++i] = obj[BATCH].offset >> 32;
 	} else if (gen >= 6) {
 		batch[i] |= 1 << 8;
 		batch[++i] = 0;
@@ -163,6 +179,9 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
 
 	gem_sync(fd, obj[BATCH].handle);
 
+	intel_allocator_free(ahnd, obj[BATCH].handle);
+	intel_allocator_free(ahnd, obj[NOCAPTURE].handle);
+	intel_allocator_free(ahnd, obj[SCRATCH].handle);
 	gem_close(fd, obj[BATCH].handle);
 	gem_close(fd, obj[NOCAPTURE].handle);
 	gem_close(fd, obj[SCRATCH].handle);
@@ -171,10 +190,16 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
 static void capture(int fd, int dir, unsigned ring)
 {
 	uint32_t handle;
+	uint64_t ahnd = 0;
 
 	handle = gem_create(fd, 4096);
-	__capture1(fd, dir, ring, handle);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+
+	__capture1(fd, dir, ring, ahnd, handle, 4096);
+
 	gem_close(fd, handle);
+	intel_allocator_free(ahnd, handle);
+	intel_allocator_close(ahnd);
 }
 
 static int cmp(const void *A, const void *B)
@@ -195,7 +220,7 @@ static struct offset {
 	unsigned long idx;
 } *__captureN(int fd, int dir, unsigned ring,
 	      unsigned int size, int count,
-	      unsigned int flags)
+	      unsigned int flags, uint64_t ahnd)
 #define INCREMENTAL 0x1
 #define ASYNC 0x2
 {
@@ -206,18 +231,30 @@ static struct offset {
 	uint32_t *batch, *seqno;
 	struct offset *offsets;
 	int i;
+	bool do_relocs = gem_has_relocations(fd);
 
-	offsets = calloc(count , sizeof(*offsets));
+	offsets = calloc(count, sizeof(*offsets));
 	igt_assert(offsets);
 
 	obj = calloc(count + 2, sizeof(*obj));
 	igt_assert(obj);
 
 	obj[0].handle = gem_create(fd, 4096);
+	obj[0].offset = intel_allocator_alloc(ahnd, obj[0].handle,
+					      4096, ALIGNMENT);
+	obj[0].offset = CANONICAL(obj[0].offset);
+	obj[0].flags = EXEC_OBJECT_WRITE | EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
+		       (do_relocs ? 0 : EXEC_OBJECT_PINNED);
+
 	for (i = 0; i < count; i++) {
 		obj[i + 1].handle = gem_create(fd, size);
-		obj[i + 1].flags =
-			EXEC_OBJECT_CAPTURE | EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+		obj[i + 1].offset = intel_allocator_alloc(ahnd, obj[i + 1].handle,
+							  size, ALIGNMENT);
+		obj[i + 1].offset = CANONICAL(obj[i + 1].offset);
+		obj[i + 1].flags = EXEC_OBJECT_CAPTURE |
+				   EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
+				   (do_relocs ? 0 : EXEC_OBJECT_PINNED);
+
 		if (flags & INCREMENTAL) {
 			uint32_t *ptr;
 
@@ -230,23 +267,32 @@ static struct offset {
 	}
 
 	obj[count + 1].handle = gem_create(fd, 4096);
-	obj[count + 1].relocs_ptr = (uintptr_t)reloc;
-	obj[count + 1].relocation_count = ARRAY_SIZE(reloc);
+	obj[count + 1].offset = intel_allocator_alloc(ahnd, obj[count + 1].handle,
+						      4096, ALIGNMENT);
+	obj[count + 1].offset = CANONICAL(obj[count + 1].offset);
+	obj[count + 1].flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS |
+			       (do_relocs ? 0 : EXEC_OBJECT_PINNED);
 
 	memset(reloc, 0, sizeof(reloc));
 	reloc[0].target_handle = obj[count + 1].handle; /* recurse */
-	reloc[0].presumed_offset = 0;
+	reloc[0].presumed_offset = obj[count + 1].offset;
 	reloc[0].offset = 5*sizeof(uint32_t);
 	reloc[0].delta = 0;
 	reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
 	reloc[0].write_domain = 0;
 
 	reloc[1].target_handle = obj[0].handle; /* breadcrumb */
-	reloc[1].presumed_offset = 0;
+	reloc[1].presumed_offset = obj[0].offset;
 	reloc[1].offset = sizeof(uint32_t);
 	reloc[1].delta = 0;
 	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
 	reloc[1].write_domain = I915_GEM_DOMAIN_RENDER;
+	if (do_relocs) {
+		obj[count + 1].relocs_ptr = (uintptr_t)reloc;
+		obj[count + 1].relocation_count = ARRAY_SIZE(reloc);
+	} else {
+		execbuf.flags = I915_EXEC_NO_RELOC;
+	}
 
 	seqno = gem_mmap__wc(fd, obj[0].handle, 0, 4096, PROT_READ);
 	gem_set_domain(fd, obj[0].handle,
@@ -259,8 +305,8 @@ static struct offset {
 	i = 0;
 	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
 	if (gen >= 8) {
-		batch[++i] = 0;
-		batch[++i] = 0;
+		batch[++i] = obj[0].offset;
+		batch[++i] = obj[0].offset >> 32;
 	} else if (gen >= 4) {
 		batch[++i] = 0;
 		batch[++i] = 0;
@@ -276,8 +322,8 @@ static struct offset {
 	batch[++i] = MI_BATCH_BUFFER_START; /* not crashed? try again! */
 	if (gen >= 8) {
 		batch[i] |= 1 << 8 | 1;
-		batch[++i] = 0;
-		batch[++i] = 0;
+		batch[++i] = obj[count + 1].offset;
+		batch[++i] = obj[count + 1].offset >> 32;
 	} else if (gen >= 6) {
 		batch[i] |= 1 << 8;
 		batch[++i] = 0;
@@ -311,13 +357,17 @@ static struct offset {
 		gem_sync(fd, obj[count + 1].handle);
 	}
 
-	gem_close(fd, obj[count + 1].handle);
 	for (i = 0; i < count; i++) {
-		offsets[i].addr = obj[i + 1].offset;
+		offsets[i].addr = DECANONICAL(obj[i + 1].offset);
 		offsets[i].idx = i;
 		gem_close(fd, obj[i + 1].handle);
+		intel_allocator_free(ahnd, obj[i + 1].handle);
 	}
+
 	gem_close(fd, obj[0].handle);
+	gem_close(fd, obj[count + 1].handle);
+	intel_allocator_free(ahnd, obj[0].handle);
+	intel_allocator_free(ahnd, obj[count+1].handle);
 
 	qsort(offsets, count, sizeof(*offsets), cmp);
 	igt_assert(offsets[0].addr <= offsets[count-1].addr);
@@ -412,7 +462,7 @@ ascii85_decode(char *in, uint32_t **out, bool inflate, char **end)
 
 static void many(int fd, int dir, uint64_t size, unsigned int flags)
 {
-	uint64_t ram, gtt;
+	uint64_t ram, gtt, ahnd;
 	unsigned long count, blobs;
 	struct offset *offsets;
 	char *error, *str;
@@ -426,8 +476,9 @@ static void many(int fd, int dir, uint64_t size, unsigned int flags)
 	igt_require(count > 1);
 
 	intel_require_memory(count, size, CHECK_RAM);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
 
-	offsets = __captureN(fd, dir, 0, size, count, flags);
+	offsets = __captureN(fd, dir, 0, size, count, flags, ahnd);
 
 	error = igt_sysfs_get(dir, "error");
 	igt_sysfs_set(dir, "error", "Begone!");
@@ -494,6 +545,7 @@ static void many(int fd, int dir, uint64_t size, unsigned int flags)
 
 	free(error);
 	free(offsets);
+	intel_allocator_close(ahnd);
 }
 
 static void prioinv(int fd, int dir, unsigned ring, const char *name)
@@ -508,10 +560,16 @@ static void prioinv(int fd, int dir, unsigned ring, const char *name)
 		.flags = ring,
 	};
 	int64_t timeout = NSEC_PER_SEC; /* 1s, feeling generous, blame debug */
-	uint64_t ram, gtt, size = 4 << 20;
+	uint64_t ram, gtt, ahnd, size = 4 << 20;
 	unsigned long count;
 	int link[2], dummy;
 
+	intel_allocator_multiprocess_start();
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	obj.offset = intel_allocator_alloc(ahnd, obj.handle, 4096, ALIGNMENT);
+	obj.offset = CANONICAL(obj.offset);
+	obj.flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
 	igt_require(gem_scheduler_enabled(fd));
 	igt_require(igt_params_set(fd, "reset", "%u", -1)); /* engine resets! */
 	igt_require(gem_gpu_reset_type(fd) > 1);
@@ -540,7 +598,13 @@ static void prioinv(int fd, int dir, unsigned ring, const char *name)
 		fd = gem_reopen_driver(fd);
 		igt_debug("Submitting large capture [%ld x %dMiB objects]\n",
 			  count, (int)(size >> 20));
-		free(__captureN(fd, dir, ring, size, count, ASYNC));
+
+		/* Reopen the allocator in the new process. */
+		ahnd = intel_allocator_open(fd, child+1, INTEL_ALLOCATOR_SIMPLE);
+
+		free(__captureN(fd, dir, ring, size, count, ASYNC, ahnd));
+		intel_allocator_close(ahnd);
+
 		write(link[1], &fd, sizeof(fd)); /* wake the parent up */
 		igt_force_gpu_reset(fd);
 		write(link[1], &fd, sizeof(fd)); /* wake the parent up */
@@ -563,19 +627,26 @@ static void prioinv(int fd, int dir, unsigned ring, const char *name)
 	close(link[1]);
 
 	gem_quiescent_gpu(fd);
+	intel_allocator_free(ahnd, obj.handle);
+	intel_allocator_close(ahnd);
+	intel_allocator_multiprocess_stop();
 }
 
 static void userptr(int fd, int dir)
 {
 	uint32_t handle;
+	uint64_t ahnd;
 	void *ptr;
 
 	igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
 	igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
 
-	__capture1(fd, dir, 0, handle);
+	__capture1(fd, dir, 0, ahnd, handle, 4096);
 
 	gem_close(fd, handle);
+	intel_allocator_free(ahnd, handle);
+	intel_allocator_close(ahnd);
 	free(ptr);
 }
 
-- 
2.25.1


