[igt-dev] [PATCH i-g-t 1/2] tests/i915/gem_softpin: Exercise eviction with softpinning

Andrzej Turko andrzej.turko at linux.intel.com
Mon Jul 5 08:51:07 UTC 2021


Exercise eviction of many gem objects. The added tests
are analogous to gem_exec_gttfill, but they use softpin
and do not require relocation support.

Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
 tests/i915/gem_softpin.c | 221 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 221 insertions(+)

diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c
index bdb04821d..0af1c4476 100644
--- a/tests/i915/gem_softpin.c
+++ b/tests/i915/gem_softpin.c
@@ -29,6 +29,7 @@
 #include "i915/gem.h"
 #include "i915/gem_create.h"
 #include "igt.h"
+#include "igt_rand.h"
 #include "intel_allocator.h"
 
 #define EXEC_OBJECT_PINNED	(1<<4)
@@ -877,8 +878,212 @@ static void test_allocator_fork(int fd)
 	intel_allocator_multiprocess_stop();
 }
 
+#define BATCH_SIZE (4096<<10) /* 4 MiB per object */
+/* We don't have alignment detection yet, so assume the worst-case scenario. */
+#define BATCH_ALIGNMENT (1 << 21)
+
+struct batch {
+	uint32_t handle;
+	void *ptr;
+};
+
+static void xchg_batch(void *array, unsigned int i, unsigned int j)
+{
+	struct batch *batches = array;
+	struct batch tmp;
+
+	tmp = batches[i];
+	batches[i] = batches[j];
+	batches[j] = tmp;
+}
+
+static void submit(int fd, int gen,
+		   struct drm_i915_gem_execbuffer2 *eb,
+		   struct batch *batches, unsigned int count,
+		   uint64_t ahnd)
+{
+	struct drm_i915_gem_exec_object2 obj;
+	uint32_t batch[16];
+	uint64_t address;
+	unsigned n, j;
+
+	memset(&obj, 0, sizeof(obj));
+	obj.flags = EXEC_OBJECT_PINNED;
+
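+	/*
+	 * Softpin each batch at an offset chosen by the allocator.  Once
+	 * the address space is exhausted, release already-allocated offsets
+	 * round-robin so that the kernel has to evict those objects to make
+	 * room for the next execbuf.
+	 */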
+	j = 0;
+	for (unsigned i = 0; i < count; i++) {
+		obj.handle = batches[i].handle;
+		obj.offset = __intel_allocator_alloc(ahnd, obj.handle,
+						     BATCH_SIZE,
+						     BATCH_ALIGNMENT,
+						     ALLOC_STRATEGY_HIGH_TO_LOW);
+		for (; obj.offset == -1; j = (j+1) % count) {
+			if (i != j)
+				intel_allocator_free(ahnd, batches[j].handle);
+			obj.offset = __intel_allocator_alloc(ahnd, obj.handle,
+							     BATCH_SIZE,
+							     BATCH_ALIGNMENT,
+							     ALLOC_STRATEGY_HIGH_TO_LOW);
+		}
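+		/*
+		 * Build a batch which stores its own softpinned offset into
+		 * the last qword of the buffer; the store's command layout
+		 * varies with gen.
+		 */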
+		address = obj.offset + BATCH_SIZE - eb->batch_start_offset - 8;
+		n = 0;
+		batch[n] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+		if (gen >= 8) {
+			batch[n] |= 1 << 21;
+			batch[n]++;
+			batch[++n] = address;
+			batch[++n] = address >> 32;
+		} else if (gen >= 4) {
+			batch[++n] = 0;
+			batch[++n] = address;
+		} else {
+			batch[n]--;
+			batch[++n] = address;
+		}
+		batch[++n] = obj.offset; /* lower_32_bits(value) */
+		batch[++n] = obj.offset >> 32; /* upper_32_bits(value) / nop */
+		batch[++n] = MI_BATCH_BUFFER_END;
+		eb->buffers_ptr = to_user_pointer(&obj);
+
+		memcpy(batches[i].ptr + eb->batch_start_offset,
+		       batch, sizeof(batch));
+
+		gem_execbuf(fd, eb);
+	}
+	/*
+	 * The kernel does not know the GPU writes these buffers (we never
+	 * set EXEC_OBJECT_WRITE), so sync explicitly before reading back.
+	 */
+	gem_sync(fd, obj.handle);
+}
+
+static void test_evict_many(int fd, unsigned ring, int timeout)
+{
+	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_execbuffer2 execbuf;
+	unsigned engines[I915_EXEC_RING_MASK + 1];
+	volatile uint64_t *shared;
+	struct timespec tv = {};
+	struct batch *batches;
+	unsigned nengine;
+	unsigned count;
+	uint64_t size, ahnd;
+
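+	/* A shared page lets the forked children report their cycle counts. */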
+	shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+	igt_assert(shared != MAP_FAILED);
+
+	nengine = 0;
+	if (ring == ALL_ENGINES) {
+		struct intel_execution_engine2 *e;
+
+		__for_each_physical_engine(fd, e) {
+			if (!gem_class_can_store_dword(fd, e->class))
+				continue;
+
+			engines[nengine++] = e->flags;
+		}
+	} else {
+		engines[nengine++] = ring;
+	}
+	igt_require(nengine);
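+	/* Every child needs its own 64-byte slot within each batch. */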
+	igt_assert(nengine * 64 <= BATCH_SIZE);
+
+	size = gem_aperture_size(fd);
+	if (!gem_uses_full_ppgtt(fd))
+		size /= 2;
+	if (size > 1ull<<32) /* Limit to 4GiB as we do not use allow-48b */
+		size = 1ull << 32;
+	igt_require(size < (1ull<<32) * BATCH_SIZE);
+
+	count = size / BATCH_SIZE + 1;
+	igt_debug("Using %'u batches to fill %'llu bytes of aperture on %u engines\n",
+		  count, (unsigned long long)size, nengine);
+
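+	/*
+	 * All children allocate offsets from the same address space, so the
+	 * allocator has to run in multiprocess mode.
+	 */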
+	intel_allocator_multiprocess_start();
+	/* Avoid allocating on the last page */
+	ahnd = intel_allocator_open_full(fd, 0, 0, size - 4096,
+					 INTEL_ALLOCATOR_SIMPLE,
+					 ALLOC_STRATEGY_HIGH_TO_LOW);
+
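+	/* Skip unless there is enough RAM to back every batch. */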
+	intel_require_memory(count, BATCH_SIZE, CHECK_RAM);
+	intel_detect_and_clear_missed_interrupts(fd);
+
+	igt_nsec_elapsed(&tv);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffer_count = 1;
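+	/* Pre-gen6, MI_STORE_DWORD_IMM to the GGTT needs a secure batch. */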
+	if (gen < 6)
+		execbuf.flags |= I915_EXEC_SECURE;
+
+	batches = calloc(count, sizeof(*batches));
+	igt_assert(batches);
+	for (unsigned i = 0; i < count; i++) {
+		batches[i].handle = gem_create(fd, BATCH_SIZE);
+		batches[i].ptr =
+			gem_mmap__device_coherent(fd, batches[i].handle,
+						  0, BATCH_SIZE, PROT_WRITE);
+	}
+
+	/* Flush all memory before we start the timer */
+	submit(fd, gen, &execbuf, batches, count, ahnd);
+
+	igt_info("Setup %u batches in %.2fms\n",
+		 count, 1e-6 * igt_nsec_elapsed(&tv));
+
+	igt_fork(child, nengine) {
+		uint64_t dst, src, dst_offset, src_offset;
+		uint64_t cycles = 0;
+
+		hars_petruska_f54_1_random_perturb(child);
+		igt_permute_array(batches, count, xchg_batch);
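+		/*
+		 * Each child claims a distinct 64B slot in every batch so
+		 * the engines can run concurrently without trampling each
+		 * other's stores.
+		 */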
+		execbuf.batch_start_offset = child * 64;
+		execbuf.flags |= engines[child];
+
+		dst_offset = BATCH_SIZE - child*64 - 8;
+		/*
+		 * The store's value immediate is dword 3 of the batch on
+		 * gen4+ (header, addr-lo/0, addr-hi/addr, value) and dword 2
+		 * on gen2/3 (header, addr, value).
+		 */
+		if (gen >= 4)
+			src_offset = child*64 + 3*sizeof(uint32_t);
+		else
+			src_offset = child*64 + 2*sizeof(uint32_t);
+
+		/* We need to open the allocator again in the new process */
+		ahnd = intel_allocator_open_full(fd, 0, 0, size - 4096,
+						 INTEL_ALLOCATOR_SIMPLE,
+						 ALLOC_STRATEGY_HIGH_TO_LOW);
+
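+		/* Resubmit all batches, then check each stored its own offset. */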
+		igt_until_timeout(timeout) {
+			submit(fd, gen, &execbuf, batches, count, ahnd);
+			for (unsigned i = 0; i < count; i++) {
+				dst = *(uint64_t *)(batches[i].ptr + dst_offset);
+				src = *(uint64_t *)(batches[i].ptr + src_offset);
+				igt_assert_eq_u64(dst, src);
+			}
+			cycles++;
+		}
+		shared[child] = cycles;
+		igt_info("engine[%d]: %llu cycles\n", child, (long long)cycles);
+		intel_allocator_close(ahnd);
+	}
+	igt_waitchildren();
+
+	intel_allocator_close(ahnd);
+	intel_allocator_multiprocess_stop();
+
+	for (unsigned i = 0; i < count; i++) {
+		munmap(batches[i].ptr, BATCH_SIZE);
+		gem_close(fd, batches[i].handle);
+	}
+	free(batches);
+
+	shared[nengine] = 0;
+	for (unsigned i = 0; i < nengine; i++)
+		shared[nengine] += shared[i];
+	igt_info("Total: %llu cycles\n", (long long)shared[nengine]);
+
+	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
+}
+
 igt_main
 {
+	const struct intel_execution_engine2 *e;
 	int fd = -1;
 
 	igt_fixture {
@@ -946,6 +1151,22 @@ igt_main
 		igt_subtest_f("evict-snoop%s", signal ? "-interruptible" : "")
 			test_evict_snoop(fd, signal);
 	}
+
+	igt_subtest("evict-many-all-engines")
+		test_evict_many(fd, ALL_ENGINES, 20);
+
+	igt_subtest_with_dynamic("evict-many-engines") {
+		__for_each_physical_engine(fd, e) {
+			if (!gem_class_can_store_dword(fd, e->class))
+				continue;
+
+			igt_dynamic_f("%s", e->name)
+				test_evict_many(fd, e->flags, 20);
+		}
+	}
+
 	igt_subtest("evict-hang")
 		test_evict_hang(fd);
 
-- 
2.25.1