[igt-dev] [PATCH i-g-t 3/3] tests/gem_exec_alignment.c: Add priority inversion

Dominik Grzegorzek dominik.grzegorzek at intel.com
Thu Mar 19 07:38:19 UTC 2020


Add a priority inversion test case which detects whether a low
priority client can delay a high priority client. The low priority
client fragments its address space and submits a large execbuf, while
a forked child times a single high priority batch and checks that it
completes promptly.

Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
 tests/i915/gem_exec_alignment.c | 194 +++++++++++++++++++++++++++++++-
 1 file changed, 193 insertions(+), 1 deletion(-)

diff --git a/tests/i915/gem_exec_alignment.c b/tests/i915/gem_exec_alignment.c
index 44ee137a..a5f50d2f 100644
--- a/tests/i915/gem_exec_alignment.c
+++ b/tests/i915/gem_exec_alignment.c
@@ -38,7 +38,9 @@
 #include <sys/time.h>
 #include <sys/ioctl.h>
 #include <signal.h>
+#include <sched.h>
 #include "drm.h"
+#include <semaphore.h>
 
 #define MANY_TIMEOUT 10
 
@@ -108,6 +110,195 @@ static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 	return err;
 }
 
+static void prio_inversion(int fd)
+{
+	uint32_t bbe = MI_BATCH_BUFFER_END;
+	struct drm_i915_gem_exec_object2 *execobj, *execobj_hp;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct timespec begin, now, timeout;
+	uint64_t gtt_size, ram_size, flags;
+	uint64_t alignment, max_alignment, count, max_count, curr_count, i;
+	uint32_t lp, hp, zero = 0, *res;
+	volatile uint32_t *result;
+	double time, time_lp;
+	sem_t *sem;
+
+	/*
+	 * First the low priority client creates a mass of holes in its
+	 * own address space, then launches a batch with oodles of objects
+	 * whose alignment doesn't match the previous one. While the LP
+	 * execbuf is in progress we start a high priority task and expect
+	 * it not to be blocked.
+	 */
+
+	igt_require(gem_uses_full_ppgtt(fd));
+	igt_require(gem_scheduler_enabled(fd));
+	igt_require(gem_scheduler_has_ctx_priority(fd));
+
+	/* Use two pointers to avoid warnings about discarding volatile. */
+	result = res = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+	igt_assert(res != MAP_FAILED);
+	memset(res, 0, 4096);
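+	/* result[0]: child has started, result[1]: HP execbuf met its time budget. */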
+
+	memset(&execbuf, 0, sizeof(execbuf));
+
+	/* Calc number of objects */
+	gtt_size = gem_aperture_size(fd); /* We have to *share* our GTT! */
+	ram_size = intel_get_total_ram_mb();
+	ram_size *= 1024 * 1024;
+	count = ram_size / 4096;
+
+	if (count > file_max()) /* vfs cap */
+		count = file_max();
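+	/*
+	 * Pick the largest power-of-two alignment (minimum 4096) that still
+	 * lets this many objects fit in the GTT, then size the working set
+	 * to cover roughly half of the GTT at that alignment.
+	 */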
+	max_alignment = find_last_bit(gtt_size / count);
+	if (max_alignment <= 13)
+		max_alignment = 4096;
+	else
+		max_alignment = 1ull << (max_alignment - 1);
+	max_count = count = gtt_size / max_alignment / 2;
+
+	flags = (gtt_size-1) >> 32 ? 1<<3 : 0; /* EXEC_OBJECT_SUPPORTS_48B_ADDRESS */
+
+	execobj = calloc(count + 1, sizeof(*execobj));
+	igt_assert(execobj);
+
+	execobj_hp = calloc(1, sizeof(*execobj_hp));
+	igt_assert(execobj_hp);
+
+	/* Fill the low-priority address space */
+	for (i = 0; i < count; i++) {
+		execobj[i].handle = gem_create(fd, 4096);
+		gem_write(fd, execobj[i].handle, 0, &zero, sizeof(zero));
+		execobj[i].flags = flags;
+		execobj[i].alignment = 4096;
+	}
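+	/* The last object is the batch buffer (MI_BATCH_BUFFER_END written below). */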
+	execobj[i].handle = gem_create(fd, 4096);
+	execobj[i].alignment = 4096;
+	execobj[i].flags = flags;
+
+	gem_write(fd, execobj[i].handle, 0, &bbe, sizeof(bbe));
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(execobj + count);
+	execbuf.buffer_count = 1;
+
+	/* Warm up the high priority context; the LP one is exercised below */
+	hp = execbuf.rsvd1 = gem_context_create(fd);
+	gem_context_set_priority(fd, execbuf.rsvd1,
+				 I915_CONTEXT_MAX_USER_PRIORITY);
+	gem_execbuf(fd, &execbuf);
+	gem_sync(fd, execobj[i].handle);
+
+	/*
+	 * Create a mess in the address space with a slow fragmentation loop
+	 * that consumes up to 5s. The LP task reuses the same objects with
+	 * the next alignment up.
+	 */
+
+	lp = execbuf.rsvd1 = gem_context_create(fd);
+	gem_context_set_priority(fd, execbuf.rsvd1,
+				 I915_CONTEXT_MIN_USER_PRIORITY);
+
+	memset(&timeout, 0, sizeof(struct timespec));
+	for (alignment = 8192; alignment < gtt_size && igt_seconds_elapsed(&timeout) < 5; alignment <<= 1) {
+		if (alignment > max_alignment) {
+			uint64_t factor = alignment / max_alignment;
+			max_count = 2 * count / factor;
+		}
+
+		for (i = 0; i < count; i++)
+			execobj[i].alignment = alignment;
+
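+		/* Double the working set each pass until the 5s budget runs out. */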
+		for (curr_count = 1; curr_count < max_count; curr_count <<= 1) {
+			execbuf.buffer_count = curr_count;
+			execbuf.buffers_ptr =
+			    to_user_pointer(execobj + count - execbuf.buffer_count + 1);
+
+			gem_execbuf(fd, &execbuf);
+			gem_sync(fd, execobj[count].handle);
+			if (igt_seconds_elapsed(&timeout) >= 5)
+				break;
+		}
+	}
+	igt_debug("Low priority address space fragmentation done.\n");
+
+	igt_debug("Waiting for the GPU to become idle\n");
+	gem_quiescent_gpu(fd);
+	igt_debug("GPU idle\n");
+
+	for (i = 0; i <= count; i++)
+		execobj[i].alignment = alignment;
+
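+	/*
+	 * Handshake: the parent takes the semaphore now and posts it just
+	 * before submitting the LP execbuf, which releases the child to
+	 * time its HP submission against the in-flight LP workload.
+	 */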
+	sem = sem_open("/test_semaphore", O_CREAT|O_EXCL, 0, 1);
+	igt_assert(sem != SEM_FAILED);
+	sem_unlink("/test_semaphore");
+	sem_wait(sem);
+
+	igt_fork(child, 1) {
+		igt_debug("[H] In fork\n");
+		memset(&execbuf, 0, sizeof(execbuf));
+		execobj_hp[0].handle = gem_create(fd, 4096);
+		igt_debug("[H] After create\n");
+
+		gem_write(fd, execobj_hp[0].handle, 0, &bbe, sizeof(bbe));
+		result[0] = hp != execbuf.rsvd1;
+
+		execbuf.rsvd1 = hp;
+		execbuf.buffer_count = 1;
+		execbuf.buffers_ptr =
+		    to_user_pointer(execobj_hp);
+
+		/* Block until the parent signals that the LP execbuf is about to be submitted. */
+		sem_wait(sem);
+		sched_yield();
+		sem_post(sem);
+
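+		/* Let the parent's LP execbuf get underway before timing ours. */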
+		usleep(50000);
+		clock_gettime(CLOCK_MONOTONIC, &begin);
+		igt_debug("[H] HP child executing\n");
+		gem_execbuf(fd, &execbuf);
+		igt_debug("[H] HP exec submitted\n");
+		gem_sync(fd, execobj_hp[0].handle);
+		clock_gettime(CLOCK_MONOTONIC, &now);
+		igt_debug("[H] HP sync\n");
+
+		time = igt_time_elapsed(&begin, &now);
+		igt_debug("[H] HP exec performed in %.6fs\n", time);
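+		/* Pass only if the HP batch completed in under 1ms. */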
+		result[1] = time < 0.001;
+		gem_close(fd, execobj_hp[0].handle);
+	}
+
+	/* Relinquish the CPU to let the child set up its HP batch */
+	sleep(1);
+	igt_assert_f(result[0], "HP child did not start\n");
+
+	execbuf.rsvd1 = lp;
+	execbuf.buffer_count = curr_count;
+	execbuf.buffers_ptr =
+	    to_user_pointer(execobj + count - execbuf.buffer_count + 1);
+
+	igt_debug("[L] LP parent executing\n");
+	clock_gettime(CLOCK_MONOTONIC, &begin);
+
+	alignment_set_timeout(0, 100000);
+	sem_post(sem);
+	gem_execbuf(fd, &execbuf);
+	gem_sync(fd, execobj[count].handle);
+	alignment_reset_timeout();
+
+	clock_gettime(CLOCK_MONOTONIC, &now);
+	time_lp = igt_time_elapsed(&begin, &now);
+	igt_debug("[L] LP exec performed in %.6fs\n", time_lp);
+
+	igt_waitchildren();
+	igt_assert_f(result[1], "HP child unable to complete within 1ms\n");
+
+	gem_context_destroy(fd, lp);
+	gem_context_destroy(fd, hp);
+
+	for (i = 0; i <= count; i++)
+		gem_close(fd, execobj[i].handle);
+
+	munmap(res, 4096);
+}
+
 static void many(int fd)
 {
 	uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -267,5 +458,6 @@ igt_main
 		single(fd);
 	igt_subtest("many")
 		many(fd);
-
+	igt_subtest("pi")
+		prio_inversion(fd);
 }
-- 
2.20.1


