[Intel-gfx] [PATCH i-g-t 24/25] i915/gem_exec_balancer: Exercise bonded pairs

Chris Wilson <chris@chris-wilson.co.uk>
Thu Mar 14 14:19:38 UTC 2019


The submit-fence + load-balancing APIs allow us to execute a named
pair of engines in parallel: by submitting a request to one engine,
we can use the generated submit-fence to submit a second request to
another engine and have it execute at the same time. Furthermore, by
specifying bonded pairs, we can direct the virtual engine to use a
particular engine in parallel to the first request.
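
For reference, a rough sketch of the submission flow being exercised
(illustrative only; batch/buffer setup is elided, and master_ctx,
bonded_ctx, obj and engine_idx are placeholder names for state set up
as in the new subtest below):

	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = to_user_pointer(&obj),
		.buffer_count = 1,
		.rsvd1 = master_ctx, /* request A on the chosen master engine */
		.flags = engine_idx | I915_EXEC_FENCE_OUT,
	};
	gem_execbuf_wr(i915, &eb); /* out-fence returned in upper 32b of rsvd2 */

	eb.rsvd1 = bonded_ctx;     /* context whose engine[0] is the virtual
				    * engine carrying the bond extension */
	eb.rsvd2 = eb.rsvd2 >> 32; /* A's fence becomes B's submit fence */
	eb.flags = 0 | I915_EXEC_FENCE_SUBMIT;
	gem_execbuf(i915, &eb);    /* B is submitted alongside A and, via the
				    * bond, runs on the paired sibling engine */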

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/i915/gem_exec_balancer.c | 234 +++++++++++++++++++++++++++++++--
 1 file changed, 225 insertions(+), 9 deletions(-)

diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 8fe6a464d..3e4bd643c 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -102,12 +102,41 @@ list_engines(int i915, uint32_t class_mask, unsigned int *out)
 	return engines;
 }
 
+static int __set_engines(int i915, uint32_t ctx,
+			 const struct class_instance *ci,
+			 unsigned int count)
+{
+	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, count);
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_ENGINES,
+		.size = sizeof(engines),
+		.value = to_user_pointer(&engines)
+	};
+
+	engines.extensions = 0;
+	memcpy(engines.class_instance, ci, sizeof(engines.class_instance));
+
+	return __gem_context_set_param(i915, &p);
+}
+
+static void set_engines(int i915, uint32_t ctx,
+			const struct class_instance *ci,
+			unsigned int count)
+{
+	igt_assert_eq(__set_engines(i915, ctx, ci, count), 0);
+}
+
 static int __set_load_balancer(int i915, uint32_t ctx,
 			       const struct class_instance *ci,
-			       unsigned int count)
+			       unsigned int count,
+			       void *ext)
 {
 	struct i915_context_engines_load_balance balancer = {
-		{ .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
+		{
+			.next_extension = to_user_pointer(ext),
+			.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE
+		},
 		.engines_mask = ~0ull,
 	};
 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, count + 1);
@@ -128,9 +157,10 @@ static int __set_load_balancer(int i915, uint32_t ctx,
 
 static void set_load_balancer(int i915, uint32_t ctx,
 			      const struct class_instance *ci,
-			      unsigned int count)
+			      unsigned int count,
+			      void *ext)
 {
-	igt_assert_eq(__set_load_balancer(i915, ctx, ci, count), 0);
+	igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
 }
 
 static uint32_t load_balancer_create(int i915,
@@ -140,7 +170,7 @@ static uint32_t load_balancer_create(int i915,
 	uint32_t ctx;
 
 	ctx = gem_context_create(i915);
-	set_load_balancer(i915, ctx, ci, count);
+	set_load_balancer(i915, ctx, ci, count, NULL);
 
 	return ctx;
 }
@@ -275,6 +305,74 @@ static void invalid_balancer(int i915)
 	free(ci);
 }
 
+static void invalid_bonds(int i915)
+{
+	struct i915_context_engines_bond bonds[16];
+	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1);
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = gem_context_create(i915),
+		.param = I915_CONTEXT_PARAM_ENGINES,
+		.value = to_user_pointer(&engines),
+		.size = sizeof(engines),
+	};
+	uint32_t handle;
+	void *ptr;
+
+	memset(&engines, 0, sizeof(engines));
+	gem_context_set_param(i915, &p);
+
+	memset(bonds, 0, sizeof(bonds));
+	for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+		bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+		bonds[n].base.next_extension =
+			n ? to_user_pointer(&bonds[n - 1]) : 0;
+		bonds[n].sibling_mask = 0x1;
+	}
+	engines.extensions = to_user_pointer(&bonds);
+	gem_context_set_param(i915, &p);
+
+	bonds[0].base.next_extension = -1ull;
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	bonds[0].base.next_extension = to_user_pointer(&bonds[0]);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+
+	engines.extensions = to_user_pointer(&bonds[1]);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+	bonds[0].base.next_extension = 0;
+	gem_context_set_param(i915, &p);
+
+	handle = gem_create(i915, 4096 * 3);
+	ptr = gem_mmap__gtt(i915, handle, 4096 * 3, PROT_WRITE);
+	gem_close(i915, handle);
+
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	engines.extensions = to_user_pointer(ptr) + 4096;
+	gem_context_set_param(i915, &p);
+
+	memcpy(ptr, &bonds[0], sizeof(bonds[0]));
+	bonds[0].base.next_extension = to_user_pointer(ptr);
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	gem_context_set_param(i915, &p);
+
+	munmap(ptr, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	bonds[0].base.next_extension = 0;
+	memcpy(ptr + 8192, &bonds[0], sizeof(bonds[0]));
+	bonds[0].base.next_extension = to_user_pointer(ptr) + 8192;
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	gem_context_set_param(i915, &p);
+
+	munmap(ptr + 8192, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	munmap(ptr + 4096, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	gem_context_destroy(i915, p.ctx_id);
+}
+
 static void kick_kthreads(int period_us)
 {
 	sched_yield();
@@ -384,7 +482,7 @@ static void individual(int i915)
 
 		for (int pass = 0; pass < count; pass++) { /* approx. count! */
 			igt_permute_array(ci, count, igt_exchange_int64);
-			set_load_balancer(i915, ctx, ci, count);
+			set_load_balancer(i915, ctx, ci, count, NULL);
 			for (unsigned int n = 0; n < count; n++)
 				check_individual_engine(i915, ctx, ci, n);
 		}
@@ -396,6 +494,115 @@ static void individual(int i915)
 	gem_quiescent_gpu(i915);
 }
 
+static void bonded(int i915, unsigned int flags)
+#define CORK 0x1
+{
+	struct i915_context_engines_bond bonds[16];
+	struct class_instance *master_engines;
+	uint32_t master;
+
+	/*
+	 * I915_CONTEXT_PARAM_ENGINE provides an extension that allows us
+	 * to specify which engine(s) to pair with a parallel (EXEC_SUBMIT)
+	 * request submitted to another engine.
+	 */
+
+	master = gem_queue_create(i915);
+
+	memset(bonds, 0, sizeof(bonds));
+	for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+		bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+		bonds[n].base.next_extension =
+			n ? to_user_pointer(&bonds[n - 1]) : 0;
+		bonds[n].sibling_mask = 1 << n;
+	}
+
+	for (int mask = 0; mask < 32; mask++) {
+		unsigned int count, limit;
+		struct class_instance *siblings;
+		uint32_t ctx;
+		int n;
+
+		siblings = list_engines(i915, 1u << mask, &count);
+		if (!siblings)
+			continue;
+
+		if (count < 2) {
+			free(siblings);
+			continue;
+		}
+
+		igt_debug("Found %d engines of class %d\n", count, mask);
+
+		master_engines = list_engines(i915, ~(1u << mask), &limit);
+		set_engines(i915, master, master_engines, limit);
+
+		limit = min(count, limit);
+		for (n = 0; n < limit; n++) {
+			bonds[n].master_class = master_engines[n].class;
+			bonds[n].master_instance = master_engines[n].instance;
+		}
+
+		ctx = gem_context_clone(i915,
+				       	master, I915_CONTEXT_CLONE_VM,
+					I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+		set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
+
+		for (n = 0; n < limit; n++) {
+			struct drm_i915_gem_execbuffer2 eb;
+			IGT_CORK_HANDLE(cork);
+			igt_spin_t *spin, *plug;
+			double load;
+			int pmu;
+
+			igt_assert(siblings[n].class != master_engines[n].class);
+
+			pmu = perf_i915_open(I915_PMU_ENGINE_BUSY(siblings[n].class,
+								  siblings[n].instance));
+
+			plug = NULL;
+			if (flags & CORK) {
+				plug = __igt_spin_batch_new(i915,
+							    .ctx = master,
+							    .engine = n,
+							    .dependency = igt_cork_plug(&cork, i915));
+			}
+
+			spin = __igt_spin_batch_new(i915,
+						    .ctx = master,
+						    .engine = n,
+						    .flags = IGT_SPIN_FENCE_OUT);
+
+			eb = spin->execbuf;
+			eb.rsvd1 = ctx;
+			eb.rsvd2 = spin->out_fence;
+			eb.flags = I915_EXEC_FENCE_SUBMIT;
+			gem_execbuf(i915, &eb);
+
+			if (plug) {
+				igt_cork_unplug(&cork);
+				igt_spin_batch_free(i915, plug);
+			}
+
+			load = measure_load(pmu, 10000);
+			igt_spin_batch_free(i915, spin);
+
+			close(pmu);
+
+			igt_assert_f(load > 0.90,
+				     "engine %d (class:instance %d:%d) was found to be only %.1f%% busy\n",
+				     n, siblings[n].class, siblings[n].instance,
+				     load*100);
+		}
+
+		gem_context_destroy(i915, ctx);
+		free(master_engines);
+		free(siblings);
+	}
+
+	gem_context_destroy(i915, master);
+}
+
 static int add_pmu(int pmu, const struct class_instance *ci)
 {
 	return perf_i915_open_group(I915_PMU_ENGINE_BUSY(ci->class,
@@ -575,7 +782,7 @@ static void semaphore(int i915)
 		count = ARRAY_SIZE(block);
 
 		for (int i = 0; i < count; i++) {
-			set_load_balancer(i915, block[i], ci, count);
+			set_load_balancer(i915, block[i], ci, count, NULL);
 			spin[i] = __igt_spin_batch_new(i915,
 						       .ctx = block[i],
 						       .dependency = scratch);
@@ -586,7 +793,7 @@ static void semaphore(int i915)
 		 * or we let the vip through. If not, we hang.
 		 */
 		vip = gem_context_create(i915);
-		set_load_balancer(i915, vip, ci, count);
+		set_load_balancer(i915, vip, ci, count, NULL);
 		ping(i915, vip, 0);
 		gem_context_destroy(i915, vip);
 
@@ -703,7 +910,7 @@ static bool has_load_balancer(int i915)
 	int err;
 
 	ctx = gem_context_create(i915);
-	err = __set_load_balancer(i915, ctx, &ci, 1);
+	err = __set_load_balancer(i915, ctx, &ci, 1, NULL);
 	gem_context_destroy(i915, ctx);
 
 	return err == 0;
@@ -729,6 +936,9 @@ igt_main
 	igt_subtest("invalid-balancer")
 		invalid_balancer(i915);
 
+	igt_subtest("invalid-bonds")
+		invalid_bonds(i915);
+
 	igt_subtest("individual")
 		individual(i915);
 
@@ -754,6 +964,12 @@ igt_main
 	igt_subtest("smoke")
 		smoketest(i915, 20);
 
+	igt_subtest("bonded-imm")
+		bonded(i915, 0);
+
+	igt_subtest("bonded-cork")
+		bonded(i915, CORK);
+
 	igt_fixture {
 		igt_stop_hang_detector();
 	}
-- 
2.20.1


