[Intel-gfx] [PATCH i-g-t 14/16] i915/gem_exec_balancer: Exercise bonded pairs

Chris Wilson chris at chris-wilson.co.uk
Wed May 8 10:09:56 UTC 2019


The submit-fence + load_balancing APIs allow us to execute a named
pair of engines in parallel; we do this by submitting a request to one
engine, then using the generated submit-fence to submit a second
request to another engine so that it executes at the same time.
Furthermore, by specifying bonded pairs, we can direct the virtual
engine to use a particular engine in parallel to the first request.
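
As a minimal sketch of the pattern (names such as master, ctx and the
engine index n are illustrative; the bonded engine map is assumed to
have already been installed on ctx):

	igt_spin_t *spin;
	struct drm_i915_gem_execbuffer2 eb;

	/* First request on the master context, exporting its fence. */
	spin = __igt_spin_new(i915,
			      .ctx = master,
			      .engine = n,
			      .flags = IGT_SPIN_FENCE_OUT);

	/* Second request: reuse the same batch, but submit it to the
	 * bonded virtual engine, gated on the first request's fence.
	 */
	eb = spin->execbuf;
	eb.rsvd1 = ctx;
	eb.rsvd2 = spin->out_fence;
	eb.flags = I915_EXEC_FENCE_SUBMIT;
	gem_execbuf(i915, &eb);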

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 tests/i915/gem_exec_balancer.c | 234 +++++++++++++++++++++++++++++++--
 1 file changed, 224 insertions(+), 10 deletions(-)

diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 25195d478..20ad66727 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -98,9 +98,35 @@ list_engines(int i915, uint32_t class_mask, unsigned int *out)
 	return engines;
 }
 
+static int __set_engines(int i915, uint32_t ctx,
+			 const struct i915_engine_class_instance *ci,
+			 unsigned int count)
+{
+	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, count);
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_ENGINES,
+		.size = sizeof(engines),
+		.value = to_user_pointer(&engines)
+	};
+
+	engines.extensions = 0;
+	memcpy(engines.engines, ci, sizeof(engines.engines));
+
+	return __gem_context_set_param(i915, &p);
+}
+
+static void set_engines(int i915, uint32_t ctx,
+			const struct i915_engine_class_instance *ci,
+			unsigned int count)
+{
+	igt_assert_eq(__set_engines(i915, ctx, ci, count), 0);
+}
+
 static int __set_load_balancer(int i915, uint32_t ctx,
 			       const struct i915_engine_class_instance *ci,
-			       unsigned int count)
+			       unsigned int count,
+			       void *ext)
 {
 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, count);
 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1 + count);
@@ -113,6 +139,7 @@ static int __set_load_balancer(int i915, uint32_t ctx,
 
 	memset(&balancer, 0, sizeof(balancer));
 	balancer.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
+	balancer.base.next_extension = to_user_pointer(ext);
 
 	igt_assert(count);
 	balancer.num_siblings = count;
@@ -131,9 +158,10 @@ static int __set_load_balancer(int i915, uint32_t ctx,
 
 static void set_load_balancer(int i915, uint32_t ctx,
 			      const struct i915_engine_class_instance *ci,
-			      unsigned int count)
+			      unsigned int count,
+			      void *ext)
 {
-	igt_assert_eq(__set_load_balancer(i915, ctx, ci, count), 0);
+	igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
 }
 
 static uint32_t load_balancer_create(int i915,
@@ -143,7 +171,7 @@ static uint32_t load_balancer_create(int i915,
 	uint32_t ctx;
 
 	ctx = gem_context_create(i915);
-	set_load_balancer(i915, ctx, ci, count);
+	set_load_balancer(i915, ctx, ci, count, NULL);
 
 	return ctx;
 }
@@ -288,6 +316,74 @@ static void invalid_balancer(int i915)
 	}
 }
 
+static void invalid_bonds(int i915)
+{
+	I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
+	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1);
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = gem_context_create(i915),
+		.param = I915_CONTEXT_PARAM_ENGINES,
+		.value = to_user_pointer(&engines),
+		.size = sizeof(engines),
+	};
+	uint32_t handle;
+	void *ptr;
+
+	memset(&engines, 0, sizeof(engines));
+	gem_context_set_param(i915, &p);
+
+	memset(bonds, 0, sizeof(bonds));
+	for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+		bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+		bonds[n].base.next_extension =
+			n ? to_user_pointer(&bonds[n - 1]) : 0;
+		bonds[n].num_bonds = 1;
+	}
+	engines.extensions = to_user_pointer(&bonds);
+	gem_context_set_param(i915, &p);
+
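+	/* An unreadable next_extension pointer must be rejected with EFAULT. */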
+	bonds[0].base.next_extension = -1ull;
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
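+	/* A circular (and so overlong) extension chain must be rejected with E2BIG. */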
+	bonds[0].base.next_extension = to_user_pointer(&bonds[0]);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+
+	engines.extensions = to_user_pointer(&bonds[1]);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -E2BIG);
+	bonds[0].base.next_extension = 0;
+	gem_context_set_param(i915, &p);
+
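+	/*
+	 * Place the extension chain inside a GTT mmap; once any page
+	 * holding a link is unmapped, the chain must be rejected with EFAULT.
+	 */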
+	handle = gem_create(i915, 4096 * 3);
+	ptr = gem_mmap__gtt(i915, handle, 4096 * 3, PROT_WRITE);
+	gem_close(i915, handle);
+
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	engines.extensions = to_user_pointer(ptr) + 4096;
+	gem_context_set_param(i915, &p);
+
+	memcpy(ptr, &bonds[0], sizeof(bonds[0]));
+	bonds[0].base.next_extension = to_user_pointer(ptr);
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	gem_context_set_param(i915, &p);
+
+	munmap(ptr, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	bonds[0].base.next_extension = 0;
+	memcpy(ptr + 8192, &bonds[0], sizeof(bonds[0]));
+	bonds[0].base.next_extension = to_user_pointer(ptr) + 8192;
+	memcpy(ptr + 4096, &bonds[0], sizeof(bonds[0]));
+	gem_context_set_param(i915, &p);
+
+	munmap(ptr + 8192, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	munmap(ptr + 4096, 4096);
+	igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+
+	gem_context_destroy(i915, p.ctx_id);
+}
+
 static void kick_kthreads(int period_us)
 {
 	sched_yield();
@@ -397,7 +493,7 @@ static void individual(int i915)
 
 		for (int pass = 0; pass < count; pass++) { /* approx. count! */
 			igt_permute_array(ci, count, igt_exchange_int64);
-			set_load_balancer(i915, ctx, ci, count);
+			set_load_balancer(i915, ctx, ci, count, NULL);
 			for (unsigned int n = 0; n < count; n++)
 				check_individual_engine(i915, ctx, ci, n);
 		}
@@ -409,6 +505,115 @@ static void individual(int i915)
 	gem_quiescent_gpu(i915);
 }
 
+static void bonded(int i915, unsigned int flags)
+#define CORK 0x1
+{
+	I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
+	struct i915_engine_class_instance *master_engines;
+	uint32_t master;
+
+	/*
+	 * I915_CONTEXT_PARAM_ENGINES provides an extension that allows us
+	 * to specify which engine(s) to pair with a parallel
+	 * (I915_EXEC_FENCE_SUBMIT) request submitted to another engine.
+	 */
+
+	master = gem_queue_create(i915);
+
+	memset(bonds, 0, sizeof(bonds));
+	for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
+		bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+		bonds[n].base.next_extension =
+			n ? to_user_pointer(&bonds[n - 1]) : 0;
+		bonds[n].num_bonds = 1;
+	}
+
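+	/*
+	 * For each engine class with at least two engines, pair every
+	 * sibling with a master engine taken from a different class.
+	 */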
+	for (int mask = 0; mask < 32; mask++) {
+		unsigned int count, limit;
+		struct i915_engine_class_instance *siblings;
+		uint32_t ctx;
+		int n;
+
+		siblings = list_engines(i915, 1u << mask, &count);
+		if (!siblings)
+			continue;
+
+		if (count < 2) {
+			free(siblings);
+			continue;
+		}
+
+		igt_debug("Found %d engines of class %d\n", count, mask);
+
+		master_engines = list_engines(i915, ~(1u << mask), &limit);
+		set_engines(i915, master, master_engines, limit);
+
+		limit = min(count, limit);
+		for (n = 0; n < limit; n++) {
+			bonds[n].master = master_engines[n];
+			bonds[n].engines[0] = siblings[n];
+		}
+
+		ctx = gem_context_clone(i915,
+					master, I915_CONTEXT_CLONE_VM,
+					I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+		set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
+
+		for (n = 0; n < limit; n++) {
+			struct drm_i915_gem_execbuffer2 eb;
+			IGT_CORK_HANDLE(cork);
+			igt_spin_t *spin, *plug;
+			double load;
+			int pmu;
+
+			igt_assert(siblings[n].engine_class != master_engines[n].engine_class);
+
+			pmu = perf_i915_open(I915_PMU_ENGINE_BUSY(siblings[n].engine_class,
+								  siblings[n].engine_instance));
+
+			plug = NULL;
+			if (flags & CORK) {
+				plug = __igt_spin_new(i915,
+						      .ctx = master,
+						      .engine = n,
+						      .dependency = igt_cork_plug(&cork, i915));
+			}
+
+			spin = __igt_spin_new(i915,
+					      .ctx = master,
+					      .engine = n,
+					      .flags = IGT_SPIN_FENCE_OUT);
+
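+			/*
+			 * Second submission: reuse the spinner's batch, but
+			 * run it on the bonded virtual engine, gated on the
+			 * first request's submit-fence.
+			 */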
+			eb = spin->execbuf;
+			eb.rsvd1 = ctx;
+			eb.rsvd2 = spin->out_fence;
+			eb.flags = I915_EXEC_FENCE_SUBMIT;
+			gem_execbuf(i915, &eb);
+
+			if (plug) {
+				igt_cork_unplug(&cork);
+				igt_spin_free(i915, plug);
+			}
+
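+			/*
+			 * With the bond installed, the chosen sibling should
+			 * be (almost) fully busy while the master spins.
+			 */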
+			load = measure_load(pmu, 10000);
+			igt_spin_free(i915, spin);
+
+			close(pmu);
+
+			igt_assert_f(load > 0.90,
+				     "engine %d (class:instance %d:%d) was found to be only %.1f%% busy\n",
+				     n, siblings[n].engine_class, siblings[n].engine_instance,
+				     load*100);
+		}
+
+		gem_context_destroy(i915, ctx);
+		free(master_engines);
+		free(siblings);
+	}
+
+	gem_context_destroy(i915, master);
+}
+
 static void indicies(int i915)
 {
 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
@@ -856,10 +1061,10 @@ static void semaphore(int i915)
 		count = ARRAY_SIZE(block);
 
 		for (int i = 0; i < count; i++) {
-			set_load_balancer(i915, block[i], ci, count);
+			set_load_balancer(i915, block[i], ci, count, NULL);
 			spin[i] = __igt_spin_new(i915,
-						       .ctx = block[i],
-						       .dependency = scratch);
+						 .ctx = block[i],
+						 .dependency = scratch);
 		}
 
 		/*
@@ -867,7 +1072,7 @@ static void semaphore(int i915)
 		 * or we let the vip through. If not, we hang.
 		 */
 		vip = gem_context_create(i915);
-		set_load_balancer(i915, vip, ci, count);
+		set_load_balancer(i915, vip, ci, count, NULL);
 		ping(i915, vip, 0);
 		gem_context_destroy(i915, vip);
 
@@ -984,7 +1189,7 @@ static bool has_load_balancer(int i915)
 	int err;
 
 	ctx = gem_context_create(i915);
-	err = __set_load_balancer(i915, ctx, &ci, 1);
+	err = __set_load_balancer(i915, ctx, &ci, 1, NULL);
 	gem_context_destroy(i915, ctx);
 
 	return err == 0;
@@ -1010,6 +1215,9 @@ igt_main
 	igt_subtest("invalid-balancer")
 		invalid_balancer(i915);
 
+	igt_subtest("invalid-bonds")
+		invalid_bonds(i915);
+
 	igt_subtest("individual")
 		individual(i915);
 
@@ -1044,6 +1252,12 @@ igt_main
 	igt_subtest("smoke")
 		smoketest(i915, 20);
 
+	igt_subtest("bonded-imm")
+		bonded(i915, 0);
+
+	igt_subtest("bonded-cork")
+		bonded(i915, CORK);
+
 	igt_fixture {
 		igt_stop_hang_detector();
 	}
-- 
2.20.1


