[igt-dev] [PATCH v13 8/9] tests: perf_pmu: use the flag value embedded in intel_execution_engines2

Andi Shyti andi.shyti at intel.com
Tue Mar 19 23:44:40 UTC 2019


Now that the engines in 'intel_execution_engines2' carry a flags value,
set up by the for_each iterator, use it directly instead of looking the
execbuf flags up through e2ring()/gem_class_instance_to_eb_flags().

Signed-off-by: Andi Shyti <andi.shyti at intel.com>
---
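Note for reviewers: a minimal sketch of the usage pattern the tests move
to with this change. The iterator spelling and the cleanup helper below
are illustrative assumptions, not taken verbatim from this patch; only
the e->flags usage mirrors the diff.

	const struct intel_execution_engine2 *e;

	/* Iterate engines; the iterator is assumed to fill in e->flags. */
	__for_each_engine_class_instance(gem_fd, e) {
		/*
		 * Previously: gem_class_instance_to_eb_flags(gem_fd,
		 * e->class, e->instance) via the local e2ring() helper.
		 */
		igt_spin_t *spin = igt_spin_batch_new(gem_fd,
						      .engine = e->flags);

		igt_spin_batch_end(spin);
		gem_sync(gem_fd, spin->handle);
		igt_spin_batch_free(gem_fd, spin);
	}
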
 tests/perf_pmu.c | 33 ++++++++++++++-------------------
 1 file changed, 14 insertions(+), 19 deletions(-)

diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 79adeb2c8f3f..a6558bda9d7b 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -158,11 +158,6 @@ static unsigned int measured_usleep(unsigned int usec)
 	return igt_nsec_elapsed(&ts);
 }
 
-static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
-{
-	return gem_class_instance_to_eb_flags(gem_fd, e->class, e->instance);
-}
-
 #define TEST_BUSY (1)
 #define FLAG_SYNC (2)
 #define TEST_TRAILING_IDLE (4)
@@ -267,7 +262,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
 	if (flags & TEST_BUSY)
-		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+		spin = spin_sync(gem_fd, 0, e->flags);
 	else
 		spin = NULL;
 
@@ -316,7 +311,7 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	sleep(2);
 
-	spin = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = __spin_sync(gem_fd, 0, e->flags);
 
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
@@ -359,11 +354,11 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 * re-submission in execlists mode. Make sure busyness is correctly
 	 * reported with the engine busy, and after the engine went idle.
 	 */
-	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin[0] = __spin_sync(gem_fd, 0, e->flags);
 	usleep(500e3);
 	spin[1] = __igt_spin_batch_new(gem_fd,
 				       .ctx = ctx,
-				       .engine = e2ring(gem_fd, e));
+				       .engine = e->flags);
 
 	/*
 	 * Open PMU as fast as possible after the second spin batch in attempt
@@ -445,7 +440,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 
 	igt_assert_eq(i, num_engines);
 
-	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = spin_sync(gem_fd, 0, e->flags);
 	pmu_read_multi(fd[0], num_engines, tval[0]);
 	slept = measured_usleep(batch_duration_ns / 1000);
 	if (flags & TEST_TRAILING_IDLE)
@@ -478,7 +473,7 @@ __submit_spin_batch(int gem_fd, igt_spin_t *spin,
 	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
 
 	eb.flags &= ~(0x3f | I915_EXEC_BSD_MASK);
-	eb.flags |= e2ring(gem_fd, e) | I915_EXEC_NO_RELOC;
+	eb.flags |= e->flags | I915_EXEC_NO_RELOC;
 	eb.batch_start_offset += offset;
 
 	gem_execbuf(gem_fd, &eb);
@@ -503,7 +498,7 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 		else if (spin)
 			__submit_spin_batch(gem_fd, spin, e_, 64);
 		else
-			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e_));
+			spin = __spin_poll(gem_fd, 0, e_->flags);
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
 	}
@@ -558,7 +553,7 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
 		if (spin)
 			__submit_spin_batch(gem_fd, spin, e, 64);
 		else
-			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e));
+			spin = __spin_poll(gem_fd, 0, e->flags);
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
 	}
@@ -602,7 +597,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
 
 	if (flags & TEST_BUSY)
-		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+		spin = spin_sync(gem_fd, 0, e->flags);
 	else
 		spin = NULL;
 
@@ -689,7 +684,7 @@ sema_wait(int gem_fd, const struct intel_execution_engine2 *e,
 
 	eb.buffer_count = 2;
 	eb.buffers_ptr = to_user_pointer(obj);
-	eb.flags = e2ring(gem_fd, e);
+	eb.flags = e->flags;
 
 	/**
 	 * Start the semaphore wait PMU and after some known time let the above
@@ -845,7 +840,7 @@ event_wait(int gem_fd, const struct intel_execution_engine2 *e)
 
 	eb.buffer_count = 1;
 	eb.buffers_ptr = to_user_pointer(&obj);
-	eb.flags = e2ring(gem_fd, e) | I915_EXEC_SECURE;
+	eb.flags = e->flags | I915_EXEC_SECURE;
 
 	for_each_pipe_with_valid_output(&data.display, p, output) {
 		struct igt_helper_process waiter = { };
@@ -936,7 +931,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	fd[1] = open_pmu(config);
 
-	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = spin_sync(gem_fd, 0, e->flags);
 
 	val[0] = val[1] = __pmu_read_single(fd[0], &ts[0]);
 	slept[1] = measured_usleep(batch_duration_ns / 1000);
@@ -1465,7 +1460,7 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
 
 	eb.buffer_count = 1;
 	eb.buffers_ptr = to_user_pointer(&obj);
-	eb.flags = e2ring(gem_fd, e);
+	eb.flags = e->flags;
 
 	/*
 	 * This test is probabilistic so run in a few times to increase the
@@ -1570,7 +1565,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 		igt_spin_t *spin;
 
 		/* Allocate our spin batch and idle it. */
-		spin = igt_spin_batch_new(gem_fd, .engine = e2ring(gem_fd, e));
+		spin = igt_spin_batch_new(gem_fd, .engine = e->flags);
 		igt_spin_batch_end(spin);
 		gem_sync(gem_fd, spin->handle);
 
-- 
2.20.1


