[igt-dev] [RFC v2 16/43] tests/i915/gem_eio: use the gem_engine_topology library

Ramalingam C ramalingam.c at intel.com
Fri Jun 21 10:03:18 UTC 2019


Replace the legacy for_each_engine* macros with their equivalents
implemented in the gem_engine_topology library.

Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
---
 tests/i915/gem_eio.c | 63 +++++++++++++++++++++++++++-----------------
 1 file changed, 39 insertions(+), 24 deletions(-)
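
For reference, a minimal before/after sketch of the conversion applied
throughout the test (distilled from the hunks below; the execbuf snippet
and variable names are illustrative only, not part of the patch):

	/* Before: iterate the legacy per-ring execbuf flags. */
	unsigned int ring;

	for_each_engine(fd, ring)
		execbuf.flags = ring | I915_EXEC_FENCE_OUT;

	/*
	 * After: iterate the physical engines enumerated through the
	 * gem_engine_topology library; each engine carries its class,
	 * instance and the execbuf flags needed to address it.
	 */
	const struct intel_execution_engine2 *e;

	__for_each_physical_engine(fd, e)
		execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;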

diff --git a/tests/i915/gem_eio.c b/tests/i915/gem_eio.c
index 5396a04e2367..b8cd46c2b62d 100644
--- a/tests/i915/gem_eio.c
+++ b/tests/i915/gem_eio.c
@@ -170,15 +170,15 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 	return err;
 }
 
-static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, const struct intel_execution_engine2 *e)
 {
 	struct igt_spin_factory opts = {
 		.ctx = ctx,
-		.engine = flags,
+		.engine = e->flags,
 		.flags = IGT_SPIN_FAST,
 	};
 
-	if (gem_can_store_dword(fd, opts.engine))
+	if (gem_class_can_store_dword(fd, e->class))
 		opts.flags |= IGT_SPIN_POLL_RUN;
 
 	return __igt_spin_factory(fd, &opts);
@@ -194,9 +194,9 @@ static void __spin_wait(int fd, igt_spin_t *spin)
 	}
 }
 
-static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, const struct intel_execution_engine2 *e)
 {
-	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+	igt_spin_t *spin = __spin_poll(fd, ctx, e);
 
 	__spin_wait(fd, spin);
 
@@ -315,6 +315,7 @@ static void __test_banned(int fd)
 		.buffer_count = 1,
 	};
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
+	const struct intel_execution_engine2 *e;
 	unsigned long count = 0;
 
 	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
@@ -344,7 +345,12 @@ static void __test_banned(int fd)
 		}
 
 		/* Trigger a reset, making sure we are detected as guilty */
-		hang = spin_sync(fd, 0, 0);
+		__for_each_physical_engine(fd, e)
+			if (e->class == I915_ENGINE_CLASS_RENDER) {
+				hang = spin_sync(fd, 0, e);
+				break;
+			}
+
 		trigger_reset(fd);
 		igt_spin_free(fd, hang);
 
@@ -368,6 +374,7 @@ static void test_banned(int fd)
 static void test_wait(int fd, unsigned int flags, unsigned int wait)
 {
 	igt_spin_t *hang;
+	const struct intel_execution_engine2 *e;
 
 	fd = gem_reopen_driver(fd);
 	igt_require_gem(fd);
@@ -382,7 +389,11 @@ static void test_wait(int fd, unsigned int flags, unsigned int wait)
 	else
 		igt_require(i915_reset_control(true));
 
-	hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
+	__for_each_physical_engine(fd, e)
+		if (e->class == I915_ENGINE_CLASS_RENDER) {
+			hang = spin_sync(fd, 0, e);
+			break;
+		}
 
 	check_wait(fd, hang->handle, wait, NULL);
 
@@ -416,18 +427,18 @@ static void test_suspend(int fd, int state)
 static void test_inflight(int fd, unsigned int wait)
 {
 	int parent_fd = fd;
-	unsigned int engine;
+	const struct intel_execution_engine2 *engine;
 	int fence[64]; /* mostly conservative estimate of ring size */
 	int max;
 
 	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 
-	max = gem_measure_ring_inflight(fd, -1, 0);
+	max = gem_measure_ring_inflight(fd, ALL_ENGINES, 0);
 	igt_require(max > 1);
 	max = min(max - 1, ARRAY_SIZE(fence));
 
-	for_each_engine(parent_fd, engine) {
+	__for_each_physical_engine(parent_fd, engine) {
 		const uint32_t bbe = MI_BATCH_BUFFER_END;
 		struct drm_i915_gem_exec_object2 obj[2];
 		struct drm_i915_gem_execbuffer2 execbuf;
@@ -442,7 +453,8 @@ static void test_inflight(int fd, unsigned int wait)
 		gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
 		gem_quiescent_gpu(fd);
-		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
+		igt_debug("Starting %s on engine '%s'\n",
+			  __func__, engine->name);
 		igt_require(i915_reset_control(false));
 
 		hang = spin_sync(fd, 0, engine);
@@ -451,7 +463,7 @@ static void test_inflight(int fd, unsigned int wait)
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
 		execbuf.buffer_count = 2;
-		execbuf.flags = engine | I915_EXEC_FENCE_OUT;
+		execbuf.flags = engine->flags | I915_EXEC_FENCE_OUT;
 
 		for (unsigned int n = 0; n < max; n++) {
 			gem_execbuf_wr(fd, &execbuf);
@@ -484,7 +496,7 @@ static void test_inflight_suspend(int fd)
 	igt_spin_t *hang;
 	int max;
 
-	max = gem_measure_ring_inflight(fd, -1, 0);
+	max = gem_measure_ring_inflight(fd, ALL_ENGINES, 0);
 	igt_require(max > 1);
 	max = min(max - 1, ARRAY_SIZE(fence));
 
@@ -548,13 +560,13 @@ static uint32_t context_create_safe(int i915)
 static void test_inflight_contexts(int fd, unsigned int wait)
 {
 	int parent_fd = fd;
-	unsigned int engine;
+	const struct intel_execution_engine2 *engine;
 
 	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 	gem_require_contexts(fd);
 
-	for_each_engine(parent_fd, engine) {
+	__for_each_physical_engine(parent_fd, engine) {
 		const uint32_t bbe = MI_BATCH_BUFFER_END;
 		struct drm_i915_gem_exec_object2 obj[2];
 		struct drm_i915_gem_execbuffer2 execbuf;
@@ -570,7 +582,8 @@ static void test_inflight_contexts(int fd, unsigned int wait)
 
 		gem_quiescent_gpu(fd);
 
-		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
+		igt_debug("Starting %s on engine '%s'\n",
+			  __func__, engine->name);
 		igt_require(i915_reset_control(false));
 
 		memset(obj, 0, sizeof(obj));
@@ -584,7 +597,7 @@ static void test_inflight_contexts(int fd, unsigned int wait)
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
 		execbuf.buffer_count = 2;
-		execbuf.flags = engine | I915_EXEC_FENCE_OUT;
+		execbuf.flags = engine->flags | I915_EXEC_FENCE_OUT;
 
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			execbuf.rsvd1 = ctx[n];
@@ -671,7 +684,8 @@ static void test_inflight_internal(int fd, unsigned int wait)
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
 	uint32_t bbe = MI_BATCH_BUFFER_END;
-	unsigned engine, nfence = 0;
+	unsigned nfence = 0;
+	const struct intel_execution_engine2 *engine;
 	int fences[16];
 	igt_spin_t *hang;
 
@@ -692,8 +706,8 @@ static void test_inflight_internal(int fd, unsigned int wait)
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(obj);
 	execbuf.buffer_count = 2;
-	for_each_engine(fd, engine) {
-		execbuf.flags = engine | I915_EXEC_FENCE_OUT;
+	__for_each_physical_engine(fd, engine) {
+		execbuf.flags = engine->flags | I915_EXEC_FENCE_OUT;
 
 		gem_execbuf_wr(fd, &execbuf);
 
@@ -716,7 +730,8 @@ static void test_inflight_internal(int fd, unsigned int wait)
 }
 
 static void reset_stress(int fd,
-			 uint32_t ctx0, unsigned int engine,
+			 uint32_t ctx0,
+			 const struct intel_execution_engine2 *engine,
 			 unsigned int flags)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -726,7 +741,7 @@ static void reset_stress(int fd,
 	struct drm_i915_gem_execbuffer2 execbuf = {
 		.buffers_ptr = to_user_pointer(&obj),
 		.buffer_count = 1,
-		.flags = engine,
+		.flags = engine->flags,
 	};
 	igt_stats_t stats;
 
@@ -794,9 +809,9 @@ static void reset_stress(int fd,
 static void test_reset_stress(int fd, unsigned int flags)
 {
 	uint32_t ctx0 = context_create_safe(fd);
-	unsigned int engine;
+	const struct intel_execution_engine2 *engine;
 
-	for_each_engine(fd, engine)
+	__for_each_physical_engine(fd, engine)
 		reset_stress(fd, ctx0, engine, flags);
 
 	gem_context_destroy(fd, ctx0);
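
Side note: __test_banned() and test_wait() used to spin on the legacy
default engine (engine flags 0 / I915_EXEC_DEFAULT) and now look the
render engine up by class instead. If more call sites grow the same
open-coded loop, a small helper along these lines could factor it out
(hypothetical sketch, not part of this patch):

	static const struct intel_execution_engine2 *
	find_render_engine(int fd)
	{
		const struct intel_execution_engine2 *e;

		/* Walk the physical engines and return the first render engine. */
		__for_each_physical_engine(fd, e)
			if (e->class == I915_ENGINE_CLASS_RENDER)
				return e;

		return NULL;
	}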
-- 
2.19.1


