[igt-dev] [RFC v2 25/43] tests/i915/gem_exec_schedule: use the gem_engine_topology library
Ramalingam C
ramalingam.c at intel.com
Fri Jun 21 10:03:27 UTC 2019
Replace the legacy for_each_engine* macros with those implemented in
the gem_engine_topology library, passing engines around as
const struct intel_execution_engine2 pointers instead of raw execbuf
flags words.
Signed-off-by: Ramalingam C <ramalingam.c at intel.com>
---
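Note: the conversion is mechanical throughout. The legacy iterator
yields a raw execbuf flags word, while the gem_engine_topology
iterator yields an engine descriptor whose ->flags member is what
execbuf consumes. A minimal sketch of the pattern (the execbuf
variable here is illustrative, not taken from this patch):

    /* before: engine identified by a raw execbuf flags word */
    unsigned int ring;
    for_each_physical_engine(fd, ring)
            execbuf.flags = ring;

    /* after: engine described by a topology descriptor */
    const struct intel_execution_engine2 *e;
    __for_each_physical_engine(fd, e)
            execbuf.flags = e->flags; /* class/instance also available */

Capability checks likewise move from the flags-based helper to the
class-based one: gem_can_store_dword(fd, ring) becomes
gem_class_can_store_dword(fd, e->class). Tests that previously took
ALL_ENGINES now take a NULL engine to mean "iterate all physical
engines" (see smoketest()).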
tests/i915/gem_exec_schedule.c | 354 ++++++++++++++++-----------------
1 file changed, 177 insertions(+), 177 deletions(-)
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 7b4186228f09..21b0d93559e0 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -83,7 +83,8 @@ void __sync_read_u32_count(int fd, uint32_t handle, uint32_t *dst, uint64_t size
gem_read(fd, handle, 0, dst, size);
}
-static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
+static uint32_t __store_dword(int fd, uint32_t ctx,
+ const struct intel_execution_engine2 *e,
uint32_t target, uint32_t offset, uint32_t value,
uint32_t cork, unsigned write_domain)
{
@@ -97,7 +98,7 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj + !cork);
execbuf.buffer_count = 2 + !!cork;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
execbuf.rsvd1 = ctx;
@@ -138,11 +139,12 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
return obj[2].handle;
}
-static void store_dword(int fd, uint32_t ctx, unsigned ring,
+static void store_dword(int fd, uint32_t ctx,
+ const struct intel_execution_engine2 *e,
uint32_t target, uint32_t offset, uint32_t value,
uint32_t cork, unsigned write_domain)
{
- gem_close(fd, __store_dword(fd, ctx, ring,
+ gem_close(fd, __store_dword(fd, ctx, e,
target, offset, value,
cork, write_domain));
}
@@ -161,14 +163,15 @@ static uint32_t create_highest_priority(int fd)
return ctx;
}
-static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
+static void unplug_show_queue(int fd, struct igt_cork *c,
+ const struct intel_execution_engine2 *e)
{
igt_spin_t *spin[MAX_ELSP_QLEN];
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
const struct igt_spin_factory opts = {
.ctx = create_highest_priority(fd),
- .engine = engine,
+ .engine = e->flags,
};
spin[n] = __igt_spin_factory(fd, &opts);
gem_context_destroy(fd, opts.ctx);
@@ -182,7 +185,7 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
}
-static void fifo(int fd, unsigned ring)
+static void fifo(int fd, const struct intel_execution_engine2 *e)
{
IGT_CORK_HANDLE(cork);
uint32_t scratch, plug;
@@ -193,10 +196,10 @@ static void fifo(int fd, unsigned ring)
plug = igt_cork_plug(&cork, fd);
/* Same priority, same timeline, final result will be the second eb */
- store_dword(fd, 0, ring, scratch, 0, 1, plug, 0);
- store_dword(fd, 0, ring, scratch, 0, 2, plug, 0);
+ store_dword(fd, 0, e, scratch, 0, 1, plug, 0);
+ store_dword(fd, 0, e, scratch, 0, 2, plug, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
result = __sync_read_u32(fd, scratch, 0);
@@ -205,15 +208,15 @@ static void fifo(int fd, unsigned ring)
igt_assert_eq_u32(result, 2);
}
-static void independent(int fd, unsigned int engine)
+static void independent(int fd, const struct intel_execution_engine2 *e)
{
IGT_CORK_HANDLE(cork);
uint32_t scratch, plug, batch;
igt_spin_t *spin = NULL;
- unsigned int other;
+ const struct intel_execution_engine2 *other;
uint32_t *ptr;
- igt_require(engine != 0);
+ igt_require(e);
scratch = gem_create(fd, 4096);
ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
@@ -222,32 +225,32 @@ static void independent(int fd, unsigned int engine)
plug = igt_cork_plug(&cork, fd);
/* Check that we can submit to engine while all others are blocked */
- for_each_physical_engine(fd, other) {
- if (other == engine)
+ __for_each_physical_engine(fd, other) {
+ if (other->flags == e->flags)
continue;
- if (!gem_can_store_dword(fd, other))
+ if (!gem_class_can_store_dword(fd, other->class))
continue;
if (spin == NULL) {
- spin = __igt_spin_new(fd, .engine = other);
+ spin = __igt_spin_new(fd, .engine = other->flags);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
- .flags = other,
+ .flags = other->flags,
};
gem_execbuf(fd, &eb);
}
- store_dword(fd, 0, other, scratch, 0, other, plug, 0);
+ store_dword(fd, 0, other, scratch, 0, other->flags, plug, 0);
}
igt_require(spin);
/* Same priority, but different timeline (as different engine) */
- batch = __store_dword(fd, 0, engine, scratch, 0, engine, plug, 0);
+ batch = __store_dword(fd, 0, e, scratch, 0, e->flags, plug, 0);
- unplug_show_queue(fd, &cork, engine);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
gem_sync(fd, batch);
@@ -257,34 +260,35 @@ static void independent(int fd, unsigned int engine)
/* Only the local engine should be free to complete. */
igt_assert(gem_bo_busy(fd, scratch));
- igt_assert_eq(ptr[0], engine);
+ igt_assert_eq(ptr[0], e->flags);
igt_spin_free(fd, spin);
gem_quiescent_gpu(fd);
/* And we expect the others to have overwritten us, order unspecified */
igt_assert(!gem_bo_busy(fd, scratch));
- igt_assert_neq(ptr[0], engine);
+ igt_assert_neq(ptr[0], e->flags);
munmap(ptr, 4096);
gem_close(fd, scratch);
}
-static void smoketest(int fd, unsigned ring, unsigned timeout)
+static void smoketest(int fd, const struct intel_execution_engine2 *e,
+ unsigned timeout)
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- unsigned engines[MAX_ENGINES];
+ const struct intel_execution_engine2 *engines[MAX_ENGINES];
unsigned nengine;
- unsigned engine;
+ const struct intel_execution_engine2 *other;
uint32_t scratch;
uint32_t result[2 * ncpus];
nengine = 0;
- if (ring == ALL_ENGINES) {
- for_each_physical_engine(fd, engine)
- engines[nengine++] = engine;
+ if (!e) {
+ __for_each_physical_engine(fd, other)
+ engines[nengine++] = other;
} else {
- engines[nengine++] = ring;
+ engines[nengine++] = e;
}
igt_require(nengine);
@@ -302,12 +306,12 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
prio = hars_petruska_f54_1_random_unsafe_max(MAX_PRIO - MIN_PRIO) + MIN_PRIO;
gem_context_set_priority(fd, ctx, prio);
- engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
- store_dword(fd, ctx, engine, scratch,
+ other = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
+ store_dword(fd, ctx, other, scratch,
8*child + 0, ~child,
0, 0);
for (unsigned int step = 0; step < 8; step++)
- store_dword(fd, ctx, engine, scratch,
+ store_dword(fd, ctx, other, scratch,
8*child + 4, count++,
0, 0);
}
@@ -352,7 +356,7 @@ static void semaphore_userlock(int i915)
.handle = batch_create(i915),
};
igt_spin_t *spin = NULL;
- unsigned int engine;
+ const struct intel_execution_engine2 *e;
uint32_t scratch;
igt_require(gem_scheduler_has_semaphores(i915));
@@ -365,16 +369,16 @@ static void semaphore_userlock(int i915)
*/
scratch = gem_create(i915, 4096);
- for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, e) {
if (!spin) {
spin = igt_spin_new(i915,
.dependency = scratch,
- .engine = engine);
+ .engine = e->flags);
} else {
uint64_t saved = spin->execbuf.flags;
spin->execbuf.flags &= ~ENGINE_MASK;
- spin->execbuf.flags |= engine;
+ spin->execbuf.flags |= e->flags;
gem_execbuf(i915, &spin->execbuf);
@@ -390,15 +394,15 @@ static void semaphore_userlock(int i915)
* taking precedence.
*/
scratch = gem_context_create(i915);
- for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, e) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .flags = engine,
+ .flags = e->flags,
.rsvd1 = scratch,
};
- if (engine == (spin->execbuf.flags & ENGINE_MASK))
+ if (e->flags == (spin->execbuf.flags & ENGINE_MASK))
continue;
gem_execbuf(i915, &execbuf);
@@ -415,7 +419,7 @@ static void semaphore_codependency(int i915)
struct {
igt_spin_t *xcs, *rcs;
} task[2];
- unsigned int engine;
+ const struct intel_execution_engine2 *e, *other;
int i;
/*
@@ -429,13 +433,13 @@ static void semaphore_codependency(int i915)
*/
i = 0;
- for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, e) {
uint32_t ctx;
- if (engine == I915_EXEC_RENDER)
+ if (e->class == I915_ENGINE_CLASS_RENDER)
continue;
- if (!gem_can_store_dword(i915, engine))
+ if (!gem_class_can_store_dword(i915, e->class))
continue;
ctx = gem_context_create(i915);
@@ -443,16 +447,19 @@ static void semaphore_codependency(int i915)
task[i].xcs =
__igt_spin_new(i915,
.ctx = ctx,
- .engine = engine,
+ .engine = e->flags,
.flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(task[i].xcs);
- /* Common rcs tasks will be queued in FIFO */
- task[i].rcs =
- __igt_spin_new(i915,
- .ctx = ctx,
- .engine = I915_EXEC_RENDER,
- .dependency = task[i].xcs->handle);
+ __for_each_physical_engine(i915, other) {
+ if (other->class == I915_ENGINE_CLASS_RENDER)
+ /* Common rcs tasks will be queued in FIFO */
+ task[i].rcs =
+ __igt_spin_new(i915,
+ .ctx = ctx,
+ .engine = other->flags,
+ .dependency = task[i].xcs->handle);
+ }
gem_context_destroy(i915, ctx);
@@ -481,7 +488,7 @@ static void semaphore_resolve(int i915)
{
const uint32_t SEMAPHORE_ADDR = 64 << 10;
uint32_t semaphore, outer, inner, *sema;
- unsigned int engine;
+ const struct intel_execution_engine2 *e;
/*
* Userspace may submit batches that wait upon unresolved
@@ -501,7 +508,7 @@ static void semaphore_resolve(int i915)
semaphore = gem_create(i915, 4096);
sema = gem_mmap__wc(i915, semaphore, 0, 4096, PROT_WRITE);
- for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, e) {
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_execbuffer2 eb;
uint32_t handle, cancel;
@@ -509,10 +516,10 @@ static void semaphore_resolve(int i915)
igt_spin_t *spin;
int64_t poke = 1;
- if (!gem_can_store_dword(i915, engine))
+ if (!gem_class_can_store_dword(i915, e->class))
continue;
- spin = __igt_spin_new(i915, .engine = engine);
+ spin = __igt_spin_new(i915, .engine = e->flags);
igt_spin_end(spin); /* we just want its address for later */
gem_sync(i915, spin->handle);
igt_spin_reset(spin);
@@ -609,26 +616,26 @@ static void semaphore_resolve(int i915)
static void semaphore_noskip(int i915)
{
const int gen = intel_gen(intel_get_drm_devid(i915));
- unsigned int engine, other;
+ const struct intel_execution_engine2 *engine, *other;
uint32_t ctx;
igt_require(gen >= 6); /* MI_STORE_DWORD_IMM convenience */
ctx = gem_context_create(i915);
- for_each_physical_engine(i915, engine) {
- for_each_physical_engine(i915, other) {
+ __for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, other) {
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_execbuffer2 eb;
uint32_t handle, *cs, *map;
igt_spin_t *chain, *spin;
- if (other == engine || !gem_can_store_dword(i915, other))
+ if (other->flags == engine->flags || !gem_class_can_store_dword(i915, other->class))
continue;
- chain = __igt_spin_new(i915, .engine = engine);
+ chain = __igt_spin_new(i915, .engine = engine->flags);
- spin = __igt_spin_new(i915, .engine = other);
+ spin = __igt_spin_new(i915, .engine = other->flags);
igt_spin_end(spin); /* we just want its address for later */
gem_sync(i915, spin->handle);
igt_spin_reset(spin);
@@ -662,7 +669,7 @@ static void semaphore_noskip(int i915)
eb.buffer_count = 3;
eb.buffers_ptr = to_user_pointer(obj);
eb.rsvd1 = ctx;
- eb.flags = other;
+ eb.flags = other->flags;
gem_execbuf(i915, &eb);
/* port1: dependency chain from port0 */
@@ -673,7 +680,7 @@ static void semaphore_noskip(int i915)
memset(&eb, 0, sizeof(eb));
eb.buffer_count = 2;
eb.buffers_ptr = to_user_pointer(obj);
- eb.flags = other;
+ eb.flags = other->flags;
gem_execbuf(i915, &eb);
igt_spin_set_timeout(chain, NSEC_PER_SEC / 100);
@@ -688,7 +695,8 @@ static void semaphore_noskip(int i915)
gem_context_destroy(i915, ctx);
}
-static void reorder(int fd, unsigned ring, unsigned flags)
+static void reorder(int fd, const struct intel_execution_engine2 *e,
+ unsigned flags)
#define EQUAL 1
{
IGT_CORK_HANDLE(cork);
@@ -708,10 +716,10 @@ static void reorder(int fd, unsigned ring, unsigned flags)
/* We expect the high priority context to be executed first, and
* so the final result will be value from the low priority context.
*/
- store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], plug, 0);
- store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], plug, 0);
+ store_dword(fd, ctx[LO], e, scratch, 0, ctx[LO], plug, 0);
+ store_dword(fd, ctx[HI], e, scratch, 0, ctx[HI], plug, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
gem_context_destroy(fd, ctx[LO]);
@@ -726,7 +734,7 @@ static void reorder(int fd, unsigned ring, unsigned flags)
igt_assert_eq_u32(result, ctx[LO]);
}
-static void promotion(int fd, unsigned ring)
+static void promotion(int fd, const struct intel_execution_engine2 *e)
{
IGT_CORK_HANDLE(cork);
uint32_t result, dep;
@@ -753,16 +761,16 @@ static void promotion(int fd, unsigned ring)
* fifo would be NOISE, LO, HI.
* strict priority would be HI, NOISE, LO
*/
- store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], plug, 0);
- store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], plug, 0);
+ store_dword(fd, ctx[NOISE], e, result, 0, ctx[NOISE], plug, 0);
+ store_dword(fd, ctx[LO], e, result, 0, ctx[LO], plug, 0);
/* link LO <-> HI via a dependency on another buffer */
- store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
- store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);
+ store_dword(fd, ctx[LO], e, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, ctx[HI], e, dep, 0, ctx[HI], 0, 0);
- store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);
+ store_dword(fd, ctx[HI], e, result, 0, ctx[HI], 0, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
gem_context_destroy(fd, ctx[NOISE]);
@@ -781,7 +789,8 @@ static void promotion(int fd, unsigned ring)
#define NEW_CTX (0x1 << 0)
#define HANG_LP (0x1 << 1)
-static void preempt(int fd, unsigned ring, unsigned flags)
+static void preempt(int fd, const struct intel_execution_engine2 *e,
+ unsigned flags)
{
uint32_t result = gem_create(fd, 4096);
uint32_t result_read;
@@ -796,7 +805,7 @@ static void preempt(int fd, unsigned ring, unsigned flags)
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
if (flags & HANG_LP)
- hang = igt_hang_ctx(fd, ctx[LO], ring, 0);
+ hang = igt_hang_ctx(fd, ctx[LO], e->flags, 0);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
if (flags & NEW_CTX) {
@@ -806,10 +815,10 @@ static void preempt(int fd, unsigned ring, unsigned flags)
}
spin[n] = __igt_spin_new(fd,
.ctx = ctx[LO],
- .engine = ring);
+ .engine = e->flags);
igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
- store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
+ store_dword(fd, ctx[HI], e, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
result_read = __sync_read_u32(fd, result, 0);
igt_assert_eq_u32(result_read, n + 1);
@@ -833,21 +842,21 @@ static void preempt(int fd, unsigned ring, unsigned flags)
static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
{
- unsigned other;
+ const struct intel_execution_engine2 *other;
gem_context_set_priority(fd, ctx, prio);
- for_each_physical_engine(fd, other) {
+ __for_each_physical_engine(fd, other) {
if (spin == NULL) {
spin = __igt_spin_new(fd,
.ctx = ctx,
- .engine = other);
+ .engine = other->flags);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
.rsvd1 = ctx,
- .flags = other,
+ .flags = other->flags,
};
gem_execbuf(fd, &eb);
}
@@ -858,21 +867,23 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
static void __preempt_other(int fd,
uint32_t *ctx,
- unsigned int target, unsigned int primary,
+ const struct intel_execution_engine2 *target_e,
+ const struct intel_execution_engine2 *primary_e,
unsigned flags)
{
uint32_t result = gem_create(fd, 4096);
uint32_t result_read[4096 / sizeof(uint32_t)];
- unsigned int n, i, other;
+ unsigned int n, i;
+ const struct intel_execution_engine2 *other;
n = 0;
- store_dword(fd, ctx[LO], primary,
+ store_dword(fd, ctx[LO], primary_e,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
n++;
if (flags & CHAIN) {
- for_each_physical_engine(fd, other) {
+ __for_each_physical_engine(fd, other) {
store_dword(fd, ctx[LO], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -880,7 +891,7 @@ static void __preempt_other(int fd,
}
}
- store_dword(fd, ctx[HI], target,
+ store_dword(fd, ctx[HI], target_e,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -896,9 +907,10 @@ static void __preempt_other(int fd,
gem_close(fd, result);
}
-static void preempt_other(int fd, unsigned ring, unsigned int flags)
+static void preempt_other(int fd, const struct intel_execution_engine2 *e,
+ unsigned int flags)
{
- unsigned int primary;
+ const struct intel_execution_engine2 *primary;
igt_spin_t *spin = NULL;
uint32_t ctx[3];
@@ -921,10 +933,9 @@ static void preempt_other(int fd, unsigned ring, unsigned int flags)
ctx[HI] = gem_context_create(fd);
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
- for_each_physical_engine(fd, primary) {
- igt_debug("Primary engine: %s\n", e__->name);
- __preempt_other(fd, ctx, ring, primary, flags);
-
+ __for_each_physical_engine(fd, primary) {
+ igt_debug("Primary engine: %s\n", primary->name);
+ __preempt_other(fd, ctx, e, primary, flags);
}
igt_assert(gem_bo_busy(fd, spin->handle));
@@ -936,13 +947,15 @@ static void preempt_other(int fd, unsigned ring, unsigned int flags)
}
static void __preempt_queue(int fd,
- unsigned target, unsigned primary,
+ const struct intel_execution_engine2 *target_e,
+ const struct intel_execution_engine2 *primary_e,
unsigned depth, unsigned flags)
{
uint32_t result = gem_create(fd, 4096);
uint32_t result_read[4096 / sizeof(uint32_t)];
igt_spin_t *above = NULL, *below = NULL;
- unsigned int other, n, i;
+ unsigned int n, i;
+ const struct intel_execution_engine2 *other;
int prio = MAX_PRIO;
uint32_t ctx[3] = {
gem_context_create(fd),
@@ -971,13 +984,13 @@ static void __preempt_queue(int fd,
gem_context_set_priority(fd, ctx[LO], prio--);
n = 0;
- store_dword(fd, ctx[LO], primary,
+ store_dword(fd, ctx[LO], primary_e,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
n++;
if (flags & CHAIN) {
- for_each_physical_engine(fd, other) {
+ __for_each_physical_engine(fd, other) {
store_dword(fd, ctx[LO], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -985,7 +998,7 @@ static void __preempt_queue(int fd,
}
}
- store_dword(fd, ctx[HI], target,
+ store_dword(fd, ctx[HI], target_e,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -1016,22 +1029,23 @@ static void __preempt_queue(int fd,
gem_close(fd, result);
}
-static void preempt_queue(int fd, unsigned ring, unsigned int flags)
+static void preempt_queue(int fd, const struct intel_execution_engine2 *e,
+ unsigned int flags)
{
- unsigned other;
+ const struct intel_execution_engine2 *other;
- for_each_physical_engine(fd, other) {
+ __for_each_physical_engine(fd, other) {
for (unsigned depth = 0; depth <= MAX_ELSP_QLEN; depth++)
- __preempt_queue(fd, ring, other, depth, flags);
+ __preempt_queue(fd, e, other, depth, flags);
}
}
-static void preempt_self(int fd, unsigned ring)
+static void preempt_self(int fd, const struct intel_execution_engine2 *e)
{
uint32_t result = gem_create(fd, 4096);
uint32_t result_read[4096 / sizeof(uint32_t)];
igt_spin_t *spin[MAX_ELSP_QLEN];
- unsigned int other;
+ const struct intel_execution_engine2 *other;
unsigned int n, i;
uint32_t ctx[3];
@@ -1049,17 +1063,17 @@ static void preempt_self(int fd, unsigned ring)
n = 0;
gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
- for_each_physical_engine(fd, other) {
+ __for_each_physical_engine(fd, other) {
spin[n] = __igt_spin_new(fd,
.ctx = ctx[NOISE],
- .engine = other);
+ .engine = other->flags);
store_dword(fd, ctx[HI], other,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
n++;
}
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
- store_dword(fd, ctx[HI], ring,
+ store_dword(fd, ctx[HI], e,
result, (n + 1)*sizeof(uint32_t), n + 1,
0, I915_GEM_DOMAIN_RENDER);
@@ -1082,7 +1096,7 @@ static void preempt_self(int fd, unsigned ring)
gem_close(fd, result);
}
-static void preemptive_hang(int fd, unsigned ring)
+static void preemptive_hang(int fd, const struct intel_execution_engine2 *e)
{
igt_spin_t *spin[MAX_ELSP_QLEN];
igt_hang_t hang;
@@ -1097,12 +1111,12 @@ static void preemptive_hang(int fd, unsigned ring)
spin[n] = __igt_spin_new(fd,
.ctx = ctx[LO],
- .engine = ring);
+ .engine = e->flags);
gem_context_destroy(fd, ctx[LO]);
}
- hang = igt_hang_ctx(fd, ctx[HI], ring, 0);
+ hang = igt_hang_ctx(fd, ctx[HI], e->flags, 0);
igt_post_hang_ring(fd, hang);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
@@ -1117,7 +1131,7 @@ static void preemptive_hang(int fd, unsigned ring)
gem_context_destroy(fd, ctx[HI]);
}
-static void deep(int fd, unsigned ring)
+static void deep(int fd, const struct intel_execution_engine2 *e)
{
#define XS 8
const unsigned int max_req = MAX_PRIO - MIN_PRIO;
@@ -1138,10 +1152,10 @@ static void deep(int fd, unsigned ring)
ctx[n] = gem_context_create(fd);
}
- nreq = gem_measure_ring_inflight(fd, ring, 0) / (4 * XS) * MAX_CONTEXTS;
+ nreq = gem_measure_ring_inflight(fd, e->flags, 0) / (4 * XS) * MAX_CONTEXTS;
if (nreq > max_req)
nreq = max_req;
- igt_info("Using %d requests (prio range %d)\n", nreq, max_req);
+ igt_info("Using %d requests (prio range %d)\n", nreq, max_req);
result = gem_create(fd, size);
for (int m = 0; m < XS; m ++)
@@ -1163,7 +1177,7 @@ static void deep(int fd, unsigned ring)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = XS + 2;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
for (n = 0; n < MAX_CONTEXTS; n++) {
execbuf.rsvd1 = ctx[n];
gem_execbuf(fd, &execbuf);
@@ -1182,7 +1196,7 @@ static void deep(int fd, unsigned ring)
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 3,
- .flags = ring | (gen < 6 ? I915_EXEC_SECURE : 0),
+ .flags = e->flags | (gen < 6 ? I915_EXEC_SECURE : 0),
.rsvd1 = ctx[n % MAX_CONTEXTS],
};
uint32_t batch[16];
@@ -1235,15 +1249,15 @@ static void deep(int fd, unsigned ring)
gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
for (int m = 0; m < XS; m++) {
- store_dword(fd, context, ring, result, 4*n, context, dep[m], 0);
- store_dword(fd, context, ring, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, e, result, 4*n, context, dep[m], 0);
+ store_dword(fd, context, e, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
}
expected = context;
}
igt_info("Second deptree: %d requests [%.3fs]\n",
n * XS, 1e-9*igt_nsec_elapsed(&tv));
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
igt_require(expected); /* too slow */
@@ -1281,10 +1295,10 @@ static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
return err;
}
-static void wide(int fd, unsigned ring)
+static void wide(int fd, const struct intel_execution_engine2 *e)
{
struct timespec tv = {};
- unsigned int ring_size = gem_measure_ring_inflight(fd, ring, MEASURE_RING_NEW_CTX);
+ unsigned int ring_size = gem_measure_ring_inflight(fd, e->flags, MEASURE_RING_NEW_CTX);
IGT_CORK_HANDLE(cork);
uint32_t plug;
@@ -1306,13 +1320,13 @@ static void wide(int fd, unsigned ring)
igt_seconds_elapsed(&tv) < 5 && count < ring_size;
count++) {
for (int n = 0; n < MAX_CONTEXTS; n++) {
- store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], plug, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, ctx[n], e, result, 4*n, ctx[n], plug, I915_GEM_DOMAIN_INSTRUCTION);
}
}
igt_info("Submitted %d requests over %d contexts in %.1fms\n",
count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
for (int n = 0; n < MAX_CONTEXTS; n++)
@@ -1326,14 +1340,14 @@ static void wide(int fd, unsigned ring)
free(ctx);
}
-static void reorder_wide(int fd, unsigned ring)
+static void reorder_wide(int fd, const struct intel_execution_engine2 *e)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_execbuffer2 execbuf;
struct timespec tv = {};
- unsigned int ring_size = gem_measure_ring_inflight(fd, ring, MEASURE_RING_NEW_CTX);
+ unsigned int ring_size = gem_measure_ring_inflight(fd, e->flags, MEASURE_RING_NEW_CTX);
IGT_CORK_HANDLE(cork);
uint32_t result, target, plug;
uint32_t result_read[1024];
@@ -1360,7 +1374,7 @@ static void reorder_wide(int fd, unsigned ring)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 3;
- execbuf.flags = ring;
+ execbuf.flags = e->flags;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -1414,7 +1428,7 @@ static void reorder_wide(int fd, unsigned ring)
gem_context_destroy(fd, execbuf.rsvd1);
}
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, e);
gem_close(fd, plug);
__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
@@ -1440,7 +1454,7 @@ static void bind_to_cpu(int cpu)
igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
}
-static void test_pi_ringfull(int fd, unsigned int engine)
+static void test_pi_ringfull(int fd, const struct intel_execution_engine2 *e)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct sigaction sa = { .sa_handler = alarm_handler };
@@ -1463,7 +1477,7 @@ static void test_pi_ringfull(int fd, unsigned int engine)
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
- execbuf.flags = engine;
+ execbuf.flags = e->flags;
/* Warm up both (hi/lo) contexts */
execbuf.rsvd1 = gem_context_create(fd);
@@ -1571,11 +1585,11 @@ static void test_pi_ringfull(int fd, unsigned int engine)
static void measure_semaphore_power(int i915)
{
struct gpu_power power;
- unsigned int engine, signaler;
+ const struct intel_execution_engine2 *engine, *signaler;
igt_require(gpu_power_open(&power) == 0);
- for_each_physical_engine(i915, signaler) {
+ __for_each_physical_engine(i915, signaler) {
struct gpu_power_sample s_spin[2];
struct gpu_power_sample s_sema[2];
double baseline, total;
@@ -1583,7 +1597,7 @@ static void measure_semaphore_power(int i915)
igt_spin_t *spin;
spin = __igt_spin_new(i915,
- .engine = signaler,
+ .engine = signaler->flags,
.flags = IGT_SPIN_POLL_RUN);
gem_wait(i915, spin->handle, &jiffie); /* waitboost */
igt_spin_busywait_until_started(spin);
@@ -1593,14 +1607,14 @@ static void measure_semaphore_power(int i915)
gpu_power_read(&power, &s_spin[1]);
/* Add a waiter to each engine */
- for_each_physical_engine(i915, engine) {
+ __for_each_physical_engine(i915, engine) {
igt_spin_t *sema;
- if (engine == signaler)
+ if (engine->flags == signaler->flags)
continue;
sema = __igt_spin_new(i915,
- .engine = engine,
+ .engine = engine->flags,
.dependency = spin->handle);
igt_spin_free(i915, sema);
@@ -1617,7 +1631,7 @@ static void measure_semaphore_power(int i915)
total = gpu_power_W(&power, &s_sema[0], &s_sema[1]);
igt_info("%s: %.1fmW + %.1fmW (total %1.fmW)\n",
- e__->name,
+ engine->name,
1e3 * baseline,
1e3 * (total - baseline),
1e3 * total);
@@ -1628,7 +1642,7 @@ static void measure_semaphore_power(int i915)
igt_main
{
- const struct intel_execution_engine *e;
+ const struct intel_execution_engine2 *e;
int fd = -1;
igt_skip_on_simulation();
@@ -1646,21 +1660,15 @@ igt_main
}
igt_subtest_group {
- for (e = intel_execution_engines; e->name; e++) {
- /* default exec-id is purely symbolic */
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_f("fifo-%s", e->name) {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
- fifo(fd, e->exec_id | e->flags);
+ igt_require(gem_class_can_store_dword(fd, e->class));
+ fifo(fd, e);
}
igt_subtest_f("independent-%s", e->name) {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
- independent(fd, e->exec_id | e->flags);
+ igt_require(gem_class_can_store_dword(fd, e->class));
+ independent(fd, e);
}
}
}
@@ -1681,26 +1689,22 @@ igt_main
semaphore_noskip(fd);
igt_subtest("smoketest-all")
- smoketest(fd, ALL_ENGINES, 30);
-
- for (e = intel_execution_engines; e->name; e++) {
- if (e->exec_id == 0)
- continue;
+ smoketest(fd, NULL, 30);
+ __for_each_physical_engine(fd, e) {
igt_subtest_group {
igt_fixture {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
- igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
+ igt_require(gem_class_can_store_dword(fd, e->class));
}
igt_subtest_f("in-order-%s", e->name)
- reorder(fd, e->exec_id | e->flags, EQUAL);
+ reorder(fd, e, EQUAL);
igt_subtest_f("out-order-%s", e->name)
- reorder(fd, e->exec_id | e->flags, 0);
+ reorder(fd, e, 0);
igt_subtest_f("promotion-%s", e->name)
- promotion(fd, e->exec_id | e->flags);
+ promotion(fd, e);
igt_subtest_group {
igt_fixture {
@@ -1708,30 +1712,30 @@ igt_main
}
igt_subtest_f("preempt-%s", e->name)
- preempt(fd, e->exec_id | e->flags, 0);
+ preempt(fd, e, 0);
igt_subtest_f("preempt-contexts-%s", e->name)
- preempt(fd, e->exec_id | e->flags, NEW_CTX);
+ preempt(fd, e, NEW_CTX);
igt_subtest_f("preempt-self-%s", e->name)
- preempt_self(fd, e->exec_id | e->flags);
+ preempt_self(fd, e);
igt_subtest_f("preempt-other-%s", e->name)
- preempt_other(fd, e->exec_id | e->flags, 0);
+ preempt_other(fd, e, 0);
igt_subtest_f("preempt-other-chain-%s", e->name)
- preempt_other(fd, e->exec_id | e->flags, CHAIN);
+ preempt_other(fd, e, CHAIN);
igt_subtest_f("preempt-queue-%s", e->name)
- preempt_queue(fd, e->exec_id | e->flags, 0);
+ preempt_queue(fd, e, 0);
igt_subtest_f("preempt-queue-chain-%s", e->name)
- preempt_queue(fd, e->exec_id | e->flags, CHAIN);
+ preempt_queue(fd, e, CHAIN);
igt_subtest_f("preempt-queue-contexts-%s", e->name)
- preempt_queue(fd, e->exec_id | e->flags, CONTEXTS);
+ preempt_queue(fd, e, CONTEXTS);
igt_subtest_f("preempt-queue-contexts-chain-%s", e->name)
- preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
+ preempt_queue(fd, e, CONTEXTS | CHAIN);
igt_subtest_group {
igt_hang_t hang;
@@ -1742,11 +1746,11 @@ igt_main
}
igt_subtest_f("preempt-hang-%s", e->name) {
- preempt(fd, e->exec_id | e->flags, NEW_CTX | HANG_LP);
+ preempt(fd, e, NEW_CTX | HANG_LP);
}
igt_subtest_f("preemptive-hang-%s", e->name)
- preemptive_hang(fd, e->exec_id | e->flags);
+ preemptive_hang(fd, e);
igt_fixture {
igt_disallow_hang(fd, hang);
@@ -1756,16 +1760,16 @@ igt_main
}
igt_subtest_f("deep-%s", e->name)
- deep(fd, e->exec_id | e->flags);
+ deep(fd, e);
igt_subtest_f("wide-%s", e->name)
- wide(fd, e->exec_id | e->flags);
+ wide(fd, e);
igt_subtest_f("reorder-wide-%s", e->name)
- reorder_wide(fd, e->exec_id | e->flags);
+ reorder_wide(fd, e);
igt_subtest_f("smoketest-%s", e->name)
- smoketest(fd, e->exec_id | e->flags, 5);
+ smoketest(fd, e, 5);
}
}
}
@@ -1779,18 +1783,14 @@ igt_main
igt_require(gem_has_execlists(fd));
}
- for (e = intel_execution_engines; e->name; e++) {
- if (e->exec_id == 0)
- continue;
-
+ __for_each_physical_engine(fd, e) {
igt_subtest_group {
igt_fixture {
- igt_require(gem_ring_has_physical_engine(fd, e->exec_id | e->flags));
igt_require(gem_scheduler_has_preemption(fd));
}
igt_subtest_f("pi-ringfull-%s", e->name)
- test_pi_ringfull(fd, e->exec_id | e->flags);
+ test_pi_ringfull(fd, e);
}
}
}
--
2.19.1