[PATCH i-g-t 36/52] WIP/FIXME: tests/gem_ctx_persistence: Adapt to use allocator
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Thu Jul 1 12:37:16 UTC 2021
Still one subtest - close-replace-race - needs to be fixed
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
tests/i915/gem_ctx_persistence.c | 156 +++++++++++++++++++++++++++----
1 file changed, 139 insertions(+), 17 deletions(-)
diff --git a/tests/i915/gem_ctx_persistence.c b/tests/i915/gem_ctx_persistence.c
index 10d057f1b..06c91dfb7 100644
--- a/tests/i915/gem_ctx_persistence.c
+++ b/tests/i915/gem_ctx_persistence.c
@@ -43,6 +43,7 @@
#include "igt_sysfs.h"
#include "igt_params.h"
#include "ioctl_wrappers.h" /* gem_wait()! */
+#include "intel_allocator.h"
#include "sw_sync.h"
#define RESET_TIMEOUT_MS 2 * MSEC_PER_SEC; /* default: 640ms */
@@ -179,6 +180,7 @@ static void test_persistence(int i915, unsigned int engine)
igt_spin_t *spin;
int64_t timeout;
uint32_t ctx;
+ uint64_t ahnd;
/*
* Default behaviour are contexts remain alive until their last active
@@ -186,9 +188,11 @@ static void test_persistence(int i915, unsigned int engine)
*/
ctx = gem_context_clone_with_engines(i915, 0);
+ ahnd = get_reloc_ahnd(i915, ctx);
gem_context_set_persistence(i915, ctx, true);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
gem_context_destroy(i915, ctx);
@@ -203,6 +207,7 @@ static void test_persistence(int i915, unsigned int engine)
igt_assert_eq(sync_fence_status(spin->out_fence), 1);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_nonpersistent_cleanup(int i915, unsigned int engine)
@@ -210,6 +215,7 @@ static void test_nonpersistent_cleanup(int i915, unsigned int engine)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
/*
* A nonpersistent context is terminated immediately upon closure,
@@ -219,7 +225,9 @@ static void test_nonpersistent_cleanup(int i915, unsigned int engine)
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
gem_context_destroy(i915, ctx);
@@ -228,6 +236,7 @@ static void test_nonpersistent_cleanup(int i915, unsigned int engine)
igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_nonpersistent_mixed(int i915, unsigned int engine)
@@ -243,16 +252,20 @@ static void test_nonpersistent_mixed(int i915, unsigned int engine)
for (int i = 0; i < ARRAY_SIZE(fence); i++) {
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, i & 1);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
gem_context_destroy(i915, ctx);
fence[i] = spin->out_fence;
+ put_ahnd(ahnd);
}
/* Outer pair of contexts were non-persistent and killed */
@@ -268,6 +281,7 @@ static void test_nonpersistent_hostile(int i915, unsigned int engine)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
/*
* If we cannot cleanly cancel the non-persistent context on closure,
@@ -278,7 +292,9 @@ static void test_nonpersistent_hostile(int i915, unsigned int engine)
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
gem_context_destroy(i915, ctx);
@@ -286,6 +302,7 @@ static void test_nonpersistent_hostile(int i915, unsigned int engine)
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
@@ -293,6 +310,7 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin[2];
uint32_t ctx;
+ uint64_t ahnd;
/*
* Double plus ungood.
@@ -305,10 +323,12 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
igt_require(gem_scheduler_has_preemption(i915));
- ctx = gem_context_clone_with_engines(i915, 0);
+ ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, true);
gem_context_set_priority(i915, ctx, 0);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin[0] = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
@@ -320,6 +340,7 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
gem_context_set_persistence(i915, ctx, false);
gem_context_set_priority(i915, ctx, 1); /* higher priority than 0 */
spin[1] = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
gem_context_destroy(i915, ctx);
@@ -328,6 +349,7 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
igt_spin_free(i915, spin[1]);
igt_spin_free(i915, spin[0]);
+ put_ahnd(ahnd);
}
static void test_nonpersistent_hang(int i915, unsigned int engine)
@@ -335,6 +357,7 @@ static void test_nonpersistent_hang(int i915, unsigned int engine)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
/*
* The user made a simple mistake and submitted an invalid batch,
@@ -344,7 +367,9 @@ static void test_nonpersistent_hang(int i915, unsigned int engine)
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_INVALID_CS);
gem_context_destroy(i915, ctx);
@@ -352,6 +377,7 @@ static void test_nonpersistent_hang(int i915, unsigned int engine)
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void test_nohangcheck_hostile(int i915)
@@ -374,8 +400,10 @@ static void test_nohangcheck_hostile(int i915)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
uint32_t ctx = gem_context_create(i915);
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = eb_ring(e),
.flags = IGT_SPIN_NO_PREEMPTION);
gem_context_destroy(i915, ctx);
@@ -383,6 +411,7 @@ static void test_nohangcheck_hostile(int i915)
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
igt_require(__enable_hangcheck(dir, true));
@@ -411,8 +440,10 @@ static void test_nohangcheck_hang(int i915)
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
uint32_t ctx = gem_context_create(i915);
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = eb_ring(e),
.flags = IGT_SPIN_INVALID_CS);
gem_context_destroy(i915, ctx);
@@ -420,6 +451,7 @@ static void test_nohangcheck_hang(int i915)
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
igt_require(__enable_hangcheck(dir, true));
@@ -477,6 +509,7 @@ static void test_noheartbeat_many(int i915, int count, unsigned int flags)
for_each_physical_ring(e, i915) {
igt_spin_t *spin[count];
+ uint64_t ahnd[count];
if (!set_preempt_timeout(i915, e->full_name, 250))
continue;
@@ -490,7 +523,10 @@ static void test_noheartbeat_many(int i915, int count, unsigned int flags)
uint32_t ctx;
ctx = gem_context_create(i915);
- spin[n] = igt_spin_new(i915, ctx, .engine = eb_ring(e),
+ ahnd[n] = get_reloc_ahnd(i915, ctx);
+ spin[n] = igt_spin_new(i915, ctx,
+ .ahnd = ahnd[n],
+ .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
@@ -506,8 +542,10 @@ static void test_noheartbeat_many(int i915, int count, unsigned int flags)
-EIO);
}
- for (int n = 0; n < ARRAY_SIZE(spin); n++)
+ for (int n = 0; n < ARRAY_SIZE(spin); n++) {
igt_spin_free(i915, spin[n]);
+ put_ahnd(ahnd[n]);
+ }
set_heartbeat(i915, e->full_name, 2500);
cleanup(i915);
@@ -532,6 +570,7 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
for_each_physical_ring(e, i915) {
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
int err;
if (!set_preempt_timeout(i915, e->full_name, 250))
@@ -541,7 +580,10 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
continue;
ctx = gem_context_create(i915);
- spin = igt_spin_new(i915, ctx, .engine = eb_ring(e),
+ ahnd = get_reloc_ahnd(i915, ctx);
+ spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
+ .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
@@ -553,6 +595,7 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
set_heartbeat(i915, e->full_name, 2500);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
igt_assert_eq(err, -EIO);
cleanup(i915);
@@ -565,6 +608,7 @@ static void test_nonpersistent_file(int i915)
{
int debugfs = i915;
igt_spin_t *spin;
+ uint64_t ahnd;
cleanup(i915);
@@ -575,8 +619,9 @@ static void test_nonpersistent_file(int i915)
i915 = gem_reopen_driver(i915);
+ ahnd = get_reloc_ahnd(i915, 0);
gem_context_set_persistence(i915, 0, false);
- spin = igt_spin_new(i915, .flags = IGT_SPIN_FENCE_OUT);
+ spin = igt_spin_new(i915, .ahnd = ahnd, .flags = IGT_SPIN_FENCE_OUT);
close(i915);
flush_delayed_fput(debugfs);
@@ -585,6 +630,7 @@ static void test_nonpersistent_file(int i915)
spin->handle = 0;
igt_spin_free(-1, spin);
+ put_ahnd(ahnd);
}
static int __execbuf_wr(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
@@ -612,6 +658,7 @@ static void test_nonpersistent_queued(int i915, unsigned int engine)
igt_spin_t *spin;
int fence = -1;
uint32_t ctx;
+ uint64_t ahnd;
/*
* Not only must the immediate batch be cancelled, but
@@ -620,7 +667,9 @@ static void test_nonpersistent_queued(int i915, unsigned int engine)
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -654,6 +703,7 @@ static void test_nonpersistent_queued(int i915, unsigned int engine)
igt_assert_eq(wait_for_status(fence, reset_timeout_ms), -EIO);
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void sendfd(int socket, int fd)
@@ -709,12 +759,16 @@ static void test_process(int i915)
igt_fork(child, 1) {
igt_spin_t *spin;
+ uint64_t ahnd;
+ intel_allocator_init();
i915 = gem_reopen_driver(i915);
gem_quiescent_gpu(i915);
gem_context_set_persistence(i915, 0, false);
- spin = igt_spin_new(i915, .flags = IGT_SPIN_FENCE_OUT);
+ ahnd = get_reloc_ahnd(i915, 0);
+ spin = igt_spin_new(i915, .ahnd = ahnd,
+ .flags = IGT_SPIN_FENCE_OUT);
sendfd(sv[0], spin->out_fence);
igt_list_del(&spin->link); /* prevent autocleanup */
@@ -753,12 +807,16 @@ static void test_userptr(int i915)
igt_fork(child, 1) {
igt_spin_t *spin;
+ uint64_t ahnd;
+ intel_allocator_init();
i915 = gem_reopen_driver(i915);
gem_quiescent_gpu(i915);
gem_context_set_persistence(i915, 0, false);
- spin = igt_spin_new(i915, .flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_USERPTR);
+ ahnd = get_reloc_ahnd(i915, 0);
+ spin = igt_spin_new(i915, .ahnd = ahnd,
+ .flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_USERPTR);
sendfd(sv[0], spin->out_fence);
igt_list_del(&spin->link); /* prevent autocleanup */
@@ -800,12 +858,16 @@ static void test_process_mixed(int pfd, unsigned int engine)
for (int persists = 0; persists <= 1; persists++) {
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
+ intel_allocator_init();
ctx = gem_context_create(i915);
+ ahnd = get_reloc_ahnd(i915, ctx);
gem_context_copy_engines(pfd, 0, i915, ctx);
gem_context_set_persistence(i915, ctx, persists);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -842,6 +904,7 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
const struct intel_execution_engine2 *other;
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
int fence = -1;
cleanup(i915);
@@ -858,11 +921,13 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
* reset other users whenever they chose.]
*/
+ ahnd = get_reloc_ahnd(i915, 0);
__for_each_physical_engine(i915, other) {
if (other->flags == engine->flags)
continue;
spin = igt_spin_new(i915,
+ .ahnd = ahnd,
.engine = other->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -880,11 +945,14 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
}
spin->out_fence = -1;
}
+ put_ahnd(ahnd);
igt_require(fence != -1);
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
+ ahnd = get_reloc_ahnd(i915, ctx);
spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
.engine = engine->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
@@ -899,6 +967,7 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
gem_quiescent_gpu(i915);
igt_assert_eq(wait_for_status(fence, reset_timeout_ms), 1);
close(fence);
+ put_ahnd(ahnd);
}
static void test_processes(int i915)
@@ -920,11 +989,15 @@ static void test_processes(int i915)
igt_fork(child, 1) {
igt_spin_t *spin;
int pid;
+ uint64_t ahnd;
+ intel_allocator_init();
i915 = gem_reopen_driver(i915);
gem_context_set_persistence(i915, 0, i);
- spin = igt_spin_new(i915, .flags = IGT_SPIN_FENCE_OUT);
+ ahnd = get_reloc_ahnd(i915, 0);
+ spin = igt_spin_new(i915, .ahnd = ahnd,
+ .flags = IGT_SPIN_FENCE_OUT);
/* prevent autocleanup */
igt_list_del(&spin->link);
@@ -985,11 +1058,13 @@ static void __smoker(int i915,
igt_spin_t *spin;
int fence = -1;
int fd, extra;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
fd = gem_reopen_driver(i915);
gem_context_copy_engines(i915, 0, fd, 0);
gem_context_set_persistence(fd, 0, expected > 0);
- spin = igt_spin_new(fd, .engine = engine, .flags = IGT_SPIN_FENCE_OUT);
+ spin = igt_spin_new(fd, .ahnd = ahnd,
+ .engine = engine, .flags = IGT_SPIN_FENCE_OUT);
extra = rand() % 8;
while (extra--) {
@@ -1015,6 +1090,7 @@ static void __smoker(int i915,
spin->handle = 0;
igt_spin_free(fd, spin);
+ put_ahnd(ahnd);
}
static void smoker(int i915,
@@ -1070,6 +1146,7 @@ static void many_contexts(int i915)
const struct intel_execution_engine2 *e;
int64_t timeout = NSEC_PER_SEC;
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
cleanup(i915);
@@ -1079,7 +1156,7 @@ static void many_contexts(int i915)
* creating new contexts, and submitting new execbuf.
*/
- spin = igt_spin_new(i915, .flags = IGT_SPIN_NO_PREEMPTION);
+ spin = igt_spin_new(i915, .ahnd = ahnd, .flags = IGT_SPIN_NO_PREEMPTION);
igt_spin_end(spin);
gem_sync(i915, spin->handle);
@@ -1110,6 +1187,7 @@ static void many_contexts(int i915)
igt_spin_free(i915, spin);
gem_quiescent_gpu(i915);
+ put_ahnd(ahnd);
}
static void replace_engines(int i915, const struct intel_execution_engine2 *e)
@@ -1125,6 +1203,7 @@ static void replace_engines(int i915, const struct intel_execution_engine2 *e)
};
igt_spin_t *spin[2];
int64_t timeout;
+ uint64_t ahnd = get_reloc_ahnd(i915, param.ctx_id);
/*
* Suppose the user tries to hide a hanging batch by replacing
@@ -1136,10 +1215,10 @@ static void replace_engines(int i915, const struct intel_execution_engine2 *e)
gem_context_set_persistence(i915, param.ctx_id, false);
gem_context_set_param(i915, &param);
- spin[0] = igt_spin_new(i915, param.ctx_id);
+ spin[0] = igt_spin_new(i915, param.ctx_id, .ahnd = ahnd);
gem_context_set_param(i915, &param);
- spin[1] = igt_spin_new(i915, param.ctx_id);
+ spin[1] = igt_spin_new(i915, param.ctx_id, .ahnd = ahnd);
gem_context_destroy(i915, param.ctx_id);
@@ -1152,6 +1231,7 @@ static void replace_engines(int i915, const struct intel_execution_engine2 *e)
igt_spin_free(i915, spin[1]);
igt_spin_free(i915, spin[0]);
gem_quiescent_gpu(i915);
+ put_ahnd(ahnd);
}
static void race_set_engines(int i915, int in, int out)
@@ -1165,23 +1245,34 @@ static void race_set_engines(int i915, int in, int out)
.size = sizeof(engines),
};
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(i915, 0);
- spin = igt_spin_new(i915);
+ spin = igt_spin_new(i915, .ahnd = ahnd);
igt_spin_end(spin);
while (read(in, &param.ctx_id, sizeof(param.ctx_id)) > 0) {
if (!param.ctx_id)
break;
-
+ igt_info("param_ctx: %u\n", param.ctx_id);
__gem_context_set_param(i915, &param);
- spin->execbuf.rsvd1 = param.ctx_id;
- __gem_execbuf(i915, &spin->execbuf);
+ if (!ahnd) {
+ spin->execbuf.rsvd1 = param.ctx_id;
+ __gem_execbuf(i915, &spin->execbuf);
+ } else {
+ igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
+ ahnd = get_reloc_ahnd(i915, param.ctx_id);
+ spin = igt_spin_new(i915, param.ctx_id, .ahnd = ahnd);
+ igt_spin_end(spin);
+ put_ahnd(ahnd);
+ }
write(out, &param.ctx_id, sizeof(param.ctx_id));
}
igt_spin_free(i915, spin);
+ put_ahnd(ahnd);
}
static void close_replace_race(int i915)
@@ -1215,11 +1306,15 @@ static void close_replace_race(int i915)
igt_until_timeout(5) {
igt_spin_t *spin;
uint32_t ctx;
+ uint64_t ahnd;
ctx = gem_context_clone_with_engines(i915, 0);
gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, ctx, .flags = IGT_SPIN_FENCE_OUT);
+ ahnd = get_reloc_ahnd(i915, ctx);
+ spin = igt_spin_new(i915, ctx,
+ .ahnd = ahnd,
+ .flags = IGT_SPIN_FENCE_OUT);
for (int i = 0; i < ncpus; i++)
write(out[1], &ctx, sizeof(ctx));
@@ -1239,6 +1334,7 @@ static void close_replace_race(int i915)
fence = tmp;
}
spin->out_fence = -1;
+ put_ahnd(ahnd);
}
close(in[0]);
@@ -1273,6 +1369,7 @@ static void replace_engines_hostile(int i915,
};
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
+ uint64_t ahnd = get_reloc_ahnd(i915, param.ctx_id);
/*
* Suppose the user tries to hide a hanging batch by replacing
@@ -1285,6 +1382,7 @@ static void replace_engines_hostile(int i915,
gem_context_set_param(i915, &param);
spin = igt_spin_new(i915, param.ctx_id,
+ .ahnd = ahnd,
.flags = IGT_SPIN_NO_PREEMPTION);
param.size = 8;
@@ -1295,6 +1393,7 @@ static void replace_engines_hostile(int i915,
igt_spin_free(i915, spin);
gem_quiescent_gpu(i915);
+ put_ahnd(ahnd);
}
static void do_test(void (*test)(int i915, unsigned int engine),
@@ -1447,9 +1546,21 @@ igt_main
igt_subtest("many-contexts")
many_contexts(i915);
+ }
+
+ igt_subtest_group {
+ igt_fixture {
+ gem_require_contexts(i915);
+ intel_allocator_multiprocess_start();
+ }
igt_subtest("smoketest")
smoketest(i915);
+
+ igt_fixture {
+ intel_allocator_multiprocess_stop();
+ }
+
}
/* Check interactions with set-engines */
@@ -1472,9 +1583,20 @@ igt_main
replace_engines_hostile(i915, e);
}
}
+ }
+
+ igt_subtest_group {
+ igt_fixture {
+ gem_require_contexts(i915);
+ intel_allocator_multiprocess_start();
+ }
igt_subtest("close-replace-race")
close_replace_race(i915);
+
+ igt_fixture {
+ intel_allocator_multiprocess_stop();
+ }
}
igt_fixture {
--
2.26.0
More information about the Intel-gfx-trybot
mailing list