[Intel-gfx] [PATCH i-g-t] i915: Exercise VM_WAIT ioctl
Chris Wilson
chris at chris-wilson.co.uk
Sat Jan 18 22:16:00 UTC 2020
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
include/drm-uapi/i915_drm.h | 37 +++
lib/igt_dummyload.c | 2 +
lib/igt_dummyload.h | 5 +-
tests/Makefile.sources | 3 +
tests/i915/gem_vm_wait.c | 523 ++++++++++++++++++++++++++++++++++++
tests/meson.build | 1 +
6 files changed, 569 insertions(+), 2 deletions(-)
create mode 100644 tests/i915/gem_vm_wait.c
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index b94e991be..62e5ccd2c 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -359,6 +359,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_VM_WAIT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -422,6 +423,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_WAIT, struct drm_i915_gem_vm_wait)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1824,6 +1826,41 @@ struct drm_i915_gem_vm_control {
__u32 vm_id;
};
+/*
+ * (*IOVA & MASK) OP (VALUE & MASK)
+ *
+ * OP:
+ * - EQ, NEQ
+ * - GT, GTE
+ * - LT, LTE
+ * - BEFORE, AFTER
+ *
+ */
+struct drm_i915_gem_vm_wait {
+ /* Chain of extension structures; 0 when unused. */
+ __u64 extensions;
+ /*
+ * GPU virtual address to poll. Must be naturally aligned to the
+ * operand width selected by @mask (u16 -> 2, u32 -> 4, u64 -> 8).
+ */
+ __u64 iova;
+ /* Target ppGTT, as returned by DRM_I915_GEM_VM_CREATE. */
+ __u32 vm_id;
+ /* Comparison applied as (*iova & mask) OP (value & mask). */
+ __u16 op;
+#define I915_VM_WAIT_EQ 0
+#define I915_VM_WAIT_NEQ 1
+#define I915_VM_WAIT_GT 2
+#define I915_VM_WAIT_GTE 3
+#define I915_VM_WAIT_LT 4
+#define I915_VM_WAIT_LTE 5
+#define I915_VM_WAIT_BEFORE 6
+#define I915_VM_WAIT_AFTER 7
+#define I915_VM_WAIT_PASSED 8
+ /* Must be 0 or a valid flag; unknown bits are rejected with -EINVAL. */
+ __u16 flags;
+#define I915_VM_WAIT_ABSTIME 0x1
+ /* Reference operand for the comparison. */
+ __u64 value;
+ /* Selects the operand width; 0 is invalid. */
+ __u64 mask;
+#define I915_VM_WAIT_U8 0xffu
+#define I915_VM_WAIT_U16 0xffffu
+#define I915_VM_WAIT_U32 0xfffffffful
+#define I915_VM_WAIT_U64 0xffffffffffffffffull
+ /*
+ * Wait budget in nanoseconds, relative unless ABSTIME is set —
+ * presumably; confirm against the kernel implementation.
+ */
+ __u64 timeout;
+};
+
struct drm_i915_reg_read {
/*
* Register offset.
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index b7f4caca3..7bc37889b 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -176,6 +176,8 @@ emit_recursive_batch(igt_spin_t *spin,
}
*cs++ = 1;
+ if (opts->flags & IGT_SPIN_WAKE_RUN)
+ *cs++ = 0x2 << 23;
execbuf->buffer_count++;
}
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 421ca183b..8264834ce 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -60,8 +60,9 @@ struct igt_spin_factory {
#define IGT_SPIN_FENCE_IN (1 << 0)
#define IGT_SPIN_FENCE_OUT (1 << 1)
#define IGT_SPIN_POLL_RUN (1 << 2)
-#define IGT_SPIN_FAST (1 << 3)
-#define IGT_SPIN_NO_PREEMPTION (1 << 4)
+#define IGT_SPIN_WAKE_RUN (1 << 3)
+#define IGT_SPIN_FAST (1 << 4)
+#define IGT_SPIN_NO_PREEMPTION (1 << 5)
igt_spin_t *
__igt_spin_factory(int fd, const struct igt_spin_factory *opts);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 806eb02d0..08c664776 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -472,6 +472,9 @@ gem_userptr_blits_SOURCES = i915/gem_userptr_blits.c
TESTS_progs += gem_wait
gem_wait_SOURCES = i915/gem_wait.c
+TESTS_progs += gem_vm_wait
+gem_vm_wait_SOURCES = i915/gem_vm_wait.c
+
TESTS_progs += gem_workarounds
gem_workarounds_SOURCES = i915/gem_workarounds.c
diff --git a/tests/i915/gem_vm_wait.c b/tests/i915/gem_vm_wait.c
new file mode 100644
index 000000000..63107bdd1
--- /dev/null
+++ b/tests/i915/gem_vm_wait.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "i915/gem_vm.h"
+
+/*
+ * Thin wrapper around DRM_IOCTL_I915_GEM_VM_WAIT: returns 0 on success
+ * or the negative errno reported by the kernel.
+ */
+static int __gem_vm_wait(int i915, struct drm_i915_gem_vm_wait *w)
+{
+ if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_WAIT, w) == 0)
+ return 0;
+
+ igt_assume(errno); /* a failing ioctl must leave a reason behind */
+ return -errno;
+}
+
+/* Look up the VM id backing context @ctx (0 for the default context). */
+static uint32_t get_vm(int i915, uint32_t ctx)
+{
+ struct drm_i915_gem_context_param p;
+
+ memset(&p, 0, sizeof(p));
+ p.ctx_id = ctx;
+ p.param = I915_CONTEXT_PARAM_VM;
+ gem_context_get_param(i915, &p);
+
+ return p.value;
+}
+
+/* Create a bo carrying MI_BATCH_BUFFER_END at @offset. */
+static uint32_t __batch_create(int i915, uint32_t offset)
+{
+ const uint32_t end = MI_BATCH_BUFFER_END;
+ uint32_t bo;
+
+ bo = gem_create(i915, offset + sizeof(end));
+ gem_write(i915, bo, offset, &end, sizeof(end));
+
+ return bo;
+}
+
+/* Convenience wrapper: a minimal batch that is just MI_BATCH_BUFFER_END. */
+static uint32_t batch_create(int i915)
+{
+ return __batch_create(i915, 0);
+}
+
+/*
+ * Bind @handle into @ctx's GTT at @offset by submitting a no-op batch
+ * with the object pinned at the requested address. Returns the bound
+ * exec object (same handle, offset confirmed by the kernel).
+ */
+static struct drm_i915_gem_exec_object2
+vm_bind(int i915, uint32_t ctx, uint32_t handle, uint64_t offset)
+{
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ {
+ .handle = handle,
+ .offset = offset,
+ .flags = EXEC_OBJECT_PINNED,
+ },
+ { .handle = batch_create(i915), }
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffers_ptr = to_user_pointer(obj),
+ .buffer_count = 2,
+ .rsvd1 = ctx,
+ };
+
+ gem_execbuf(i915, &eb);
+ gem_close(i915, obj[1].handle); /* the helper batch is no longer needed */
+
+ igt_assert_eq_u64(obj[0].offset, offset); /* pinning must be honoured */
+ return obj[0];
+}
+
+/* A zero mask selects no operand bits and must be rejected with -EINVAL. */
+static void invalid_mask(int i915)
+{
+ struct drm_i915_gem_vm_wait wait;
+ uint32_t handle = gem_create(i915, 4096);
+
+ vm_bind(i915, 0, handle, 0);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0);
+ wait.op = I915_VM_WAIT_EQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ /* Having proved we have an otherwise valid arg.. */
+ wait.mask = 0;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -EINVAL);
+
+ gem_vm_destroy(i915, wait.vm_id);
+ gem_close(i915, handle);
+}
+
+/* Unknown flag bits must be rejected with -EINVAL (future-proofing). */
+static void invalid_flags(int i915)
+{
+ struct drm_i915_gem_vm_wait wait;
+ uint32_t handle = gem_create(i915, 4096);
+
+ vm_bind(i915, 0, handle, 0);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0);
+ wait.op = I915_VM_WAIT_EQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ /* Having proved we have an otherwise valid arg.. */
+ wait.flags = 0xffff;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -EINVAL);
+
+ gem_vm_destroy(i915, wait.vm_id);
+ gem_close(i915, handle);
+}
+
+/*
+ * The iova must be naturally aligned to the operand width selected by
+ * the mask: u8 anywhere, u16 on even addresses, u32 on multiples of 4,
+ * u64 on multiples of 8. Check both rejection of misaligned addresses
+ * and acceptance of aligned ones for every width.
+ */
+static void invalid_iova(int i915)
+{
+ struct drm_i915_gem_vm_wait wait;
+ uint32_t handle = gem_create(i915, 4096);
+
+ vm_bind(i915, 0, handle, 0);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0);
+ wait.op = I915_VM_WAIT_EQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ /* Natural alignments */
+ wait.mask = I915_VM_WAIT_U8;
+ wait.iova = 0x1;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.mask = I915_VM_WAIT_U16;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -EINVAL);
+ wait.iova = 0x2;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -EINVAL);
+ wait.iova = 0x4;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.mask = I915_VM_WAIT_U64;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -EINVAL);
+ wait.iova = 0x8; /* complete the pattern: an aligned u64 must succeed */
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ gem_vm_destroy(i915, wait.vm_id);
+ gem_close(i915, handle);
+}
+
+/* Human-readable name for an I915_VM_WAIT_* comparison op. */
+static const char *op_repr(int op)
+{
+ static const char * const names[] = {
+ [I915_VM_WAIT_EQ] = "EQ",
+ [I915_VM_WAIT_NEQ] = "NEQ",
+ [I915_VM_WAIT_GT] = "GT",
+ [I915_VM_WAIT_GTE] = "GTE",
+ [I915_VM_WAIT_LT] = "LT",
+ [I915_VM_WAIT_LTE] = "LTE",
+ [I915_VM_WAIT_BEFORE] = "BEFORE",
+ [I915_VM_WAIT_AFTER] = "AFTER",
+ [I915_VM_WAIT_PASSED] = "PASSED",
+ };
+
+ if (op < 0 || op >= ARRAY_SIZE(names) || !names[op])
+ return "unknown";
+
+ return names[op];
+}
+
+/*
+ * Exhaustively exercise every comparison op against a CPU-visible page:
+ * write @a through the WC mmap, ask the kernel to compare against @b
+ * with a zero timeout, and check success (GOOD) vs -ETIME (BAD).
+ */
+static void basic(int i915)
+{
+ static const struct {
+ unsigned int op;
+ uint32_t a, b;
+ int result;
+#define GOOD 0
+#define BAD -ETIME
+ } ops[] = {
+ { I915_VM_WAIT_EQ, 0, 0, GOOD },
+ { I915_VM_WAIT_EQ, 0, 1, BAD },
+ { I915_VM_WAIT_EQ, 1, 0, BAD },
+ { I915_VM_WAIT_EQ, 1, 1, GOOD },
+ { I915_VM_WAIT_EQ, 0, -1, BAD },
+ { I915_VM_WAIT_EQ, -1, 0, BAD },
+ { I915_VM_WAIT_EQ, -1, -1, GOOD },
+
+ { I915_VM_WAIT_NEQ, 0, 0, BAD },
+ { I915_VM_WAIT_NEQ, 0, 1, GOOD },
+ { I915_VM_WAIT_NEQ, 1, 0, GOOD },
+ { I915_VM_WAIT_NEQ, 1, 1, BAD },
+ { I915_VM_WAIT_NEQ, 0, -1, GOOD },
+ { I915_VM_WAIT_NEQ, -1, 0, GOOD },
+ { I915_VM_WAIT_NEQ, -1, -1, BAD },
+
+ { I915_VM_WAIT_GT, 0, 0, BAD },
+ { I915_VM_WAIT_GT, 1, 0, GOOD },
+ { I915_VM_WAIT_GT, 1, 1, BAD },
+ { I915_VM_WAIT_GT, 0, 1, BAD },
+ { I915_VM_WAIT_GT, -1, 0, GOOD },
+ { I915_VM_WAIT_GT, 0, -1, BAD },
+
+ { I915_VM_WAIT_GTE, 0, 0, GOOD },
+ { I915_VM_WAIT_GTE, 1, 0, GOOD },
+ { I915_VM_WAIT_GTE, 1, 1, GOOD },
+ { I915_VM_WAIT_GTE, 0, 1, BAD },
+ { I915_VM_WAIT_GTE, -1, 0, GOOD },
+ { I915_VM_WAIT_GTE, 0, -1, BAD },
+
+ { I915_VM_WAIT_LT, 0, 0, BAD },
+ { I915_VM_WAIT_LT, 1, 0, BAD },
+ { I915_VM_WAIT_LT, 1, 1, BAD },
+ { I915_VM_WAIT_LT, 0, 1, GOOD },
+ { I915_VM_WAIT_LT, -1, 0, BAD },
+ { I915_VM_WAIT_LT, 0, -1, GOOD },
+
+ { I915_VM_WAIT_LTE, 0, 0, GOOD },
+ { I915_VM_WAIT_LTE, 1, 0, BAD },
+ { I915_VM_WAIT_LTE, 1, 1, GOOD },
+ { I915_VM_WAIT_LTE, 0, 1, GOOD },
+ { I915_VM_WAIT_LTE, -1, 0, BAD },
+ { I915_VM_WAIT_LTE, 0, -1, GOOD },
+
+ { I915_VM_WAIT_BEFORE, 0, 0, BAD },
+ { I915_VM_WAIT_BEFORE, 1, 0, BAD },
+ { I915_VM_WAIT_BEFORE, 1, 1, BAD },
+ { I915_VM_WAIT_BEFORE, 0, 1, GOOD },
+ { I915_VM_WAIT_BEFORE, -1, 0, GOOD },
+ { I915_VM_WAIT_BEFORE, 0, -1, BAD },
+
+ { I915_VM_WAIT_AFTER, 0, 0, BAD },
+ { I915_VM_WAIT_AFTER, 1, 0, GOOD },
+ { I915_VM_WAIT_AFTER, 1, 1, BAD },
+ { I915_VM_WAIT_AFTER, 0, 1, BAD },
+ { I915_VM_WAIT_AFTER, -1, 0, BAD },
+ { I915_VM_WAIT_AFTER, 0, -1, GOOD },
+
+ { I915_VM_WAIT_PASSED, 0, 0, GOOD },
+ { I915_VM_WAIT_PASSED, 1, 0, GOOD },
+ { I915_VM_WAIT_PASSED, 1, 1, GOOD },
+ { I915_VM_WAIT_PASSED, 0, 1, BAD },
+ { I915_VM_WAIT_PASSED, -1, 0, BAD },
+ { I915_VM_WAIT_PASSED, 0, -1, GOOD },
+ { I915_VM_WAIT_PASSED, -1, -1, GOOD },
+ };
+ struct drm_i915_gem_vm_wait wait;
+ uint32_t handle = gem_create(i915, 4096);
+ uint32_t *x = gem_mmap__wc(i915, handle, 0, 4096, PROT_WRITE);
+
+ vm_bind(i915, 0, handle, 0);
+
+ /* Sanity check the zeroed page: EQ 0 passes, NEQ 0 times out. */
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0);
+ wait.op = I915_VM_WAIT_EQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+ wait.op = I915_VM_WAIT_NEQ;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -ETIME);
+
+ for (int i = 0; i < ARRAY_SIZE(ops); i++) {
+ int err;
+
+ *x = ops[i].a;
+ wait.value = ops[i].b;
+ wait.op = ops[i].op;
+
+ /*
+ * Call the ioctl exactly once so the asserted result and the
+ * reported result cannot diverge between two invocations.
+ */
+ err = __gem_vm_wait(i915, &wait);
+ igt_assert_f(err == ops[i].result,
+ "*iova: %08x wait: { op:%s, value:%08x, }, result: %d, expected: %d\n",
+ ops[i].a, op_repr(ops[i].op), ops[i].b,
+ err, ops[i].result);
+ }
+
+ gem_vm_destroy(i915, wait.vm_id);
+ munmap(x, 4096);
+ gem_close(i915, handle);
+}
+
+/*
+ * Submit a single MI_STORE_DWORD_IMM on engine @e writing @value to
+ * @target at byte @offset. The batch encoding varies per gen; the
+ * target object is passed through unchanged so it keeps its binding.
+ */
+static void store_dword(int i915,
+ const struct intel_execution_engine2 *e,
+ const struct drm_i915_gem_exec_object2 *target,
+ int offset, uint32_t value)
+{
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ struct drm_i915_gem_exec_object2 obj[2];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ uint32_t batch[16];
+ int i;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = 2;
+ execbuf.flags = e->flags;
+ if (gen > 3 && gen < 6)
+ execbuf.flags |= I915_EXEC_SECURE; /* gen4/5 need a secure batch for this op */
+
+ memset(obj, 0, sizeof(obj));
+ memcpy(obj, target, sizeof(*target)); /* reuse the caller's binding */
+ obj[1].handle = gem_create(i915, 4096); /* obj[1] is the batch (last) */
+
+ /* Emit the gen-specific MI_STORE_DWORD_IMM addressing form. */
+ i = 0;
+ batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ batch[++i] = target->offset + offset;
+ batch[++i] = (target->offset + offset) >> 32;
+ } else if (gen >= 4) {
+ batch[++i] = 0;
+ batch[++i] = target->offset + offset;
+ } else {
+ batch[i]--; /* gen2/3: one dword shorter command length */
+ batch[++i] = target->offset + offset;
+ }
+ batch[++i] = value;
+ batch[++i] = MI_BATCH_BUFFER_END;
+ gem_write(i915, obj[1].handle, 0, batch, sizeof(batch));
+
+ gem_execbuf(i915, &execbuf);
+ gem_close(i915, obj[1].handle);
+}
+
+/*
+ * Check that a GPU write from engine @e satisfies a pending VM_WAIT:
+ * first synchronously (write, sync, then poll), then asynchronously
+ * (a forked child writes after 50ms while we block with a 1s timeout).
+ *
+ * NOTE(review): the name shadows libc signal(); harmless here since the
+ * test never installs signal handlers, but worth confirming.
+ */
+static void signal(int i915, const struct intel_execution_engine2 *e)
+{
+ struct drm_i915_gem_vm_wait wait;
+ struct drm_i915_gem_exec_object2 obj =
+ vm_bind(i915, 0, gem_create(i915, 4096), 0);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0);
+ wait.iova = obj.offset;
+ wait.op = I915_VM_WAIT_EQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.op = I915_VM_WAIT_NEQ;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -ETIME);
+
+ /* GPU writes 1; after a sync the new value must be visible. */
+ store_dword(i915, e, &obj, 0, 1);
+ gem_sync(i915, obj.handle);
+
+ wait.op = I915_VM_WAIT_NEQ;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.op = I915_VM_WAIT_EQ;
+ wait.value = 1;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ wait.op = I915_VM_WAIT_NEQ;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -ETIME);
+
+ /* Asynchronous wake: child writes 2 while we sleep in the ioctl. */
+ igt_fork(child, 1) {
+ usleep(50000);
+ store_dword(i915, e, &obj, 0, 2);
+ }
+
+ wait.timeout = NSEC_PER_SEC;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+ igt_waitchildren();
+
+ gem_vm_destroy(i915, wait.vm_id);
+ gem_close(i915, obj.handle);
+}
+
+/*
+ * Use a spinner's poll page as the wait target: while spinning the page
+ * is non-zero (wait succeeds); after reset it is zero (-ETIME); and a
+ * resubmission from a forked child must wake a blocked wait.
+ */
+static void spin(int i915, const struct intel_execution_engine2 *e)
+{
+ struct drm_i915_gem_vm_wait wait;
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(i915,
+ .engine = e->flags,
+ .flags = IGT_SPIN_POLL_RUN | IGT_SPIN_WAKE_RUN);
+ igt_spin_busywait_until_started(spin);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0); /* was ";;": stray double semicolon */
+ wait.iova = spin->obj[0].offset;
+ wait.op = I915_VM_WAIT_NEQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ igt_spin_end(spin);
+ igt_spin_reset(spin);
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -ETIME);
+
+ igt_fork(child, 1) {
+ usleep(50000);
+ gem_execbuf(i915, &spin->execbuf);
+ }
+
+ wait.timeout = NSEC_PER_SEC;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ igt_waitchildren();
+ igt_spin_free(i915, spin);
+
+ gem_vm_destroy(i915, wait.vm_id);
+}
+
+/*
+ * As spin(), but with a non-preemptible spinner and an unbounded
+ * timeout: the wait may only complete once hang detection resets the
+ * engine and the resubmitted spinner signals the poll page.
+ */
+static void hang(int i915, const struct intel_execution_engine2 *e)
+{
+ struct drm_i915_gem_vm_wait wait;
+ igt_spin_t *spin;
+
+ spin = igt_spin_new(i915,
+ .engine = e->flags,
+ .flags = (IGT_SPIN_POLL_RUN |
+ IGT_SPIN_NO_PREEMPTION));
+ igt_spin_busywait_until_started(spin);
+
+ memset(&wait, 0, sizeof(wait));
+ wait.vm_id = get_vm(i915, 0); /* was ";;": stray double semicolon */
+ wait.iova = spin->obj[0].offset;
+ wait.op = I915_VM_WAIT_NEQ;
+ wait.mask = I915_VM_WAIT_U32;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ igt_spin_end(spin);
+ igt_spin_reset(spin);
+ igt_assert_eq(__gem_vm_wait(i915, &wait), -ETIME);
+
+ igt_fork(child, 1) {
+ usleep(50000);
+ gem_execbuf(i915, &spin->execbuf);
+ }
+
+ /* -1 presumably means wait forever — confirm against the uapi. */
+ wait.timeout = -1;
+ igt_assert_eq(__gem_vm_wait(i915, &wait), 0);
+
+ igt_waitchildren();
+ igt_spin_free(i915, spin);
+
+ gem_vm_destroy(i915, wait.vm_id);
+}
+
+/*
+ * Probe for VM_WAIT support: with vm_id == 0 a kernel that implements
+ * the ioctl reports -ENOENT (unknown vm), whereas an old kernel fails
+ * the ioctl itself with a different errno.
+ */
+static bool has_vm_wait(int i915)
+{
+ struct drm_i915_gem_vm_wait wait = { .mask = -1ull };
+
+ return __gem_vm_wait(i915, &wait) == -ENOENT;
+}
+
+/* Probe for ppGTT support by querying the default context's VM. */
+static bool has_vm(int i915)
+{
+ struct drm_i915_gem_context_param arg = {
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ bool supported = __gem_context_get_param(i915, &arg) == 0;
+
+ if (supported)
+ gem_vm_destroy(i915, arg.value); /* drop the extra reference */
+
+ return supported;
+}
+
+igt_main
+{
+ const struct intel_execution_engine2 *e;
+ int i915 = -1;
+
+ igt_fixture {
+ i915 = drm_open_driver_master(DRIVER_INTEL);
+ /* Skip everything on kernels without ppGTT or VM_WAIT. */
+ igt_require(has_vm(i915));
+ igt_require(has_vm_wait(i915));
+ igt_require_gem(i915);
+ }
+
+ igt_subtest("invalid-mask")
+ invalid_mask(i915);
+
+ igt_subtest("invalid-flags")
+ invalid_flags(i915);
+
+ igt_subtest("invalid-iova")
+ invalid_iova(i915);
+
+ igt_subtest("basic")
+ basic(i915);
+
+ /* Per-engine dynamic subtests for the GPU-signalled waits. */
+ igt_subtest_with_dynamic("signal") {
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ signal(i915, e);
+ }
+ }
+
+ igt_subtest_with_dynamic("spin") {
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ spin(i915, e);
+ }
+ }
+
+ /* hang() relies on engine resets; allow them for this group only. */
+ igt_subtest_group {
+ igt_hang_t hh;
+
+ igt_fixture
+ hh = igt_allow_hang(i915, 0, 0);
+
+ igt_subtest_with_dynamic("hang") {
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ hang(i915, e);
+ }
+ }
+
+ igt_fixture
+ igt_disallow_hang(i915, hh);
+ }
+
+ igt_fixture {
+ close(i915);
+ }
+}
diff --git a/tests/meson.build b/tests/meson.build
index a79d22ba1..154b7ad16 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -223,6 +223,7 @@ i915_progs = [
'gem_unref_active_buffers',
'gem_userptr_blits',
'gem_vm_create',
+ 'gem_vm_wait',
'gem_wait',
'gem_workarounds',
'gem_write_read_ring_switch',
--
2.25.0
More information about the Intel-gfx
mailing list