[Intel-gfx] [PATCH i-g-t] i915/gem_exec_schedule: Beware priority inversion from iova faults
Chris Wilson
chris at chris-wilson.co.uk
Fri Nov 8 22:30:23 UTC 2019
Check that if two contexts (one high priority, one low) share the same
buffer that has taken a page fault, we do not create an implicit
dependency between the two contexts when servicing that page fault and
binding the vma.
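
For reference, the stall is manufactured with userfaultfd: the shared
buffer is a userptr whose backing page is registered as MISSING, so any
attempt to populate it blocks until userspace services the fault. A
minimal standalone sketch of that flow (not part of the patch; assumes a
4096-byte page size and a kernel with userfaultfd enabled, build with
-pthread, error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *toucher(void *page)
{
	/* Reading the registered page faults and blocks until UFFDIO_COPY. */
	(void)*(volatile char *)page;
	return NULL;
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = { .mode = UFFDIO_REGISTER_MODE_MISSING };
	struct uffdio_copy copy = { 0 };
	struct uffd_msg msg;
	char fill[4096];
	pthread_t thread;
	void *page;
	int ufd;

	ufd = syscall(__NR_userfaultfd, 0);
	ioctl(ufd, UFFDIO_API, &api);

	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Register the page: missing-page accesses are now reported on ufd
	 * instead of being filled by the kernel. */
	reg.range.start = (uintptr_t)page;
	reg.range.len = 4096;
	ioctl(ufd, UFFDIO_REGISTER, &reg);

	/* Stand-in for the low priority submission: faults and stalls. */
	pthread_create(&thread, NULL, toucher, page);

	/* Wait for the fault notification... */
	read(ufd, &msg, sizeof(msg)); /* msg.event == UFFD_EVENT_PAGEFAULT */

	/* ...and only then provide backing store, releasing the toucher. */
	memset(fill, 0xc5, sizeof(fill));
	copy.dst = msg.arg.pagefault.address & ~4095ULL; /* page align */
	copy.src = (uintptr_t)fill;
	copy.len = 4096;
	ioctl(ufd, UFFDIO_COPY, &copy);

	pthread_join(thread, NULL);
	munmap(page, 4096);
	close(ufd);
	return 0;
}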
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
tests/i915/gem_exec_schedule.c | 154 ++++++++++++++++++++++++++++++++-
1 file changed, 151 insertions(+), 3 deletions(-)
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 0af7b4c6d..a4f894b18 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1637,11 +1637,16 @@ static int userfaultfd(int flags)
}
struct ufd_thread {
- pthread_t thread;
uint32_t batch;
+ uint32_t scratch;
uint32_t *page;
unsigned int engine;
+ unsigned int flags;
int i915;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int count;
};
static uint32_t create_userptr(int i915, void *page)
@@ -1685,6 +1690,7 @@ static void test_pi_userfault(int i915, unsigned int engine)
struct uffdio_copy copy;
struct uffd_msg msg;
struct ufd_thread t;
+ pthread_t thread;
char poison[4096];
int ufd;
@@ -1726,7 +1732,7 @@ static void test_pi_userfault(int i915, unsigned int engine)
igt_assert(reg.ioctls == UFFD_API_RANGE_IOCTLS);
/* Kick off the low priority submission */
- igt_assert(pthread_create(&t.thread, NULL, ufd_thread, &t) == 0);
+ igt_assert(pthread_create(&thread, NULL, ufd_thread, &t) == 0);
/* Wait until the low priority thread is blocked on a fault */
igt_assert_eq(read(ufd, &msg, sizeof(msg)), sizeof(msg));
@@ -1770,9 +1776,145 @@ static void test_pi_userfault(int i915, unsigned int engine)
copy.len = 4096;
do_ioctl(ufd, UFFDIO_COPY, &copy);
- pthread_join(t.thread, NULL);
+ pthread_join(thread, NULL);
+
+ gem_close(i915, t.batch);
+ munmap(t.page, 4096);
+ close(ufd);
+}
+
+static void *iova_thread(struct ufd_thread *t, int prio)
+{
+ uint32_t ctx =
+ gem_context_clone(t->i915, 0,
+ t->flags & SHARED ? I915_CONTEXT_CLONE_VM : 0,
+ 0);
+
+ gem_context_set_priority(t->i915, ctx, prio);
+
+ store_dword_plug(t->i915, ctx, t->engine,
+ t->scratch, 0, prio,
+ t->batch, 0 /* no write hazard! */);
+
+ pthread_mutex_lock(&t->mutex);
+ if (!--t->count)
+ pthread_cond_signal(&t->cond);
+ pthread_mutex_unlock(&t->mutex);
+
+ gem_context_destroy(t->i915, ctx);
+ return NULL;
+}
+
+static void *iova_low(void *arg)
+{
+ return iova_thread(arg, MIN_PRIO);
+}
+
+static void *iova_high(void *arg)
+{
+ return iova_thread(arg, MAX_PRIO);
+}
+
+static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
+{
+ struct uffdio_api api = { .api = UFFD_API };
+ struct uffdio_register reg;
+ struct uffdio_copy copy;
+ struct uffd_msg msg;
+ struct ufd_thread t;
+ igt_spin_t *spin;
+ pthread_t hi, lo;
+ char poison[4096];
+ uint32_t result;
+ int ufd;
+
+ /*
+ * In this scenario, we have a pair of contending contexts that
+ * share the same resource. That resource is stuck behind a slow
+ * page fault such that neither context has immediate access to it.
+ * What is expected is that as soon as that resource becomes available,
+ * the two contexts are queued with the high priority context taking
+ * precedence. We need to check that we do not cross-contaminate
+ * the two contexts with the page fault on the shared resource
+ * initiated by the low priority context. (Consider that the low
+ * priority context may install an exclusive fence for the page
+ * fault, which is then used for strict ordering by the high priority
+ * context, causing an unwanted implicit dependency between the two
+ * and promoting the low priority context to high.)
+ *
+ * SHARED: the two contexts share a vm, but still have separate
+ * timelines that should not mingle.
+ */
+
+ ufd = userfaultfd(0);
+ igt_require_f(ufd != -1, "kernel support for userfaultfd\n");
+ igt_require_f(ioctl(ufd, UFFDIO_API, &api) == 0 && api.api == UFFD_API,
+ "userfaultfd API v%lld:%lld\n", UFFD_API, api.api);
+
+ t.i915 = i915;
+ t.engine = engine;
+ t.flags = flags;
+
+ t.count = 2;
+ pthread_cond_init(&t.cond, NULL);
+ pthread_mutex_init(&t.mutex, NULL);
+
+ t.page = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0);
+ igt_assert(t.page != MAP_FAILED);
+ t.batch = create_userptr(i915, t.page);
+ t.scratch = gem_create(i915, 4096);
+
+ /* Register our fault handler for t.page */
+ memset(&reg, 0, sizeof(reg));
+ reg.mode = UFFDIO_REGISTER_MODE_MISSING;
+ reg.range.start = to_user_pointer(t.page);
+ reg.range.len = 4096;
+ do_ioctl(ufd, UFFDIO_REGISTER, &reg);
+ igt_assert(reg.ioctls == UFFD_API_RANGE_IOCTLS);
+
+ /* Fill the engine with spinners; the store_dword() is too quick */
+ spin = igt_spin_new(i915, .engine = engine);
+ for (int i = 0; i < MAX_ELSP_QLEN; i++) {
+ spin->execbuf.rsvd1 = create_highest_priority(i915);
+ gem_execbuf(i915, &spin->execbuf);
+ gem_context_destroy(i915, spin->execbuf.rsvd1);
+ }
+
+ /* Kick off the submission threads */
+ igt_assert(pthread_create(&lo, NULL, iova_low, &t) == 0);
+
+ /* Wait until the low priority thread is blocked on the fault */
+ igt_assert_eq(read(ufd, &msg, sizeof(msg)), sizeof(msg));
+ igt_assert_eq(msg.event, UFFD_EVENT_PAGEFAULT);
+ igt_assert(from_user_pointer(msg.arg.pagefault.address) == t.page);
+
+ /* Then release a very similar thread, but at high priority! */
+ igt_assert(pthread_create(&hi, NULL, iova_high, &t) == 0);
+
+ /* Service the fault; releasing the low priority context */
+ memset(&copy, 0, sizeof(copy));
+ copy.dst = msg.arg.pagefault.address;
+ copy.src = to_user_pointer(memset(poison, 0xc5, sizeof(poison)));
+ copy.len = 4096;
+ do_ioctl(ufd, UFFDIO_COPY, &copy);
+
+ /* Wait until both threads have had a chance to submit */
+ pthread_mutex_lock(&t.mutex);
+ while (t.count)
+ pthread_cond_wait(&t.cond, &t.mutex);
+ pthread_mutex_unlock(&t.mutex);
+ igt_debugfs_dump(i915, "i915_engine_info");
+ igt_spin_free(i915, spin);
+ pthread_join(hi, NULL);
+ pthread_join(lo, NULL);
gem_close(i915, t.batch);
+
+ gem_sync(i915, t.scratch); /* write hazard lies */
+ gem_read(i915, t.scratch, 0, &result, sizeof(result));
+ igt_assert_eq(result, MIN_PRIO);
+ gem_close(i915, t.scratch);
+
munmap(t.page, 4096);
close(ufd);
}
@@ -2019,6 +2161,12 @@ igt_main
igt_subtest_f("pi-userfault-%s", e->name)
test_pi_userfault(fd, eb_ring(e));
+
+ igt_subtest_f("pi-distinct-iova-%s", e->name)
+ test_pi_iova(fd, eb_ring(e), 0);
+
+ igt_subtest_f("pi-shared-iova-%s", e->name)
+ test_pi_iova(fd, eb_ring(e), SHARED);
}
}
}
--
2.24.0
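
A footnote on the scenario comment in test_pi_iova(): the "write hazard"
the helpers deliberately decline to declare is the EXEC_OBJECT_WRITE
execbuf flag. Declaring a write records an exclusive fence on the shared
object and implicitly serialises every later user of it, which is the
kind of cross-context coupling the new subtests must not see. A rough
standalone illustration of the flag, not IGT code: it assumes an i915
render node at /dev/dri/renderD128, the libdrm uAPI headers (build with
the cflags from pkg-config libdrm), a locally defined MI_BATCH_BUFFER_END,
and legacy pwrite support; error handling trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <i915_drm.h>

#define MI_BATCH_BUFFER_END (0xA << 23)

static uint32_t bo_create(int fd, uint64_t size)
{
	struct drm_i915_gem_create create = { .size = size };

	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	return create.handle;
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	uint32_t bbe[2] = { MI_BATCH_BUFFER_END };
	uint32_t scratch = bo_create(fd, 4096);
	uint32_t batch = bo_create(fd, 4096);
	struct drm_i915_gem_pwrite pwrite = {
		.handle = batch,
		.size = sizeof(bbe),
		.data_ptr = (uintptr_t)bbe,
	};
	struct drm_i915_gem_exec_object2 obj[2] = {
		{ .handle = scratch },	/* the shared resource */
		{ .handle = batch },	/* last object is the batch */
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)obj,
		.buffer_count = 2,
		.batch_len = sizeof(bbe),
	};

	/* A trivial batch: just MI_BATCH_BUFFER_END. */
	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

	/*
	 * Declaring a write hazard: the kernel records an exclusive fence
	 * on scratch, so a later execbuf naming the same handle (from any
	 * context) waits for this batch, whatever its priority.
	 */
	obj[0].flags = EXEC_OBJECT_WRITE;
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	/*
	 * No write hazard declared: scratch only collects shared (read)
	 * fences, leaving independent contexts free to be reordered by
	 * priority, which is the behaviour the new subtests rely on.
	 */
	obj[0].flags = 0;
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	close(fd);
	return 0;
}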