On Thu, Aug 3, 2017 at 5:30 AM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
New execbuf API allows passing arrays of fences as handles rather than
allocating lots of fds.
<br>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---<br>
 tests/gem_exec_fence.c | 484 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 482 insertions(+), 2 deletions(-)<br>
<br>
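For anyone skimming the patch: the new interface reuses the execbuf2 cliprects fields to carry the fence array. A minimal sketch of how the tests below drive it, using the LOCAL_* names this patch defines locally (the final uapi names may differ):

    struct local_gem_exec_fence fence = {
        .handle = syncobj_create(fd),      /* DRM syncobj handle */
        .flags  = LOCAL_EXEC_FENCE_SIGNAL, /* or LOCAL_EXEC_FENCE_WAIT */
    };

    execbuf.flags |= LOCAL_EXEC_FENCE_ARRAY;
    execbuf.cliprects_ptr = to_user_pointer(&fence); /* array of fence entries */
    execbuf.num_cliprects = 1;                       /* number of entries */
    gem_execbuf(fd, &execbuf);
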
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c<br>
index 5361390e..1c6d98b0 100644<br>
--- a/tests/gem_exec_fence.c<br>
+++ b/tests/gem_exec_fence.c<br>
@@ -36,6 +36,14 @@ IGT_TEST_DESCRIPTION("Check that execbuf waits for explicit fences");<br>
 #define LOCAL_EXEC_FENCE_OUT (1 << 17)<br>
 #define LOCAL_EXEC_FENCE_SUBMIT (1 << 19)<br>
<br>
+#define LOCAL_EXEC_FENCE_ARRAY (1 << 19)<br>
+struct local_gem_exec_fence {<br>
+       uint32_t handle;<br>
+       uint32_t flags;<br>
+#define LOCAL_EXEC_FENCE_WAIT (1 << 0)<br>
+#define LOCAL_EXEC_FENCE_SIGNAL (1 << 1)<br>
+};<br>
+<br>
 #ifndef SYNC_IOC_MERGE<br>
 struct sync_merge_data {<br>
        char    name[32];<br>
@@ -366,10 +374,15 @@ static unsigned int measure_ring_size(int fd)<br>
        obj[1].handle = gem_create(fd, 4096);<br>
        gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));<br>
<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj[1]);<br>
+       execbuf.buffer_count = 1;<br>
+       gem_execbuf(fd, &execbuf);<br>
+       gem_sync(fd, obj[1].handle);<br>
+<br>
        plug(fd, &c);<br>
        obj[0].handle = c.handle;<br>
<br>
-       memset(&execbuf, 0, sizeof(execbuf));<br>
        execbuf.buffers_ptr = to_user_pointer(obj);<br>
        execbuf.buffer_count = 2;<br>
<br>
@@ -380,7 +393,8 @@ static unsigned int measure_ring_size(int fd)<br>
        itv.it_value.tv_usec = 1000;<br>
        setitimer(ITIMER_REAL, &itv, NULL);<br>
<br>
-       last = count = 0;<br>
+       last = -1;<br>
+       count = 0;<br>
        do {<br>
                if (__execbuf(fd, &execbuf) == 0) {<br>
                        count++;<br>
@@ -727,6 +741,440 @@ static bool has_submit_fence(int fd)<br>
        return value;<br>
 }<br>
<br>
+static bool has_syncobj(int fd)<br>
+{<br>
+       struct drm_get_cap cap = { .capability = 0x13 };<br>
+       ioctl(fd, DRM_IOCTL_GET_CAP, &cap);<br>
+       return cap.value;<br>
+}<br>
+<br>
+static bool exec_has_fence_array(int fd)<br>
+{<br>
+       struct drm_i915_getparam gp;<br>
+       int value = 0;<br>
+<br>
+       memset(&gp, 0, sizeof(gp));<br>
+       gp.param = 49; /* I915_PARAM_HAS_EXEC_FENCE_ARRAY */
+       gp.value = &value;<br>
+<br>
+       ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));<br>
+       errno = 0;<br>
+<br>
+       return value;<br>
+}<br>
+<br>
+static void test_invalid_fence_array(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct local_gem_exec_fence fence;<br>
+<br>
+       /* create an otherwise valid execbuf */<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+       gem_execbuf(fd, &execbuf);<br>
+<br>
+       /* Now add a few invalid fence-array pointers */<br>
+       execbuf.flags |= LOCAL_EXEC_FENCE_ARRAY;<br>
+       //igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+<br>
+       if (sizeof(execbuf.num_cliprects) == sizeof(size_t)) {<br>
+               execbuf.num_cliprects = -1;<br>
+               igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+       }<br>
+<br>
+       execbuf.num_cliprects = 1;<br>
+       execbuf.cliprects_ptr = -1;<br>
+       igt_assert_eq(__gem_execbuf(fd, &execbuf), -EFAULT);
+<br>
+       memset(&fence, 0, sizeof(fence));<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+}<br>
+<br>
+static uint32_t __syncobj_create(int fd)<br>
+{<br>
+       struct local_syncobj_create {<br>
+               uint32_t handle, flags;<br>
+       } arg;<br>
+#define LOCAL_IOCTL_SYNCOBJ_CREATE        DRM_IOWR(0xBF, struct local_syncobj_create)<br>
+<br>
+       memset(&arg, 0, sizeof(arg));<br>
+       ioctl(fd, LOCAL_IOCTL_SYNCOBJ_CREATE, &arg);<br>
+<br>
+       return arg.handle;<br>
+}<br>
+<br>
+static uint32_t syncobj_create(int fd)<br>
+{<br>
+       uint32_t ret;<br>
+<br>
+       igt_assert_neq((ret = __syncobj_create(fd)), 0);<br>
+<br>
+       return ret;<br>
+}<br>
+<br>
+static int __syncobj_destroy(int fd, uint32_t handle)<br>
+{<br>
+       struct local_syncobj_destroy {<br>
+               uint32_t handle, flags;<br>
+       } arg;<br>
+#define LOCAL_IOCTL_SYNCOBJ_DESTROY        DRM_IOWR(0xC0, struct local_syncobj_destroy)<br>
+       int err = 0;<br>
+<br>
+       memset(&arg, 0, sizeof(arg));<br>
+       arg.handle = handle;<br>
+       if (ioctl(fd, LOCAL_IOCTL_SYNCOBJ_DESTROY, &arg))<br>
+               err = -errno;<br>
+<br>
+       errno = 0;<br>
+       return err;<br>
+}<br>
+<br>
+static void syncobj_destroy(int fd, uint32_t handle)<br>
+{<br>
+       igt_assert_eq(__syncobj_destroy(fd, handle), 0);
+}<br>
+<br>
+static int __syncobj_to_sync_file(int fd, uint32_t handle)<br>
+{<br>
+       struct local_syncobj_handle {<br>
+               uint32_t handle;<br>
+               uint32_t flags;<br>
+               int32_t fd;<br>
+               uint32_t pad;<br>
+       } arg;<br>
+#define LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD  DRM_IOWR(0xC1, struct local_syncobj_handle)
+<br>
+       memset(&arg, 0, sizeof(arg));<br>
+       arg.handle = handle;<br>
+       arg.flags = 1 << 0; /* EXPORT_SYNC_FILE */<br>
+       if (ioctl(fd, LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg))
+               arg.fd = -errno;<br>
+<br>
+       errno = 0;<br>
+       return arg.fd;<br>
+}<br>
+<br>
+static int syncobj_to_sync_file(int fd, uint32_t handle)<br>
+{<br>
+       int ret;<br>
+<br>
+       igt_assert_lte(0, (ret = __syncobj_to_sync_file(fd, handle)));<br>
+<br>
+       return ret;<br>
+}<br>
+<br>
+static int __syncobj_export(int fd, uint32_t handle, int *syncobj)<br>
+{<br>
+       struct local_syncobj_handle {<br>
+               uint32_t handle;<br>
+               uint32_t flags;<br>
+               int32_t fd;<br>
+               uint32_t pad;<br>
+       } arg;<br>
+       int err;<br>
+<br>
+       memset(&arg, 0, sizeof(arg));<br>
+       arg.handle = handle;<br>
+<br>
+       err = 0;<br>
+       if (ioctl(fd, LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg))
+               err = -errno;<br>
+<br>
+       errno = 0;<br>
+       *syncobj = arg.fd;<br>
+       return err;<br>
+}<br>
+<br>
+static int syncobj_export(int fd, uint32_t handle)<br>
+{<br>
+       int syncobj;<br>
+<br>
+       igt_assert_eq(__syncobj_export(fd, handle, &syncobj), 0);
+<br>
+       return syncobj;<br>
+}<br>
+<br>
+static int __syncobj_import(int fd, int syncobj, uint32_t *handle)<br>
+{<br>
+       struct local_syncobj_handle {<br>
+               uint32_t handle;<br>
+               uint32_t flags;<br>
+               int32_t fd;<br>
+               uint32_t pad;<br>
+       } arg;<br>
+#define LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE  DRM_IOWR(0xC2, struct local_syncobj_handle)
+       int err;<br>
+<br>
+       memset(&arg, 0, sizeof(arg));<br>
+       arg.fd = syncobj;<br>
+<br>
+       err = 0;<br>
+       if (ioctl(fd, LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE, &arg))
+               err = -errno;<br>
+<br>
+       errno = 0;<br>
+       *handle = arg.handle;<br>
+       return err;<br>
+}<br>
+<br>
+static uint32_t syncobj_import(int fd, int syncobj)<br>
+{<br>
+       uint32_t handle;<br>
+<br>
+       igt_assert_eq(__syncobj_import(fd, syncobj, &handle), 0);
+<br>
+<br>
+       return handle;<br>
+}<br>
+<br>
+static bool syncobj_busy(int fd, uint32_t handle)<br>
+{<br>
+       bool result;<br>
+       int sf;<br>
+<br>
+       sf = syncobj_to_sync_file(fd, handle);<br>
+       result = poll(&(struct pollfd){sf, POLLIN}, 1, 0) == 0;<br>
+       close(sf);<br>
+<br>
+       return result;<br>
+}<br>
+<br>
+static void test_syncobj_unused_fence(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct local_gem_exec_fence fence = {<br>
+               .handle = syncobj_create(fd),<br>
+       };<br>
+       igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);<br>
+<br>
+       /* sanity check our syncobj_to_sync_file interface */<br>
+       igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
+<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+       execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       execbuf.num_cliprects = 1;<br>
+<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       gem_execbuf(fd, &execbuf);<br>
+<br>
+       /* no flags, the fence isn't created */<br>
+       igt_assert_eq(__syncobj_to_sync_file(fd, fence.handle), -EINVAL);
+       igt_assert(gem_bo_busy(fd, obj.handle));<br>
+<br>
+       gem_close(fd, obj.handle);<br>
+       syncobj_destroy(fd, fence.handle);<br>
+<br>
+       igt_spin_batch_free(fd, spin);<br>
+}<br>
+<br>
+static void test_syncobj_invalid_wait(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct local_gem_exec_fence fence = {<br>
+               .handle = syncobj_create(fd),<br>
+       };<br>
+<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+       execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       execbuf.num_cliprects = 1;<br>
+<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       /* waiting before the fence is set is invalid */<br>
+       fence.flags = LOCAL_EXEC_FENCE_WAIT;<br>
+       igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+<br>
+       gem_close(fd, obj.handle);<br>
+       syncobj_destroy(fd, fence.handle);<br>
+}<br>
+<br>
+static void test_syncobj_signal(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct local_gem_exec_fence fence = {<br>
+               .handle = syncobj_create(fd),<br>
+       };<br>
+       igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);<br>
+<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+       execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       execbuf.num_cliprects = 1;<br>
+<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       fence.flags = LOCAL_EXEC_FENCE_SIGNAL;<br>
+       gem_execbuf(fd, &execbuf);<br>
+<br>
+       igt_assert(gem_bo_busy(fd, obj.handle));<br>
+       igt_assert(syncobj_busy(fd, fence.handle));<br>
+<br>
+       igt_spin_batch_free(fd, spin);<br>
+<br>
+       gem_sync(fd, obj.handle);<br>
+       igt_assert(!gem_bo_busy(fd, obj.handle));<br>
+       igt_assert(!syncobj_busy(fd, fence.handle));<br>
+<br>
+       gem_close(fd, obj.handle);<br>
+       syncobj_destroy(fd, fence.handle);<br>
+}<br>
+<br>
+static void test_syncobj_wait(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct local_gem_exec_fence fence = {<br>
+               .handle = syncobj_create(fd),<br>
+       };<br>
+       igt_spin_t *spin;<br>
+       unsigned engine;<br>
+       unsigned handle[16];<br>
+       int n;<br>
+<br>
+       gem_quiescent_gpu(fd);<br>
+<br>
+       spin = igt_spin_batch_new(fd, 0, 0, 0);<br>
+<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       /* Queue a signaler from the blocked engine */<br>
+       execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       execbuf.num_cliprects = 1;<br>
+       fence.flags = LOCAL_EXEC_FENCE_SIGNAL;<br>
+       gem_execbuf(fd, &execbuf);<br>
+       igt_assert(gem_bo_busy(fd, spin->handle));<br>
+<br>
+       gem_close(fd, obj.handle);<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       n = 0;<br>
+       for_each_engine(fd, engine) {<br>
+               obj.handle = gem_create(fd, 4096);<br>
+               gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+               /* No inter-engine synchronisation, will complete */<br>
+               if (engine == I915_EXEC_BLT) {<br>
+                       execbuf.flags = engine;<br>
+                       execbuf.cliprects_ptr = 0;<br>
+                       execbuf.num_cliprects = 0;<br>
+                       gem_execbuf(fd, &execbuf);<br>
+                       gem_sync(fd, obj.handle);<br>
+                       igt_assert(gem_bo_busy(fd, spin->handle));<br>
+               }<br>
+               igt_assert(gem_bo_busy(fd, spin->handle));<br>
+<br>
+               /* Now wait upon the blocked engine */<br>
+               execbuf.flags = LOCAL_EXEC_FENCE_ARRAY | engine;<br>
+               execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+               execbuf.num_cliprects = 1;<br>
+               fence.flags = LOCAL_EXEC_FENCE_WAIT;<br>
+               gem_execbuf(fd, &execbuf);<br>
+<br>
+               igt_assert(gem_bo_busy(fd, obj.handle));<br>
+               handle[n++] = obj.handle;<br>
+       }<br>
+       syncobj_destroy(fd, fence.handle);<br>
+<br>
+       for (int i = 0; i < n; i++)<br>
+               igt_assert(gem_bo_busy(fd, handle[i]));<br>
+<br>
+       igt_spin_batch_free(fd, spin);<br>
+<br>
+       for (int i = 0; i < n; i++) {<br>
+               gem_sync(fd, handle[i]);<br>
+               gem_close(fd, handle[i]);<br>
+       }<br>
+}<br>
+<br>
+static void test_syncobj_import(int fd)<br>
+{<br>
+       const uint32_t bbe = MI_BATCH_BUFFER_END;<br>
+       struct drm_i915_gem_exec_object2 obj;<br>
+       struct drm_i915_gem_execbuffer2 execbuf;<br>
+       struct local_gem_exec_fence fence = {<br>
+               .handle = syncobj_create(fd),<br>
+       };<br>
+       int export[2];<br>
+       igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);<br>
+<br>
+       for (int n = 0; n < ARRAY_SIZE(export); n++)<br>
+               export[n] = syncobj_export(fd, fence.handle);<br>
+<br>
+       memset(&execbuf, 0, sizeof(execbuf));<br>
+       execbuf.buffers_ptr = to_user_pointer(&obj);<br>
+       execbuf.buffer_count = 1;<br>
+       execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;<br>
+       execbuf.cliprects_ptr = to_user_pointer(&fence);<br>
+       execbuf.num_cliprects = 1;<br>
+<br>
+       memset(&obj, 0, sizeof(obj));<br>
+       obj.handle = gem_create(fd, 4096);<br>
+       gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));<br>
+<br>
+       fence.flags = LOCAL_EXEC_FENCE_SIGNAL;<br>
+       gem_execbuf(fd, &execbuf);<br>
+<br>
+       igt_assert(syncobj_busy(fd, fence.handle));<br>
+       igt_assert(gem_bo_busy(fd, obj.handle));<br>
+<br>
+       for (int n = 0; n < ARRAY_SIZE(export); n++) {<br>
+               uint32_t import = syncobj_import(fd, export[n]);<br>
+               igt_assert(syncobj_busy(fd, import));

There's a missing syncobj_destroy(fd, import); here, otherwise the imported
handle leaks (see the sketch just after the loop).

+       }<br>
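i.e. a sketch of this first import loop with that destroy added, so the temporary
handles don't accumulate:

    for (int n = 0; n < ARRAY_SIZE(export); n++) {
        uint32_t import = syncobj_import(fd, export[n]);
        igt_assert(syncobj_busy(fd, import));
        syncobj_destroy(fd, import); /* drop the temporary import again */
    }
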
+<br>
+       igt_spin_batch_free(fd, spin);<br>
+<br>
+       gem_sync(fd, obj.handle);<br>
+       igt_assert(!gem_bo_busy(fd, obj.handle));<br>
+       igt_assert(!syncobj_busy(fd, fence.handle));<br>
+<br>
+       for (int n = 0; n < ARRAY_SIZE(export); n++) {<br>
+               uint32_t import = syncobj_import(fd, export[n]);<br>
+               igt_assert(!syncobj_busy(fd, import));<br>
+               close(export[n]);

Same here: this loop also needs a syncobj_destroy(fd, import); so the second
set of imported handles isn't leaked either.

With those two fixed,

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>

+       }<br>
+<br>
+       gem_close(fd, obj.handle);<br>
+       syncobj_destroy(fd, fence.handle);<br>
+}<br>
+<br>
 igt_main<br>
 {<br>
        const struct intel_execution_engine *e;<br>
@@ -830,6 +1278,38 @@ igt_main<br>
                test_fence_flip(i915);<br>
        }<br>
<br>
+       igt_subtest_group { /* syncobj */<br>
+               igt_fixture {<br>
+                       igt_require(has_syncobj(i915));
+                       igt_require(exec_has_fence_array(i915));
+                       igt_fork_hang_detector(i915);
+               }
+
+               igt_subtest("invalid-fence-array") {
+                       test_invalid_fence_array(i915);
+               }
+
+               igt_subtest("syncobj-unused-fence") {
+                       test_syncobj_unused_fence(i915);
+               }
+
+               igt_subtest("syncobj-invalid-wait") {
+                       test_syncobj_invalid_wait(i915);
+               }
+               }<br>
+<br>
+               igt_subtest("syncobj-signal") {<br>
+                       test_syncobj_signal(i915);<br>
+               }<br>
+<br>
+               igt_subtest("syncobj-wait") {<br>
+                       test_syncobj_wait(i915);<br>
+               }<br>
+<br>
+               igt_subtest("syncobj-import") {<br>
+                       test_syncobj_import(i915);<br>
+               }<br>
+       }<br>
+<br>
        igt_fixture {<br>
                close(i915);<br>
        }<br>
--
2.13.3