<div dir="ltr"><div>Looks good to me.  All three are<br><br></div>Reviewed-by: Jason Ekstrand <<a href="mailto:jason@jlekstrand.net">jason@jlekstrand.net</a>><br></div><div class="gmail_extra"><br><div class="gmail_quote">On Tue, Feb 27, 2018 at 12:10 AM, Tapani Pälli <span dir="ltr"><<a href="mailto:tapani.palli@intel.com" target="_blank">tapani.palli@intel.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Patch adds a new test that is expected to fail, new option and<br>
some minor refactor done to check_memory_contents to allow this.<br>
<br>
Signed-off-by: Tapani Pälli <<a href="mailto:tapani.palli@intel.com">tapani.palli@intel.com</a>><br>
---<br>
 src/tests/func/sync/semaphore-fd.c | 105 ++++++++++++++++++++++++++++++++++++--<br>
 1 file changed, 101 insertions(+), 4 deletions(-)<br>
<br>
diff --git a/src/tests/func/sync/semaphore-fd.c b/src/tests/func/sync/semaphore-fd.c<br>
index ea54369..385edde 100644<br>
--- a/src/tests/func/sync/semaphore-fd.c<br>
+++ b/src/tests/func/sync/semaphore-fd.c<br>
@@ -563,7 +563,7 @@ init_memory_contents(struct test_context *ctx,<br>
 static void<br>
 check_memory_contents(struct test_context *ctx,<br>
                       uint32_t *data, VkDeviceMemory memory,<br>
-                      bool multi_ctx)<br>
+                      bool multi_ctx, bool expect_failure)<br>
 {<br>
     /* First, do the computation on the CPU */<br>
     cpu_process_data(data);<br>
@@ -589,6 +589,13 @@ check_memory_contents(struct test_context *ctx,<br>
             .size = sizeof(struct buffer_layout),<br>
         });<br>
<br>
+    /* If expecting a failure, do a simple memcmp. */<br>
+    if (expect_failure) {<br>
+        t_assert(memcmp(data, map->data, sizeof(map->data)) != 0);<br>
+        vkUnmapMemory(ctx->device, tmp_mem);<br>
+        return;<br>
+    }<br>
+<br>
     t_assert(map->atomic == NUM_HASH_ITERATIONS);<br>
     for (unsigned i = 0; i < NUM_HASH_ITERATIONS; i++) {<br>
         unsigned ctx_iter = multi_ctx ? (i >> 1) : i;<br>
@@ -635,7 +642,7 @@ test_sanity(void)<br>
         }<br>
     }<br>
<br>
-    check_memory_contents(&ctx, cpu_data, mem, false);<br>
+    check_memory_contents(&ctx, cpu_data, mem, false, false);<br>
 }<br>
<br>
 test_define {<br>
@@ -813,7 +820,7 @@ test_opaque_fd(void)<br>
<br>
     logi("All compute batches queued\n");<br>
<br>
-    check_memory_contents(&ctx1, cpu_data, mem1, true);<br>
+    check_memory_contents(&ctx1, cpu_data, mem1, true, false);<br>
 }<br>
<br>
 test_define {<br>
@@ -822,6 +829,96 @@ test_define {<br>
     .no_image = true,<br>
 };<br>
<br>
+static void<br>
+test_opaque_fd_no_sync(void)<br>
+{<br>
+    t_require_ext("VK_KHR_external_memory");<br>
+    t_require_ext("VK_KHR_external_memory_capabilities");<br>
+    t_require_ext("VK_KHR_external_memory_fd");<br>
+    t_require_ext("VK_EXT_global_priority");<br>
+<br>
+    struct test_context ctx1, ctx2;<br>
+    init_context(&ctx1, 1.0, VK_QUEUE_GLOBAL_PRIORITY_MEDIUM);<br>
+    init_context(&ctx2, 0.0, VK_QUEUE_GLOBAL_PRIORITY_LOW);<br>
+<br>
+#define GET_FUNCTION_PTR(name, device) \<br>
+    PFN_vk##name name = (PFN_vk##name)vkGetDeviceProcAddr(device, "vk"#name)<br>
+    GET_FUNCTION_PTR(GetMemoryFdKHR, ctx1.device);<br>
+#undef GET_FUNCTION_PTR<br>
+<br>
+    VkMemoryRequirements buffer_reqs =<br>
+        qoGetBufferMemoryRequirements(<wbr>ctx1.device, ctx1.buffer);<br>
+<br>
+    VkDeviceMemory mem1 =<br>
+        qoAllocMemoryFromRequirements(ctx1.device, &buffer_reqs,<br>
+            .properties = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,<br>
+            .pNext = &(VkExportMemoryAllocateInfoKHR) {<br>
+                .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,<br>
+                .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,<br>
+            });<br>
+<br>
+    int fd;<br>
+    VkResult result = GetMemoryFdKHR(ctx1.device,<br>
+        &(VkMemoryGetFdInfoKHR) {<br>
+            .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,<br>
+            .memory = mem1,<br>
+            .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,<br>
+        }, &fd);<br>
+    t_assert(result == VK_SUCCESS);<br>
+    t_assert(fd >= 0);<br>
+<br>
+    VkDeviceMemory mem2 =<br>
+        qoAllocMemoryFromRequirements(ctx2.device, &buffer_reqs,<br>
+            .properties = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,<br>
+            .pNext = &(VkImportMemoryFdInfoKHR) {<br>
+                .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,<br>
+                .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,<br>
+                .fd = fd,<br>
+            });<br>
+<br>
+    qoBindBufferMemory(ctx1.device, ctx1.buffer, mem1, 0);<br>
+    qoBindBufferMemory(ctx2.device, ctx2.buffer, mem2, 0);<br>
+<br>
+    uint32_t cpu_data[LOCAL_WORKGROUP_SIZE * 2];<br>
+    init_memory_contents(&ctx1, cpu_data, mem1);<br>
+<br>
+    VkCommandBuffer cmd_buffer1 = create_command_buffer(&ctx1, 0);<br>
+    VkCommandBuffer cmd_buffer2 = create_command_buffer(&ctx2, 1);<br>
+<br>
+    logi("Begin queuing batches\n");<br>
+<br>
+    /* NUM_HASH_ITERATIONS is odd, so we use ctx1 for both the first and<br>
+     * last submissions.  This makes keeping track of where the memory is a<br>
+     * bit easier.<br>
+     */<br>
+    for (unsigned i = 0; i < NUM_HASH_ITERATIONS; i++) {<br>
+        VkSubmitInfo submit = {<br>
+            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,<br>
+            .commandBufferCount = 1,<br>
+        };<br>
+<br>
+        if ((i & 1) == 0) {<br>
+            submit.pCommandBuffers = &cmd_buffer1;<br>
+            result = vkQueueSubmit(ctx1.queue, 1, &submit, VK_NULL_HANDLE);<br>
+            t_assert(result == VK_SUCCESS);<br>
+        } else {<br>
+            submit.pCommandBuffers = &cmd_buffer2;<br>
+            result = vkQueueSubmit(ctx2.queue, 1, &submit, VK_NULL_HANDLE);<br>
+            t_assert(result == VK_SUCCESS);<br>
+        }<br>
+    }<br>
+<br>
+    logi("All compute batches queued\n");<br>
+<br>
+    check_memory_contents(&ctx1, cpu_data, mem1, true, true);<br>
+}<br>
+<br>
+test_define {<br>
+    .name = "func.sync.semaphore-fd.opaque-fd-no-sync",<br>
+    .start = test_opaque_fd_no_sync,<br>
+    .no_image = true,<br>
+};<br>
+<br>
 static void<br>
 test_sync_fd(void)<br>
 {<br>
@@ -964,7 +1061,7 @@ test_sync_fd(void)<br>
<br>
     logi("All compute batches queued\n");<br>
<br>
-    check_memory_contents(&ctx1, cpu_data, mem1, true);<br>
+    check_memory_contents(&ctx1, cpu_data, mem1, true, false);<br>
 }<br>
<br>
 test_define {<br>
<span class="HOEnZb"><font color="#888888">--<br>
2.14.3<br>
<br>
</font></span></blockquote></div><br></div>