<div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Mon, Oct 16, 2017 at 11:55 AM, Chad Versace <span dir="ltr"><<a href="mailto:chadversary@chromium.org" target="_blank">chadversary@chromium.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">The patch renames anv_bo_cache_import() to<br>
anv_bo_cache_import_with_size(<wbr>); and adds a new variant,<br>
anv_bo_cache_import(), that lacks the 'size' parameter. Same as<br>
pre-patch, anv_bo_cache_import_with_size(<wbr>) continues to validate the<br>
imported size.<br>
<br>
The patch is essentially a refactor patch.  It should change no existing<br>
behavior.<br>
<br>
This split makes it easier to implement VK_ANDROID_native_buffer. When<br>
the user imports a gralloc hande into a VkImage using<br>
VK_ANDROID_native_buffer, the user provides no size. The driver must<br>
infer the size from the internals of the gralloc buffer.<br>
> ---
>  src/intel/vulkan/anv_allocator.c | 118 +++++++++++++++++++++++++--------------
>  src/intel/vulkan/anv_device.c    |   7 ++-
>  src/intel/vulkan/anv_intel.c     |   5 +-
>  src/intel/vulkan/anv_private.h   |   3 +
>  src/intel/vulkan/anv_queue.c     |   5 +-
>  5 files changed, 90 insertions(+), 48 deletions(-)
>
> diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
> index 401cea40e6..1deecc36d7 100644
> --- a/src/intel/vulkan/anv_allocator.c
> +++ b/src/intel/vulkan/anv_allocator.c
> @@ -1269,62 +1269,98 @@ anv_bo_cache_alloc(struct anv_device *device,
>     return VK_SUCCESS;
>  }
>
> +static VkResult
> +anv_bo_cache_import_locked(struct anv_device *device,
> +                           struct anv_bo_cache *cache,
> +                           int fd, struct anv_bo **bo_out)
> +{
> +   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
> +   if (!gem_handle)
> +      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
> +
> +   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
> +   if (bo) {
> +      __sync_fetch_and_add(&bo->refcount, 1);
> +      return VK_SUCCESS;
> +   }
> +
> +   off_t size = lseek(fd, 0, SEEK_END);
> +   if (size == (off_t)-1) {
> +      anv_gem_close(device, gem_handle);
> +      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
> +   }
> +
> +   bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
> +                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
> +   if (!bo) {
> +      anv_gem_close(device, gem_handle);
> +      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
> +   }
> +
> +   bo->refcount = 1;
> +   anv_bo_init(&bo->bo, gem_handle, size);
> +   _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
> +   *bo_out = &bo->bo;
> +
> +   return VK_SUCCESS;
> +}
> +
>  VkResult
>  anv_bo_cache_import(struct anv_device *device,
>                      struct anv_bo_cache *cache,
> -                    int fd, uint64_t size, struct anv_bo **bo_out)
> +                    int fd, struct anv_bo **bo_out)
>  {
> +   VkResult result;
> +
>     pthread_mutex_lock(&cache->mutex);
> +   result = anv_bo_cache_import_locked(device, cache, fd, bo_out);
> +   pthread_mutex_unlock(&cache->mutex);
> +
> +   return result;
> +}
> +
> +VkResult
> +anv_bo_cache_import_with_size(struct anv_device *device,
> +                              struct anv_bo_cache *cache,
> +                              int fd, uint64_t size,
> +                              struct anv_bo **bo_out)

I don't really like this function... I'd rather we do the size checks in
the caller, but we can do that refactor later.
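
For illustration, a caller-side check could look roughly like this at an
import site (a sketch only, reusing helpers that already exist in this
series; not part of this patch):

   struct anv_bo *bo;
   VkResult result = anv_bo_cache_import(device, &device->bo_cache, fd, &bo);
   if (result != VK_SUCCESS)
      return result;

   /* The cache records the bo's real size at import time, so the caller
    * can validate it against whatever size the application declared. */
   if (bo->size != align_u64(size, 4096)) {
      anv_bo_cache_release(device, &device->bo_cache, bo);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }
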
> +{
> +   struct anv_bo *bo = NULL;
> +   VkResult result;
>
>     /* The kernel is going to give us whole pages anyway */
>     size = align_u64(size, 4096);
>
> -   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
> -   if (!gem_handle) {
> +   pthread_mutex_lock(&cache->mutex);
> +
> +   result = anv_bo_cache_import_locked(device, cache, fd, &bo);
> +   if (result != VK_SUCCESS) {
>        pthread_mutex_unlock(&cache->mutex);
> -      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
> +      return result;
>     }
>
> -   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
> -   if (bo) {
> -      if (bo->bo.size != size) {
> -         pthread_mutex_unlock(&cache->mutex);
> -         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
> -      }
> -      __sync_fetch_and_add(&bo->refcount, 1);
> -   } else {
> -      /* For security purposes, we reject BO imports where the size does not
> -       * match exactly.  This prevents a malicious client from passing a
> -       * buffer to a trusted client, lying about the size, and telling the
> -       * trusted client to try and texture from an image that goes
> -       * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
> -       * in the trusted client.  The trusted client can protect itself against
> -       * this sort of attack but only if it can trust the buffer size.
> -       */
> -      off_t import_size = lseek(fd, 0, SEEK_END);
> -      if (import_size == (off_t)-1 || import_size < size) {
> -         anv_gem_close(device, gem_handle);
> -         pthread_mutex_unlock(&cache->mutex);
> -         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
> -      }
> -
> -      bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
> -                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
> -      if (!bo) {
> -         anv_gem_close(device, gem_handle);
> -         pthread_mutex_unlock(&cache->mutex);
> -         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
> -      }
> -
> -      bo->refcount = 1;
> -
> -      anv_bo_init(&bo->bo, gem_handle, size);
> -
> -      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
> +   /* For security purposes, we reject BO imports where the size does not
> +    * match exactly.  This prevents a malicious client from passing a
> +    * buffer to a trusted client, lying about the size, and telling the
> +    * trusted client to try and texture from an image that goes
> +    * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
> +    * in the trusted client.  The trusted client can protect itself against
> +    * this sort of attack but only if it can trust the buffer size.
> +    *
> +    * To successfully prevent that attack, we must check the fd's size and
> +    * complete the fd's import into the cache during the same critical
> +    * section.  Otherwise, if the cache were unlocked between importing the fd
> +    * and checking its size, then the attacker could exploit a race by
> +    * importing the fd concurrently in separate threads.
> +    */
> +   if (bo->size != size) {

FYI: There are patches in-flight to make this a < rather than a !=.
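
Presumably that will relax this to something like

   if (bo->size < size) {

so an import is rejected only when the buffer is too small for the size the
application declared, and larger-than-requested buffers are accepted.
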
> +      anv_bo_cache_release(device, cache, bo);
> +      pthread_mutex_unlock(&cache->mutex);

This is a problem... anv_bo_cache_release will try to take the lock, but
it's already held here.  I think the solution is to just not separate
bo_cache_import from bo_cache_import_locked and let bo_cache_import do all
the locking.  There's no harm in doing an import and then releasing it
later if the size is wrong.
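
Roughly, with anv_bo_cache_import() doing all of its own locking, the
size-checking variant could just be layered on top of it, e.g. (sketch
only, untested):

VkResult
anv_bo_cache_import_with_size(struct anv_device *device,
                              struct anv_bo_cache *cache,
                              int fd, uint64_t size,
                              struct anv_bo **bo_out)
{
   struct anv_bo *bo;
   VkResult result;

   /* anv_bo_cache_import() takes and drops the cache lock internally. */
   result = anv_bo_cache_import(device, cache, fd, &bo);
   if (result != VK_SUCCESS)
      return result;

   /* The lock is no longer held here, so releasing the reference we just
    * took is safe if the size doesn't check out. */
   if (bo->size != align_u64(size, 4096)) {
      anv_bo_cache_release(device, cache, bo);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   *bo_out = bo;
   return VK_SUCCESS;
}
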
> +      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
>     }
>
>     pthread_mutex_unlock(&cache->mutex);
> -   *bo_out = &bo->bo;
> +   *bo_out = bo;
>
>     return VK_SUCCESS;
>  }
> diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
> index 1634b5158c..94bbf8c819 100644
> --- a/src/intel/vulkan/anv_device.c
> +++ b/src/intel/vulkan/anv_device.c
> @@ -1542,9 +1542,10 @@ VkResult anv_AllocateMemory(
>        assert(fd_info->handleType ==
>               VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
>
> -      result = anv_bo_cache_import(device, &device->bo_cache,
> -                                   fd_info->fd, pAllocateInfo->allocationSize,
> -                                   &mem->bo);
> +      result = anv_bo_cache_import_with_size(device, &device->bo_cache,
> +                                             fd_info->fd,
> +                                             pAllocateInfo->allocationSize,
> +                                             &mem->bo);
>        if (result != VK_SUCCESS)
>           goto fail;
>
> diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
> index d6bad44091..9c1321a065 100644
> --- a/src/intel/vulkan/anv_intel.c
> +++ b/src/intel/vulkan/anv_intel.c
> @@ -75,8 +75,9 @@ VkResult anv_CreateDmaBufImageINTEL(
>
>     image = anv_image_from_handle(image_h);
>
> -   result = anv_bo_cache_import(device, &device->bo_cache,
> -                                pCreateInfo->fd, image->size, &mem->bo);
> +   result = anv_bo_cache_import_with_size(device, &device->bo_cache,
> +                                          pCreateInfo->fd, image->size,
> +                                          &mem->bo);
>     if (result != VK_SUCCESS)
>        goto fail_import;
>
> diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
> index 27d2c34203..ecdd9d5e32 100644
> --- a/src/intel/vulkan/anv_private.h
> +++ b/src/intel/vulkan/anv_private.h
> @@ -717,6 +717,9 @@ VkResult anv_bo_cache_alloc(struct anv_device *device,
>                             struct anv_bo_cache *cache,
>                             uint64_t size, struct anv_bo **bo);
>  VkResult anv_bo_cache_import(struct anv_device *device,
> +                             struct anv_bo_cache *cache,
> +                             int fd, struct anv_bo **bo);
> +VkResult anv_bo_cache_import_with_size(struct anv_device *device,
>                               struct anv_bo_cache *cache,
>                               int fd, uint64_t size, struct anv_bo **bo);
>  VkResult anv_bo_cache_export(struct anv_device *device,
> diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c
> index aff7c53119..e26254a87e 100644
> --- a/src/intel/vulkan/anv_queue.c
> +++ b/src/intel/vulkan/anv_queue.c
> @@ -1006,6 +1006,7 @@ VkResult anv_ImportSemaphoreFdKHR(
>     ANV_FROM_HANDLE(anv_device, device, _device);
>     ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
>     int fd = pImportSemaphoreFdInfo->fd;
> +   VkResult result;
>
>     struct anv_semaphore_impl new_impl = {
>        .type = ANV_SEMAPHORE_TYPE_NONE,
> @@ -1033,8 +1034,8 @@ VkResult anv_ImportSemaphoreFdKHR(
>        } else {
>           new_impl.type = ANV_SEMAPHORE_TYPE_BO;
>
> -         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
> -                                               fd, 4096, &new_impl.bo);
> +         result = anv_bo_cache_import_with_size(device, &device->bo_cache, fd,
> +                                                4096, &new_impl.bo);
>           if (result != VK_SUCCESS)
>              return result;
<span class="HOEnZb"><font color="#888888"><br>
--<br>
2.13.0<br>