[Mesa-dev] [PATCH 07/10] anv: Add sizeless anv_bo_cache_import()

Chad Versace chadversary at chromium.org
Mon Oct 16 18:55:05 UTC 2017


This patch renames anv_bo_cache_import() to
anv_bo_cache_import_with_size() and adds a new variant,
anv_bo_cache_import(), that lacks the 'size' parameter. As before the
patch, anv_bo_cache_import_with_size() continues to validate the
imported size.
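
For illustration, a minimal sketch of how a caller with a known
allocation size keeps using the validating variant (the wrapper helper
below is hypothetical; only the anv_bo_cache_import_with_size()
signature comes from this patch):

   /* Hypothetical caller; assumes the driver-internal anv_private.h. */
   static VkResult
   import_opaque_fd(struct anv_device *device, int fd, uint64_t alloc_size,
                    struct anv_bo **bo_out)
   {
      /* The caller knows the expected size (e.g. VkMemoryAllocateInfo::
       * allocationSize), so the cache rejects the import if the fd's
       * actual size does not match the page-aligned expected size.
       */
      return anv_bo_cache_import_with_size(device, &device->bo_cache,
                                           fd, alloc_size, bo_out);
   }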

This patch is essentially a refactor; it should not change any existing
behavior.

This split makes it easier to implement VK_ANDROID_native_buffer. When
the user imports a gralloc handle into a VkImage using
VK_ANDROID_native_buffer, the user provides no size. The driver must
infer the size from the internals of the gralloc buffer.
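
For example, a hedged sketch of how that path might use the sizeless
variant (the helper and the way the dma-buf fd is obtained from the
gralloc handle are hypothetical; only anv_bo_cache_import() comes from
this patch):

   /* Hypothetical VK_ANDROID_native_buffer import; extracting the dma-buf
    * fd from the gralloc handle is gralloc-specific and not shown.
    */
   static VkResult
   import_gralloc_bo(struct anv_device *device, int dma_buf_fd,
                     struct anv_bo **bo_out)
   {
      /* No size is provided by the Android framework, so use the sizeless
       * variant; the BO cache infers the size from the fd itself
       * (lseek(fd, 0, SEEK_END) in anv_bo_cache_import_locked()).
       */
      return anv_bo_cache_import(device, &device->bo_cache, dma_buf_fd,
                                 bo_out);
   }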
---
 src/intel/vulkan/anv_allocator.c | 118 +++++++++++++++++++++++++--------------
 src/intel/vulkan/anv_device.c    |   7 ++-
 src/intel/vulkan/anv_intel.c     |   5 +-
 src/intel/vulkan/anv_private.h   |   3 +
 src/intel/vulkan/anv_queue.c     |   5 +-
 5 files changed, 90 insertions(+), 48 deletions(-)

diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 401cea40e6..1deecc36d7 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1269,62 +1269,98 @@ anv_bo_cache_alloc(struct anv_device *device,
    return VK_SUCCESS;
 }
 
+static VkResult
+anv_bo_cache_import_locked(struct anv_device *device,
+                           struct anv_bo_cache *cache,
+                           int fd, struct anv_bo **bo_out)
+{
+   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+   if (!gem_handle)
+      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+
+   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+   if (bo) {
+      __sync_fetch_and_add(&bo->refcount, 1);
+      return VK_SUCCESS;
+   }
+
+   off_t size = lseek(fd, 0, SEEK_END);
+   if (size == (off_t)-1) {
+      anv_gem_close(device, gem_handle);
+      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+   }
+
+   bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!bo) {
+      anv_gem_close(device, gem_handle);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   bo->refcount = 1;
+   anv_bo_init(&bo->bo, gem_handle, size);
+   _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+   *bo_out = &bo->bo;
+
+   return VK_SUCCESS;
+}
+
 VkResult
 anv_bo_cache_import(struct anv_device *device,
                     struct anv_bo_cache *cache,
-                    int fd, uint64_t size, struct anv_bo **bo_out)
+                    int fd, struct anv_bo **bo_out)
 {
+   VkResult result;
+
    pthread_mutex_lock(&cache->mutex);
+   result = anv_bo_cache_import_locked(device, cache, fd, bo_out);
+   pthread_mutex_unlock(&cache->mutex);
+
+   return result;
+}
+
+VkResult
+anv_bo_cache_import_with_size(struct anv_device *device,
+                              struct anv_bo_cache *cache,
+                              int fd, uint64_t size,
+                              struct anv_bo **bo_out)
+{
+   struct anv_bo *bo = NULL;
+   VkResult result;
 
    /* The kernel is going to give us whole pages anyway */
    size = align_u64(size, 4096);
 
-   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
-   if (!gem_handle) {
+   pthread_mutex_lock(&cache->mutex);
+
+   result = anv_bo_cache_import_locked(device, cache, fd, &bo);
+   if (result != VK_SUCCESS) {
       pthread_mutex_unlock(&cache->mutex);
-      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+      return result;
    }
 
-   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
-   if (bo) {
-      if (bo->bo.size != size) {
-         pthread_mutex_unlock(&cache->mutex);
-         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
-      }
-      __sync_fetch_and_add(&bo->refcount, 1);
-   } else {
-      /* For security purposes, we reject BO imports where the size does not
-       * match exactly.  This prevents a malicious client from passing a
-       * buffer to a trusted client, lying about the size, and telling the
-       * trusted client to try and texture from an image that goes
-       * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
-       * in the trusted client.  The trusted client can protect itself against
-       * this sort of attack but only if it can trust the buffer size.
-       */
-      off_t import_size = lseek(fd, 0, SEEK_END);
-      if (import_size == (off_t)-1 || import_size < size) {
-         anv_gem_close(device, gem_handle);
-         pthread_mutex_unlock(&cache->mutex);
-         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
-      }
-
-      bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
-                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-      if (!bo) {
-         anv_gem_close(device, gem_handle);
-         pthread_mutex_unlock(&cache->mutex);
-         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-      }
-
-      bo->refcount = 1;
-
-      anv_bo_init(&bo->bo, gem_handle, size);
-
-      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+   /* For security purposes, we reject BO imports where the size does not
+    * match exactly.  This prevents a malicious client from passing a
+    * buffer to a trusted client, lying about the size, and telling the
+    * trusted client to try and texture from an image that goes
+    * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
+    * in the trusted client.  The trusted client can protect itself against
+    * this sort of attack but only if it can trust the buffer size.
+    *
+    * To successfully prevent that attack, we must check the fd's size and
+    * complete the fd's import into the cache during the same critical
+    * section.  Otherwise, if the cache were unlocked between importing the fd
+    * and checking its size, then the attacker could exploit a race by
+    * importing the fd concurrently in separate threads.
+    */
+   if (bo->size != size) {
+      anv_bo_cache_release(device, cache, bo);
+      pthread_mutex_unlock(&cache->mutex);
+      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
    }
 
    pthread_mutex_unlock(&cache->mutex);
-   *bo_out = &bo->bo;
+   *bo_out = bo;
 
    return VK_SUCCESS;
 }
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 1634b5158c..94bbf8c819 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1542,9 +1542,10 @@ VkResult anv_AllocateMemory(
       assert(fd_info->handleType ==
              VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
 
-      result = anv_bo_cache_import(device, &device->bo_cache,
-                                   fd_info->fd, pAllocateInfo->allocationSize,
-                                   &mem->bo);
+      result = anv_bo_cache_import_with_size(device, &device->bo_cache,
+                                             fd_info->fd,
+                                             pAllocateInfo->allocationSize,
+                                             &mem->bo);
       if (result != VK_SUCCESS)
          goto fail;
 
diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
index d6bad44091..9c1321a065 100644
--- a/src/intel/vulkan/anv_intel.c
+++ b/src/intel/vulkan/anv_intel.c
@@ -75,8 +75,9 @@ VkResult anv_CreateDmaBufImageINTEL(
 
    image = anv_image_from_handle(image_h);
 
-   result = anv_bo_cache_import(device, &device->bo_cache,
-                                pCreateInfo->fd, image->size, &mem->bo);
+   result = anv_bo_cache_import_with_size(device, &device->bo_cache,
+                                          pCreateInfo->fd, image->size,
+                                          &mem->bo);
    if (result != VK_SUCCESS)
       goto fail_import;
 
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 27d2c34203..ecdd9d5e32 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -717,6 +717,9 @@ VkResult anv_bo_cache_alloc(struct anv_device *device,
                             struct anv_bo_cache *cache,
                             uint64_t size, struct anv_bo **bo);
 VkResult anv_bo_cache_import(struct anv_device *device,
+                             struct anv_bo_cache *cache,
+                             int fd, struct anv_bo **bo);
+VkResult anv_bo_cache_import_with_size(struct anv_device *device,
                              struct anv_bo_cache *cache,
                              int fd, uint64_t size, struct anv_bo **bo);
 VkResult anv_bo_cache_export(struct anv_device *device,
diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c
index aff7c53119..e26254a87e 100644
--- a/src/intel/vulkan/anv_queue.c
+++ b/src/intel/vulkan/anv_queue.c
@@ -1006,6 +1006,7 @@ VkResult anv_ImportSemaphoreFdKHR(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
    int fd = pImportSemaphoreFdInfo->fd;
+   VkResult result;
 
    struct anv_semaphore_impl new_impl = {
       .type = ANV_SEMAPHORE_TYPE_NONE,
@@ -1033,8 +1034,8 @@ VkResult anv_ImportSemaphoreFdKHR(
       } else {
          new_impl.type = ANV_SEMAPHORE_TYPE_BO;
 
-         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
-                                               fd, 4096, &new_impl.bo);
+         result = anv_bo_cache_import_with_size(device, &device->bo_cache, fd,
+                                                4096, &new_impl.bo);
          if (result != VK_SUCCESS)
             return result;
 
-- 
2.13.0
