[Mesa-dev] [PATCH 11/14] anv: Add sizeless anv_bo_cache_import()
Chad Versace
chad at kiwitree.net
Thu Sep 28 00:11:57 UTC 2017
This patch renames anv_bo_cache_import() to
anv_bo_cache_import_with_size() and adds a new variant,
anv_bo_cache_import(), that lacks the 'size' parameter. As before,
anv_bo_cache_import_with_size() continues to validate the imported
size.
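For illustration, a caller-side sketch of the renamed entry point
(illustrative only; 'expected_size' and the surrounding caller code are
placeholders, not part of the diff below):

  /* Existing callers keep the size check by switching to the renamed
   * entry point: a dma-buf whose actual size disagrees with
   * 'expected_size' is still rejected with
   * VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR.
   */
  struct anv_bo *bo;
  VkResult result = anv_bo_cache_import_with_size(device, &device->bo_cache,
                                                  fd, expected_size, &bo);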
This is essentially a refactor; it should change no existing behavior.
This split makes it easier to implement VK_ANDROID_native_buffer. When
the user imports a gralloc handle into a VkImage using
VK_ANDROID_native_buffer, the user provides no size. The driver must
infer the size from the internals of the gralloc buffer, as sketched
below.
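A hypothetical sketch of that future path (how the dma-buf fd is
extracted from the gralloc handle is omitted and 'gralloc_fd' is a
placeholder; only the sizeless import itself is added by this patch):

  /* Import the gralloc buffer's dma-buf fd without a caller-supplied
   * size; the cache derives the size via lseek(fd, 0, SEEK_END).
   */
  struct anv_bo *bo;
  VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                        gralloc_fd, &bo);
  if (result != VK_SUCCESS)
     return result;

  /* The driver can now read the inferred size back from bo->size. */
  uint64_t size = bo->size;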
---
src/intel/vulkan/anv_allocator.c | 118 +++++++++++++++++++++++++--------------
src/intel/vulkan/anv_device.c | 7 ++-
src/intel/vulkan/anv_intel.c | 4 +-
src/intel/vulkan/anv_private.h | 3 +
src/intel/vulkan/anv_queue.c | 5 +-
5 files changed, 88 insertions(+), 49 deletions(-)
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 2cf1130bf29..756f49f3103 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1269,63 +1269,97 @@ anv_bo_cache_alloc(struct anv_device *device,
return VK_SUCCESS;
}
+static VkResult
+anv_bo_cache_import_locked(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, struct anv_bo **bo_out)
+{
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle)
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ __sync_fetch_and_add(&bo->refcount, 1);
+ return VK_SUCCESS;
+ }
+
+ off_t size = lseek(fd, 0, SEEK_END);
+ if (size == (off_t)-1) {
+ anv_gem_close(device, gem_handle);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+ anv_bo_init(&bo->bo, gem_handle, size);
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ *bo_out = &bo->bo;
+ return VK_SUCCESS;
+}
+
VkResult
anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
- int fd, uint64_t size, struct anv_bo **bo_out)
+ int fd, struct anv_bo **bo_out)
{
+ VkResult result;
+
pthread_mutex_lock(&cache->mutex);
+ result = anv_bo_cache_import_locked(device, cache, fd, bo_out);
+ pthread_mutex_unlock(&cache->mutex);
+
+ return result;
+}
+
+VkResult
+anv_bo_cache_import_with_size(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size,
+ struct anv_bo **bo_out)
+{
+ struct anv_bo *bo = NULL;
+ VkResult result;
/* The kernel is going to give us whole pages anyway */
size = align_u64(size, 4096);
- uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
- if (!gem_handle) {
+ pthread_mutex_lock(&cache->mutex);
+
+ result = anv_bo_cache_import_locked(device, cache, fd, &bo);
+ if (result != VK_SUCCESS) {
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return result;
}
- struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
- if (bo) {
- if (bo->bo.size != size) {
- pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
- }
- __sync_fetch_and_add(&bo->refcount, 1);
- } else {
- /* For security purposes, we reject BO imports where the size does not
- * match exactly. This prevents a malicious client from passing a
- * buffer to a trusted client, lying about the size, and telling the
- * trusted client to try and texture from an image that goes
- * out-of-bounds. This sort of thing could lead to GPU hangs or worse
- * in the trusted client. The trusted client can protect itself against
- * this sort of attack but only if it can trust the buffer size.
- */
- off_t import_size = lseek(fd, 0, SEEK_END);
- if (import_size == (off_t)-1 || import_size != size) {
- anv_gem_close(device, gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
- }
-
- bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!bo) {
- anv_gem_close(device, gem_handle);
- pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
- bo->refcount = 1;
-
- anv_bo_init(&bo->bo, gem_handle, size);
-
- _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ /* For security purposes, we reject BO imports where the size does not
+ * match exactly. This prevents a malicious client from passing a
+ * buffer to a trusted client, lying about the size, and telling the
+ * trusted client to try and texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ *
+ * To successfully prevent that attack, we must check the fd's size and
+ * complete the fd's import into the cache during the same critical
+ * section. Otherwise, if the cache were unlocked between importing the fd
+ * and checking its size, then the attacker could exploit a race by
+ * importing the fd concurrently in separate threads.
+ */
+ if (bo->size != size) {
+ anv_bo_cache_release(device, cache, bo);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
}
+ *bo_out = bo;
pthread_mutex_unlock(&cache->mutex);
- *bo_out = &bo->bo;
-
return VK_SUCCESS;
}
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 5c20f80024b..ddf5fa4b62a 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1534,9 +1534,10 @@ VkResult anv_AllocateMemory(
assert(fd_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
- result = anv_bo_cache_import(device, &device->bo_cache,
- fd_info->fd, pAllocateInfo->allocationSize,
- &mem->bo);
+ result = anv_bo_cache_import_with_size(device, &device->bo_cache,
+ fd_info->fd,
+ pAllocateInfo->allocationSize,
+ &mem->bo);
if (result != VK_SUCCESS)
goto fail;
diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
index be6568468e1..60a4b1574aa 100644
--- a/src/intel/vulkan/anv_intel.c
+++ b/src/intel/vulkan/anv_intel.c
@@ -51,8 +51,8 @@ VkResult anv_CreateDmaBufImageINTEL(
uint64_t size = (uint64_t)pCreateInfo->strideInBytes * pCreateInfo->extent.height;
- result = anv_bo_cache_import(device, &device->bo_cache,
- pCreateInfo->fd, size, &mem->bo);
+ result = anv_bo_cache_import_with_size(device, &device->bo_cache,
+ pCreateInfo->fd, size, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index cd1295d078b..8e310d7e1a6 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -715,6 +715,9 @@ VkResult anv_bo_cache_alloc(struct anv_device *device,
struct anv_bo_cache *cache,
uint64_t size, struct anv_bo **bo);
VkResult anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, struct anv_bo **bo);
+VkResult anv_bo_cache_import_with_size(struct anv_device *device,
struct anv_bo_cache *cache,
int fd, uint64_t size, struct anv_bo **bo);
VkResult anv_bo_cache_export(struct anv_device *device,
diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c
index aff7c531190..e26254a87ed 100644
--- a/src/intel/vulkan/anv_queue.c
+++ b/src/intel/vulkan/anv_queue.c
@@ -1006,6 +1006,7 @@ VkResult anv_ImportSemaphoreFdKHR(
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
int fd = pImportSemaphoreFdInfo->fd;
+ VkResult result;
struct anv_semaphore_impl new_impl = {
.type = ANV_SEMAPHORE_TYPE_NONE,
@@ -1033,8 +1034,8 @@ VkResult anv_ImportSemaphoreFdKHR(
} else {
new_impl.type = ANV_SEMAPHORE_TYPE_BO;
- VkResult result = anv_bo_cache_import(device, &device->bo_cache,
- fd, 4096, &new_impl.bo);
+ result = anv_bo_cache_import_with_size(device, &device->bo_cache, fd,
+ 4096, &new_impl.bo);
if (result != VK_SUCCESS)
return result;
--
2.13.5