<div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Tue, Oct 17, 2017 at 10:52 AM, Chad Versace <span dir="ltr"><<a href="mailto:chadversary@chromium.org" target="_blank">chadversary@chromium.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">On Mon 16 Oct 2017, Jason Ekstrand wrote:<br>
> On Mon, Oct 16, 2017 at 11:55 AM, Chad Versace <[1]<a href="mailto:chadversary@chromium.org">chadversary@chromium.org</a>><br>
<div><div class="h5">> wrote:<br>
><br>
> The patch renames anv_bo_cache_import() to<br>
> anv_bo_cache_import_with_size(<wbr>); and adds a new variant,<br>
> anv_bo_cache_import(), that lacks the 'size' parameter. Same as<br>
> pre-patch, anv_bo_cache_import_with_size(<wbr>) continues to validate the<br>
> imported size.<br>
><br>
> The patch is essentially a refactor patch. It should change no existing<br>
> behavior.<br>
><br>
> This split makes it easier to implement VK_ANDROID_native_buffer. When<br>
> the user imports a gralloc handle into a VkImage using<br>
> VK_ANDROID_native_buffer, the user provides no size. The driver must<br>
> infer the size from the internals of the gralloc buffer.<br>
> ---<br>
> src/intel/vulkan/anv_<wbr>allocator.c | 118 +++++++++++++++++++++++++-----<br>
> ---------<br>
> src/intel/vulkan/anv_device.<wbr>c | 7 ++-<br>
> src/intel/vulkan/anv_intel.c | 5 +-<br>
> src/intel/vulkan/anv_private.<wbr>h | 3 +<br>
> src/intel/vulkan/anv_queue.c | 5 +-<br>
> 5 files changed, 90 insertions(+), 48 deletions(-)<br>
><br>
> diff --git a/src/intel/vulkan/anv_<wbr>allocator.c b/src/intel/vulkan/anv_<br>
> allocator.c<br>
> index 401cea40e6..1deecc36d7 100644<br>
> --- a/src/intel/vulkan/anv_<wbr>allocator.c<br>
> +++ b/src/intel/vulkan/anv_<wbr>allocator.c<br>
> @@ -1269,62 +1269,98 @@ anv_bo_cache_alloc(struct anv_device *device,<br>
> return VK_SUCCESS;<br>
> }<br>
><br>
> +static VkResult<br>
> +anv_bo_cache_import_locked(<wbr>struct anv_device *device,<br>
> + struct anv_bo_cache *cache,<br>
> + int fd, struct anv_bo **bo_out)<br>
> +{<br>
> + uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);<br>
> + if (!gem_handle)<br>
> + return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> +<br>
> + struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(<wbr>cache,<br>
> gem_handle);<br>
> + if (bo) {<br>
> + __sync_fetch_and_add(&bo-><wbr>refcount, 1);<br>
> + return VK_SUCCESS;<br>
> + }<br>
> +<br>
> + off_t size = lseek(fd, 0, SEEK_END);<br>
> + if (size == (off_t)-1) {<br>
> + anv_gem_close(device, gem_handle);<br>
> + return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> + }<br>
> +<br>
> + bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,<br>
> + VK_SYSTEM_ALLOCATION_SCOPE_<wbr>OBJECT);<br>
> + if (!bo) {<br>
> + anv_gem_close(device, gem_handle);<br>
> + return vk_error(VK_ERROR_OUT_OF_HOST_<wbr>MEMORY);<br>
> + }<br>
> +<br>
> + bo->refcount = 1;<br>
> + anv_bo_init(&bo->bo, gem_handle, size);<br>
> + _mesa_hash_table_insert(<wbr>cache->bo_map, (void *)(uintptr_t)gem_handle,<br>
> bo);<br>
> + *bo_out = &bo->bo;<br>
> +<br>
> + return VK_SUCCESS;<br>
> +}<br>
> +<br>
> VkResult<br>
> anv_bo_cache_import(struct anv_device *device,<br>
> struct anv_bo_cache *cache,<br>
> - int fd, uint64_t size, struct anv_bo **bo_out)<br>
> + int fd, struct anv_bo **bo_out)<br>
> {<br>
> + VkResult result;<br>
> +<br>
> pthread_mutex_lock(&cache-><wbr>mutex);<br>
> + result = anv_bo_cache_import_locked(<wbr>device, cache, fd, bo_out);<br>
> + pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> +<br>
> + return result;<br>
> +}<br>
> +<br>
> +VkResult<br>
> +anv_bo_cache_import_with_<wbr>size(struct anv_device *device,<br>
> + struct anv_bo_cache *cache,<br>
> + int fd, uint64_t size,<br>
> + struct anv_bo **bo_out)<br>
><br>
><br>
> I don't really like this function... I'd rather we do the size checks in the<br>
> caller but we can do that refactor later.<br>
<br>
</div></div>I don't believe the caller can check the size securely. See my comments<br>
below.<br>
<div><div class="h5"><br>
><br>
> +{<br>
> + struct anv_bo *bo = NULL;<br>
> + VkResult result;<br>
><br>
> /* The kernel is going to give us whole pages anyway */<br>
> size = align_u64(size, 4096);<br>
><br>
> - uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);<br>
> - if (!gem_handle) {<br>
> + pthread_mutex_lock(&cache-><wbr>mutex);<br>
> +<br>
> + result = anv_bo_cache_import_locked(<wbr>device, cache, fd, &bo);<br>
> + if (result != VK_SUCCESS) {<br>
> pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> - return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> + return result;<br>
> }<br>
><br>
> - struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(<wbr>cache,<br>
> gem_handle);<br>
> - if (bo) {<br>
> - if (bo->bo.size != size) {<br>
> - pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> - return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> - }<br>
> - __sync_fetch_and_add(&bo-><wbr>refcount, 1);<br>
> - } else {<br>
> - /* For security purposes, we reject BO imports where the size does<br>
> not<br>
> - * match exactly. This prevents a malicious client from passing a<br>
> - * buffer to a trusted client, lying about the size, and telling the<br>
> - * trusted client to try and texture from an image that goes<br>
> - * out-of-bounds. This sort of thing could lead to GPU hangs or<br>
> worse<br>
> - * in the trusted client. The trusted client can protect itself<br>
> against<br>
> - * this sort of attack but only if it can trust the buffer size.<br>
> - */<br>
> - off_t import_size = lseek(fd, 0, SEEK_END);<br>
> - if (import_size == (off_t)-1 || import_size < size) {<br>
> - anv_gem_close(device, gem_handle);<br>
> - pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> - return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> - }<br>
> -<br>
> - bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,<br>
> - VK_SYSTEM_ALLOCATION_SCOPE_<wbr>OBJECT);<br>
> - if (!bo) {<br>
> - anv_gem_close(device, gem_handle);<br>
> - pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> - return vk_error(VK_ERROR_OUT_OF_HOST_<wbr>MEMORY);<br>
> - }<br>
> -<br>
> - bo->refcount = 1;<br>
> -<br>
> - anv_bo_init(&bo->bo, gem_handle, size);<br>
> -<br>
> - _mesa_hash_table_insert(cache-<wbr>>bo_map, (void *)(uintptr_t)<br>
> gem_handle, bo);<br>
> + /* For security purposes, we reject BO imports where the size does not<br>
> + * match exactly. This prevents a malicious client from passing a<br>
> + * buffer to a trusted client, lying about the size, and telling the<br>
> + * trusted client to try and texture from an image that goes<br>
> + * out-of-bounds. This sort of thing could lead to GPU hangs or worse<br>
> + * in the trusted client. The trusted client can protect itself<br>
> against<br>
> + * this sort of attack but only if it can trust the buffer size.<br>
> + *<br>
> + * To successfully prevent that attack, we must check the fd's size and<br>
> + * complete the fd's import into the cache during the same critical<br>
> + * section. Otherwise, if the cache were unlocked between importing<br>
> the fd<br>
> + * and checking its size, then the attacker could exploit a race by<br>
> + * importing the fd concurrently in separate threads.<br>
> + */<br>
> + if (bo->size != size) {<br>
><br>
><br>
> FYI: There are patches in-flight to make this a < rather than a !=.<br>
<br>
</div></div>Should I replace != with < now?<br></blockquote><div><br></div><div>No, that's a functional change; this is a refactor.<br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
<span class="">> <br>
><br>
> + anv_bo_cache_release(device, cache, bo);<br>
> + pthread_mutex_unlock(&cache-><wbr>mutex);<br>
><br>
><br>
> This is a problem... anv_bo_cache_release will try to take the lock but it's<br>
> already held here. <br>
<br>
</span>Right. anv_bo_cache_release() locks/unlocks the mutex while<br>
anv_bo_cache_import_with_size(<wbr>) holds the lock. Oops.<br>
<br>
One solution, which you won't like, is to add a new function<br>
anv_bo_cache_release_locked() and call that within<br>
anv_bo_cache_import_with_size(<wbr>).<br>
<span class=""><br>
> I think the solution is to just not separate bo_cach_import<br>
> from bo_cache_import_locked and let bo_cache_import do all the locking. <br>
> There's no harm in doing an import and then releasing it later if the size is<br>
> wrong.<br>
<br>
</span>I believe it's unsafe to complete the import then release it if the size<br>
is wrong. I explained that in the comment above that begins "To<br>
successfully prevent that attack...".<br>
<div><div class="h5"><br>
> <br>
><br>
> + return vk_error(VK_ERROR_INVALID_<wbr>EXTERNAL_HANDLE_KHR);<br>
> }<br>
><br>
> pthread_mutex_unlock(&cache-><wbr>mutex);<br>
> - *bo_out = &bo->bo;<br>
> + *bo_out = bo;<br>
><br>
> return VK_SUCCESS;<br>
> }<br>
> diff --git a/src/intel/vulkan/anv_device.<wbr>c b/src/intel/vulkan/anv_device.<wbr>c<br>
> index 1634b5158c..94bbf8c819 100644<br>
> --- a/src/intel/vulkan/anv_device.<wbr>c<br>
> +++ b/src/intel/vulkan/anv_device.<wbr>c<br>
> @@ -1542,9 +1542,10 @@ VkResult anv_AllocateMemory(<br>
> assert(fd_info->handleType ==<br>
> VK_EXTERNAL_MEMORY_HANDLE_<wbr>TYPE_OPAQUE_FD_BIT_KHR);<br>
><br>
> - result = anv_bo_cache_import(device, &device->bo_cache,<br>
> - fd_info->fd, pAllocateInfo-><br>
> allocationSize,<br>
> - &mem->bo);<br>
> + result = anv_bo_cache_import_with_size(<wbr>device, &device->bo_cache,<br>
> + fd_info->fd,<br>
> + pAllocateInfo-><br>
> allocationSize,<br>
> + &mem->bo);<br>
> if (result != VK_SUCCESS)<br>
> goto fail;<br>
><br>
> diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c<br>
> index d6bad44091..9c1321a065 100644<br>
> --- a/src/intel/vulkan/anv_intel.c<br>
> +++ b/src/intel/vulkan/anv_intel.c<br>
> @@ -75,8 +75,9 @@ VkResult anv_CreateDmaBufImageINTEL(<br>
><br>
> image = anv_image_from_handle(image_h)<wbr>;<br>
><br>
> - result = anv_bo_cache_import(device, &device->bo_cache,<br>
> - pCreateInfo->fd, image->size, &mem->bo);<br>
> + result = anv_bo_cache_import_with_size(<wbr>device, &device->bo_cache,<br>
> + pCreateInfo->fd, image->size,<br>
> + &mem->bo);<br>
> if (result != VK_SUCCESS)<br>
> goto fail_import;<br>
><br>
> diff --git a/src/intel/vulkan/anv_<wbr>private.h b/src/intel/vulkan/anv_<br>
> private.h<br>
> index 27d2c34203..ecdd9d5e32 100644<br>
> --- a/src/intel/vulkan/anv_<wbr>private.h<br>
> +++ b/src/intel/vulkan/anv_<wbr>private.h<br>
> @@ -717,6 +717,9 @@ VkResult anv_bo_cache_alloc(struct anv_device *device,<br>
> struct anv_bo_cache *cache,<br>
> uint64_t size, struct anv_bo **bo);<br>
> VkResult anv_bo_cache_import(struct anv_device *device,<br>
> + struct anv_bo_cache *cache,<br>
> + int fd, struct anv_bo **bo);<br>
> +VkResult anv_bo_cache_import_with_size(<wbr>struct anv_device *device,<br>
> struct anv_bo_cache *cache,<br>
> int fd, uint64_t size, struct anv_bo **bo);<br>
> VkResult anv_bo_cache_export(struct anv_device *device,<br>
> diff --git a/src/intel/vulkan/anv_queue.c b/src/intel/vulkan/anv_queue.c<br>
> index aff7c53119..e26254a87e 100644<br>
> --- a/src/intel/vulkan/anv_queue.c<br>
> +++ b/src/intel/vulkan/anv_queue.c<br>
> @@ -1006,6 +1006,7 @@ VkResult anv_ImportSemaphoreFdKHR(<br>
> ANV_FROM_HANDLE(anv_device, device, _device);<br>
> ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo-><br>
> semaphore);<br>
> int fd = pImportSemaphoreFdInfo->fd;<br>
> + VkResult result;<br>
><br>
> struct anv_semaphore_impl new_impl = {<br>
> .type = ANV_SEMAPHORE_TYPE_NONE,<br>
> @@ -1033,8 +1034,8 @@ VkResult anv_ImportSemaphoreFdKHR(<br>
> } else {<br>
> new_impl.type = ANV_SEMAPHORE_TYPE_BO;<br>
><br>
> - VkResult result = anv_bo_cache_import(device, &device->bo_cache,<br>
</div></div>> - fd, 4096, &[2]<a href="http://new_impl.bo" rel="noreferrer" target="_blank">new_impl.bo</a>);<br>
<span class="">> + result = anv_bo_cache_import_with_size(<wbr>device, &device->bo_cache,<br>
> fd,<br>
</span>> + 4096, &[3]<a href="http://new_impl.bo" rel="noreferrer" target="_blank">new_impl.bo</a>);<br>
<span class="">> if (result != VK_SUCCESS)<br>
> return result;<br>
><br>
> --<br>
> 2.13.0<br>
><br>
> ______________________________<wbr>_________________<br>
> mesa-dev mailing list<br>
</span>> [4]<a href="mailto:mesa-dev@lists.freedesktop.org">mesa-dev@lists.freedesktop.<wbr>org</a><br>
> [5]<a href="https://lists.freedesktop.org/mailman/listinfo/mesa-dev" rel="noreferrer" target="_blank">https://lists.freedesktop.<wbr>org/mailman/listinfo/mesa-dev</a><br>
><br>
><br>
><br>
> References:<br>
><br>
> [1] mailto:<a href="mailto:chadversary@chromium.org">chadversary@chromium.<wbr>org</a><br>
> [2] <a href="http://new_impl.bo/" rel="noreferrer" target="_blank">http://new_impl.bo/</a><br>
> [3] <a href="http://new_impl.bo/" rel="noreferrer" target="_blank">http://new_impl.bo/</a><br>
> [4] mailto:<a href="mailto:mesa-dev@lists.freedesktop.org">mesa-dev@lists.<wbr>freedesktop.org</a><br>
> [5] <a href="https://lists.freedesktop.org/mailman/listinfo/mesa-dev" rel="noreferrer" target="_blank">https://lists.freedesktop.org/<wbr>mailman/listinfo/mesa-dev</a><br>
<div class="HOEnZb"><div class="h5"><br>
> ______________________________<wbr>_________________<br>
> mesa-dev mailing list<br>
> <a href="mailto:mesa-dev@lists.freedesktop.org">mesa-dev@lists.freedesktop.org</a><br>
> <a href="https://lists.freedesktop.org/mailman/listinfo/mesa-dev" rel="noreferrer" target="_blank">https://lists.freedesktop.org/<wbr>mailman/listinfo/mesa-dev</a><br>
<br>
</div></div></blockquote></div><br></div></div>