[Mesa-dev] [PATCH v3 07/18] anv/allocator: Add a BO cache

Chad Versace chadversary at chromium.org
Fri Mar 31 01:30:45 UTC 2017


On Wed 15 Mar 2017, Jason Ekstrand wrote:
> This cache allows us to easily ensure that we have a unique anv_bo for
> each gem handle.  We'll need this in order to support multiple-import of
> memory objects and semaphores.
> 
> v2 (Jason Ekstrand):
>  - Reject BO imports if the size doesn't match the prime fd size as
>    reported by lseek().
> 
> v3 (Jason Ekstrand):
>  - Fix reference counting around cache_release (Chris Wilson)
>  - Move the mutex_unlock() later in cache_release
> ---
>  src/intel/vulkan/anv_allocator.c | 261 +++++++++++++++++++++++++++++++++++++++
>  src/intel/vulkan/anv_private.h   |  26 ++++
>  2 files changed, 287 insertions(+)


> +struct anv_cached_bo {
> +   struct anv_bo bo;
> +
> +   uint32_t refcount;
> +};

Extra whitespace.

> +static bool
> +uint32_t_equal(const void *a, const void *b)
> +{
> +   return a == b;
> +}

This can be replaced with hash_table.h:_mesa_key_pointer_equal().
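
That is (a sketch, keeping the patch's hash_uint32_t as the hash
function):

  cache->bo_map = _mesa_hash_table_create(NULL, hash_uint32_t,
                                          _mesa_key_pointer_equal);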

> +VkResult
> +anv_bo_cache_init(struct anv_bo_cache *cache)
> +{
> +   cache->bo_map = _mesa_hash_table_create(NULL, hash_uint32_t, uint32_t_equal);
> +   if (!cache->bo_map)
> +      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

Since the table's keys are not real pointers (they're just uint32_t gem
handles), you need to set a custom value for deleted keys.

  _mesa_hash_table_set_deleted_key(cache->bo_map, (void*)(uintptr_t) 0);

> +
> +   if (pthread_mutex_init(&cache->mutex, NULL)) {
> +      _mesa_hash_table_destroy(cache->bo_map, NULL);
> +      return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
> +                       "pthread_mutex_inti failed: %m");

Typo: s/inti/init/

> +   }
> +
> +   return VK_SUCCESS;
> +}
> +
> +void
> +anv_bo_cache_finish(struct anv_bo_cache *cache)
> +{
> +   _mesa_hash_table_destroy(cache->bo_map, NULL);
> +   pthread_mutex_destroy(&cache->mutex);
> +}
> +
> +static struct anv_cached_bo *
> +anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
> +{
> +   struct hash_entry *entry =
> +      _mesa_hash_table_search(cache->bo_map,
> +                              (const void *)(uintptr_t)gem_handle);
> +   if (!entry)
> +      return NULL;
> +
> +   struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
> +   assert(bo->bo.gem_handle == gem_handle);
> +
> +   return bo;
> +}
> +
> +VkResult
> +anv_bo_cache_alloc(struct anv_device *device,
> +                   struct anv_bo_cache *cache,
> +                   uint64_t size, struct anv_bo **bo_out,
> +                   VkAllocationCallbacks *alloc)
> +{
> +   struct anv_cached_bo *bo =
> +      vk_alloc(alloc, sizeof(struct anv_cached_bo), 8,
> +               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
> +   if (!bo)
> +      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
> +
> +   bo->refcount = 1;
> +
> +   /* The kernel is going to give us whole pages anyway */
> +   size = align_u64(size, 4096);
> +
> +   VkResult result = anv_bo_init_new(&bo->bo, device, size);
> +   if (result != VK_SUCCESS) {
> +      vk_free(alloc, bo);
> +      return result;
> +   }
> +
> +   assert(bo->bo.gem_handle);
> +
> +   pthread_mutex_lock(&cache->mutex);
> +
> +   _mesa_hash_table_insert(cache->bo_map,
> +                           (void *)(uintptr_t)bo->bo.gem_handle, bo);
> +
> +   pthread_mutex_unlock(&cache->mutex);
> +
> +   *bo_out = &bo->bo;
> +
> +   return VK_SUCCESS;
> +}
> +
> +VkResult
> +anv_bo_cache_import(struct anv_device *device,
> +                    struct anv_bo_cache *cache,
> +                    int fd, uint64_t size, struct anv_bo **bo_out,
> +                    VkAllocationCallbacks *alloc)
> +{
> +   pthread_mutex_lock(&cache->mutex);
> +
> +   /* The kernel is going to give us whole pages anyway */
> +   size = align_u64(size, 4096);
> +
> +   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
> +   if (!gem_handle) {
> +      pthread_mutex_unlock(&cache->mutex);
> +      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
> +   }
> +
> +   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
> +   if (bo) {
> +      assert(bo->bo.size == size);
> +      __sync_fetch_and_add(&bo->refcount, 1);
> +   } else {
> +      /* For security purposes, we reject BO imports where the size does not
> +       * match exactly.  This prevents a malicious client from passing a
> +       * buffer to a trusted client, lying about the size, and telling the
> +       * trusted client to try and texture from an image that goes
> +       * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
> +       * in the trusted client.  The trusted client can protect itself against
> +       * this sort of attack but only if it can trust the buffer size.
> +       */
> +      off_t import_size = lseek(fd, 0, SEEK_END);

We might as well use lseek64 here, for future-proofing. Someday, on
some systems, a client may try to import a 4GB dma_buf on which it
does its own suballocation.
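
That is, something like this (a sketch; lseek64 and off64_t are glibc's
explicit large-file interfaces, available with _LARGEFILE64_SOURCE,
which _GNU_SOURCE already implies):

  off64_t import_size = lseek64(fd, 0, SEEK_END);
  if (import_size == (off64_t)-1 || (uint64_t)import_size != size) {
     anv_gem_close(device, gem_handle);
     pthread_mutex_unlock(&cache->mutex);
     return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
  }

The (uint64_t) cast also sidesteps the signed/unsigned comparison
between off64_t and the uint64_t size.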

> +      if (import_size == (off_t)-1 || import_size != size) {
> +         anv_gem_close(device, gem_handle);

This anv_gem_close initially looked wrong to me. But you're closing the
gem handle only *after* the hash table lookup fails, and while the mutex
is still held, so I'm convinced it's safe.

If given two fds that point to the same bo, does anv_gem_fd_to_handle()
return the same gem handle? I suspect so. If that's true, then closing
the gem handle in this error path, if the mutex were *not* locked, would
cause big problems. Especially if we care about security. It would be
nice to add a comment about that race condition here. If you don't want
to write it, I'll offer to write it in a follow-up patch.
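
If it helps, here's a rough draft, assuming my suspicion above is
right:

  /* It's safe to close the gem handle here only because we still hold
   * cache->mutex.  Two fds that reference the same underlying bo get
   * the same gem handle back from anv_gem_fd_to_handle(), and the
   * import path doesn't call it until after taking the mutex.  If we
   * closed the handle outside the lock, a concurrent import of another
   * fd to this bo could be handed this handle just before we close it
   * and would end up with an anv_bo wrapping a dead gem handle.
   */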

> +         pthread_mutex_unlock(&cache->mutex);
> +         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
> +      }
> +
> +      bo = vk_alloc(alloc, sizeof(struct anv_cached_bo), 8,
> +                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
> +      if (!bo) {
> +         anv_gem_close(device, gem_handle);
> +         pthread_mutex_unlock(&cache->mutex);
> +         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
> +      }
> +
> +      bo->refcount = 1;
> +
> +      anv_bo_init(&bo->bo, gem_handle, size);
> +
> +      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
> +   }
> +
> +   pthread_mutex_unlock(&cache->mutex);
> +
> +   /* From the Vulkan spec:
> +    *
> +    *    "Importing memory from a file descriptor transfers ownership of
> +    *    the file descriptor from the application to the Vulkan
> +    *    implementation. The application must not perform any operations on
> +    *    the file descriptor after a successful import."
> +    *
> +    * If the import fails, we leave the file descriptor open.
> +    */
> +   close(fd);
> +
> +   *bo_out = &bo->bo;
> +
> +   return VK_SUCCESS;
> +}
> +
> +VkResult
> +anv_bo_cache_export(struct anv_device *device,
> +                    struct anv_bo_cache *cache,
> +                    struct anv_bo *bo_in, int *fd_out)
> +{
> +   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
> +   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
> +
> +   int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
> +   if (fd < 0)
> +      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
> +
> +   *fd_out = fd;
> +
> +   return VK_SUCCESS;
> +}
> +
> +struct anv_bo *
> +anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
> +{
> +   pthread_mutex_lock(&cache->mutex);
> +
> +   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
> +
> +   pthread_mutex_unlock(&cache->mutex);
> +
> +   return bo ? &bo->bo : NULL;
> +}
> +
> +static bool
> +atomic_dec_not_one(uint32_t *counter)
> +{
> +   uint32_t old, val;
> +
> +   val = *counter;
> +   while (1) {
> +      if (val == 1)
> +         return false;
> +
> +      old = __sync_val_compare_and_swap(counter, val, val - 1);
> +      if (old == val)
> +         return true;
> +
> +      val = old;
> +   }
> +}
> +
> +void
> +anv_bo_cache_release(struct anv_device *device,
> +                     struct anv_bo_cache *cache,
> +                     struct anv_bo *bo_in,
> +                     VkAllocationCallbacks *alloc)
> +{

I'm still pondering the correctness of anv_bo_cache_release(). I'm not
done reviewing it yet.

> +   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
> +   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
> +
> +   /* Try to decrement the counter but don't go below one.  If this succeeds
> +    * then the refcount has been decremented and we are not the last
> +    * reference.
> +    */
> +   if (atomic_dec_not_one(&bo->refcount))
> +      return;
> +
> +   pthread_mutex_lock(&cache->mutex);
> +
> +   /* We are probably the last reference since our attempt to decrement above
> +    * failed.  However, we can't actually know until we are inside the mutex.
> +    * Otherwise, someone could import the BO between the decrement and our
> +    * taking the mutex.
> +    */
> +   if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
> +      /* Turns out we're not the last reference.  Unlock and bail. */
> +      pthread_mutex_unlock(&cache->mutex);
> +      return;
> +   }
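
(For other readers: the interleaving this second check guards against
is roughly

  thread A (release)               thread B (import)
  ------------------               -----------------
  atomic_dec_not_one() fails
  because refcount == 1
                                   locks mutex; cache lookup finds
                                   the bo; refcount 1 -> 2;
                                   unlocks mutex
  locks mutex
  __sync_sub_and_fetch() -> 1,
  which is > 0, so unlock and
  return

Without the re-check under the mutex, thread A would free a bo that
thread B just handed back out.)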
> +
> +   struct hash_entry *entry =
> +      _mesa_hash_table_search(cache->bo_map,
> +                              (const void *)(uintptr_t)bo->bo.gem_handle);
> +   assert(entry);
> +   _mesa_hash_table_remove(cache->bo_map, entry);
> +
> +   if (bo->bo.map)
> +      anv_gem_munmap(bo->bo.map, bo->bo.size);
> +
> +   anv_gem_close(device, bo->bo.gem_handle);
> +
> +   /* Don't unlock until we've actually closed the BO.  The whole point of
> +    * the BO cache is to ensure that we correctly handle races with creating
> +    * and releasing GEM handles and we don't want to let someone import the BO
> +    * again between mutex unlock and closing the GEM handle.
> +    */
> +   pthread_mutex_unlock(&cache->mutex);
> +
> +   vk_free(alloc, bo);
> +}
> diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
> index 1643cdf..2cd0cc8 100644
> --- a/src/intel/vulkan/anv_private.h
> +++ b/src/intel/vulkan/anv_private.h
> @@ -511,6 +511,32 @@ struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
>                                        gl_shader_stage stage,
>                                        unsigned per_thread_scratch);
>  
> +/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
> +struct anv_bo_cache {
> +   struct hash_table *bo_map;
> +   pthread_mutex_t mutex;
> +};
> +
> +VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
> +void anv_bo_cache_finish(struct anv_bo_cache *cache);
> +VkResult anv_bo_cache_alloc(struct anv_device *device,
> +                            struct anv_bo_cache *cache,
> +                            uint64_t size, struct anv_bo **bo,
> +                            VkAllocationCallbacks *alloc);
> +VkResult anv_bo_cache_import(struct anv_device *device,
> +                             struct anv_bo_cache *cache,
> +                             int fd, uint64_t size, struct anv_bo **bo,
> +                             VkAllocationCallbacks *alloc);
> +VkResult anv_bo_cache_export(struct anv_device *device,
> +                             struct anv_bo_cache *cache,
> +                             struct anv_bo *bo_in, int *fd_out);
> +struct anv_bo *anv_bo_cache_lookup(struct anv_bo_cache *cache,
> +                                   uint32_t gem_handle);
> +void anv_bo_cache_release(struct anv_device *device,
> +                          struct anv_bo_cache *cache,
> +                          struct anv_bo *bo,
> +                          VkAllocationCallbacks *alloc);
> +
>  struct anv_physical_device {
>      VK_LOADER_DATA                              _loader_data;
>  
> -- 
> 2.5.0.400.gff86faf
> 
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/mesa-dev

