[Mesa-dev] [PATCH v2 07/18] anv/allocator: Add a BO cache
Jason Ekstrand
jason at jlekstrand.net
Tue Mar 14 02:26:09 UTC 2017
This cache allows us to easily ensure that we have a unique anv_bo for
each gem handle. We'll need this in order to support multiple-import of
memory objects and semaphores.
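As a rough usage sketch (illustrative only; "cache" here stands for wherever
a later patch ends up storing the anv_bo_cache), importing the same
underlying GEM BO twice yields a single refcounted anv_bo instead of two:

    struct anv_bo *bo1, *bo2;
    anv_bo_cache_import(device, cache, fd1, size, &bo1, &device->alloc);
    anv_bo_cache_import(device, cache, fd2, size, &bo2, &device->alloc);
    /* If fd1 and fd2 refer to the same GEM object, bo1 == bo2 and the
     * second import only bumped the reference count. */
    anv_bo_cache_release(device, cache, bo2, &device->alloc);
    anv_bo_cache_release(device, cache, bo1, &device->alloc);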
---
src/intel/vulkan/anv_allocator.c | 212 +++++++++++++++++++++++++++++++++++++++
src/intel/vulkan/anv_private.h | 26 +++++
2 files changed, 238 insertions(+)
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 45c663b..84364a6 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -34,6 +34,8 @@
#include "anv_private.h"
+#include "util/hash_table.h"
+
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
@@ -976,3 +978,213 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
return &bo->bo;
}
+
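+/* Wrapper around anv_bo that adds a reference count.  Each GEM handle maps
+ * to at most one of these, so importing the same BO a second time bumps the
+ * count instead of creating a duplicate anv_bo.
+ */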
+struct anv_cached_bo {
+ struct anv_bo bo;
+
+ uint32_t refcount;
+};
+
+static uint32_t
+hash_uint32_t(const void *key)
+{
+ return (uint32_t)(uintptr_t)key;
+}
+
+static bool
+uint32_t_equal(const void *a, const void *b)
+{
+ return a == b;
+}
+
+VkResult
+anv_bo_cache_init(struct anv_bo_cache *cache)
+{
+ cache->bo_map = _mesa_hash_table_create(NULL, hash_uint32_t, uint32_t_equal);
+ if (!cache->bo_map)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pthread_mutex_init(&cache->mutex, NULL)) {
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
+ "pthread_mutex_inti failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+anv_bo_cache_finish(struct anv_bo_cache *cache)
+{
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ pthread_mutex_destroy(&cache->mutex);
+}
+
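+/* Caller must hold cache->mutex. */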
+static struct anv_cached_bo *
+anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)gem_handle);
+ if (!entry)
+ return NULL;
+
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
+ assert(bo->bo.gem_handle == gem_handle);
+
+ return bo;
+}
+
+VkResult
+anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, struct anv_bo **bo_out,
+ VkAllocationCallbacks *alloc)
+{
+ struct anv_cached_bo *bo =
+ vk_alloc(alloc, sizeof(*bo), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ bo->refcount = 1;
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ VkResult result = anv_bo_init_new(&bo->bo, device, size);
+ if (result != VK_SUCCESS) {
+ vk_free(alloc, bo);
+ return result;
+ }
+
+ assert(bo->bo.gem_handle);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ _mesa_hash_table_insert(cache->bo_map,
+ (void *)(uintptr_t)bo->bo.gem_handle, bo);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size, struct anv_bo **bo_out,
+ VkAllocationCallbacks *alloc)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+
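+ /* If this GEM handle is already in the cache (the same underlying BO
+ * imported more than once), take another reference on the existing entry
+ * instead of creating a duplicate.
+ */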
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ assert(bo->bo.size == size);
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ bo = vk_alloc(alloc, sizeof(*bo), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ /* From the Vulkan spec:
+ *
+ * "Importing memory from a file descriptor transfers ownership of
+ * the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ if (fd < 0)
+ return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+ *fd_out = fd;
+
+ return VK_SUCCESS;
+}
+
+struct anv_bo *
+anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ return bo ? &bo->bo : NULL;
+}
+
+void
+anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in,
+ VkAllocationCallbacks *alloc)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
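+ /* __sync_fetch_and_add returns the value before the decrement, so
+ * count == 1 means this was the last reference.
+ */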
+ uint32_t count = __sync_fetch_and_add(&bo->refcount, -1);
+ assert(count > 0);
+ if (count > 1)
+ return;
+ assert(count == 1);
+ assert(bo->refcount == 0);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)bo->bo.gem_handle);
+ assert(entry);
+ _mesa_hash_table_remove(cache->bo_map, entry);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ if (bo->bo.map)
+ anv_gem_munmap(bo->bo.map, bo->bo.size);
+
+ anv_gem_close(device, bo->bo.gem_handle);
+
+ vk_free(alloc, bo);
+}
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 1643cdf..2cd0cc8 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -511,6 +511,32 @@ struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
gl_shader_stage stage,
unsigned per_thread_scratch);
+/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
+struct anv_bo_cache {
+ struct hash_table *bo_map;
+ pthread_mutex_t mutex;
+};
+
+VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
+void anv_bo_cache_finish(struct anv_bo_cache *cache);
+VkResult anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, struct anv_bo **bo,
+ VkAllocationCallbacks *alloc);
+VkResult anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size, struct anv_bo **bo,
+ VkAllocationCallbacks *alloc);
+VkResult anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out);
+struct anv_bo *anv_bo_cache_lookup(struct anv_bo_cache *cache,
+ uint32_t gem_handle);
+void anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo,
+ VkAllocationCallbacks *alloc);
+
struct anv_physical_device {
VK_LOADER_DATA _loader_data;
--
2.5.0.400.gff86faf