[Mesa-dev] [PATCH v2 7/9] anv/allocator: Add a syncobj cache
Jason Ekstrand
jason at jlekstrand.net
Fri Aug 4 01:25:26 UTC 2017
This is mostly a copy+paste of the BO cache, but it's a bit simpler
because syncobjs have no actual backing storage, so we don't need to
check sizes or anything like that. Also, we put the refcount directly
in anv_syncobj because anv_syncobjs are always heap pointers.
---
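A quick usage sketch (illustrative only, not part of the patch): the
intended lifecycle of a cached syncobj, assuming "device" is a fully
initialized anv_device and error handling is elided for brevity.

   struct anv_syncobj *syncobj, *imported;
   int fd = -1;

   VkResult result =
      anv_syncobj_cache_create(device, &device->syncobj_cache, &syncobj);
   if (result != VK_SUCCESS)
      return result;

   /* Export hands out a new fd; the cache keeps its own reference. */
   result = anv_syncobj_cache_export(device, &device->syncobj_cache,
                                     syncobj, &fd);

   /* Import consumes the fd (it is closed on success). If the kernel
    * hands back a DRM handle we already track, the existing anv_syncobj
    * is returned with its refcount bumped instead of a duplicate.
    */
   result = anv_syncobj_cache_import(device, &device->syncobj_cache,
                                     fd, &imported);

   /* One release per reference; the DRM handle is destroyed and the
    * anv_syncobj freed when the last reference goes away.
    */
   anv_syncobj_cache_release(device, &device->syncobj_cache, imported);
   anv_syncobj_cache_release(device, &device->syncobj_cache, syncobj);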
src/intel/vulkan/anv_allocator.c | 194 +++++++++++++++++++++++++++++++++++++++
src/intel/vulkan/anv_device.c | 9 +-
src/intel/vulkan/anv_private.h | 40 ++++++++
3 files changed, 242 insertions(+), 1 deletion(-)
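For reference, anv_syncobj_cache_release() below leans on the same
atomic_dec_not_one() helper the BO cache already added to
anv_allocator.c: it atomically decrements the refcount only if it is
not 1, so dropping the last reference always falls through to the
mutex-protected path. Roughly (a sketch of the existing helper, not
new code in this patch):

   static inline bool
   atomic_dec_not_one(uint32_t *counter)
   {
      uint32_t old, val = *counter;

      while (true) {
         /* Refusing the 1 -> 0 decrement forces the caller to take the
          * cache mutex before destroying the object.
          */
         if (val == 1)
            return false;

         old = __sync_val_compare_and_swap(counter, val, val - 1);
         if (old == val)
            return true;

         /* Someone else changed the count; retry with the new value. */
         val = old;
      }
   }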
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index efaaebc..204c466 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1422,3 +1422,197 @@ anv_bo_cache_release(struct anv_device *device,
 
    vk_free(&device->alloc, bo);
 }
+
+VkResult
+anv_syncobj_cache_init(struct anv_syncobj_cache *cache)
+{
+   cache->map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                        _mesa_key_pointer_equal);
+   if (!cache->map)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   if (pthread_mutex_init(&cache->mutex, NULL)) {
+      _mesa_hash_table_destroy(cache->map, NULL);
+      return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
+                       "pthread_mutex_init failed: %m");
+   }
+
+   return VK_SUCCESS;
+}
+
+void
+anv_syncobj_cache_finish(struct anv_syncobj_cache *cache)
+{
+   _mesa_hash_table_destroy(cache->map, NULL);
+   pthread_mutex_destroy(&cache->mutex);
+}
+
+static struct anv_syncobj *
+anv_syncobj_cache_lookup_locked(struct anv_syncobj_cache *cache,
+                                uint32_t handle)
+{
+   struct hash_entry *entry =
+      _mesa_hash_table_search(cache->map, (const void *)(uintptr_t)handle);
+   if (!entry)
+      return NULL;
+
+   struct anv_syncobj *syncobj = (struct anv_syncobj *)entry->data;
+   assert(syncobj->handle == handle);
+
+   return syncobj;
+}
+
+static inline struct anv_syncobj *
+anv_syncobj_cache_lookup(struct anv_syncobj_cache *cache, uint32_t handle)
+{
+   pthread_mutex_lock(&cache->mutex);
+
+   struct anv_syncobj *syncobj = anv_syncobj_cache_lookup_locked(cache, handle);
+
+   pthread_mutex_unlock(&cache->mutex);
+
+   return syncobj;
+}
+
+VkResult
+anv_syncobj_cache_create(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         struct anv_syncobj **syncobj_out)
+{
+   struct anv_syncobj *syncobj =
+      vk_alloc(&device->alloc, sizeof(struct anv_syncobj), 8,
+               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!syncobj)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   syncobj->handle = anv_gem_syncobj_create(device);
+   if (!syncobj->handle) {
+      vk_free(&device->alloc, syncobj);
+      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+   }
+
+   VG(syncobj->index = 0);
+   syncobj->refcount = 1;
+
+   pthread_mutex_lock(&cache->mutex);
+
+   _mesa_hash_table_insert(cache->map,
+                           (void *)(uintptr_t)syncobj->handle, syncobj);
+
+   pthread_mutex_unlock(&cache->mutex);
+
+   *syncobj_out = syncobj;
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_syncobj_cache_import(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         int fd, struct anv_syncobj **syncobj_out)
+{
+   pthread_mutex_lock(&cache->mutex);
+
+   uint32_t handle = anv_gem_syncobj_fd_to_handle(device, fd);
+   if (!handle) {
+      pthread_mutex_unlock(&cache->mutex);
+      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+   }
+
+   struct anv_syncobj *syncobj = anv_syncobj_cache_lookup_locked(cache, handle);
+   if (syncobj) {
+      __sync_fetch_and_add(&syncobj->refcount, 1);
+   } else {
+      syncobj = vk_alloc(&device->alloc, sizeof(struct anv_syncobj), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!syncobj) {
+         anv_gem_syncobj_destroy(device, handle);
+         pthread_mutex_unlock(&cache->mutex);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      syncobj->handle = handle;
+      VG(syncobj->index = 0);
+      syncobj->refcount = 1;
+
+      _mesa_hash_table_insert(cache->map, (void *)(uintptr_t)handle, syncobj);
+   }
+
+   pthread_mutex_unlock(&cache->mutex);
+
+   /* From the Vulkan spec:
+    *
+    *    "Importing semaphore state from a file descriptor transfers
+    *    ownership of the file descriptor from the application to the
+    *    Vulkan implementation. The application must not perform any
+    *    operations on the file descriptor after a successful import."
+    *
+    * If the import fails, we leave the file descriptor open.
+    */
+   close(fd);
+
+   *syncobj_out = syncobj;
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_syncobj_cache_export(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         struct anv_syncobj *syncobj, int *fd_out)
+{
+   assert(anv_syncobj_cache_lookup(cache, syncobj->handle) == syncobj);
+
+   int fd = anv_gem_syncobj_handle_to_fd(device, syncobj->handle);
+   if (fd < 0)
+      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+   *fd_out = fd;
+
+   return VK_SUCCESS;
+}
+
+void
+anv_syncobj_cache_release(struct anv_device *device,
+                          struct anv_syncobj_cache *cache,
+                          struct anv_syncobj *syncobj)
+{
+   assert(anv_syncobj_cache_lookup(cache, syncobj->handle) == syncobj);
+
+   /* Try to decrement the counter but don't go below one. If this succeeds
+    * then the refcount has been decremented and we are not the last
+    * reference.
+    */
+   if (atomic_dec_not_one(&syncobj->refcount))
+      return;
+
+   pthread_mutex_lock(&cache->mutex);
+
+   /* We are probably the last reference since our attempt to decrement above
+    * failed. However, we can't actually know until we are inside the mutex.
+    * Otherwise, someone could import the syncobj between the decrement and
+    * our taking the mutex.
+    */
+   if (unlikely(__sync_sub_and_fetch(&syncobj->refcount, 1) > 0)) {
+      /* Turns out we're not the last reference. Unlock and bail. */
+      pthread_mutex_unlock(&cache->mutex);
+      return;
+   }
+
+   struct hash_entry *entry =
+      _mesa_hash_table_search(cache->map,
+                              (const void *)(uintptr_t)syncobj->handle);
+   assert(entry);
+   _mesa_hash_table_remove(cache->map, entry);
+
+   anv_gem_syncobj_destroy(device, syncobj->handle);
+
+   /* Don't unlock until we've actually destroyed the syncobj. The whole
+    * point of the syncobj cache is to ensure that we correctly handle races
+    * with creating and releasing DRM handles and we don't want to let someone
+    * import the syncobj again between mutex unlock and closing the handle.
+    */
+   pthread_mutex_unlock(&cache->mutex);
+
+   vk_free(&device->alloc, syncobj);
+}
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 732fa2e..252ca95 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1138,10 +1138,14 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_batch_bo_pool;
 
-   result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384);
+   result = anv_syncobj_cache_init(&device->syncobj_cache);
    if (result != VK_SUCCESS)
       goto fail_bo_cache;
 
+   result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384);
+   if (result != VK_SUCCESS)
+      goto fail_syncobj_cache;
+
    result = anv_state_pool_init(&device->instruction_state_pool, device, 16384);
    if (result != VK_SUCCESS)
       goto fail_dynamic_state_pool;
@@ -1203,6 +1207,8 @@ VkResult anv_CreateDevice(
    anv_state_pool_finish(&device->instruction_state_pool);
  fail_dynamic_state_pool:
    anv_state_pool_finish(&device->dynamic_state_pool);
+ fail_syncobj_cache:
+   anv_syncobj_cache_finish(&device->syncobj_cache);
  fail_bo_cache:
    anv_bo_cache_finish(&device->bo_cache);
  fail_batch_bo_pool:
@@ -1251,6 +1257,7 @@ void anv_DestroyDevice(
 
    anv_state_pool_finish(&device->instruction_state_pool);
    anv_state_pool_finish(&device->dynamic_state_pool);
+   anv_syncobj_cache_finish(&device->syncobj_cache);
    anv_bo_cache_finish(&device->bo_cache);
 
    anv_bo_pool_finish(&device->batch_bo_pool);
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index b451fa5..b51905f 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -613,6 +613,45 @@ void anv_bo_cache_release(struct anv_device *device,
                           struct anv_bo_cache *cache,
                           struct anv_bo *bo);
 
+struct anv_syncobj {
+   uint32_t handle;
+
+   /* Index into the current fence array. This is used by the fence array
+    * building algorithm to track which syncobjs are already in the array
+    * so that we can ensure uniqueness.
+    */
+   uint32_t index;
+
+   uint32_t refcount;
+};
+
+/** Implements a syncobj cache that ensures a 1-1 mapping of DRM syncobjs to
+ * anv_syncobjs.
+ */
+struct anv_syncobj_cache {
+   struct hash_table *map;
+   pthread_mutex_t mutex;
+};
+
+VkResult anv_syncobj_cache_init(struct anv_syncobj_cache *cache);
+void anv_syncobj_cache_finish(struct anv_syncobj_cache *cache);
+VkResult
+anv_syncobj_cache_create(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         struct anv_syncobj **syncobj_out);
+VkResult
+anv_syncobj_cache_import(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         int fd, struct anv_syncobj **syncobj_out);
+VkResult
+anv_syncobj_cache_export(struct anv_device *device,
+                         struct anv_syncobj_cache *cache,
+                         struct anv_syncobj *syncobj, int *fd_out);
+void
+anv_syncobj_cache_release(struct anv_device *device,
+                          struct anv_syncobj_cache *cache,
+                          struct anv_syncobj *syncobj);
+
 struct anv_memory_type {
    /* Standard bits passed on to the client */
    VkMemoryPropertyFlags propertyFlags;
@@ -740,6 +779,7 @@ struct anv_device {
    struct anv_bo_pool batch_bo_pool;
 
    struct anv_bo_cache bo_cache;
+   struct anv_syncobj_cache syncobj_cache;
 
    struct anv_state_pool dynamic_state_pool;
    struct anv_state_pool instruction_state_pool;
--
2.5.0.400.gff86faf