[Mesa-dev] [PATCH 08/21] anv/allocator: Add a BO cache
Jason Ekstrand
jason at jlekstrand.net
Fri Apr 14 17:37:55 UTC 2017
This cache allows us to easily ensure that we have a unique anv_bo for
each gem handle. We'll need this in order to support multiple imports of
memory objects and semaphores.
v2 (Jason Ekstrand):
- Reject BO imports if the size doesn't match the prime fd size as
reported by lseek().
---
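Reviewer-oriented usage sketch (not part of the patch; "device" setup and
error handling are assumed):

   struct anv_bo_cache cache;
   anv_bo_cache_init(&cache);

   /* Allocating through the cache registers the BO under its GEM handle. */
   struct anv_bo *bo;
   anv_bo_cache_alloc(device, &cache, 4096, &bo);

   /* Export hands out a prime fd.  Importing that fd back into the same
    * device resolves to the same GEM handle, so the cache returns the
    * existing anv_bo and just bumps its refcount.  Note that a successful
    * import consumes (closes) the fd.
    */
   int fd;
   anv_bo_cache_export(device, &cache, bo, &fd);

   struct anv_bo *imported;
   anv_bo_cache_import(device, &cache, fd, 4096, &imported);
   assert(imported == bo);

   /* One release per reference; the last one closes the GEM handle. */
   anv_bo_cache_release(device, &cache, imported);
   anv_bo_cache_release(device, &cache, bo);

   anv_bo_cache_finish(&cache);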
src/intel/vulkan/anv_allocator.c | 257 +++++++++++++++++++++
src/intel/vulkan/anv_private.h | 21 ++
.../drivers/dri/i965/brw_nir_trig_workarounds.c | 191 +++++++++++++++
3 files changed, 469 insertions(+)
create mode 100644 src/mesa/drivers/dri/i965/brw_nir_trig_workarounds.c
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 697309f..4ab5f60 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -34,6 +34,8 @@
#include "anv_private.h"
+#include "util/hash_table.h"
+
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
@@ -1004,3 +1006,258 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
return &bo->bo;
}
+
+struct anv_cached_bo {
+ struct anv_bo bo;
+
+ uint32_t refcount;
+};
+
+VkResult
+anv_bo_cache_init(struct anv_bo_cache *cache)
+{
+ cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ if (!cache->bo_map)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pthread_mutex_init(&cache->mutex, NULL)) {
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
+ "pthread_mutex_inti failed: %m");
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+anv_bo_cache_finish(struct anv_bo_cache *cache)
+{
+ _mesa_hash_table_destroy(cache->bo_map, NULL);
+ pthread_mutex_destroy(&cache->mutex);
+}
+
+static struct anv_cached_bo *
+anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)gem_handle);
+ if (!entry)
+ return NULL;
+
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
+ assert(bo->bo.gem_handle == gem_handle);
+
+ return bo;
+}
+
+static struct anv_bo *
+anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ return bo ? &bo->bo : NULL;
+}
+
+VkResult
+anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, struct anv_bo **bo_out)
+{
+ struct anv_cached_bo *bo =
+ vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ bo->refcount = 1;
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ VkResult result = anv_bo_init_new(&bo->bo, device, size);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->alloc, bo);
+ return result;
+ }
+
+ assert(bo->bo.gem_handle);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ _mesa_hash_table_insert(cache->bo_map,
+ (void *)(uintptr_t)bo->bo.gem_handle, bo);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size, struct anv_bo **bo_out)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ if (bo->bo.size != size) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ /* For security purposes, we reject BO imports where the size does not
+ * match exactly. This prevents a malicious client from passing a
+ * buffer to a trusted client, lying about the size, and telling the
+ * trusted client to try and texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ */
+ off_t import_size = lseek(fd, 0, SEEK_END);
+ if (import_size == (off_t)-1 || import_size != size) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
+ }
+
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+
+ if (device->instance->physicalDevice.supports_48bit_addresses)
+ bo->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+ if (device->instance->physicalDevice.has_exec_async)
+ bo->bo.flags |= EXEC_OBJECT_ASYNC;
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ /* From the Vulkan spec:
+ *
+ * "Importing memory from a file descriptor transfers ownership of
+ * the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ if (fd < 0)
+ return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+ *fd_out = fd;
+
+ return VK_SUCCESS;
+}
+
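+/* Atomically decrement *counter, but refuse to drop it below one.  Returns
+ * true if the decrement happened (someone else still holds a reference)
+ * and false if the counter was 1 and was left untouched.
+ */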
+static bool
+atomic_dec_not_one(uint32_t *counter)
+{
+ uint32_t old, val;
+
+ val = *counter;
+ while (1) {
+ if (val == 1)
+ return false;
+
+ old = __sync_val_compare_and_swap(counter, val, val - 1);
+ if (old == val)
+ return true;
+
+ val = old;
+ }
+}
+
+void
+anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ /* Try to decrement the counter but don't go below one. If this succeeds
+ * then the refcount has been decremented and we are not the last
+ * reference.
+ */
+ if (atomic_dec_not_one(&bo->refcount))
+ return;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ /* We are probably the last reference since our attempt to decrement above
+ * failed. However, we can't actually know until we are inside the mutex.
+ * Otherwise, someone could import the BO between the decrement and our
+ * taking the mutex.
+ */
+ if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
+ /* Turns out we're not the last reference. Unlock and bail. */
+ pthread_mutex_unlock(&cache->mutex);
+ return;
+ }
+
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->bo_map,
+ (const void *)(uintptr_t)bo->bo.gem_handle);
+ assert(entry);
+ _mesa_hash_table_remove(cache->bo_map, entry);
+
+ if (bo->bo.map)
+ anv_gem_munmap(bo->bo.map, bo->bo.size);
+
+ anv_gem_close(device, bo->bo.gem_handle);
+
+ /* Don't unlock until we've actually closed the BO. The whole point of
+ * the BO cache is to ensure that we correctly handle races with creating
+ * and releasing GEM handles and we don't want to let someone import the BO
+ * again between mutex unlock and closing the GEM handle.
+ */
+ pthread_mutex_unlock(&cache->mutex);
+
+ vk_free(&device->alloc, bo);
+}
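For anyone following along, the dec-not-one trick reads a bit more directly
in C11 atomics.  A sketch only; the patch itself sticks with the GCC __sync
builtins used elsewhere in anv:

   #include <stdatomic.h>
   #include <stdbool.h>
   #include <stdint.h>

   static bool
   atomic_dec_not_one_c11(_Atomic uint32_t *counter)
   {
      uint32_t val = atomic_load(counter);
      while (val != 1) {
         /* On failure, the CAS reloads val with the current value. */
         if (atomic_compare_exchange_weak(counter, &val, val - 1))
            return true;
      }
      return false;
   }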
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index b7629ba..127f3c2 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -604,6 +604,27 @@ struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
gl_shader_stage stage,
unsigned per_thread_scratch);
+/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
+struct anv_bo_cache {
+ struct hash_table *bo_map;
+ pthread_mutex_t mutex;
+};
+
+VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
+void anv_bo_cache_finish(struct anv_bo_cache *cache);
+VkResult anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, struct anv_bo **bo);
+VkResult anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t size, struct anv_bo **bo);
+VkResult anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out);
+void anv_bo_cache_release(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo);
+
struct anv_physical_device {
VK_LOADER_DATA _loader_data;
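A later patch in the series presumably embeds this cache in anv_device and
wires it into device creation; roughly, as a sketch rather than a quote
from that patch:

   /* in struct anv_device */
   struct anv_bo_cache bo_cache;

   /* in anv_CreateDevice(), alongside the other per-device state */
   result = anv_bo_cache_init(&device->bo_cache);
   if (result != VK_SUCCESS)
      goto fail;   /* hypothetical cleanup label */

   /* in anv_DestroyDevice() */
   anv_bo_cache_finish(&device->bo_cache);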
diff --git a/src/mesa/drivers/dri/i965/brw_nir_trig_workarounds.c b/src/mesa/drivers/dri/i965/brw_nir_trig_workarounds.c
new file mode 100644
index 0000000..73bb3fe
--- /dev/null
+++ b/src/mesa/drivers/dri/i965/brw_nir_trig_workarounds.c
@@ -0,0 +1,191 @@
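+/* This pass multiplies the results of fsin/fcos by 0.99997 so the value
+ * stays strictly inside [-1, 1]; Gen hardware SIN/COS can return results
+ * slightly outside that range.  The file appears to be generated from
+ * brw_nir_trig_workarounds.py by nir_algebraic.py.
+ */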
+#include "brw_nir.h"
+
+#include "nir.h"
+#include "nir_search.h"
+#include "nir_search_helpers.h"
+
+#ifndef NIR_OPT_ALGEBRAIC_STRUCT_DEFS
+#define NIR_OPT_ALGEBRAIC_STRUCT_DEFS
+
+struct transform {
+ const nir_search_expression *search;
+ const nir_search_value *replace;
+ unsigned condition_offset;
+};
+
+#endif
+
+
+static const nir_search_variable search1_0 = {
+ { nir_search_value_variable, 0 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression search1 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fcos,
+ { &search1_0.value },
+ NULL,
+};
+
+static const nir_search_variable replace1_0_0 = {
+ { nir_search_value_variable, 0 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression replace1_0 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fcos,
+ { &replace1_0_0.value },
+ NULL,
+};
+
+static const nir_search_constant replace1_1 = {
+ { nir_search_value_constant, 0 },
+ nir_type_float, { 0x3fefffc115df6556 /* 0.99997 */ },
+};
+static const nir_search_expression replace1 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fmul,
+ { &replace1_0.value, &replace1_1.value },
+ NULL,
+};
+
+static const struct transform brw_nir_apply_trig_workarounds_fcos_xforms[] = {
+ { &search1, &replace1.value, 0 },
+};
+
+static const nir_search_variable search0_0 = {
+ { nir_search_value_variable, 0 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression search0 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fsin,
+ { &search0_0.value },
+ NULL,
+};
+
+static const nir_search_variable replace0_0_0 = {
+ { nir_search_value_variable, 0 },
+ 0, /* x */
+ false,
+ nir_type_invalid,
+ NULL,
+};
+static const nir_search_expression replace0_0 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fsin,
+ { &replace0_0_0.value },
+ NULL,
+};
+
+static const nir_search_constant replace0_1 = {
+ { nir_search_value_constant, 0 },
+ nir_type_float, { 0x3fefffc115df6556 /* 0.99997 */ },
+};
+static const nir_search_expression replace0 = {
+ { nir_search_value_expression, 0 },
+ false,
+ nir_op_fmul,
+ { &replace0_0.value, &replace0_1.value },
+ NULL,
+};
+
+static const struct transform brw_nir_apply_trig_workarounds_fsin_xforms[] = {
+ { &search0, &replace0.value, 0 },
+};
+
+static bool
+brw_nir_apply_trig_workarounds_block(nir_block *block, const bool *condition_flags,
+ void *mem_ctx)
+{
+ bool progress = false;
+
+ nir_foreach_instr_reverse_safe(instr, block) {
+ if (instr->type != nir_instr_type_alu)
+ continue;
+
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+ if (!alu->dest.dest.is_ssa)
+ continue;
+
+ switch (alu->op) {
+ case nir_op_fcos:
+ for (unsigned i = 0; i < ARRAY_SIZE(brw_nir_apply_trig_workarounds_fcos_xforms); i++) {
+ const struct transform *xform = &brw_nir_apply_trig_workarounds_fcos_xforms[i];
+ if (condition_flags[xform->condition_offset] &&
+ nir_replace_instr(alu, xform->search, xform->replace,
+ mem_ctx)) {
+ progress = true;
+ break;
+ }
+ }
+ break;
+ case nir_op_fsin:
+ for (unsigned i = 0; i < ARRAY_SIZE(brw_nir_apply_trig_workarounds_fsin_xforms); i++) {
+ const struct transform *xform = &brw_nir_apply_trig_workarounds_fsin_xforms[i];
+ if (condition_flags[xform->condition_offset] &&
+ nir_replace_instr(alu, xform->search, xform->replace,
+ mem_ctx)) {
+ progress = true;
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return progress;
+}
+
+static bool
+brw_nir_apply_trig_workarounds_impl(nir_function_impl *impl, const bool *condition_flags)
+{
+ void *mem_ctx = ralloc_parent(impl);
+ bool progress = false;
+
+ nir_foreach_block_reverse(block, impl) {
+ progress |= brw_nir_apply_trig_workarounds_block(block, condition_flags, mem_ctx);
+ }
+
+ if (progress)
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+
+ return progress;
+}
+
+
+bool
+brw_nir_apply_trig_workarounds(nir_shader *shader)
+{
+ bool progress = false;
+ bool condition_flags[1];
+ const nir_shader_compiler_options *options = shader->options;
+ (void) options;
+
+ condition_flags[0] = true;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl)
+ progress |= brw_nir_apply_trig_workarounds_impl(function->impl, condition_flags);
+ }
+
+ return progress;
+}
+
--
2.5.0.400.gff86faf