[Mesa-dev] [PATCH 3/4] radv: fallback to an in-memory cache when no pipeline cache is provided

Timothy Arceri tarceri at itsqueeze.com
Wed Mar 15 04:17:32 UTC 2017


---
 src/amd/vulkan/radv_device.c         | 4 +++-
 src/amd/vulkan/radv_pipeline.c       | 9 ++++++---
 src/amd/vulkan/radv_pipeline_cache.c | 7 +++++--
 src/amd/vulkan/radv_private.h        | 3 ++-
 4 files changed, 16 insertions(+), 7 deletions(-)
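
(Note, not part of the patch: a minimal sketch of the fallback this change
introduces. The helper name effective_cache() is hypothetical and used only
for illustration; the actual change is spread across the hunks below, which
redirect cache lookups and inserts to the device's internal mem_cache when
the application binds no pipeline cache.)

    /* Hypothetical helper, assuming the radv_device / radv_pipeline_cache
     * types from the diff below: use the application's cache when one is
     * bound, otherwise fall back to the per-device in-memory cache that
     * radv_CreateDevice() now creates and stores in device->mem_cache. */
    static struct radv_pipeline_cache *
    effective_cache(struct radv_device *device,
                    struct radv_pipeline_cache *cache)
    {
            return cache ? cache : device->mem_cache;
    }

With this fallback in place, radv_pipeline_cache_insert_shader() and
radv_create_shader_variant_from_pipeline_cache() always have a cache to work
with, so shaders compiled without an app-provided VkPipelineCache can still
be reused within the device's lifetime.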

diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 875057c..65e6a2c 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -984,26 +984,28 @@ VkResult radv_CreateDevice(
 
 	if (device->physical_device->rad_info.chip_class >= CIK)
 		cik_create_gfx_config(device);
 
 	VkPipelineCacheCreateInfo ci;
 	ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
 	ci.pNext = NULL;
 	ci.flags = 0;
 	ci.pInitialData = NULL;
 	ci.initialDataSize = 0;
-	VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
+	VkPipelineCache pc;
 	result = radv_CreatePipelineCache(radv_device_to_handle(device),
 					  &ci, NULL, &pc);
 	if (result != VK_SUCCESS)
 		goto fail;
 
+	device->mem_cache = radv_pipeline_cache_from_handle(pc);
+
 	*pDevice = radv_device_to_handle(device);
 	return VK_SUCCESS;
 
 fail:
 	if (device->trace_bo)
 		device->ws->buffer_destroy(device->trace_bo);
 
 	if (device->gfx_init)
 		device->ws->buffer_destroy(device->gfx_init);
 
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 13ae87c..73a3776 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -552,33 +552,36 @@ radv_pipeline_compile(struct radv_pipeline *pipeline,
 					     &code, &code_size, dump);
 
 	if (stage == MESA_SHADER_GEOMETRY) {
 		void *gs_copy_code = NULL;
 		unsigned gs_copy_code_size = 0;
 		pipeline->gs_copy_shader = radv_pipeline_create_gs_copy_shader(
 			pipeline, nir, &gs_copy_code, &gs_copy_code_size, dump);
 
 		if (pipeline->gs_copy_shader) {
 			pipeline->gs_copy_shader =
-				radv_pipeline_cache_insert_shader(cache,
+				radv_pipeline_cache_insert_shader(pipeline->device,
+								  cache,
 								  gs_copy_sha1,
 								  pipeline->gs_copy_shader,
 								  gs_copy_code,
 								  gs_copy_code_size);
 		}
 	}
 	if (!module->nir)
 		ralloc_free(nir);
 
 	if (variant)
-		variant = radv_pipeline_cache_insert_shader(cache, sha1, variant,
-							    code, code_size);
+		variant = radv_pipeline_cache_insert_shader(pipeline->device,
+							    cache, sha1,
+							    variant, code,
+							    code_size);
 
 	if (code)
 		free(code);
 	return variant;
 }
 
 static VkResult
 radv_pipeline_scratch_init(struct radv_device *device,
                            struct radv_pipeline *pipeline)
 {
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index 5b7e1c4..3a58f6a 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -149,20 +149,22 @@ radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
 
 struct radv_shader_variant *
 radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
 					       struct radv_pipeline_cache *cache,
 					       const unsigned char *sha1)
 {
 	struct cache_entry *entry = NULL;
 
 	if (cache)
 		entry = radv_pipeline_cache_search(cache, sha1);
+	else
+		entry = radv_pipeline_cache_search(device->mem_cache, sha1);
 
 	if (!entry)
 		return NULL;
 
 	if (!entry->variant) {
 		struct radv_shader_variant *variant;
 
 		variant = calloc(1, sizeof(struct radv_shader_variant));
 		if (!variant)
 			return NULL;
@@ -251,27 +253,28 @@ radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
 		radv_pipeline_cache_grow(cache);
 
 	/* Failing to grow that hash table isn't fatal, but may mean we don't
 	 * have enough space to add this new kernel. Only add it if there's room.
 	 */
 	if (cache->kernel_count < cache->table_size / 2)
 		radv_pipeline_cache_set_entry(cache, entry);
 }
 
 struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_pipeline_cache *cache,
+radv_pipeline_cache_insert_shader(struct radv_device *device,
+				  struct radv_pipeline_cache *cache,
 				  const unsigned char *sha1,
 				  struct radv_shader_variant *variant,
 				  const void *code, unsigned code_size)
 {
 	if (!cache)
-		return variant;
+		cache = device->mem_cache;
 
 	pthread_mutex_lock(&cache->mutex);
 	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
 	if (entry) {
 		if (entry->variant) {
 			radv_shader_variant_destroy(cache->device, variant);
 			variant = entry->variant;
 		} else {
 			entry->variant = variant;
 		}
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index c11fb74..cbd2968 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -309,21 +309,22 @@ radv_pipeline_cache_finish(struct radv_pipeline_cache *cache);
 void
 radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
 			 const void *data, size_t size);
 
 struct radv_shader_variant *
 radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
 					       struct radv_pipeline_cache *cache,
 					       const unsigned char *sha1);
 
 struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_pipeline_cache *cache,
+radv_pipeline_cache_insert_shader(struct radv_device *device,
+				  struct radv_pipeline_cache *cache,
 				  const unsigned char *sha1,
 				  struct radv_shader_variant *variant,
 				  const void *code, unsigned code_size);
 
 void radv_shader_variant_destroy(struct radv_device *device,
 				 struct radv_shader_variant *variant);
 
 struct radv_meta_state {
 	VkAllocationCallbacks alloc;
 
-- 
2.9.3


