[Mesa-dev] [PATCH 8/9] radv: reuse the multiple shader store & load functions for gs copy variant

Timothy Arceri tarceri at itsqueeze.com
Sat Oct 14 23:36:46 UTC 2017


Use radv_create_shader_variants_from_pipeline_cache() and
radv_pipeline_cache_insert_shaders() for the gs copy shader as well,
which lets us drop the single-variant helpers
radv_create_shader_variant_from_pipeline_cache() and
radv_pipeline_cache_insert_shader().
---
 src/amd/vulkan/radv_pipeline.c       |  27 +++++---
 src/amd/vulkan/radv_pipeline_cache.c | 127 -----------------------------------
 src/amd/vulkan/radv_private.h        |  12 +---
 3 files changed, 17 insertions(+), 149 deletions(-)
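
Note for reviewers (not part of the patch): a minimal sketch of the calling
pattern this converges on — the gs copy variant just occupies the
MESA_SHADER_GEOMETRY slot of the per-stage arrays that the multi-shader cache
functions already take. The helper name below is hypothetical; the two cache
function signatures are the ones declared in radv_private.h in this series.

#include "radv_private.h" /* radv_device, radv_pipeline_cache, prototypes */

static struct radv_shader_variant *
lookup_or_insert_gs_copy(struct radv_device *device,
			 struct radv_pipeline_cache *cache,
			 const unsigned char *gs_copy_hash,
			 struct radv_shader_variant *compiled,
			 void *code, unsigned code_size)
{
	struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};

	/* Load path: any variants found in the cache come back via the array. */
	radv_create_shader_variants_from_pipeline_cache(device, cache,
							gs_copy_hash, variants);
	if (variants[MESA_SHADER_GEOMETRY])
		return variants[MESA_SHADER_GEOMETRY];

	/* Store path: only the geometry slot is populated, the rest stay NULL. */
	void *codes[MESA_SHADER_STAGES] = {0};
	unsigned code_sizes[MESA_SHADER_STAGES] = {0};

	variants[MESA_SHADER_GEOMETRY] = compiled;
	codes[MESA_SHADER_GEOMETRY] = code;
	code_sizes[MESA_SHADER_GEOMETRY] = code_size;

	radv_pipeline_cache_insert_shaders(device, cache, gs_copy_hash, variants,
					   (const void **)codes, code_sizes);
	return variants[MESA_SHADER_GEOMETRY];
}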

diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 4bb9fbb15b..e95925fda5 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -1544,25 +1544,23 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 				                   strlen(modules[i]->nir->info.name),
 				                   modules[i]->sha1);
 		}
 	}
 
 	radv_hash_shaders(hash, pStages, pipeline->layout, keys, get_hash_flags(device));
 	memcpy(gs_copy_hash, hash, 20);
 	gs_copy_hash[0] ^= 1;
 
 	if (modules[MESA_SHADER_GEOMETRY]) {
-		pipeline->gs_copy_shader =
-			radv_create_shader_variant_from_pipeline_cache(
-				pipeline->device,
-				cache,
-				gs_copy_hash);
+		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
+		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants);
+		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
 	}
 
 	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders) &&
 	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader))
 		return;
 
 	if (!modules[MESA_SHADER_FRAGMENT]) {
 		nir_builder fs_b;
 		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
 		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
@@ -1622,26 +1620,33 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 		void *gs_copy_code = NULL;
 		unsigned gs_copy_code_size = 0;
 		if (!pipeline->gs_copy_shader) {
 			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
 					device, nir[MESA_SHADER_GEOMETRY], &gs_copy_code,
 					&gs_copy_code_size,
 					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
 		}
 
 		if (pipeline->gs_copy_shader) {
-			pipeline->gs_copy_shader =
-				radv_pipeline_cache_insert_shader(device, cache,
-								  gs_copy_hash,
-								  pipeline->gs_copy_shader,
-								  gs_copy_code,
-								  gs_copy_code_size);
+			void *code[MESA_SHADER_STAGES] = {0};
+			unsigned code_size[MESA_SHADER_STAGES] = {0};
+			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
+
+			code[MESA_SHADER_GEOMETRY] = gs_copy_code;
+			code_size[MESA_SHADER_GEOMETRY] = gs_copy_code_size;
+			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
+
+			radv_pipeline_cache_insert_shaders(device, cache,
+							   gs_copy_hash,
+							   variants,
+							   (const void**)code,
+							   code_size);
 		}
 		free(gs_copy_code);
 	}
 
 	radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
 					   (const void**)codes, code_sizes);
 
 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
 		free(codes[i]);
 		if (modules[i] && !modules[i]->nir)
diff --git a/src/amd/vulkan/radv_pipeline_cache.c b/src/amd/vulkan/radv_pipeline_cache.c
index d29098eea0..1e8a703ce2 100644
--- a/src/amd/vulkan/radv_pipeline_cache.c
+++ b/src/amd/vulkan/radv_pipeline_cache.c
@@ -163,74 +163,20 @@ radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
 
 	pthread_mutex_lock(&cache->mutex);
 
 	entry = radv_pipeline_cache_search_unlocked(cache, sha1);
 
 	pthread_mutex_unlock(&cache->mutex);
 
 	return entry;
 }
 
-struct radv_shader_variant *
-radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
-					       struct radv_pipeline_cache *cache,
-					       const unsigned char *sha1)
-{
-	struct cache_entry *entry = NULL;
-
-	if (cache)
-		entry = radv_pipeline_cache_search(cache, sha1);
-	else
-		entry = radv_pipeline_cache_search(device->mem_cache, sha1);
-
-	if (!entry) {
-		if (!device->physical_device->disk_cache)
-			return NULL;
-		uint8_t disk_sha1[20];
-		disk_cache_compute_key(device->physical_device->disk_cache,
-				       sha1, 20, disk_sha1);
-		entry = (struct cache_entry *)
-			disk_cache_get(device->physical_device->disk_cache,
-				       disk_sha1, NULL);
-		if (!entry)
-			return NULL;
-	}
-
-	if (!entry->variants[0]) {
-		struct radv_shader_variant *variant;
-		char *p = entry->code;
-		struct cache_entry_variant_info info;
-
-		variant = calloc(1, sizeof(struct radv_shader_variant));
-		if (!variant)
-			return NULL;
-
-		memcpy(&info, p, sizeof(struct cache_entry_variant_info));
-		p += sizeof(struct cache_entry_variant_info);
-
-		variant->code_size = entry->code_sizes[0];
-		variant->config = info.config;
-		variant->info = info.variant_info;
-		variant->rsrc1 = info.rsrc1;
-		variant->rsrc2 = info.rsrc2;
-		variant->ref_count = 1;
-
-		void *ptr = radv_alloc_shader_memory(device, variant);
-		memcpy(ptr, p, entry->code_sizes[0]);
-
-		entry->variants[0] = variant;
-	}
-
-	p_atomic_inc(&entry->variants[0]->ref_count);
-	return entry->variants[0];
-}
-
 bool
 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 					        struct radv_pipeline_cache *cache,
 					        const unsigned char *sha1,
 					        struct radv_shader_variant **variants)
 {
 	struct cache_entry *entry;
 	if (cache)
 		entry = radv_pipeline_cache_search(cache, sha1);
 	else
@@ -350,93 +296,20 @@ radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
 	if (cache->kernel_count == cache->table_size / 2)
 		radv_pipeline_cache_grow(cache);
 
 	/* Failing to grow that hash table isn't fatal, but may mean we don't
 	 * have enough space to add this new kernel. Only add it if there's room.
 	 */
 	if (cache->kernel_count < cache->table_size / 2)
 		radv_pipeline_cache_set_entry(cache, entry);
 }
 
-struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_device *device,
-				  struct radv_pipeline_cache *cache,
-				  const unsigned char *sha1,
-				  struct radv_shader_variant *variant,
-				  const void *code, unsigned code_size)
-{
-	if (!cache)
-		cache = device->mem_cache;
-
-	pthread_mutex_lock(&cache->mutex);
-	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
-	if (entry) {
-		if (entry->variants[0]) {
-			radv_shader_variant_destroy(cache->device, variant);
-			variant = entry->variants[0];
-		} else {
-			entry->variants[0] = variant;
-		}
-		p_atomic_inc(&variant->ref_count);
-		pthread_mutex_unlock(&cache->mutex);
-		return variant;
-	}
-
-	entry = vk_alloc(&cache->alloc, sizeof(*entry) + sizeof(struct cache_entry_variant_info) + code_size, 8,
-			   VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
-	if (!entry) {
-		pthread_mutex_unlock(&cache->mutex);
-		return variant;
-	}
-
-	memset(entry, 0, sizeof(*entry));
-
-	char* p = entry->code;
-	struct cache_entry_variant_info info;
-
-	info.config = variant->config;
-	info.variant_info = variant->info;
-	info.rsrc1 = variant->rsrc1;
-	info.rsrc2 = variant->rsrc2;
-	memcpy(p, &info, sizeof(struct cache_entry_variant_info));
-	p += sizeof(struct cache_entry_variant_info);
-
-	memcpy(entry->sha1, sha1, 20);
-	memcpy(p, code, code_size);
-
-	entry->code_sizes[0] = code_size;
-
-	/* Set variant to NULL so we have reproducible cache items */
-	entry->variants[0] = NULL;
-
-	/* Always add cache items to disk. This will allow collection of
-	 * compiled shaders by third parties such as steam, even if the app
-	 * implements its own pipeline cache.
-	 */
-	if (device->physical_device->disk_cache) {
-		uint8_t disk_sha1[20];
-		disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
-				       disk_sha1);
-		disk_cache_put(device->physical_device->disk_cache,
-			       disk_sha1, entry, entry_size(entry), NULL);
-	}
-
-	entry->variants[0] = variant;
-	p_atomic_inc(&variant->ref_count);
-
-	radv_pipeline_cache_add_entry(cache, entry);
-
-	cache->modified = true;
-	pthread_mutex_unlock(&cache->mutex);
-	return variant;
-}
-
 void
 radv_pipeline_cache_insert_shaders(struct radv_device *device,
 				   struct radv_pipeline_cache *cache,
 				   const unsigned char *sha1,
 				   struct radv_shader_variant **variants,
 				   const void *const *codes,
 				   const unsigned *code_sizes)
 {
 	if (!cache)
 		cache = device->mem_cache;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 683ea3155e..576836e970 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -318,31 +318,21 @@ struct radv_pipeline_cache {
 
 void
 radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
 			 struct radv_device *device);
 void
 radv_pipeline_cache_finish(struct radv_pipeline_cache *cache);
 void
 radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
 			 const void *data, size_t size);
 
-struct radv_shader_variant *
-radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
-					       struct radv_pipeline_cache *cache,
-					       const unsigned char *sha1);
-
-struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_device *device,
-				  struct radv_pipeline_cache *cache,
-				  const unsigned char *sha1,
-				  struct radv_shader_variant *variant,
-				  const void *code, unsigned code_size);
+struct radv_shader_variant;
 
 bool
 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 					        struct radv_pipeline_cache *cache,
 					        const unsigned char *sha1,
 					        struct radv_shader_variant **variants);
 
 void
 radv_pipeline_cache_insert_shaders(struct radv_device *device,
 				   struct radv_pipeline_cache *cache,
-- 
2.13.6