Mesa (master): nir,radv/aco: add and use pass to lower make available/visible barriers
GitLab Mirror
gitlab-mirror at kemper.freedesktop.org
Tue Jul 28 17:17:22 UTC 2020
Module: Mesa
Branch: master
Commit: 2adb337256f146b10b31365ac6ba8ab820d9014e
URL: http://cgit.freedesktop.org/mesa/mesa/commit/?id=2adb337256f146b10b31365ac6ba8ab820d9014e
Author: Rhys Perry <pendingchaos02 at gmail.com>
Date: Fri May 1 14:32:31 2020 +0100
nir,radv/aco: add and use pass to lower make available/visible barriers
Lower them to ACCESS_COHERENT to simplify the backend and
probably give better performance than invalidating or writing back the
entire L0/L1 cache.
Signed-off-by: Rhys Perry <pendingchaos02 at gmail.com>
Reviewed-by: Daniel Schürmann <daniel at schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4905>
---
src/amd/vulkan/radv_pipeline.c | 1 +
src/compiler/Makefile.sources | 1 +
src/compiler/nir/meson.build | 1 +
src/compiler/nir/nir.h | 2 +
src/compiler/nir/nir_lower_memory_model.c | 269 ++++++++++++++++++++++++++++++
5 files changed, 274 insertions(+)
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 3cb9429bd61..ad5d97d7463 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -2943,6 +2943,7 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
nir_lower_non_uniform_ssbo_access |
nir_lower_non_uniform_texture_access |
nir_lower_non_uniform_image_access);
+ NIR_PASS_V(nir[i], nir_lower_memory_model);
}
}
}
diff --git a/src/compiler/Makefile.sources b/src/compiler/Makefile.sources
index 6e18c620e3e..34e61c280a4 100644
--- a/src/compiler/Makefile.sources
+++ b/src/compiler/Makefile.sources
@@ -273,6 +273,7 @@ NIR_FILES = \
nir/nir_lower_io_to_scalar.c \
nir/nir_lower_io_to_vector.c \
nir/nir_lower_mediump_outputs.c \
+ nir/nir_lower_memory_model.c \
nir/nir_lower_non_uniform_access.c \
nir/nir_lower_packing.c \
nir/nir_lower_passthrough_edgeflags.c \
diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
index faa141c7f95..d0970011ec5 100644
--- a/src/compiler/nir/meson.build
+++ b/src/compiler/nir/meson.build
@@ -153,6 +153,7 @@ files_libnir = files(
'nir_lower_io_to_scalar.c',
'nir_lower_io_to_vector.c',
'nir_lower_mediump_outputs.c',
+ 'nir_lower_memory_model.c',
'nir_lower_non_uniform_access.c',
'nir_lower_packing.c',
'nir_lower_passthrough_edgeflags.c',
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 7bbf347a3b5..71143ffb0fc 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -4465,6 +4465,8 @@ bool nir_lower_interpolation(nir_shader *shader,
bool nir_lower_discard_to_demote(nir_shader *shader);
+bool nir_lower_memory_model(nir_shader *shader);
+
bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);
diff --git a/src/compiler/nir/nir_lower_memory_model.c b/src/compiler/nir/nir_lower_memory_model.c
new file mode 100644
index 00000000000..1dafa52869c
--- /dev/null
+++ b/src/compiler/nir/nir_lower_memory_model.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright © 2020 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Replaces make availability/visible semantics on barriers with
+ * ACCESS_COHERENT on memory loads/stores
+ */
+
+#include "nir/nir.h"
+#include "shader_enums.h"
+
/* Classify a memory-access intrinsic.
 *
 * Returns false if the intrinsic does not access buffer/image/deref memory
 * (it is then irrelevant to this pass).  On success, *mode is set to the
 * nir_variable_mode of the accessed memory and *reads / *writes are set to
 * true when the intrinsic reads / writes it.  The caller pre-initializes
 * both flags to false; atomics count as both a read and a write.
 */
static bool
get_intrinsic_info(nir_intrinsic_instr *intrin, nir_variable_mode *mode,
                   bool *reads, bool *writes)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_image_deref_load:
      /* src[0] is the image deref; its mode tells us which memory is hit */
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *reads = true;
      break;
   case nir_intrinsic_image_deref_store:
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *writes = true;
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *reads = true;
      *writes = true;
      break;
   case nir_intrinsic_load_ssbo:
      *mode = nir_var_mem_ssbo;
      *reads = true;
      break;
   case nir_intrinsic_store_ssbo:
      *mode = nir_var_mem_ssbo;
      *writes = true;
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      *mode = nir_var_mem_ssbo;
      *reads = true;
      *writes = true;
      break;
   case nir_intrinsic_load_global:
      *mode = nir_var_mem_global;
      *reads = true;
      break;
   case nir_intrinsic_store_global:
      *mode = nir_var_mem_global;
      *writes = true;
      break;
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_comp_swap:
      *mode = nir_var_mem_global;
      *reads = true;
      *writes = true;
      break;
   case nir_intrinsic_load_deref:
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *reads = true;
      break;
   case nir_intrinsic_store_deref:
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *writes = true;
      break;
   case nir_intrinsic_deref_atomic_add:
   case nir_intrinsic_deref_atomic_imin:
   case nir_intrinsic_deref_atomic_umin:
   case nir_intrinsic_deref_atomic_imax:
   case nir_intrinsic_deref_atomic_umax:
   case nir_intrinsic_deref_atomic_and:
   case nir_intrinsic_deref_atomic_or:
   case nir_intrinsic_deref_atomic_xor:
   case nir_intrinsic_deref_atomic_exchange:
   case nir_intrinsic_deref_atomic_comp_swap:
      *mode = nir_src_as_deref(intrin->src[0])->mode;
      *reads = true;
      *writes = true;
      break;
   default:
      /* Not a memory access this pass cares about. */
      return false;
   }
   return true;
}
+
/* Process one instruction during a make-visible (forward) or make-available
 * (backward) walk.
 *
 * vis_avail_sem is either NIR_MEMORY_MAKE_VISIBLE or
 * NIR_MEMORY_MAKE_AVAILABLE.  *cur_modes accumulates the variable modes for
 * which such a barrier semantic has been seen so far on this walk.
 *
 * Returns true if the instruction was modified: either a barrier whose
 * make-visible/available semantic was stripped (after recording its memory
 * modes), or a memory access that was marked ACCESS_COHERENT.
 */
static bool
visit_instr(nir_instr *instr, uint32_t *cur_modes, unsigned vis_avail_sem)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   /* A barrier carrying the semantic we lower: remember its memory modes and
    * remove the semantic, since it is now expressed via ACCESS_COHERENT on
    * the individual accesses instead. */
   if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
       (nir_intrinsic_memory_semantics(intrin) & vis_avail_sem)) {
      *cur_modes |= nir_intrinsic_memory_modes(intrin);

      unsigned semantics = nir_intrinsic_memory_semantics(intrin);
      nir_intrinsic_set_memory_semantics(
         intrin, semantics & ~vis_avail_sem);
      return true;
   }

   if (!*cur_modes)
      return false; /* early exit: no pending barrier modes on this path */

   nir_variable_mode mode;
   bool reads = false, writes = false;
   if (!get_intrinsic_info(intrin, &mode, &reads, &writes))
      return false;

   /* Visibility only affects loads, availability only affects stores. */
   if (!reads && vis_avail_sem == NIR_MEMORY_MAKE_VISIBLE)
      return false;
   if (!writes && vis_avail_sem == NIR_MEMORY_MAKE_AVAILABLE)
      return false;

   unsigned access = nir_intrinsic_access(intrin);

   /* Skip accesses that are already coherent, or that are declared
    * reorderable/non-readable/non-writeable and so (presumably) cannot
    * participate in inter-invocation communication. */
   if (access & (ACCESS_NON_READABLE | ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER | ACCESS_COHERENT))
      return false;

   if (*cur_modes & mode) {
      nir_intrinsic_set_access(intrin, access | ACCESS_COHERENT);
      return true;
   }

   return false;
}
+
+static bool
+lower_make_visible(nir_cf_node *cf_node, uint32_t *cur_modes)
+{
+ bool progress = false;
+ switch (cf_node->type) {
+ case nir_cf_node_block: {
+ nir_block *block = nir_cf_node_as_block(cf_node);
+ nir_foreach_instr(instr, block)
+ visit_instr(instr, cur_modes, NIR_MEMORY_MAKE_VISIBLE);
+ break;
+ }
+ case nir_cf_node_if: {
+ nir_if *nif = nir_cf_node_as_if(cf_node);
+ uint32_t cur_modes_then = *cur_modes;
+ uint32_t cur_modes_else = *cur_modes;
+ foreach_list_typed(nir_cf_node, if_node, node, &nif->then_list)
+ progress |= lower_make_visible(if_node, &cur_modes_then);
+ foreach_list_typed(nir_cf_node, if_node, node, &nif->else_list)
+ progress |= lower_make_visible(if_node, &cur_modes_else);
+ *cur_modes |= cur_modes_then | cur_modes_else;
+ break;
+ }
+ case nir_cf_node_loop: {
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
+ bool loop_progress;
+ do {
+ loop_progress = false;
+ foreach_list_typed(nir_cf_node, loop_node, node, &loop->body)
+ loop_progress |= lower_make_visible(loop_node, cur_modes);
+ progress |= loop_progress;
+ } while (loop_progress);
+ break;
+ }
+ case nir_cf_node_function:
+ unreachable("Invalid cf type");
+ }
+ return progress;
+}
+
+static bool
+lower_make_available(nir_cf_node *cf_node, uint32_t *cur_modes)
+{
+ bool progress = false;
+ switch (cf_node->type) {
+ case nir_cf_node_block: {
+ nir_block *block = nir_cf_node_as_block(cf_node);
+ nir_foreach_instr_reverse(instr, block)
+ visit_instr(instr, cur_modes, NIR_MEMORY_MAKE_AVAILABLE);
+ break;
+ }
+ case nir_cf_node_if: {
+ nir_if *nif = nir_cf_node_as_if(cf_node);
+ uint32_t cur_modes_then = *cur_modes;
+ uint32_t cur_modes_else = *cur_modes;
+ foreach_list_typed_reverse(nir_cf_node, if_node, node, &nif->then_list)
+ progress |= lower_make_available(if_node, &cur_modes_then);
+ foreach_list_typed_reverse(nir_cf_node, if_node, node, &nif->else_list)
+ progress |= lower_make_available(if_node, &cur_modes_else);
+ *cur_modes |= cur_modes_then | cur_modes_else;
+ break;
+ }
+ case nir_cf_node_loop: {
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
+ bool loop_progress;
+ do {
+ loop_progress = false;
+ foreach_list_typed_reverse(nir_cf_node, loop_node, node, &loop->body)
+ loop_progress |= lower_make_available(loop_node, cur_modes);
+ progress |= loop_progress;
+ } while (loop_progress);
+ break;
+ }
+ case nir_cf_node_function:
+ unreachable("Invalid cf type");
+ }
+ return progress;
+}
+
+bool
+nir_lower_memory_model(nir_shader *shader)
+{
+ bool progress = false;
+
+ struct exec_list *cf_list = &nir_shader_get_entrypoint(shader)->body;
+
+ uint32_t modes = 0;
+ foreach_list_typed(nir_cf_node, cf_node, node, cf_list)
+ progress |= lower_make_visible(cf_node, &modes);
+
+ modes = 0;
+ foreach_list_typed_reverse(nir_cf_node, cf_node, node, cf_list)
+ progress |= lower_make_available(cf_node, &modes);
+
+ return progress;
+}
More information about the mesa-commit
mailing list