[Mesa-dev] [PATCH 2/2] nir: add pass to lower atomic counters to SSBO

Rob Clark robdclark at gmail.com
Mon Apr 24 15:28:53 UTC 2017


This is equivalent to what mesa/st does in glsl_to_tgsi.  For most hw
there isn't a particularly good reason to treat these differently.
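
To illustrate the intended usage (the call site and the
num_abo_bindings name below are hypothetical, not part of this
patch), a driver would run the pass once after translating to NIR,
passing the number of atomic counter buffer bindings to reserve at
the bottom of the SSBO space:

   /* counters land in ssbo slots [0..num_abo_bindings), and the
    * shader's own SSBOs are shifted up by the same amount:
    */
   nir_lower_atomics_to_ssbo(nir, num_abo_bindings);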

Signed-off-by: Rob Clark <robdclark at gmail.com>
---
v2: do the interface_type thing properly

 src/compiler/Makefile.sources                |   1 +
 src/compiler/nir/nir.h                       |   1 +
 src/compiler/nir/nir_lower_atomics_to_ssbo.c | 222 +++++++++++++++++++++++++++
 3 files changed, 224 insertions(+)
 create mode 100644 src/compiler/nir/nir_lower_atomics_to_ssbo.c

diff --git a/src/compiler/Makefile.sources b/src/compiler/Makefile.sources
index 2455d4e..b2a3a42 100644
--- a/src/compiler/Makefile.sources
+++ b/src/compiler/Makefile.sources
@@ -208,6 +208,7 @@ NIR_FILES = \
 	nir/nir_lower_64bit_packing.c \
 	nir/nir_lower_alu_to_scalar.c \
 	nir/nir_lower_atomics.c \
+	nir/nir_lower_atomics_to_ssbo.c \
 	nir/nir_lower_bitmap.c \
 	nir/nir_lower_clamp_color_outputs.c \
 	nir/nir_lower_clip.c \
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index ce5b434..be35930 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -2546,6 +2546,7 @@ void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *option
 
 bool nir_lower_atomics(nir_shader *shader,
                        const struct gl_shader_program *shader_program);
+bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
 bool nir_lower_to_source_mods(nir_shader *shader);
 
 bool nir_lower_gs_intrinsics(nir_shader *shader);
diff --git a/src/compiler/nir/nir_lower_atomics_to_ssbo.c b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
new file mode 100644
index 0000000..2c04485
--- /dev/null
+++ b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright © 2017 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rob Clark <robclark at freedesktop.org>
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+
+/*
+ * Remap atomic counters to SSBOs.  Atomic counters get remapped to
+ * SSBO binding points [0..ssbo_offset) and the original SSBOs are
+ * remapped to [ssbo_offset..n) (mostly to align with what mesa/st
+ * does).
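+ *
+ * For example, with ssbo_offset == 2 (two counter buffer bindings):
+ *
+ *    counter binding 0  ->  ssbo index 0
+ *    counter binding 1  ->  ssbo index 1
+ *    app ssbo 0         ->  ssbo index 2
+ *    app ssbo 1         ->  ssbo index 3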
+ */
+
+static bool
+lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
+{
+   nir_intrinsic_op op;
+   switch (instr->intrinsic) {
+   case nir_intrinsic_ssbo_atomic_add:
+   case nir_intrinsic_ssbo_atomic_imin:
+   case nir_intrinsic_ssbo_atomic_umin:
+   case nir_intrinsic_ssbo_atomic_imax:
+   case nir_intrinsic_ssbo_atomic_umax:
+   case nir_intrinsic_ssbo_atomic_and:
+   case nir_intrinsic_ssbo_atomic_or:
+   case nir_intrinsic_ssbo_atomic_xor:
+   case nir_intrinsic_ssbo_atomic_exchange:
+   case nir_intrinsic_ssbo_atomic_comp_swap:
+   case nir_intrinsic_store_ssbo:
+   case nir_intrinsic_load_ssbo:
+      /* keep same opcode, remap buffer_index */
+      op = instr->intrinsic;
+      break;
+   case nir_intrinsic_atomic_counter_inc:
+   case nir_intrinsic_atomic_counter_add:
+   case nir_intrinsic_atomic_counter_dec:
+      /* inc and dec get remapped to add: */
+      op = nir_intrinsic_ssbo_atomic_add;
+      break;
+   case nir_intrinsic_atomic_counter_read:
+      op = nir_intrinsic_load_ssbo;
+      break;
+   case nir_intrinsic_atomic_counter_min:
+      op = nir_intrinsic_ssbo_atomic_umin;
+      break;
+   case nir_intrinsic_atomic_counter_max:
+      op = nir_intrinsic_ssbo_atomic_umax;
+      break;
+   case nir_intrinsic_atomic_counter_and:
+      op = nir_intrinsic_ssbo_atomic_and;
+      break;
+   case nir_intrinsic_atomic_counter_or:
+      op = nir_intrinsic_ssbo_atomic_or;
+      break;
+   case nir_intrinsic_atomic_counter_xor:
+      op = nir_intrinsic_ssbo_atomic_xor;
+      break;
+   case nir_intrinsic_atomic_counter_exchange:
+      op = nir_intrinsic_ssbo_atomic_exchange;
+      break;
+   case nir_intrinsic_atomic_counter_comp_swap:
+      op = nir_intrinsic_ssbo_atomic_comp_swap;
+      break;
+   default:
+      return false;
+   }
+
+   b->cursor = nir_before_instr(&instr->instr);
+
+   /* easy case, just remap SSBO buffer index: */
+   if (op == instr->intrinsic) {
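+      /* store_ssbo takes the buffer index in src[1]; the rest take it in src[0] */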
+      unsigned srcn = (op == nir_intrinsic_store_ssbo) ? 1 : 0;
+      nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[srcn], 1);
+      nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, ssbo_offset));
+      nir_instr_rewrite_src(&instr->instr,
+                            &instr->src[srcn],
+                            nir_src_for_ssa(new_idx));
+
+      return true;
+   }
+
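+   /* The counter's buffer binding (nir_intrinsic_base) becomes the SSBO
+    * index directly, since counters occupy slots [0..ssbo_offset):
+    */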
+   nir_ssa_def *buffer = nir_imm_int(b, nir_intrinsic_base(instr));
+   nir_ssa_def *temp = NULL;
+   nir_intrinsic_instr *new_instr =
+         nir_intrinsic_instr_create(ralloc_parent(instr), op);
+
+   /* a couple instructions need special handling since they don't map
+    * 1:1 with ssbo atomics
+    */
+   switch (instr->intrinsic) {
+   case nir_intrinsic_atomic_counter_inc:
+      /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
+      temp = nir_imm_int(b, +1);
+      new_instr->src[0] = nir_src_for_ssa(buffer);
+      new_instr->src[1] = instr->src[0];
+      new_instr->src[2] = nir_src_for_ssa(temp);
+      break;
+   case nir_intrinsic_atomic_counter_dec:
+      /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
+      /* NOTE: atomic_counter_dec returns the post-decrement value, but
+       * ssbo_atomic_add returns the pre-op value, so we adjust the
+       * result below */
+      temp = nir_imm_int(b, -1);
+      new_instr->src[0] = nir_src_for_ssa(buffer);
+      new_instr->src[1] = instr->src[0];
+      new_instr->src[2] = nir_src_for_ssa(temp);
+      break;
+   case nir_intrinsic_atomic_counter_read:
+      /* remapped to load_ssbo: { buffer_idx, offset } */
+      new_instr->src[0] = nir_src_for_ssa(buffer);
+      new_instr->src[1] = instr->src[0];
+      break;
+   default:
+      /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
+      new_instr->src[0] = nir_src_for_ssa(buffer);
+      new_instr->src[1] = instr->src[0];
+      new_instr->src[2] = instr->src[1];
+      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
+         new_instr->src[3] = instr->src[2];
+      break;
+   }
+
+   nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
+                     instr->dest.ssa.num_components,
+                     instr->dest.ssa.bit_size, NULL);
+   nir_instr_insert_before(&instr->instr, &new_instr->instr);
+   nir_instr_remove(&instr->instr);
+
+   if (instr->intrinsic == nir_intrinsic_atomic_counter_dec) {
+      b->cursor = nir_after_instr(&new_instr->instr);
+      nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
+      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(result));
+   } else {
+      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&new_instr->dest.ssa));
+   }
+
+   return true;
+}
+
+bool
+nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (function->impl) {
+         nir_builder builder;
+         nir_builder_init(&builder, function->impl);
+         nir_foreach_block(block, function->impl) {
+            nir_foreach_instr_safe(instr, block) {
+               if (instr->type == nir_instr_type_intrinsic)
+                  progress |= lower_instr(nir_instr_as_intrinsic(instr),
+                                          ssbo_offset, &builder);
+            }
+         }
+
+         nir_metadata_preserve(function->impl, nir_metadata_block_index |
+                                               nir_metadata_dominance);
+      }
+   }
+
+   if (progress) {
+      /* replace atomic_uint uniforms with SSBOs: */
+      unsigned replaced = 0;
+      nir_foreach_variable_safe(var, &shader->uniforms) {
+         if (glsl_get_base_type(var->type) == GLSL_TYPE_ATOMIC_UINT) {
+            exec_node_remove(&var->node);
+
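+            /* Multiple counters may share one binding; create only a
+             * single SSBO variable per binding:
+             */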
+            if (replaced & (1 << var->data.binding))
+               continue;
+
+            nir_variable *ssbo;
+            char name[16];
+
+            /* A length of 0 is used to denote unsized arrays */
+            const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0);
+
+            snprintf(name, sizeof(name), "counter%d", var->data.binding);
+
+            ssbo = nir_variable_create(shader, nir_var_shader_storage,
+                                       type, name);
+            ssbo->data.binding = var->data.binding;
+
+            struct glsl_struct_field field = {
+                  .type = type,
+                  .name = "counters",
+                  .location = -1,
+            };
+
+            ssbo->interface_type =
+                  glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
+                                      false, "counters");
+
+            replaced |= (1 << var->data.binding);
+         }
+      }
+   }
+
+   return progress;
+}
+
-- 
2.9.3


