[Mesa-dev] [PATCH 4/6] nir: Introduce a nir_opt_move_comparisons() pass.

Kenneth Graunke kenneth at whitecape.org
Wed Aug 10 02:30:17 UTC 2016


This tries to move comparisons (a common source of boolean values)
closer to their first use.  For GPUs which use condition codes,
this can eliminate a lot of temporary booleans and comparisons
which reload the condition code register based on a boolean.

Signed-off-by: Kenneth Graunke <kenneth at whitecape.org>
---
 src/compiler/Makefile.sources               |   1 +
 src/compiler/nir/nir.h                      |   2 +
 src/compiler/nir/nir_opt_move_comparisons.c | 173 ++++++++++++++++++++++++++++
 3 files changed, 176 insertions(+)
 create mode 100644 src/compiler/nir/nir_opt_move_comparisons.c

diff --git a/src/compiler/Makefile.sources b/src/compiler/Makefile.sources
index 0ff9b23..008a101 100644
--- a/src/compiler/Makefile.sources
+++ b/src/compiler/Makefile.sources
@@ -226,6 +226,7 @@ NIR_FILES = \
 	nir/nir_opt_gcm.c \
 	nir/nir_opt_global_to_local.c \
+	nir/nir_opt_move_comparisons.c \
 	nir/nir_opt_peephole_select.c \
 	nir/nir_opt_remove_phis.c \
 	nir/nir_opt_undef.c \
 	nir/nir_phi_builder.c \
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 9ce5be2..79511a7 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -2574,6 +2574,8 @@ bool nir_opt_dead_cf(nir_shader *shader);
 
 void nir_opt_gcm(nir_shader *shader);
 
+bool nir_opt_move_comparisons(nir_shader *shader);
+
 bool nir_opt_peephole_select(nir_shader *shader);
 
 bool nir_opt_remove_phis(nir_shader *shader);
diff --git a/src/compiler/nir/nir_opt_move_comparisons.c b/src/compiler/nir/nir_opt_move_comparisons.c
new file mode 100644
index 0000000..74927c9
--- /dev/null
+++ b/src/compiler/nir/nir_opt_move_comparisons.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "nir.h"
+
+/**
+ * \file nir_opt_move_comparisons.c
+ *
+ * This pass moves ALU comparison operations just before their first use.
+ *
+ * It only moves instructions within a single basic block; cross-block
+ * movement is left to global code motion.
+ *
+ * Many GPUs generate condition codes for comparisons, and use predication
+ * for conditional selects and control flow.  In a sequence such as:
+ *
+ *     vec1 32 ssa_1 = flt a b
+ *     <some other operations>
+ *     vec1 32 ssa_2 = bcsel ssa_1 c d
+ *
+ * the backend would likely do the comparison, producing condition codes,
+ * then save those to a boolean value.  The intervening operations might
+ * trash the condition codes.  Then, in order to do the bcsel, it would
+ * need to re-populate the condition code register based on the boolean.
+ *
+ * By moving the comparison just before the bcsel, the condition codes could
+ * be used directly.  This eliminates the need to reload them from the boolean
+ * (generally eliminating an instruction).  It may also eliminate the need to
+ * create a boolean value altogether (unless it's used elsewhere), which could
+ * lower register pressure.
+ */
+
+/* Returns true for the ALU opcodes that produce a boolean by comparing
+ * two values (float and integer orderings plus equality tests).  These
+ * are the instructions this pass is willing to move.
+ */
+static bool
+is_comparison(nir_op op)
+{
+   return op == nir_op_flt || op == nir_op_fge ||
+          op == nir_op_feq || op == nir_op_fne ||
+          op == nir_op_ilt || op == nir_op_ult ||
+          op == nir_op_ige || op == nir_op_uge ||
+          op == nir_op_ieq || op == nir_op_ine;
+}
+
+/* If \p src is an SSA value defined by a comparison ALU instruction in
+ * \p block, relocate that comparison to just before \p before, or to the
+ * tail of \p block when \p before is NULL (used for an if-condition, which
+ * is effectively consumed at the end of the block).
+ *
+ * Returns true if an instruction was moved.
+ *
+ * NOTE(review): the move may temporarily break SSA dominance if the
+ * comparison has earlier uses in the same block; the caller's backwards
+ * walk (see move_comparisons()) re-hoists it at each earlier use, which
+ * restores dominance by the time the walk finishes.
+ */
+static bool
+move_comparison_source(nir_src *src, nir_block *block, struct exec_node *before)
+{
+   /* Only SSA sources whose defining instruction is a comparison living in
+    * the same block are candidates; cross-block motion is left to GCM.
+    */
+   if (src->is_ssa && src->ssa->parent_instr->block == block &&
+       src->ssa->parent_instr->type == nir_instr_type_alu &&
+       is_comparison(nir_instr_as_alu(src->ssa->parent_instr)->op)) {
+
+      /* Unlink the comparison from its current position, then splice it
+       * back in at the requested point.
+       */
+      struct exec_node *src_node = &src->ssa->parent_instr->node;
+      exec_node_remove(src_node);
+
+      if (before)
+         exec_node_insert_node_before(before, src_node);
+      else
+         exec_list_push_tail(&block->instr_list, src_node);
+
+      return true;
+   }
+
+   return false;
+}
+
+/* nir_foreach_src callback boilerplate: the walker only passes a single
+ * void *data pointer, so bundle the instruction being visited together
+ * with an accumulated progress flag.
+ */
+struct nomc_tuple
+{
+   nir_instr *instr;   /* the instruction whose sources are being walked */
+   bool progress;      /* set when any comparison source was moved */
+};
+
+/* Per-source callback for nir_foreach_src: try to pull a comparison down
+ * to just before the instruction that consumes it, recording progress in
+ * the tuple.
+ */
+static bool
+move_comparison_source_cb(nir_src *src, void *data)
+{
+   struct nomc_tuple *tuple = data;
+   nir_instr *use = tuple->instr;
+
+   tuple->progress |= move_comparison_source(src, use->block, &use->node);
+
+   return true; /* keep walking the remaining sources */
+}
+
+/* Move each comparison consumed in \p block down toward its uses; after
+ * the backwards walk completes, every moved comparison sits just before
+ * its first use in the block.  Returns true if anything moved.
+ */
+static bool
+move_comparisons(nir_block *block)
+{
+   bool progress = false;
+
+   /* We use a simple approach: walk instructions backwards.
+    *
+    * If the instruction's source is a comparison from the same block,
+    * simply move it here.  This may break SSA if it's used earlier in
+    * the block as well.  However, as we walk backwards, we'll find the
+    * earlier use and move it again, further up.  It eventually ends up
+    * dominating all uses again, restoring SSA form.
+    *
+    * Before walking instructions, we consider the if-condition at the
+    * end of the block, if one exists.  It's effectively a use at the
+    * bottom of the block.
+    */
+   nir_if *iff = nir_block_get_following_if(block);
+   if (iff) {
+      /* NULL "before" node: the condition's comparison goes to the very
+       * end of the block, right where the if consumes it.
+       */
+      progress |= move_comparison_source(&iff->condition, block, NULL);
+   }
+
+   nir_foreach_instr_reverse(instr, block) {
+      if (instr->type == nir_instr_type_alu) {
+         /* Walk ALU instruction sources backwards so that bcsel's boolean
+          * condition is processed last.
+          */
+         nir_alu_instr *alu = nir_instr_as_alu(instr);
+         for (int i = nir_op_infos[alu->op].num_inputs - 1; i >= 0; i--) {
+            progress |= move_comparison_source(&alu->src[i].src,
+                                               block, &instr->node);
+         }
+      } else {
+         /* Non-ALU consumers (intrinsics, phis, ...): visit every source
+          * through the generic walker, using the tuple to carry both the
+          * instruction and the progress flag through the void * argument.
+          */
+         struct nomc_tuple tuple = { instr, false };
+         nir_foreach_src(instr, move_comparison_source_cb, &tuple);
+         progress |= tuple.progress;
+      }
+   }
+
+   return progress;
+}
+
+/* Run the comparison-motion pass over every function in \p shader.
+ * Returns true if any instruction moved.
+ */
+bool
+nir_opt_move_comparisons(nir_shader *shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(func, shader) {
+      if (func->impl) {
+         bool impl_progress = false;
+
+         nir_foreach_block(block, func->impl) {
+            impl_progress |= move_comparisons(block);
+         }
+
+         if (impl_progress) {
+            /* Instructions only move within their own basic block, so the
+             * CFG, block indices, and dominance are untouched.  Liveness
+             * is deliberately NOT preserved: liveness information is
+             * derived from instruction order, which this pass changes.
+             * Call nir_metadata_preserve once per impl rather than once
+             * per modified block.
+             */
+            nir_metadata_preserve(func->impl, nir_metadata_block_index |
+                                              nir_metadata_dominance);
+            progress = true;
+         }
+      }
+   }
+
+   return progress;
+}
-- 
2.9.0



More information about the mesa-dev mailing list