[Mesa-dev] [PATCH 4/5] nir: add a vectorization pass

Jason Ekstrand jason at jlekstrand.net
Wed Nov 18 11:25:35 PST 2015


On Sat, Nov 14, 2015 at 6:59 PM, Connor Abbott <cwabbott0 at gmail.com> wrote:
> This effectively does the opposite of nir_lower_alu_to_scalar, trying
> to combine per-component ALU operations with the same sources but
> different swizzles into one larger ALU operation. It uses a similar
> model as CSE, where we do a depth-first approach and keep around a hash
> set of instructions to be combined, but there are a few major
> differences:
>
> 1. For now, we only support entirely per-component ALU operations.
> 2. Since it's not always guaranteed that we'll be able to combine
> equivalent instructions, we keep a stack of equivalent instructions
> around, trying to combine new instructions with instructions on the
> stack.
>
> The pass isn't comprehensive by far; it can't handle operations where
> some of the sources are per-component and others aren't, and it can't
> handle phi nodes. But it should handle the more common cases, and it
> should be reasonably efficient.
>
> Signed-off-by: Connor Abbott <cwabbott0 at gmail.com>
> ---
>  src/glsl/Makefile.sources        |   1 +
>  src/glsl/nir/nir.h               |   2 +
>  src/glsl/nir/nir_opt_vectorize.c | 447 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 450 insertions(+)
>  create mode 100644 src/glsl/nir/nir_opt_vectorize.c
>
> diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
> index d4b02c1..7390975 100644
> --- a/src/glsl/Makefile.sources
> +++ b/src/glsl/Makefile.sources
> @@ -70,6 +70,7 @@ NIR_FILES = \
>         nir/nir_opt_peephole_select.c \
>         nir/nir_opt_remove_phis.c \
>         nir/nir_opt_undef.c \
> +       nir/nir_opt_vectorize.c \
>         nir/nir_print.c \
>         nir/nir_remove_dead_variables.c \
>         nir/nir_search.c \
> diff --git a/src/glsl/nir/nir.h b/src/glsl/nir/nir.h
> index beabcaf..c1c04fd 100644
> --- a/src/glsl/nir/nir.h
> +++ b/src/glsl/nir/nir.h
> @@ -2037,6 +2037,8 @@ bool nir_opt_remove_phis(nir_shader *shader);
>
>  bool nir_opt_undef(nir_shader *shader);
>
> +bool nir_opt_vectorize(nir_shader *shader);
> +
>  void nir_sweep(nir_shader *shader);
>
>  nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
> diff --git a/src/glsl/nir/nir_opt_vectorize.c b/src/glsl/nir/nir_opt_vectorize.c
> new file mode 100644
> index 0000000..2a34a42
> --- /dev/null
> +++ b/src/glsl/nir/nir_opt_vectorize.c
> @@ -0,0 +1,447 @@
> +/*
> + * Copyright © 2015 Connor Abbott
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + */
> +
> +#include "nir.h"
> +#include "nir_vla.h"
> +#include "nir_builder.h"
> +#include  "nir_array.h"
> +
> +#define HASH(hash, data) _mesa_fnv32_1a_accumulate((hash), (data))
> +
> +static uint32_t
> +hash_src(uint32_t hash, const nir_src *src)
> +{
> +   assert(src->is_ssa);
> +
> +   return HASH(hash, src->ssa);
> +}
> +
> +static uint32_t
> +hash_alu_src(uint32_t hash, const nir_alu_src *src)
> +{
> +   assert(!src->abs && !src->negate);
> +
> +   /* intentionally don't hash swizzle */
> +
> +   return hash_src(hash, &src->src);
> +}
> +
> +static uint32_t
> +hash_alu(uint32_t hash, const nir_alu_instr *instr)
> +{
> +   hash = HASH(hash, instr->op);
> +
> +   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
> +      hash = hash_alu_src(hash, &instr->src[i]);
> +
> +   return hash;
> +}
> +
> +static uint32_t
> +hash_instr(const nir_instr *instr)
> +{
> +   uint32_t hash = _mesa_fnv32_1a_offset_bias;
> +
> +   switch (instr->type) {
> +   case nir_instr_type_alu:
> +      return hash_alu(hash, nir_instr_as_alu(instr));
> +   default:
> +      unreachable("bad instruction type");
> +   }
> +}
> +
> +static bool
> +srcs_equal(const nir_src *src1, const nir_src *src2)
> +{
> +   assert(src1->is_ssa);
> +   assert(src2->is_ssa);
> +
> +   return src1->ssa == src2->ssa;
> +}
> +
> +static bool
> +alu_srcs_equal(const nir_alu_src *src1, const nir_alu_src *src2)
> +{
> +   assert(!src1->abs);
> +   assert(!src1->negate);
> +   assert(!src2->abs);
> +   assert(!src2->negate);
> +
> +   return srcs_equal(&src1->src, &src2->src);
> +}
> +
> +static bool
> +instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
> +{
> +   switch (instr1->type) {
> +   case nir_instr_type_alu: {
> +      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
> +      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);
> +
> +      if (alu1->op != alu2->op)
> +         return false;
> +
> +      for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
> +         if (!alu_srcs_equal(&alu1->src[i], &alu2->src[i]))
> +            return false;
> +      }
> +
> +      return true;
> +   }
> +
> +   default:
> +      unreachable("bad instruction type");
> +   }
> +}
> +
> +static bool
> +instr_can_rewrite(nir_instr *instr)
> +{
> +   switch (instr->type) {
> +   case nir_instr_type_alu: {
> +      nir_alu_instr *alu = nir_instr_as_alu(instr);
> +
> +      /* Don't try and vectorize mov's. Either they'll be handled by copy
> +       * prop, or they're actually necessary and trying to vectorize them
> +       * would result in fighting with copy prop.
> +       */
> +      if (alu->op == nir_op_imov || alu->op == nir_op_fmov)
> +         return false;
> +
> +      if (nir_op_infos[alu->op].output_size != 0)
> +         return false;
> +
> +      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
> +         if (nir_op_infos[alu->op].input_sizes[i] != 0)
> +            return false;
> +      }
> +
> +      return true;
> +   }
> +
> +   /* TODO support phi nodes */
> +   default:
> +      break;
> +   }
> +
> +   return false;
> +}
> +
> +/*
> + * Tries to combine two instructions whose sources are different components of
> + * the same instructions into one vectorized instruction. Note that instr1
> + * should dominate instr2.
> + */
> +
> +static nir_instr *
> +instr_try_combine(nir_instr *instr1, nir_instr *instr2)
> +{
> +   assert(instr1->type == nir_instr_type_alu);
> +   assert(instr2->type == nir_instr_type_alu);
> +   nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
> +   nir_alu_instr *alu2 = nir_instr_as_alu(instr2);
> +
> +   unsigned alu1_components = alu1->dest.dest.ssa.num_components;
> +   unsigned alu2_components = alu2->dest.dest.ssa.num_components;
> +   unsigned total_components = alu1_components + alu2_components;
> +
> +   if (total_components > 4)
> +      return NULL;

It would be nice if we could pick up on a = b.xy + c.xy and d = b.yz +
c.yz and turn that into e = b.xyz + c.xyz.  Don't need to fix that
today, but I thought I'd at least make a note.  That said, if we
combined this with a vector-narrowing pass, it might just clean that
up for us.

> +   nir_builder b;
> +   nir_builder_init(&b, nir_cf_node_get_function(&instr1->block->cf_node));

This is kind of expensive.  Could we init the builder once and pass it in?

> +   b.cursor = nir_after_instr(instr1);
> +
> +   nir_alu_instr *new_alu = nir_alu_instr_create(b.shader, alu1->op);
> +   nir_ssa_dest_init(&new_alu->instr, &new_alu->dest.dest, total_components, NULL);
> +   new_alu->dest.write_mask = (1 << total_components) - 1;
> +
> +   for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
> +      new_alu->src[i].src = alu1->src[i].src;
> +
> +      for (unsigned j = 0; j < alu1_components; j++)
> +         new_alu->src[i].swizzle[j] = alu1->src[i].swizzle[j];
> +
> +      for (unsigned j = 0; j < alu2_components; j++) {
> +         new_alu->src[i].swizzle[j + alu1_components] =
> +            alu2->src[i].swizzle[j];
> +      }
> +   }

Can't we just update the old instruction in-place to add the new swizzles?

> +
> +   nir_builder_instr_insert(&b, &new_alu->instr);
> +
> +   unsigned swiz[4] = {0, 1, 2, 3};
> +   nir_ssa_def *new_alu1 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
> +                                       alu1_components, false);

Then we might not need this.

> +   for (unsigned i = 0; i < alu2_components; i++)
> +      swiz[i] += alu1_components;
> +   nir_ssa_def *new_alu2 = nir_swizzle(&b, &new_alu->dest.dest.ssa, swiz,
> +                                       alu2_components, false);
> +
> +   nir_foreach_use_safe(&alu1->dest.dest.ssa, src) {
> +      if (src->parent_instr->type == nir_instr_type_alu) {
> +         /* For ALU instructions, rewrite the source directly to avoid a
> +          * round-trip through copy propagation.
> +          */
> +
> +         nir_instr_rewrite_src(src->parent_instr, src,
> +                               nir_src_for_ssa(&new_alu->dest.dest.ssa));
> +      } else {
> +         nir_instr_rewrite_src(src->parent_instr, src,
> +                               nir_src_for_ssa(new_alu1));
> +      }
> +   }

And we could delete all but the else case of this.

> +
> +   nir_foreach_if_use_safe(&alu1->dest.dest.ssa, src) {
> +      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu1));
> +   }
> +
> +   assert(list_empty(&alu1->dest.dest.ssa.uses));
> +   assert(list_empty(&alu1->dest.dest.ssa.if_uses));
> +
> +   nir_foreach_use_safe(&alu2->dest.dest.ssa, src) {
> +      if (src->parent_instr->type == nir_instr_type_alu) {
> +         /* For ALU instructions, rewrite the source directly to avoid a
> +          * round-trip through copy propagation.
> +          */

Not sure how worth it this really is.  We don't want to re-implement
copy-propagation in every pass.  That's why we have it in the first
place.  However, I could see an argument for doing it in that if we
don't do it here, we can't vectorize something that uses this and we
have to go back-and-forth between vectorize and copy-prop.  Ok, I
think I've convinced myself.

> +         nir_alu_instr *use = nir_instr_as_alu(src->parent_instr);
> +
> +         unsigned src_index = 5;

Personally, I'd make it an int and use -1, but 5 works too...

> +         for (unsigned i = 0; i < nir_op_infos[use->op].num_inputs; i++) {
> +            if (&use->src[i].src == src) {
> +               src_index = i;
> +               break;
> +            }
> +         }
> +         assert(src_index != 5);

There's actually a better way to do this that I've used a few places.
You can use exec_node_data to get the nir_alu_src from the nir_src and
then just do index = alu_src - use->src.  Then you assert that it's in
range.

> +
> +         nir_instr_rewrite_src(src->parent_instr, src,
> +                               nir_src_for_ssa(&new_alu->dest.dest.ssa));
> +
> +         for (unsigned i = 0;
> +              i < nir_ssa_alu_instr_src_components(use, src_index); i++) {
> +            use->src[src_index].swizzle[i] += alu1_components;
> +         }
> +      } else {
> +         nir_instr_rewrite_src(src->parent_instr, src,
> +                               nir_src_for_ssa(new_alu2));
> +      }
> +   }
> +
> +   nir_foreach_if_use_safe(&alu2->dest.dest.ssa, src) {
> +      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu2));
> +   }
> +
> +   assert(list_empty(&alu2->dest.dest.ssa.uses));
> +   assert(list_empty(&alu2->dest.dest.ssa.if_uses));
> +
> +   nir_instr_remove(instr1);
> +   nir_instr_remove(instr2);
> +
> +   return &new_alu->instr;
> +}
> +
> +/*
> + * Use an array to represent a stack of instructions that are equivalent.
> + *
> + * We push and pop instructions off the stack in dominance order. The first
> + * element dominates the second element which dominates the third, etc. When
> + * trying to add to the stack, first we try and combine the instruction with
> + * each of the instructions on the stack and, if successful, replace the
> + * instruction on the stack with the newly-combined instruction.
> + */
> +
> +static nir_array *
> +vec_instr_stack_create(void *mem_ctx)
> +{
> +   nir_array *stack = ralloc(mem_ctx, nir_array);
> +   nir_array_init(stack, mem_ctx);
> +   return stack;
> +}
> +
> +/* returns true if we were able to successfully replace the instruction */
> +
> +static bool
> +vec_instr_stack_push(nir_array *stack, nir_instr *instr)
> +{
> +   /* Walk the stack from child to parent to make live ranges shorter by
> +    * matching the closest thing we can
> +    */
> +   nir_array_foreach_reverse(stack, nir_instr *, stack_instr) {
> +      nir_instr *new_instr = instr_try_combine(*stack_instr, instr);
> +      if (new_instr) {
> +         *stack_instr = new_instr;
> +         return true;

Again, this would be simpler if we just rewrote the instruction in-place.

> +      }
> +   }
> +
> +   nir_array_add(stack, nir_instr *, instr);
> +   return false;
> +}
> +
> +static void
> +vec_instr_stack_pop(nir_array *stack, nir_instr *instr)
> +{
> +   nir_instr *last = nir_array_pop(stack, nir_instr *);
> +   assert(last == instr);
> +}
> +
> +static bool
> +cmp_func(const void *data1, const void *data2)
> +{
> +   const nir_array *arr1 = data1;
> +   const nir_array *arr2 = data2;
> +
> +   const nir_instr *instr1 = *nir_array_first(arr1, nir_instr *);
> +   const nir_instr *instr2 = *nir_array_first(arr2, nir_instr *);
> +
> +   return instrs_equal(instr1, instr2);
> +}
> +
> +static uint32_t
> +hash_stack(const void *data)
> +{
> +   const nir_array *stack = data;
> +   const nir_instr *first = *nir_array_first(stack, nir_instr *);
> +   return hash_instr(first);
> +}
> +
> +static struct set *
> +vec_instr_set_create(void)
> +{
> +   return _mesa_set_create(NULL, hash_stack, cmp_func);
> +}
> +
> +static void
> +vec_instr_set_destroy(struct set *instr_set)
> +{
> +   _mesa_set_destroy(instr_set, NULL);
> +}
> +
> +static bool
> +vec_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr)
> +{
> +   if (!instr_can_rewrite(instr))
> +      return false;
> +
> +   nir_array *new_stack = vec_instr_stack_create(instr_set);
> +   vec_instr_stack_push(new_stack, instr);
> +
> +   struct set_entry *entry = _mesa_set_search(instr_set, new_stack);

It seems to me like it would be simpler to use a top_instr -> stack
hash map here instead of a set where we pluck off the first
instruction.  It would allow us to avoid ralloc'ing a stack just so
that we can check it in the set.

> +
> +   if (entry) {
> +      ralloc_free(new_stack);
> +      nir_array *stack = (nir_array *) entry->key;
> +      return vec_instr_stack_push(stack, instr);
> +   }
> +
> +   _mesa_set_add(instr_set, new_stack);
> +   return false;
> +}
> +
> +static void
> +vec_instr_set_remove(struct set *instr_set, nir_instr *instr)
> +{
> +   if (!instr_can_rewrite(instr))
> +      return;
> +
> +   /*
> +    * It's pretty unfortunate that we have to do this, but it's a side effect
> +    * of the hash set interfaces. The hash set assumes that we're only
> +    * interested in storing one equivalent element at a time, and if we try to
> +    * insert a duplicate element it will remove the original. We could hack up
> +    * the comparison function to "know" which input is an instruction we
> +    * passed in and which is an array that's part of the entry, but that
> +    * wouldn't work because we need to pass an array to _mesa_set_add() in
> +    * vec_instr_add_or_rewrite() above, and _mesa_set_add() will call our
> +    * comparison function as well.
> +    */
> +   nir_array *temp = vec_instr_stack_create(instr_set);
> +   vec_instr_stack_push(temp, instr);
> +   struct set_entry *entry = _mesa_set_search(instr_set, temp);
> +   ralloc_free(temp);

See above comment.

> +
> +   if (entry) {
> +      nir_array *stack = (nir_array *) entry->key;
> +
> +      if (nir_array_size(stack, nir_instr *) > 1)
> +         vec_instr_stack_pop(stack, instr);
> +      else
> +         _mesa_set_remove(instr_set, entry);
> +   }
> +}
> +
> +static bool
> +vectorize_block(nir_block *block, struct set *instr_set)
> +{
> +   bool progress = false;
> +
> +   nir_foreach_instr_safe(block, instr) {
> +      if (vec_instr_set_add_or_rewrite(instr_set, instr))
> +         progress = true;
> +   }
> +
> +   for (unsigned i = 0; i < block->num_dom_children; i++) {
> +      nir_block *child = block->dom_children[i];
> +      progress |= vectorize_block(child, instr_set);
> +   }
> +
> +   nir_foreach_instr_reverse(block, instr)
> +      vec_instr_set_remove(instr_set, instr);
> +
> +   return progress;
> +}
> +
> +static bool
> +nir_opt_vectorize_impl(nir_function_impl *impl)
> +{
> +   struct set *instr_set = vec_instr_set_create();
> +
> +   nir_metadata_require(impl, nir_metadata_dominance);
> +
> +   bool progress = vectorize_block(nir_start_block(impl), instr_set);
> +
> +   if (progress)
> +      nir_metadata_preserve(impl, nir_metadata_block_index |
> +                                  nir_metadata_dominance);
> +
> +   vec_instr_set_destroy(instr_set);
> +   return progress;
> +}
> +
> +bool
> +nir_opt_vectorize(nir_shader *shader)
> +{
> +   bool progress = false;
> +
> +   nir_foreach_overload(shader, overload) {
> +      if (overload->impl)
> +         progress |= nir_opt_vectorize_impl(overload->impl);
> +   }
> +
> +   return progress;
> +}
> +
> --
> 2.4.3
>
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/mesa-dev


More information about the mesa-dev mailing list