[Mesa-dev] [RFC] nir: add pass to lower atomic counters to SSBO
Jason Ekstrand
jason at jlekstrand.net
Thu Apr 13 15:44:38 UTC 2017
Nice! This may let us delete some back-end code too. I'll review later.
On Thu, Apr 13, 2017 at 8:23 AM, Rob Clark <robdclark at gmail.com> wrote:
> This is equivalent to what mesa/st does in glsl_to_tgsi. For most hw
> there isn't a particularly good reason to treat these differently.
>
> Signed-off-by: Rob Clark <robdclark at gmail.com>
> ---
> This is part of a larger patchset I'm working on to add SSBO and compute
> shader support in mesa/st glsl_to_nir path, and compute/SSBO support in
> freedreno. Still working on a few things and some cleanup on the rest
> of it, but I figured I should send this out to get some early comments.
>
> src/compiler/Makefile.sources | 1 +
> src/compiler/nir/nir.h | 1 +
> src/compiler/nir/nir_lower_atomics_to_ssbo.c | 215 +++++++++++++++++++++++++++
> 3 files changed, 217 insertions(+)
> create mode 100644 src/compiler/nir/nir_lower_atomics_to_ssbo.c
>
> diff --git a/src/compiler/Makefile.sources b/src/compiler/Makefile.sources
> index 2455d4e..b2a3a42 100644
> --- a/src/compiler/Makefile.sources
> +++ b/src/compiler/Makefile.sources
> @@ -208,6 +208,7 @@ NIR_FILES = \
> nir/nir_lower_64bit_packing.c \
> nir/nir_lower_alu_to_scalar.c \
> nir/nir_lower_atomics.c \
> + nir/nir_lower_atomics_to_ssbo.c \
> nir/nir_lower_bitmap.c \
> nir/nir_lower_clamp_color_outputs.c \
> nir/nir_lower_clip.c \
> diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
> index ce5b434..be35930 100644
> --- a/src/compiler/nir/nir.h
> +++ b/src/compiler/nir/nir.h
> @@ -2546,6 +2546,7 @@ void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *option
>
> bool nir_lower_atomics(nir_shader *shader,
> const struct gl_shader_program *shader_program);
> +bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
> bool nir_lower_to_source_mods(nir_shader *shader);
>
> bool nir_lower_gs_intrinsics(nir_shader *shader);
> diff --git a/src/compiler/nir/nir_lower_atomics_to_ssbo.c b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
> new file mode 100644
> index 0000000..f382e29
> --- /dev/null
> +++ b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
> @@ -0,0 +1,215 @@
> +/*
> + * Copyright © 2017 Red Hat
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + * Authors:
> + * Rob Clark <robclark at freedesktop.org>
> + */
> +
> +#include "nir.h"
> +#include "nir_builder.h"
> +
> +/*
> + * Remap atomic counters to SSBOs. Atomic counters get remapped to
> + * SSBO binding points [0..ssbo_offset) and the original SSBOs are
> + * remapped to [ssbo_offset..n) (mostly to align with what mesa/st
> + * does).
> + */
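> +
> +/* Purely illustrative example (ssbo_offset == 2 is a hypothetical value,
> + * not taken from this patch): an atomic counter using counter buffer
> + * binding 0 becomes an SSBO access with buffer index 0, while an existing
> + * load_ssbo of buffer 0 is rewritten to use buffer index 2.
> + */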
> +
> +static bool
> +lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
> +{
> + nir_intrinsic_op op;
> + switch (instr->intrinsic) {
> + case nir_intrinsic_ssbo_atomic_add:
> + case nir_intrinsic_ssbo_atomic_imin:
> + case nir_intrinsic_ssbo_atomic_umin:
> + case nir_intrinsic_ssbo_atomic_imax:
> + case nir_intrinsic_ssbo_atomic_umax:
> + case nir_intrinsic_ssbo_atomic_and:
> + case nir_intrinsic_ssbo_atomic_or:
> + case nir_intrinsic_ssbo_atomic_xor:
> + case nir_intrinsic_ssbo_atomic_exchange:
> + case nir_intrinsic_ssbo_atomic_comp_swap:
> + case nir_intrinsic_store_ssbo:
> + case nir_intrinsic_load_ssbo:
> + /* keep same opcode, remap buffer_index */
> + op = instr->intrinsic;
> + break;
> + case nir_intrinsic_atomic_counter_inc:
> + case nir_intrinsic_atomic_counter_add:
> + case nir_intrinsic_atomic_counter_dec:
> + /* inc and dec get remapped to add: */
> + op = nir_intrinsic_ssbo_atomic_add;
> + break;
> + case nir_intrinsic_atomic_counter_read:
> + op = nir_intrinsic_load_ssbo;
> + break;
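> + /* note: atomic counters are uint, so counter min/max map to the
> + * unsigned ssbo atomic variants:
> + */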
> + case nir_intrinsic_atomic_counter_min:
> + op = nir_intrinsic_ssbo_atomic_umin;
> + break;
> + case nir_intrinsic_atomic_counter_max:
> + op = nir_intrinsic_ssbo_atomic_umax;
> + break;
> + case nir_intrinsic_atomic_counter_and:
> + op = nir_intrinsic_ssbo_atomic_and;
> + break;
> + case nir_intrinsic_atomic_counter_or:
> + op = nir_intrinsic_ssbo_atomic_or;
> + break;
> + case nir_intrinsic_atomic_counter_xor:
> + op = nir_intrinsic_ssbo_atomic_xor;
> + break;
> + case nir_intrinsic_atomic_counter_exchange:
> + op = nir_intrinsic_ssbo_atomic_exchange;
> + break;
> + case nir_intrinsic_atomic_counter_comp_swap:
> + op = nir_intrinsic_ssbo_atomic_comp_swap;
> + break;
> + default:
> + return false;
> + }
> +
> + b->cursor = nir_before_instr(&instr->instr);
> +
> + /* easy case, just remap SSBO buffer index: */
> + if (op == instr->intrinsic) {
> + unsigned srcn = (op == nir_intrinsic_store_ssbo) ? 1 : 0;
> + nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[srcn], 1);
> + nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, ssbo_offset));
> + nir_instr_rewrite_src(&instr->instr,
> + &instr->src[srcn],
> + nir_src_for_ssa(new_idx));
> +
> + return true;
> + }
> +
> + nir_ssa_def *buffer = nir_imm_int(b, nir_intrinsic_base(instr));
> + nir_ssa_def *temp = NULL;
> + nir_intrinsic_instr *new_instr =
> + nir_intrinsic_instr_create(ralloc_parent(instr), op);
> +
> + /* a couple instructions need special handling since they don't map
> + * 1:1 with ssbo atomics
> + */
> + switch (instr->intrinsic) {
> + case nir_intrinsic_atomic_counter_inc:
> + /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
> + temp = nir_imm_int(b, +1);
> + new_instr->src[0] = nir_src_for_ssa(buffer);
> + new_instr->src[1] = instr->src[0];
> + new_instr->src[2] = nir_src_for_ssa(temp);
> + break;
> + case nir_intrinsic_atomic_counter_dec:
> + /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
> + /* NOTE semantic difference so we adjust the return value below */
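> + /* (counter_dec returns the post-decrement value, while ssbo_atomic_add
> + * returns the value before the atomic, so we iadd -1 to the result)
> + */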
> + temp = nir_imm_int(b, -1);
> + new_instr->src[0] = nir_src_for_ssa(buffer);
> + new_instr->src[1] = instr->src[0];
> + new_instr->src[2] = nir_src_for_ssa(temp);
> + break;
> + case nir_intrinsic_atomic_counter_read:
> + /* remapped to load_ssbo: { buffer_idx, offset } */
> + new_instr->src[0] = nir_src_for_ssa(buffer);
> + new_instr->src[1] = instr->src[0];
> + break;
> + default:
> + /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
> + new_instr->src[0] = nir_src_for_ssa(buffer);
> + new_instr->src[1] = instr->src[0];
> + new_instr->src[2] = instr->src[1];
> + if (op == nir_intrinsic_ssbo_atomic_comp_swap)
> + new_instr->src[3] = instr->src[2];
> + break;
> + }
> +
> + nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
> + instr->dest.ssa.num_components,
> + instr->dest.ssa.bit_size, NULL);
> + nir_instr_insert_before(&instr->instr, &new_instr->instr);
> + nir_instr_remove(&instr->instr);
> +
> + if (instr->intrinsic == nir_intrinsic_atomic_counter_dec) {
> + b->cursor = nir_after_instr(&new_instr->instr);
> + nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
> + nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(result));
> + } else {
> + nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&new_instr->dest.ssa));
> + }
> +
> + return true;
> +}
> +
> +bool
> +nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset)
> +{
> + bool progress = false;
> +
> + nir_foreach_function(function, shader) {
> + if (function->impl) {
> + nir_builder builder;
> + nir_builder_init(&builder, function->impl);
> + nir_foreach_block(block, function->impl) {
> + nir_foreach_instr_safe(instr, block) {
> + if (instr->type == nir_instr_type_intrinsic)
> + progress |= lower_instr(nir_instr_as_intrinsic(instr),
> + ssbo_offset, &builder);
> + }
> + }
> +
> + nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
> + }
> + }
> +
> + if (progress) {
> + /* replace atomic_uint uniforms with ssbo's: */
> + unsigned replaced = 0;
> + nir_foreach_variable_safe(var, &shader->uniforms) {
> + if (glsl_get_base_type(var->type) == GLSL_TYPE_ATOMIC_UINT) {
> + exec_node_remove(&var->node);
> +
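> + /* several counters can share a single binding point; only create
> + * one SSBO variable per binding (tracked in the 'replaced' bitmask):
> + */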
> + if (replaced & (1 << var->data.binding))
> + continue;
> +
> + nir_variable *ssbo;
> + char name[16];
> +
> + // TODO can we declare an array without size? Or do we have
> + // to find all the atomic_uint's in one pass and figure out
> + // max offset??
> + const struct glsl_type *type =
> + glsl_array_type(glsl_uint_type(), 256);
> +
> + snprintf(name, sizeof(name), "counter%d", var->data.binding);
> +
> + ssbo = nir_variable_create(shader, nir_var_shader_storage,
> + type, name);
> + ssbo->data.binding = var->data.binding;
> +
> + replaced |= (1 << var->data.binding);
> + }
> + }
> + }
> +
> + return progress;
> +}
> +
> --
> 2.9.3
>
>