[Lima] [PATCH v2 2/8] nir: add load_uniform lowering to scalar

Qiang Yu <yuq825@gmail.com>
Fri Mar 22 14:28:35 UTC 2019


This is needed by the lima gp compiler.
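
With this change, a driver that wants per-channel uniform loads can
presumably run the existing pass with nir_var_uniform included in the
mode mask, e.g. (sketch only, not necessarily how lima wires it up):

  NIR_PASS_V(nir, nir_lower_io_to_scalar, nir_var_uniform);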

Signed-off-by: Qiang Yu <yuq825@gmail.com>
---
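
As a rough illustration of why the per-channel COMPONENT index matters, a
scalar backend could turn a lowered load_uniform into an address roughly
like this (hypothetical helper, not lima code; assumes driver_location is
assigned in vec4 slots):

  static unsigned
  scalar_uniform_address(const nir_intrinsic_instr *intr)
  {
     /* 'base' selects the vec4 uniform slot, 'component' (the index
      * added by this patch) selects the channel within that slot. */
     return nir_intrinsic_base(intr) * 4 + nir_intrinsic_component(intr);
  }
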
 src/compiler/nir/nir_intrinsics.py        |  4 +--
 src/compiler/nir/nir_lower_io.c           |  2 +-
 src/compiler/nir/nir_lower_io_to_scalar.c | 41 +++++++++++++++++++++--
 3 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index ea092a991ca..3c67cbcb04d 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -607,8 +607,8 @@ def load(name, num_srcs, indices=[], flags=[]):
     intrinsic("load_" + name, [1] * num_srcs, dest_comp=0, indices=indices,
               flags=flags)
 
-# src[] = { offset }. const_index[] = { base, range }
-load("uniform", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
+# src[] = { offset }. const_index[] = { base, range, component }
+load("uniform", 1, [BASE, RANGE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
 # src[] = { buffer_index, offset }. const_index[] = { align_mul, align_offset }
 load("ubo", 2, [ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE, CAN_REORDER])
 # src[] = { offset }. const_index[] = { base, component }
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 749ac91d47e..a852ee1f34a 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -251,7 +251,7 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    load->num_components = intrin->num_components;
 
    nir_intrinsic_set_base(load, var->data.driver_location);
-   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
+   if (mode == nir_var_shader_in || mode == nir_var_shader_out || mode == nir_var_uniform)
       nir_intrinsic_set_component(load, component);
 
    if (load->intrinsic == nir_intrinsic_load_uniform)
diff --git a/src/compiler/nir/nir_lower_io_to_scalar.c b/src/compiler/nir/nir_lower_io_to_scalar.c
index 559d80b214a..1f0990d5dc5 100644
--- a/src/compiler/nir/nir_lower_io_to_scalar.c
+++ b/src/compiler/nir/nir_lower_io_to_scalar.c
@@ -27,8 +27,8 @@
 
 /** @file nir_lower_io_to_scalar.c
  *
- * Replaces nir_load_input/nir_store_output operations with num_components !=
- * 1 with individual per-channel operations.
+ * Replaces nir_load_input/nir_store_output/nir_load_uniform operations
+ * with num_components != 1 with individual per-channel operations.
  */
 
 static void
@@ -63,6 +63,39 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
    nir_instr_remove(&intr->instr);
 }
 
+static void
+lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
+{
+   b->cursor = nir_before_instr(&intr->instr);
+
+   assert(intr->dest.is_ssa);
+
+   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
+
+   for (unsigned i = 0; i < intr->num_components; i++) {
+      nir_intrinsic_instr *chan_intr =
+         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
+      nir_ssa_dest_init(&chan_intr->instr, &chan_intr->dest,
+                        1, intr->dest.ssa.bit_size, NULL);
+      chan_intr->num_components = 1;
+
+      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
+      nir_intrinsic_set_component(chan_intr, nir_intrinsic_component(intr) + i);
+      nir_intrinsic_set_range(chan_intr, nir_intrinsic_range(intr));
+      /* offset */
+      nir_src_copy(&chan_intr->src[0], &intr->src[0], chan_intr);
+
+      nir_builder_instr_insert(b, &chan_intr->instr);
+
+      loads[i] = &chan_intr->dest.ssa;
+   }
+
+   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
+                            nir_src_for_ssa(nir_vec(b, loads,
+                                                    intr->num_components)));
+   nir_instr_remove(&intr->instr);
+}
+
 static void
 lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 {
@@ -120,6 +153,10 @@ nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask)
                   if (mask & nir_var_shader_out)
                      lower_store_output_to_scalar(&b, intr);
                   break;
+               case nir_intrinsic_load_uniform:
+                  if (mask & nir_var_uniform)
+                     lower_load_uniform_to_scalar(&b, intr);
+                  break;
                default:
                   break;
                }
-- 
2.17.1