<div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Thu, Jul 14, 2016 at 10:49 AM, Kenneth Graunke <span dir="ltr"><<a href="mailto:kenneth@whitecape.org" target="_blank">kenneth@whitecape.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">The original function was becoming a bit hard to read, with the details<br>
of creating and filling out load/store/atomic intrinsics all in one<br>
function.<br>
<br>
This patch makes helpers for creating each type of intrinsic, and also<br>
combines them with the *_op() helpers, as they're closely coupled and<br>
not too large.<br>
<br>
Signed-off-by: Kenneth Graunke <<a href="mailto:kenneth@whitecape.org">kenneth@whitecape.org</a>><br>
---<br>
 src/compiler/nir/nir_lower_io.c | 179 ++++++++++++++++++++--------------------<br>
 1 file changed, 88 insertions(+), 91 deletions(-)<br>
<br>
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c<br>
index 914e0e1..a2453b0 100644<br>
--- a/src/compiler/nir/nir_lower_io.c<br>
+++ b/src/compiler/nir/nir_lower_io.c<br>
@@ -167,18 +167,22 @@ get_io_offset(nir_builder *b, nir_deref_var *deref,<br>
    return offset;<br>
 }<br>
<br>
-static nir_intrinsic_op<br>
-load_op(nir_variable_mode mode, bool per_vertex)<br>
+static nir_intrinsic_instr *<br>
+lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,<br>
+           nir_ssa_def *vertex_index, nir_ssa_def *offset)<br>
 {<br>
+   nir_variable *var = intrin->variables[0]->var;<br>
+   nir_variable_mode mode = var->data.mode;<br>
+<br>
    nir_intrinsic_op op;<br>
    switch (mode) {<br>
    case nir_var_shader_in:<br>
-      op = per_vertex ? nir_intrinsic_load_per_vertex_input :<br>
-                        nir_intrinsic_load_input;<br>
+      op = vertex_index ? nir_intrinsic_load_per_vertex_input :<br>
+                          nir_intrinsic_load_input;<br>
       break;<br>
    case nir_var_shader_out:<br>
-      op = per_vertex ? nir_intrinsic_load_per_vertex_output :<br>
-                        nir_intrinsic_load_output;<br>
+      op = vertex_index ? nir_intrinsic_load_per_vertex_output :<br>
+                          nir_intrinsic_load_output;<br>
       break;<br>
    case nir_var_uniform:<br>
       op = nir_intrinsic_load_uniform;<br>
@@ -189,33 +193,73 @@ load_op(nir_variable_mode mode, bool per_vertex)<br>
    default:<br>
       unreachable("Unknown variable mode");<br>
    }<br>
-   return op;<br>
+<br>
+   nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx, op);<br>
+   load->num_components = intrin->num_components;<br>
+<br>
+   nir_intrinsic_set_base(load, var->data.driver_location);<br>
+   if (mode == nir_var_shader_in || mode == nir_var_shader_out) {<br>
+      nir_intrinsic_set_component(load, var->data.location_frac);<br>
+   }<br></blockquote><div><br></div><div>We don't need the braces<br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
+<br>
+   if (load->intrinsic == nir_intrinsic_load_uniform) {<br>
+      nir_intrinsic_set_range(load, state->type_size(var->type));<br>
+   }<br></blockquote><div><br></div><div>We don't need them here either.<br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
+<br>
+   if (vertex_index)<br>
+      load->src[0] = nir_src_for_ssa(vertex_index);<br>
+<br>
+   load->src[vertex_index ? 1 : 0] = nir_src_for_ssa(offset);<br>
+<br>
+   return load;<br>
 }<br>
<br>
-static nir_intrinsic_op<br>
-store_op(struct lower_io_state *state,<br>
-         nir_variable_mode mode, bool per_vertex)<br>
+static nir_intrinsic_instr *<br>
+lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,<br>
+            nir_ssa_def *vertex_index, nir_ssa_def *offset)<br>
 {<br>
+   nir_variable *var = intrin->variables[0]->var;<br>
+   nir_variable_mode mode = var->data.mode;<br>
+<br>
    nir_intrinsic_op op;<br>
-   switch (mode) {<br>
-   case nir_var_shader_out:<br>
-      op = per_vertex ? nir_intrinsic_store_per_vertex_output :<br>
-                        nir_intrinsic_store_output;<br>
-      break;<br>
-   case nir_var_shared:<br>
+   if (mode == nir_var_shared) {<br>
       op = nir_intrinsic_store_shared;<br>
-      break;<br>
-   default:<br>
-      unreachable("Unknown variable mode");<br>
+   } else {<br>
+      assert(mode == nir_var_shader_out);<br>
+      op = vertex_index ? nir_intrinsic_store_per_vertex_output :<br>
+                          nir_intrinsic_store_output;<br>
    }<br>
-   return op;<br>
+<br>
+   nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx, op);<br>
+   store->num_components = intrin->num_components;<br>
+<br>
+   nir_src_copy(&store->src[0], &intrin->src[0], store);<br>
+<br>
+   nir_intrinsic_set_base(store, var->data.driver_location);<br>
+   if (mode == nir_var_shader_out) {<br>
+      nir_intrinsic_set_component(store, var->data.location_frac);<br>
+   }<br></blockquote><div><br></div><div>braces<br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
+   nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));<br>
+<br>
+   if (vertex_index)<br>
+      store->src[1] = nir_src_for_ssa(vertex_index);<br>
+<br>
+   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);<br>
+<br>
+   return store;<br>
 }<br>
<br>
-static nir_intrinsic_op<br>
-atomic_op(nir_intrinsic_op opcode)<br>
+static nir_intrinsic_instr *<br>
+lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,<br>
+             nir_ssa_def *offset)<br>
 {<br>
-   switch (opcode) {<br>
-#define OP(O) case nir_intrinsic_var_##O: return nir_intrinsic_shared_##O;<br>
+   nir_variable *var = intrin->variables[0]->var;<br>
+<br>
+   assert(var->data.mode == nir_var_shared);<br>
+<br>
+   nir_intrinsic_op op;<br>
+   switch (intrin->intrinsic) {<br>
+#define OP(O) case nir_intrinsic_var_##O: op = nir_intrinsic_shared_##O; break;<br>
    OP(atomic_exchange)<br>
    OP(atomic_comp_swap)<br>
    OP(atomic_add)<br>
@@ -230,6 +274,18 @@ atomic_op(nir_intrinsic_op opcode)<br>
    default:<br>
       unreachable("Invalid atomic");<br>
    }<br>
+<br>
+   nir_intrinsic_instr *atomic =<br>
+      nir_intrinsic_instr_create(state->mem_ctx, op);<br>
+<br>
+   atomic->src[0] = nir_src_for_ssa(offset);<br>
+   atomic->const_index[0] = var->data.driver_location;<br></blockquote><div><br></div><div>Use nir_intrinsic_set_base here?<br><br></div><div>Also, in the source-copy loop just below: shouldn't that be nir_intrinsic_infos rather than nir_op_infos?  intrin->intrinsic is a nir_intrinsic_op, not a nir_op, so indexing nir_op_infos with it looks wrong — worth double-checking.<br><br></div><div>I like the cleanup!  Sorry for the nitpicks, but if we're going to clean it up, let's clean it up. :)<br><br></div><div>Reviewed-by: Jason Ekstrand <<a href="mailto:jason@jlekstrand.net">jason@jlekstrand.net</a>><br></div><div> </div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
+<br>
+   for (unsigned i = 0; i < nir_op_infos[intrin->intrinsic].num_inputs; i++) {<br>
+      nir_src_copy(&atomic->src[i+1], &intrin->src[i], atomic);<br>
+   }<br>
+<br>
+   return atomic;<br>
 }<br>
<br>
 static bool<br>
@@ -282,7 +338,7 @@ nir_lower_io_block(nir_block *block,<br>
          is_per_vertex_input(state, var) || is_per_vertex_output(state, var);<br>
<br>
       nir_ssa_def *offset;<br>
-      nir_ssa_def *vertex_index;<br>
+      nir_ssa_def *vertex_index = NULL;<br>
<br>
       offset = get_io_offset(b, intrin->variables[0],<br>
                              per_vertex ? &vertex_index : NULL,<br>
@@ -291,56 +347,13 @@ nir_lower_io_block(nir_block *block,<br>
       nir_intrinsic_instr *replacement;<br>
<br>
       switch (intrin->intrinsic) {<br>
-      case nir_intrinsic_load_var: {<br>
-         nir_intrinsic_instr *load =<br>
-            nir_intrinsic_instr_create(state->mem_ctx,<br>
-                                       load_op(mode, per_vertex));<br>
-         load->num_components = intrin->num_components;<br>
-<br>
-         nir_intrinsic_set_base(load,<br>
-            var->data.driver_location);<br>
-         if (mode == nir_var_shader_in || mode == nir_var_shader_out) {<br>
-            nir_intrinsic_set_component(load, var->data.location_frac);<br>
-         }<br>
-<br>
-         if (load->intrinsic == nir_intrinsic_load_uniform) {<br>
-            nir_intrinsic_set_range(load, state->type_size(var->type));<br>
-         }<br>
-<br>
-         if (per_vertex)<br>
-            load->src[0] = nir_src_for_ssa(vertex_index);<br>
-<br>
-         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);<br>
-<br>
-         replacement = load;<br>
+      case nir_intrinsic_load_var:<br>
+         replacement = lower_load(intrin, state, vertex_index, offset);<br>
          break;<br>
-      }<br>
-<br>
-      case nir_intrinsic_store_var: {<br>
-         assert(mode == nir_var_shader_out || mode == nir_var_shared);<br>
-<br>
-         nir_intrinsic_instr *store =<br>
-            nir_intrinsic_instr_create(state->mem_ctx,<br>
-                                       store_op(state, mode, per_vertex));<br>
-         store->num_components = intrin->num_components;<br>
-<br>
-         nir_src_copy(&store->src[0], &intrin->src[0], store);<br>
<br>
-         nir_intrinsic_set_base(store,<br>
-            var->data.driver_location);<br>
-         if (mode == nir_var_shader_out) {<br>
-            nir_intrinsic_set_component(store, var->data.location_frac);<br>
-         }<br>
-         nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));<br>
-<br>
-         if (per_vertex)<br>
-            store->src[1] = nir_src_for_ssa(vertex_index);<br>
-<br>
-         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);<br>
-<br>
-         replacement = store;<br>
+      case nir_intrinsic_store_var:<br>
+         replacement = lower_store(intrin, state, vertex_index, offset);<br>
          break;<br>
-      }<br>
<br>
       case nir_intrinsic_var_atomic_add:<br>
       case nir_intrinsic_var_atomic_imin:<br>
@@ -351,26 +364,10 @@ nir_lower_io_block(nir_block *block,<br>
       case nir_intrinsic_var_atomic_or:<br>
       case nir_intrinsic_var_atomic_xor:<br>
       case nir_intrinsic_var_atomic_exchange:<br>
-      case nir_intrinsic_var_atomic_comp_swap: {<br>
-         assert(mode == nir_var_shared);<br>
-<br>
-         nir_intrinsic_instr *atomic =<br>
-            nir_intrinsic_instr_create(state->mem_ctx,<br>
-                                       atomic_op(intrin->intrinsic));<br>
-<br>
-         atomic->src[0] = nir_src_for_ssa(offset);<br>
-<br>
-         atomic->const_index[0] = var->data.driver_location;<br>
-<br>
-         for (unsigned i = 0;<br>
-              i < nir_op_infos[intrin->intrinsic].num_inputs;<br>
-              i++) {<br>
-            nir_src_copy(&atomic->src[i+1], &intrin->src[i], atomic);<br>
-         }<br>
-<br>
-         replacement = atomic;<br>
+      case nir_intrinsic_var_atomic_comp_swap:<br>
+         assert(vertex_index == NULL);<br>
+         replacement = lower_atomic(intrin, state, offset);<br>
          break;<br>
-      }<br>
<br>
       default:<br>
          break;<br>
<span class="HOEnZb"><font color="#888888">--<br>
2.9.0<br>
<br>
_______________________________________________<br>
mesa-dev mailing list<br>
<a href="mailto:mesa-dev@lists.freedesktop.org">mesa-dev@lists.freedesktop.org</a><br>
<a href="https://lists.freedesktop.org/mailman/listinfo/mesa-dev" rel="noreferrer" target="_blank">https://lists.freedesktop.org/mailman/listinfo/mesa-dev</a><br>
</font></span></blockquote></div><br></div></div>