Mesa (main): radv: avoid providing an align_offset to intrinsic builders

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Fri Jan 21 14:14:00 UTC 2022


Module: Mesa
Branch: main
Commit: 8951608f08721e73f68702eb0db7bacb7aded2d7
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=8951608f08721e73f68702eb0db7bacb7aded2d7

Author: Rhys Perry <pendingchaos02 at gmail.com>
Date:   Thu Jan  6 19:03:00 2022 +0000

radv: avoid providing an align_offset to intrinsic builders

Signed-off-by: Rhys Perry <pendingchaos02 at gmail.com>
Reviewed-by: Emma Anholt <emma at anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14455>

---

 src/amd/vulkan/radv_acceleration_structure.c | 196 +++++++++++----------------
 src/amd/vulkan/radv_pipeline_rt.c            |  50 +++----
 src/amd/vulkan/radv_shader.c                 |   3 +-
 3 files changed, 105 insertions(+), 144 deletions(-)
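
Background on the change: nir_build_load_global()/nir_build_store_global()
are generated builders whose optional intrinsic indices (.access,
.align_mul, .align_offset, ...) are taken from a zero-initialized struct,
so an omitted .align_offset is already 0, and an omitted .align_mul falls
back to the value's natural alignment (per the generated builder code at
the time). Writing ".align_offset = 0" explicitly is therefore redundant,
which is what this commit cleans up. A minimal hand-written sketch of the
pattern (illustrative only, not part of the patch; "b", "addr" and
"dst_addr" are assumed to be in scope):

   /* Before: the default is spelled out explicitly. */
   nir_ssa_def *v = nir_build_load_global(
      b, 3, 32, nir_iadd(b, addr, nir_imm_int64(b, 12)), .align_offset = 0);

   /* After: omit .align_offset and pass only indices that carry real
    * information, e.g. a known 16-byte alignment on a store. */
   nir_ssa_def *w = nir_build_load_global(
      b, 3, 32, nir_iadd(b, addr, nir_imm_int64(b, 12)));
   nir_build_store_global(b, w, dst_addr, .align_mul = 16);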

diff --git a/src/amd/vulkan/radv_acceleration_structure.c b/src/amd/vulkan/radv_acceleration_structure.c
index 397adee0db0..3950028bf51 100644
--- a/src/amd/vulkan/radv_acceleration_structure.c
+++ b/src/amd/vulkan/radv_acceleration_structure.c
@@ -721,16 +721,16 @@ get_indices(nir_builder *b, nir_ssa_def *addr, nir_ssa_def *type, nir_ssa_def *i
       nir_ssa_def *indices[3];
       for (unsigned i = 0; i < 3; ++i) {
          indices[i] = nir_build_load_global(
-            b, 1, 16, nir_iadd(b, addr, nir_u2u64(b, nir_iadd(b, index_id, nir_imm_int(b, 2 * i)))),
-            .align_offset = 0);
+            b, 1, 16,
+            nir_iadd(b, addr, nir_u2u64(b, nir_iadd(b, index_id, nir_imm_int(b, 2 * i)))));
       }
       nir_store_var(b, result, nir_u2u32(b, nir_vec(b, indices, 3)), 7);
    }
    nir_push_else(b, NULL);
    {
       nir_ssa_def *index_id = nir_umul24(b, id, nir_imm_int(b, 12));
-      nir_ssa_def *indices = nir_build_load_global(
-         b, 3, 32, nir_iadd(b, addr, nir_u2u64(b, index_id)), .align_offset = 0);
+      nir_ssa_def *indices =
+         nir_build_load_global(b, 3, 32, nir_iadd(b, addr, nir_u2u64(b, index_id)));
       nir_store_var(b, result, indices, 7);
    }
    nir_pop_if(b, NULL);
@@ -750,8 +750,8 @@ get_indices(nir_builder *b, nir_ssa_def *addr, nir_ssa_def *type, nir_ssa_def *i
       nir_push_else(b, NULL);
       {
          for (unsigned i = 0; i < 3; ++i) {
-            indices[i] = nir_build_load_global(b, 1, 8, nir_iadd(b, addr, nir_u2u64(b, indices[i])),
-                                               .align_offset = 0);
+            indices[i] =
+               nir_build_load_global(b, 1, 8, nir_iadd(b, addr, nir_u2u64(b, indices[i])));
          }
          nir_store_var(b, result, nir_u2u32(b, nir_vec(b, indices, 3)), 7);
       }
@@ -784,9 +784,8 @@ get_vertices(nir_builder *b, nir_ssa_def *addresses, nir_ssa_def *format, nir_ss
          switch (formats[f]) {
          case VK_FORMAT_R32G32B32_SFLOAT:
          case VK_FORMAT_R32G32B32A32_SFLOAT:
-            nir_store_var(
-               b, results[i],
-               nir_build_load_global(b, 3, 32, nir_channel(b, addresses, i), .align_offset = 0), 7);
+            nir_store_var(b, results[i],
+                          nir_build_load_global(b, 3, 32, nir_channel(b, addresses, i)), 7);
             break;
          case VK_FORMAT_R32G32_SFLOAT:
          case VK_FORMAT_R16G16_SFLOAT:
@@ -803,8 +802,7 @@ get_vertices(nir_builder *b, nir_ssa_def *addresses, nir_ssa_def *format, nir_ss
             nir_ssa_def *addr = nir_channel(b, addresses, i);
             for (unsigned j = 0; j < components; ++j)
                values[j] = nir_build_load_global(
-                  b, 1, comp_bits, nir_iadd(b, addr, nir_imm_int64(b, j * comp_bytes)),
-                  .align_offset = 0);
+                  b, 1, comp_bits, nir_iadd(b, addr, nir_imm_int64(b, j * comp_bytes)));
 
             for (unsigned j = components; j < 3; ++j)
                values[j] = nir_imm_intN_t(b, 0, comp_bits);
@@ -982,18 +980,14 @@ build_leaf_shader(struct radv_device *dev)
       nir_push_if(&b, nir_ine(&b, transform_addr, nir_imm_int64(&b, 0)));
       nir_store_var(
          &b, transform[0],
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 0)),
-                               .align_offset = 0),
-         0xf);
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 0))), 0xf);
       nir_store_var(
          &b, transform[1],
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 16)),
-                               .align_offset = 0),
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 16))),
          0xf);
       nir_store_var(
          &b, transform[2],
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 32)),
-                               .align_offset = 0),
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 32))),
          0xf);
       nir_pop_if(&b, NULL);
 
@@ -1011,11 +1005,11 @@ build_leaf_shader(struct radv_device *dev)
       for (unsigned i = 0; i < 4; ++i) {
          nir_build_store_global(&b, nir_vec(&b, node_data + i * 4, 4),
                                 nir_iadd(&b, triangle_node_dst_addr, nir_imm_int64(&b, i * 16)),
-                                .align_mul = 16, .align_offset = 0);
+                                .align_mul = 16);
       }
 
       nir_ssa_def *node_id = nir_ushr(&b, node_offset, nir_imm_int(&b, 3));
-      nir_build_store_global(&b, node_id, scratch_addr, .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr);
    }
    nir_push_else(&b, NULL);
    nir_push_if(&b, nir_ieq(&b, geom_type, nir_imm_int(&b, VK_GEOMETRY_TYPE_AABBS_KHR)));
@@ -1028,14 +1022,14 @@ build_leaf_shader(struct radv_device *dev)
       nir_ssa_def *aabb_node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
       nir_ssa_def *node_id =
          nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 7));
-      nir_build_store_global(&b, node_id, scratch_addr, .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr);
 
       aabb_addr = nir_iadd(&b, aabb_addr, nir_u2u64(&b, nir_imul(&b, aabb_stride, global_id)));
 
-      nir_ssa_def *min_bound = nir_build_load_global(
-         &b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 0)), .align_offset = 0);
-      nir_ssa_def *max_bound = nir_build_load_global(
-         &b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 12)), .align_offset = 0);
+      nir_ssa_def *min_bound =
+         nir_build_load_global(&b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 0)));
+      nir_ssa_def *max_bound =
+         nir_build_load_global(&b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 12)));
 
       nir_ssa_def *values[] = {nir_channel(&b, min_bound, 0),
                                nir_channel(&b, min_bound, 1),
@@ -1048,10 +1042,10 @@ build_leaf_shader(struct radv_device *dev)
 
       nir_build_store_global(&b, nir_vec(&b, values + 0, 4),
                              nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 0)),
-                             .align_mul = 16, .align_offset = 0);
+                             .align_mul = 16);
       nir_build_store_global(&b, nir_vec(&b, values + 4, 4),
                              nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 16)),
-                             .align_mul = 16, .align_offset = 0);
+                             .align_mul = 16);
    }
    nir_push_else(&b, NULL);
    { /* Instances */
@@ -1062,8 +1056,8 @@ build_leaf_shader(struct radv_device *dev)
       {
          nir_ssa_def *ptr = nir_iadd(&b, nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 3)),
                                      nir_u2u64(&b, nir_imul(&b, global_id, nir_imm_int(&b, 8))));
-         nir_ssa_def *addr = nir_pack_64_2x32(
-            &b, nir_build_load_global(&b, 2, 32, ptr, .align_mul = 8, .align_offset = 0));
+         nir_ssa_def *addr =
+            nir_pack_64_2x32(&b, nir_build_load_global(&b, 2, 32, ptr, .align_mul = 8));
          nir_store_var(&b, instance_addr_var, addr, 1);
       }
       nir_push_else(&b, NULL);
@@ -1076,21 +1070,18 @@ build_leaf_shader(struct radv_device *dev)
       nir_ssa_def *instance_addr = nir_load_var(&b, instance_addr_var);
 
       nir_ssa_def *inst_transform[] = {
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 0)),
-                               .align_offset = 0),
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 16)),
-                               .align_offset = 0),
-         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 32)),
-                               .align_offset = 0)};
-      nir_ssa_def *inst3 = nir_build_load_global(
-         &b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 48)), .align_offset = 0);
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 0))),
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 16))),
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 32)))};
+      nir_ssa_def *inst3 =
+         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 48)));
 
       nir_ssa_def *node_offset =
          nir_iadd(&b, node_dst_offset, nir_umul24(&b, global_id, nir_imm_int(&b, 128)));
       node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
       nir_ssa_def *node_id =
          nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 6));
-      nir_build_store_global(&b, node_id, scratch_addr, .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr);
 
       nir_variable *bounds[2] = {
          nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "min_bound"),
@@ -1102,12 +1093,12 @@ build_leaf_shader(struct radv_device *dev)
 
       nir_ssa_def *header_addr = nir_pack_64_2x32(&b, nir_channels(&b, inst3, 12));
       nir_push_if(&b, nir_ine(&b, header_addr, nir_imm_int64(&b, 0)));
-      nir_ssa_def *header_root_offset = nir_build_load_global(
-         &b, 1, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 0)), .align_offset = 0);
-      nir_ssa_def *header_min = nir_build_load_global(
-         &b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 8)), .align_offset = 0);
-      nir_ssa_def *header_max = nir_build_load_global(
-         &b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 20)), .align_offset = 0);
+      nir_ssa_def *header_root_offset =
+         nir_build_load_global(&b, 1, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 0)));
+      nir_ssa_def *header_min =
+         nir_build_load_global(&b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 8)));
+      nir_ssa_def *header_max =
+         nir_build_load_global(&b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 20)));
 
       nir_ssa_def *bound_defs[2][3];
       for (unsigned i = 0; i < 3; ++i) {
@@ -1133,8 +1124,7 @@ build_leaf_shader(struct radv_device *dev)
             vals[j] = nir_channel(&b, inst_transform[j], i);
 
          nir_build_store_global(&b, nir_vec(&b, vals, 3),
-                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 92 + 12 * i)),
-                                .align_offset = 0);
+                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 92 + 12 * i)));
       }
 
       nir_ssa_def *m_in[3][3], *m_out[3][3], *m_vec[3][4];
@@ -1150,8 +1140,7 @@ build_leaf_shader(struct radv_device *dev)
 
       for (unsigned i = 0; i < 3; ++i) {
          nir_build_store_global(&b, nir_vec(&b, m_vec[i], 4),
-                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 16 * i)),
-                                .align_offset = 0);
+                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 16 * i)));
       }
 
       nir_ssa_def *out0[4] = {
@@ -1159,14 +1148,13 @@ build_leaf_shader(struct radv_device *dev)
          nir_channel(&b, nir_unpack_64_2x32(&b, header_addr), 1), nir_channel(&b, inst3, 0),
          nir_channel(&b, inst3, 1)};
       nir_build_store_global(&b, nir_vec(&b, out0, 4),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)), .align_offset = 0);
-      nir_build_store_global(&b, global_id, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 88)),
-                             .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)));
+      nir_build_store_global(&b, global_id, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 88)));
       nir_pop_if(&b, NULL);
       nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 64)), .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 64)));
       nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 76)), .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 76)));
    }
    nir_pop_if(&b, NULL);
    nir_pop_if(&b, NULL);
@@ -1187,8 +1175,8 @@ determine_bounds(nir_builder *b, nir_ssa_def *node_addr, nir_ssa_def *node_id,
    {
       nir_ssa_def *positions[3];
       for (unsigned i = 0; i < 3; ++i)
-         positions[i] = nir_build_load_global(
-            b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)), .align_offset = 0);
+         positions[i] =
+            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)));
       nir_ssa_def *bounds[] = {positions[0], positions[0]};
       for (unsigned i = 1; i < 3; ++i) {
          bounds[0] = nir_fmin(b, bounds[0], positions[i]);
@@ -1204,8 +1192,7 @@ determine_bounds(nir_builder *b, nir_ssa_def *node_addr, nir_ssa_def *node_id,
       for (unsigned i = 0; i < 4; ++i)
          for (unsigned j = 0; j < 2; ++j)
             input_bounds[i][j] = nir_build_load_global(
-               b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 16 + i * 24 + j * 12)),
-               .align_offset = 0);
+               b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 16 + i * 24 + j * 12)));
       nir_ssa_def *bounds[] = {input_bounds[0][0], input_bounds[0][1]};
       for (unsigned i = 1; i < 4; ++i) {
          bounds[0] = nir_fmin(b, bounds[0], input_bounds[i][0]);
@@ -1220,8 +1207,8 @@ determine_bounds(nir_builder *b, nir_ssa_def *node_addr, nir_ssa_def *node_id,
    { /* Instances */
       nir_ssa_def *bounds[2];
       for (unsigned i = 0; i < 2; ++i)
-         bounds[i] = nir_build_load_global(
-            b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 64 + i * 12)), .align_offset = 0);
+         bounds[i] =
+            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 64 + i * 12)));
       nir_store_var(b, bounds_vars[0], bounds[0], 7);
       nir_store_var(b, bounds_vars[1], bounds[1], 7);
    }
@@ -1229,8 +1216,8 @@ determine_bounds(nir_builder *b, nir_ssa_def *node_addr, nir_ssa_def *node_id,
    { /* AABBs */
       nir_ssa_def *bounds[2];
       for (unsigned i = 0; i < 2; ++i)
-         bounds[i] = nir_build_load_global(
-            b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)), .align_offset = 0);
+         bounds[i] =
+            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)));
       nir_store_var(b, bounds_vars[0], bounds[0], 7);
       nir_store_var(b, bounds_vars[1], bounds[1], 7);
    }
@@ -1287,11 +1274,9 @@ build_internal_shader(struct radv_device *dev)
       &b, 4, 32,
       nir_iadd(&b, scratch_addr,
                nir_u2u64(&b, nir_iadd(&b, src_scratch_offset,
-                                      nir_ishl(&b, global_id, nir_imm_int(&b, 4))))),
-      .align_offset = 0);
+                                      nir_ishl(&b, global_id, nir_imm_int(&b, 4))))));
 
-   nir_build_store_global(&b, src_nodes, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)),
-                          .align_offset = 0);
+   nir_build_store_global(&b, src_nodes, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)));
 
    nir_ssa_def *total_bounds[2] = {
       nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7),
@@ -1310,11 +1295,9 @@ build_internal_shader(struct radv_device *dev)
       determine_bounds(&b, node_addr, nir_channel(&b, src_nodes, i), bounds);
       nir_pop_if(&b, NULL);
       nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 24 * i)),
-                             .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 24 * i)));
       nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 28 + 24 * i)),
-                             .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 28 + 24 * i)));
       total_bounds[0] = nir_fmin(&b, total_bounds[0], nir_load_var(&b, bounds[0]));
       total_bounds[1] = nir_fmax(&b, total_bounds[1], nir_load_var(&b, bounds[1]));
    }
@@ -1324,14 +1307,12 @@ build_internal_shader(struct radv_device *dev)
    nir_ssa_def *dst_scratch_addr = nir_iadd(
       &b, scratch_addr,
       nir_u2u64(&b, nir_iadd(&b, dst_scratch_offset, nir_ishl(&b, global_id, nir_imm_int(&b, 2)))));
-   nir_build_store_global(&b, node_id, dst_scratch_addr, .align_offset = 0);
+   nir_build_store_global(&b, node_id, dst_scratch_addr);
 
    nir_push_if(&b, fill_header);
-   nir_build_store_global(&b, node_id, node_addr, .align_offset = 0);
-   nir_build_store_global(&b, total_bounds[0], nir_iadd(&b, node_addr, nir_imm_int64(&b, 8)),
-                          .align_offset = 0);
-   nir_build_store_global(&b, total_bounds[1], nir_iadd(&b, node_addr, nir_imm_int64(&b, 20)),
-                          .align_offset = 0);
+   nir_build_store_global(&b, node_id, node_addr);
+   nir_build_store_global(&b, total_bounds[0], nir_iadd(&b, node_addr, nir_imm_int64(&b, 8)));
+   nir_build_store_global(&b, total_bounds[1], nir_iadd(&b, node_addr, nir_imm_int64(&b, 20)));
    nir_pop_if(&b, NULL);
    return b.shader;
 }
@@ -1397,27 +1378,24 @@ build_copy_shader(struct radv_device *dev)
       nir_ssa_def *instance_count = nir_build_load_global(
          &b, 1, 32,
          nir_iadd(&b, src_base_addr,
-                  nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, instance_count))),
-         .align_offset = 0);
+                  nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, instance_count))));
       nir_ssa_def *compacted_size = nir_build_load_global(
          &b, 1, 64,
          nir_iadd(&b, src_base_addr,
-                  nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))),
-         .align_offset = 0);
+                  nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))));
       nir_ssa_def *serialization_size = nir_build_load_global(
          &b, 1, 64,
-         nir_iadd(&b, src_base_addr,
-                  nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, serialization_size))),
-         .align_offset = 0);
+         nir_iadd(
+            &b, src_base_addr,
+            nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, serialization_size))));
 
       nir_store_var(&b, compacted_size_var, compacted_size, 1);
       nir_store_var(
          &b, instance_offset_var,
-         nir_build_load_global(
-            &b, 1, 32,
-            nir_iadd(&b, src_base_addr,
-                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, instance_offset))),
-            .align_offset = 0),
+         nir_build_load_global(&b, 1, 32,
+                               nir_iadd(&b, src_base_addr,
+                                        nir_imm_int64(&b, offsetof(struct radv_accel_struct_header,
+                                                                   instance_offset)))),
          1);
       nir_store_var(&b, instance_count_var, instance_count, 1);
 
@@ -1433,20 +1411,17 @@ build_copy_shader(struct radv_device *dev)
             &b, serialization_size,
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
-                                                serialization_size))),
-            .align_offset = 0);
+                                                serialization_size))));
          nir_build_store_global(
             &b, compacted_size,
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
-                                                compacted_size))),
-            .align_offset = 0);
+                                                compacted_size))));
          nir_build_store_global(
             &b, nir_u2u64(&b, instance_count),
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
-                                                instance_count))),
-            .align_offset = 0);
+                                                instance_count))));
       }
       nir_pop_if(&b, NULL);
    }
@@ -1457,8 +1432,7 @@ build_copy_shader(struct radv_device *dev)
          &b, 1, 32,
          nir_iadd(&b, src_base_addr,
                   nir_imm_int64(
-                     &b, offsetof(struct radv_accel_struct_serialization_header, instance_count))),
-         .align_offset = 0);
+                     &b, offsetof(struct radv_accel_struct_serialization_header, instance_count))));
       nir_ssa_def *src_offset =
          nir_iadd(&b, nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)),
                   nir_imul(&b, instance_count, nir_imm_int(&b, sizeof(uint64_t))));
@@ -1469,16 +1443,14 @@ build_copy_shader(struct radv_device *dev)
          nir_build_load_global(
             &b, 1, 64,
             nir_iadd(&b, header_addr,
-                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))),
-            .align_offset = 0),
+                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size)))),
          1);
       nir_store_var(
          &b, instance_offset_var,
-         nir_build_load_global(
-            &b, 1, 32,
-            nir_iadd(&b, header_addr,
-                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, instance_offset))),
-            .align_offset = 0),
+         nir_build_load_global(&b, 1, 32,
+                               nir_iadd(&b, header_addr,
+                                        nir_imm_int64(&b, offsetof(struct radv_accel_struct_header,
+                                                                   instance_offset)))),
          1);
       nir_store_var(&b, instance_count_var, instance_count, 1);
       nir_store_var(&b, src_offset_var, src_offset, 1);
@@ -1491,8 +1463,7 @@ build_copy_shader(struct radv_device *dev)
          nir_build_load_global(
             &b, 1, 64,
             nir_iadd(&b, src_base_addr,
-                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))),
-            .align_offset = 0),
+                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size)))),
          1);
 
       nir_store_var(&b, src_offset_var, nir_imm_int(&b, 0), 1);
@@ -1509,8 +1480,7 @@ build_copy_shader(struct radv_device *dev)
    nir_ssa_def *compacted_size = nir_build_load_global(
       &b, 1, 32,
       nir_iadd(&b, src_base_addr,
-               nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))),
-      .align_offset = 0);
+               nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))));
 
    nir_push_loop(&b);
    {
@@ -1522,8 +1492,7 @@ build_copy_shader(struct radv_device *dev)
          nir_ssa_def *src_addr = nir_iadd(&b, src_base_addr, nir_u2u64(&b, src_offset));
          nir_ssa_def *dst_addr = nir_iadd(&b, dst_base_addr, nir_u2u64(&b, dst_offset));
 
-         nir_ssa_def *value =
-            nir_build_load_global(&b, 4, 32, src_addr, .align_mul = 16, .align_offset = 0);
+         nir_ssa_def *value = nir_build_load_global(&b, 4, 32, src_addr, .align_mul = 16);
          nir_store_var(&b, value_var, value, 0xf);
 
          nir_ssa_def *instance_offset = nir_isub(&b, offset, nir_load_var(&b, instance_offset_var));
@@ -1549,8 +1518,8 @@ build_copy_shader(struct radv_device *dev)
                            nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)));
                instance_addr = nir_iadd(&b, dst_base_addr, nir_u2u64(&b, instance_addr));
 
-               nir_build_store_global(&b, nir_channels(&b, value, 3), instance_addr, .align_mul = 8,
-                                      .align_offset = 0);
+               nir_build_store_global(&b, nir_channels(&b, value, 3), instance_addr,
+                                      .align_mul = 8);
             }
             nir_push_else(&b, NULL);
             {
@@ -1561,8 +1530,8 @@ build_copy_shader(struct radv_device *dev)
                            nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)));
                instance_addr = nir_iadd(&b, src_base_addr, nir_u2u64(&b, instance_addr));
 
-               nir_ssa_def *instance_value = nir_build_load_global(
-                  &b, 2, 32, instance_addr, .align_mul = 8, .align_offset = 0);
+               nir_ssa_def *instance_value =
+                  nir_build_load_global(&b, 2, 32, instance_addr, .align_mul = 8);
 
                nir_ssa_def *values[] = {
                   nir_channel(&b, instance_value, 0),
@@ -1579,8 +1548,7 @@ build_copy_shader(struct radv_device *dev)
 
          nir_store_var(&b, offset_var, nir_iadd(&b, offset, increment), 1);
 
-         nir_build_store_global(&b, nir_load_var(&b, value_var), dst_addr, .align_mul = 16,
-                                .align_offset = 0);
+         nir_build_store_global(&b, nir_load_var(&b, value_var), dst_addr, .align_mul = 16);
       }
       nir_push_else(&b, NULL);
       {
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index 8c0f3b2ace3..bc050eedf86 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -348,7 +348,7 @@ load_sbt_entry(nir_builder *b, const struct rt_variables *vars, nir_ssa_def *idx
    nir_ssa_def *load_addr = addr;
    if (offset)
       load_addr = nir_iadd(b, load_addr, nir_imm_int64(b, offset));
-   nir_ssa_def *v_idx = nir_build_load_global(b, 1, 32, load_addr, .align_offset = 0);
+   nir_ssa_def *v_idx = nir_build_load_global(b, 1, 32, load_addr);
 
    nir_store_var(b, vars->idx, v_idx, 1);
 
@@ -629,10 +629,9 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
 
                   val = nir_vec(&b_shader, vals, 3);
                } else {
-                  val = nir_build_load_global(
-                     &b_shader, 3, 32,
-                     nir_iadd(&b_shader, instance_node_addr, nir_imm_int64(&b_shader, 92 + c * 12)),
-                     .align_offset = 0);
+                  val = nir_build_load_global(&b_shader, 3, 32,
+                                              nir_iadd(&b_shader, instance_node_addr,
+                                                       nir_imm_int64(&b_shader, 92 + c * 12)));
                }
                b_shader.cursor = nir_instr_remove(instr);
                nir_ssa_def_rewrite_uses(&intr->dest.ssa, val);
@@ -1260,8 +1259,7 @@ insert_traversal_triangle_case(struct radv_device *device,
       nir_ssa_def *triangle_info = nir_build_load_global(
          b, 2, 32,
          nir_iadd(b, build_node_to_addr(device, b, bvh_node),
-                  nir_imm_int64(b, offsetof(struct radv_bvh_triangle_node, triangle_id))),
-         .align_offset = 0);
+                  nir_imm_int64(b, offsetof(struct radv_bvh_triangle_node, triangle_id))));
       nir_ssa_def *primitive_id = nir_channel(b, triangle_info, 0);
       nir_ssa_def *geometry_id_and_flags = nir_channel(b, triangle_info, 1);
       nir_ssa_def *geometry_id = nir_iand(b, geometry_id_and_flags, nir_imm_int(b, 0xfffffff));
@@ -1365,8 +1363,8 @@ insert_traversal_aabb_case(struct radv_device *device,
    RADV_FROM_HANDLE(radv_pipeline_layout, layout, pCreateInfo->layout);
 
    nir_ssa_def *node_addr = build_node_to_addr(device, b, bvh_node);
-   nir_ssa_def *triangle_info = nir_build_load_global(
-      b, 2, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 24)), .align_offset = 0);
+   nir_ssa_def *triangle_info =
+      nir_build_load_global(b, 2, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 24)));
    nir_ssa_def *primitive_id = nir_channel(b, triangle_info, 0);
    nir_ssa_def *geometry_id_and_flags = nir_channel(b, triangle_info, 1);
    nir_ssa_def *geometry_id = nir_iand(b, geometry_id_and_flags, nir_imm_int(b, 0xfffffff));
@@ -1443,10 +1441,10 @@ insert_traversal_aabb_case(struct radv_device *device,
          nir_ssa_def *vec3_inf =
             nir_channels(b, nir_imm_vec4(b, INFINITY, INFINITY, INFINITY, 0), 0x7);
 
-         nir_ssa_def *bvh_lo = nir_build_load_global(
-            b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 0)), .align_offset = 0);
-         nir_ssa_def *bvh_hi = nir_build_load_global(
-            b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 12)), .align_offset = 0);
+         nir_ssa_def *bvh_lo =
+            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 0)));
+         nir_ssa_def *bvh_hi =
+            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 12)));
 
          bvh_lo = nir_fsub(b, bvh_lo, nir_load_var(b, trav_vars->origin));
          bvh_hi = nir_fsub(b, bvh_hi, nir_load_var(b, trav_vars->origin));
@@ -1820,9 +1818,8 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
    {
       nir_store_var(b, trav_vars.bvh_base, build_addr_to_node(b, accel_struct), 1);
 
-      nir_ssa_def *bvh_root =
-         nir_build_load_global(b, 1, 32, accel_struct, .access = ACCESS_NON_WRITEABLE,
-                               .align_mul = 64, .align_offset = 0);
+      nir_ssa_def *bvh_root = nir_build_load_global(
+         b, 1, 32, accel_struct, .access = ACCESS_NON_WRITEABLE, .align_mul = 64);
 
       /* We create a BVH descriptor that covers the entire memory range. That way we can always
        * use the same descriptor, which avoids divergence when different rays hit different
@@ -1840,8 +1837,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
       nir_store_var(b, trav_vars.instance_addr, nir_imm_int64(b, 0), 1);
 
       nir_store_var(b, trav_vars.stack, nir_iadd(b, stack_base, stack_entry_stride_def), 1);
-      nir_store_shared(b, bvh_root, stack_base, .base = 0, .align_mul = stack_entry_size,
-                       .align_offset = 0);
+      nir_store_shared(b, bvh_root, stack_base, .base = 0, .align_mul = stack_entry_size);
 
       nir_store_var(b, trav_vars.top_stack, nir_imm_int(b, 0), 1);
 
@@ -1867,7 +1863,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
                     nir_isub(b, nir_load_var(b, trav_vars.stack), stack_entry_stride_def), 1);
 
       nir_ssa_def *bvh_node = nir_load_shared(b, 1, 32, nir_load_var(b, trav_vars.stack), .base = 0,
-                                              .align_mul = stack_entry_size, .align_offset = 0);
+                                              .align_mul = stack_entry_size);
       nir_ssa_def *bvh_node_type = nir_iand(b, bvh_node, nir_imm_int(b, 7));
 
       bvh_node = nir_iadd(b, nir_load_var(b, trav_vars.bvh_base), nir_u2u(b, bvh_node, 64));
@@ -1895,8 +1891,8 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
             {
                /* instance */
                nir_ssa_def *instance_node_addr = build_node_to_addr(device, b, bvh_node);
-               nir_ssa_def *instance_data = nir_build_load_global(
-                  b, 4, 32, instance_node_addr, .align_mul = 64, .align_offset = 0);
+               nir_ssa_def *instance_data =
+                  nir_build_load_global(b, 4, 32, instance_node_addr, .align_mul = 64);
                nir_ssa_def *wto_matrix[] = {
                   nir_build_load_global(b, 4, 32,
                                         nir_iadd(b, instance_node_addr, nir_imm_int64(b, 16)),
@@ -1908,8 +1904,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
                                         nir_iadd(b, instance_node_addr, nir_imm_int64(b, 48)),
                                         .align_mul = 64, .align_offset = 48)};
                nir_ssa_def *instance_id = nir_build_load_global(
-                  b, 1, 32, nir_iadd(b, instance_node_addr, nir_imm_int64(b, 88)),
-                  .align_offset = 0);
+                  b, 1, 32, nir_iadd(b, instance_node_addr, nir_imm_int64(b, 88)));
                nir_ssa_def *instance_and_mask = nir_channel(b, instance_data, 2);
                nir_ssa_def *instance_mask = nir_ushr(b, instance_and_mask, nir_imm_int(b, 24));
 
@@ -1924,10 +1919,9 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
                              build_addr_to_node(
                                 b, nir_pack_64_2x32(b, nir_channels(b, instance_data, 0x3))),
                              1);
-               nir_store_shared(b,
-                                nir_iand(b, nir_channel(b, instance_data, 0), nir_imm_int(b, 63)),
-                                nir_load_var(b, trav_vars.stack), .base = 0,
-                                .align_mul = stack_entry_size, .align_offset = 0);
+               nir_store_shared(
+                  b, nir_iand(b, nir_channel(b, instance_data, 0), nir_imm_int(b, 63)),
+                  nir_load_var(b, trav_vars.stack), .base = 0, .align_mul = stack_entry_size);
                nir_store_var(b, trav_vars.stack,
                              nir_iadd(b, nir_load_var(b, trav_vars.stack), stack_entry_stride_def),
                              1);
@@ -1966,7 +1960,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
                nir_push_if(b, nir_ine(b, new_node, nir_imm_int(b, 0xffffffff)));
                {
                   nir_store_shared(b, new_node, nir_load_var(b, trav_vars.stack), .base = 0,
-                                   .align_mul = stack_entry_size, .align_offset = 0);
+                                   .align_mul = stack_entry_size);
                   nir_store_var(
                      b, trav_vars.stack,
                      nir_iadd(b, nir_load_var(b, trav_vars.stack), stack_entry_stride_def), 1);
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index 28d1264abab..3af59f675cd 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -328,8 +328,7 @@ lower_intrinsics(nir_shader *nir, const struct radv_pipeline_key *key,
                                         nir_iadd(&b, nir_channel(&b, intrin->src[0].ssa, 0),
                                                  nir_channel(&b, intrin->src[0].ssa, 1)));
 
-               def = nir_build_load_global(&b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE,
-                                           .align_offset = 0);
+               def = nir_build_load_global(&b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);
             } else {
                def = nir_vector_insert_imm(&b, intrin->src[0].ssa, nir_imm_int(&b, 0), 2);
             }
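
Note the asymmetry in what the patch removes: only redundant zero offsets
go away. Loads that genuinely know their address modulo a larger power of
two keep both indices, e.g. the world-to-object matrix rows in
insert_traversal() above, read at instance_node_addr + 16 and + 48 with
.align_mul = 64 and .align_offset = 16/48. Under NIR's usual definition
(the address is congruent to align_offset modulo align_mul), dropping
those non-zero offsets would change the stated guarantee. For example
(taken from the unchanged context lines in the diff above):

   /* A 64-byte-aligned instance node plus a 16-byte field offset:
    * addr % 64 == 16. The pair (64, 16) tells the compiler more than
    * the natural vec4 alignment alone would. */
   nir_build_load_global(b, 4, 32,
                         nir_iadd(b, instance_node_addr, nir_imm_int64(b, 16)),
                         .align_mul = 64, .align_offset = 16);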


