Mesa (master): nir/load_store_vectorize: Add support for nir_var_mem_global

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Fri Apr 3 20:51:09 UTC 2020


Module: Mesa
Branch: master
Commit: 36a32af00897ee32ace344d020a8522c0a8c1a92
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=36a32af00897ee32ace344d020a8522c0a8c1a92

Author: Jason Ekstrand <jason at jlekstrand.net>
Date:   Tue Mar 31 03:22:57 2020 -0500

nir/load_store_vectorize: Add support for nir_var_mem_global

Reviewed-by: Rhys Perry <pendingchaos02 at gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4367>

---

 src/compiler/nir/nir_opt_load_store_vectorize.c | 57 +++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 4 deletions(-)

diff --git a/src/compiler/nir/nir_opt_load_store_vectorize.c b/src/compiler/nir/nir_opt_load_store_vectorize.c
index fded9f5ca49..cf8d0ef0ddd 100644
--- a/src/compiler/nir/nir_opt_load_store_vectorize.c
+++ b/src/compiler/nir/nir_opt_load_store_vectorize.c
@@ -80,6 +80,8 @@ case nir_intrinsic_##op: {\
    STORE(0, deref, -1, -1, 0, 1)
    LOAD(nir_var_mem_shared, shared, -1, 0, -1)
    STORE(nir_var_mem_shared, shared, -1, 1, -1, 0)
+   LOAD(nir_var_mem_global, global, -1, 0, -1)
+   STORE(nir_var_mem_global, global, -1, 1, -1, 0)
    ATOMIC(nir_var_mem_ssbo, ssbo, add, 0, 1, -1, 2)
    ATOMIC(nir_var_mem_ssbo, ssbo, imin, 0, 1, -1, 2)
    ATOMIC(nir_var_mem_ssbo, ssbo, umin, 0, 1, -1, 2)
@@ -122,6 +124,20 @@ case nir_intrinsic_##op: {\
    ATOMIC(nir_var_mem_shared, shared, fmin, -1, 0, -1, 1)
    ATOMIC(nir_var_mem_shared, shared, fmax, -1, 0, -1, 1)
    ATOMIC(nir_var_mem_shared, shared, fcomp_swap, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, add, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, imin, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, umin, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, imax, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, umax, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, and, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, or, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, xor, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, exchange, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, comp_swap, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, fadd, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, fmin, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, fmax, -1, 0, -1, 1)
+   ATOMIC(nir_var_mem_global, global, fcomp_swap, -1, 0, -1, 1)
    default:
       break;
 #undef ATOMIC
@@ -515,6 +531,27 @@ get_variable_mode(struct entry *entry)
    return entry->deref->mode;
 }
 
+static unsigned
+mode_to_index(nir_variable_mode mode)
+{
+   assert(util_bitcount(mode) == 1);
+
+   /* Globals and SSBOs should be tracked together */
+   if (mode == nir_var_mem_global)
+      mode = nir_var_mem_ssbo;
+
+   return ffs(mode) - 1;
+}
+
+static nir_variable_mode
+aliasing_modes(nir_variable_mode modes)
+{
+   /* Global and SSBO can alias */
+   if (modes & (nir_var_mem_ssbo | nir_var_mem_global))
+      modes |= nir_var_mem_ssbo | nir_var_mem_global;
+   return modes;
+}
+
 static struct entry *
 create_entry(struct vectorize_ctx *ctx,
              const struct intrinsic_info *info,
@@ -952,7 +989,8 @@ compare_entries(struct entry *a, struct entry *b)
 static bool
 may_alias(struct entry *a, struct entry *b)
 {
-   assert(get_variable_mode(a) == get_variable_mode(b));
+   assert(mode_to_index(get_variable_mode(a)) ==
+          mode_to_index(get_variable_mode(b)));
 
    /* if the resources/variables are definitively different and both have
     * ACCESS_RESTRICT, we can assume they do not alias. */
@@ -989,7 +1027,7 @@ check_for_aliasing(struct vectorize_ctx *ctx, struct entry *first, struct entry
                nir_var_mem_push_const | nir_var_mem_ubo))
       return false;
 
-   unsigned mode_index = ffs(mode) - 1;
+   unsigned mode_index = mode_to_index(mode);
    if (first->is_store) {
       /* find first entry that aliases "first" */
       list_for_each_entry_from(struct entry, next, first, &ctx->entries[mode_index], head) {
@@ -1031,6 +1069,10 @@ try_vectorize(nir_function_impl *impl, struct vectorize_ctx *ctx,
               struct entry *low, struct entry *high,
               struct entry *first, struct entry *second)
 {
+   if (!(get_variable_mode(first) & ctx->modes) ||
+       !(get_variable_mode(second) & ctx->modes))
+      return false;
+
    if (check_for_aliasing(ctx, first, second))
       return false;
 
@@ -1188,6 +1230,13 @@ handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *imp
 
    while (modes) {
       unsigned mode_index = u_bit_scan(&modes);
+      if ((1 << mode_index) == nir_var_mem_global) {
+         /* Global should be rolled in with SSBO */
+         assert(list_is_empty(&ctx->entries[mode_index]));
+         assert(ctx->loads[mode_index] == NULL);
+         assert(ctx->stores[mode_index] == NULL);
+         continue;
+      }
 
       if (acquire)
          *progress |= vectorize_entries(ctx, impl, ctx->loads[mode_index]);
@@ -1230,9 +1279,9 @@ process_block(nir_function_impl *impl, struct vectorize_ctx *ctx, nir_block *blo
       nir_variable_mode mode = info->mode;
       if (!mode)
          mode = nir_src_as_deref(intrin->src[info->deref_src])->mode;
-      if (!(mode & ctx->modes))
+      if (!(mode & aliasing_modes(ctx->modes)))
          continue;
-      unsigned mode_index = ffs(mode) - 1;
+      unsigned mode_index = mode_to_index(mode);
 
       /* create entry */
       struct entry *entry = create_entry(ctx, info, intrin);
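
For readers skimming the patch: the core change is that nir_var_mem_global loads, stores and atomics reuse the SSBO entry lists, since global pointers and SSBO accesses can alias, so the pass needs only one table slot for both modes (mode_to_index) and must widen mode masks when checking aliasing (aliasing_modes). Below is a minimal standalone sketch of that indexing trick. The enum and its bit values are placeholders chosen for illustration, not Mesa's real nir_variable_mode bits; only the fold-global-into-SSBO logic mirrors the patch.

/* Standalone sketch; placeholder mode bits, not the real nir_variable_mode. */
#include <assert.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

enum mode {
   MEM_UBO    = 1 << 0,   /* placeholder bit positions */
   MEM_SSBO   = 1 << 1,
   MEM_SHARED = 1 << 2,
   MEM_GLOBAL = 1 << 3,
};

/* Map a single mode bit to a table index; global reuses the SSBO slot so
 * accesses through either path land in the same entry list. */
static unsigned
mode_to_index(enum mode m)
{
   if (m == MEM_GLOBAL)
      m = MEM_SSBO;
   return (unsigned)ffs((int)m) - 1;
}

/* Widen a mode mask so SSBO and global are always considered together
 * when deciding whether two accesses may alias. */
static enum mode
aliasing_modes(enum mode m)
{
   if (m & (MEM_SSBO | MEM_GLOBAL))
      m |= MEM_SSBO | MEM_GLOBAL;
   return m;
}

int main(void)
{
   assert(mode_to_index(MEM_GLOBAL) == mode_to_index(MEM_SSBO));
   printf("ssbo/global share index %u; aliasing(SSBO) mask = 0x%x\n",
          mode_to_index(MEM_SSBO), (unsigned)aliasing_modes(MEM_SSBO));
   return 0;
}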


