Mesa (main): zink: remove repeated lazy batch dd casts

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Wed Aug 18 20:57:40 UTC 2021


Module: Mesa
Branch: main
Commit: f676e6a64b505d1468b6f2876222fd83619b460f
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=f676e6a64b505d1468b6f2876222fd83619b460f

Author: Mike Blumenkrantz <michael.blumenkrantz at gmail.com>
Date:   Fri May 21 17:44:02 2021 -0400

zink: remove repeated lazy batch dd casts

repeatedly calling bdd_lazy() at every use site has an ergonomic cost;
cast once per function (or pass the already-cast pointer to callees) instead

Reviewed-by: Hoe Hao Cheng <haochengho12907 at gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12427>

---

 src/gallium/drivers/zink/zink_descriptors_lazy.c | 98 ++++++++++++------------
 1 file changed, 51 insertions(+), 47 deletions(-)
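
For context, bdd_lazy() is a thin helper in this file that casts the batch
state's generic per-batch descriptor data to the lazy-manager type. A minimal
sketch of its assumed shape (the exact definition in zink_descriptors_lazy.c
may differ):

   /* assumed shape of the cast helper this commit hoists; the real
    * definition lives in zink_descriptors_lazy.c */
   ALWAYS_INLINE static struct zink_batch_descriptor_data_lazy *
   bdd_lazy(struct zink_batch_state *bs)
   {
      return (struct zink_batch_descriptor_data_lazy *)bs->dd;
   }

The commit performs this cast once per function, or threads the already-cast
bdd pointer through callee signatures, instead of re-invoking bdd_lazy(bs) at
every use site.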

diff --git a/src/gallium/drivers/zink/zink_descriptors_lazy.c b/src/gallium/drivers/zink/zink_descriptors_lazy.c
index 8b10b701d6d..a1f88b6fa47 100644
--- a/src/gallium/drivers/zink/zink_descriptors_lazy.c
+++ b/src/gallium/drivers/zink/zink_descriptors_lazy.c
@@ -305,11 +305,11 @@ create_pool(struct zink_screen *screen, unsigned num_type_sizes, VkDescriptorPoo
 }
 
 static struct zink_descriptor_pool *
-get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_state *bs, bool is_compute);
+get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute);
 
 static struct zink_descriptor_pool *
 check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct hash_entry *he, struct zink_program *pg,
-                 enum zink_descriptor_type type, struct zink_batch_state *bs, bool is_compute)
+                 enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
@@ -317,10 +317,10 @@ check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, st
       unsigned sets_to_alloc = MIN2(MAX2(pool->sets_alloc * 10, 10), ZINK_DEFAULT_MAX_DESCS) - pool->sets_alloc;
       if (!sets_to_alloc) {
          /* overflowed pool: queue for deletion on next reset */
-         util_dynarray_append(&bdd_lazy(bs)->overflowed_pools, struct zink_descriptor_pool*, pool);
-         _mesa_hash_table_remove(&bdd_lazy(bs)->pools[type], he);
+         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
+         _mesa_hash_table_remove(&bdd->pools[type], he);
          ctx->oom_flush = true;
-         return get_descriptor_pool_lazy(ctx, pg, type, bs, is_compute);
+         return get_descriptor_pool_lazy(ctx, pg, type, bdd, is_compute);
       }
       if (!zink_descriptor_util_alloc_sets(screen, pg->dsl[type + 1],
                                            pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc))
@@ -331,9 +331,9 @@ check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, st
 }
 
 static struct zink_descriptor_pool *
-create_push_pool(struct zink_screen *screen, struct zink_batch_state *bs, bool is_compute)
+create_push_pool(struct zink_screen *screen, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
 {
-   struct zink_descriptor_pool *pool = rzalloc(bs, struct zink_descriptor_pool);
+   struct zink_descriptor_pool *pool = rzalloc(bdd, struct zink_descriptor_pool);
    VkDescriptorPoolSize sizes;
    sizes.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    if (is_compute)
@@ -345,7 +345,7 @@ create_push_pool(struct zink_screen *screen, struct zink_batch_state *bs, bool i
 }
 
 static struct zink_descriptor_pool *
-check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_state *bs, bool is_compute)
+check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    /* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
@@ -353,10 +353,10 @@ check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *poo
       unsigned sets_to_alloc = MIN2(MAX2(pool->sets_alloc * 10, 10), ZINK_DEFAULT_MAX_DESCS) - pool->sets_alloc;
       if (!sets_to_alloc) {
          /* overflowed pool: queue for deletion on next reset */
-         util_dynarray_append(&bdd_lazy(bs)->overflowed_pools, struct zink_descriptor_pool*, pool);
-         bdd_lazy(bs)->push_pool[is_compute] = create_push_pool(screen, bs, is_compute);
+         util_dynarray_append(&bdd->overflowed_pools, struct zink_descriptor_pool*, pool);
+         bdd->push_pool[is_compute] = create_push_pool(screen, bdd, is_compute);
          ctx->oom_flush = true;
-         return check_push_pool_alloc(ctx, bdd_lazy(bs)->push_pool[is_compute], bs, is_compute);
+         return check_push_pool_alloc(ctx, bdd->push_pool[is_compute], bdd, is_compute);
       }
       if (!zink_descriptor_util_alloc_sets(screen, ctx->dd->push_dsl[is_compute]->layout,
                                            pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc))
@@ -367,16 +367,16 @@ check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *poo
 }
 
 static struct zink_descriptor_pool *
-get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_state *bs, bool is_compute)
+get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
-   struct hash_entry *he = _mesa_hash_table_search(&bdd_lazy(bs)->pools[type], pg->dd->layout_key[type]);
+   struct hash_entry *he = _mesa_hash_table_search(&bdd->pools[type], pg->dd->layout_key[type]);
    struct zink_descriptor_pool *pool;
    if (he) {
       pool = he->data;
-      return check_pool_alloc(ctx, pool, he, pg, type, bs, is_compute);
+      return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
    }
-   pool = rzalloc(bs, struct zink_descriptor_pool);
+   pool = rzalloc(bdd, struct zink_descriptor_pool);
    if (!pool)
       return NULL;
    unsigned idx = zink_descriptor_type_to_size_idx(type);
@@ -389,8 +389,8 @@ get_descriptor_pool_lazy(struct zink_context *ctx, struct zink_program *pg, enum
       ralloc_free(pool);
       return NULL;
    }
-   _mesa_hash_table_insert(&bdd_lazy(bs)->pools[type], pg->dd->layout_key[type], pool);
-   return check_pool_alloc(ctx, pool, he, pg, type, bs, is_compute);
+   _mesa_hash_table_insert(&bdd->pools[type], pg->dd->layout_key[type], pool);
+   return check_pool_alloc(ctx, pool, he, pg, type, bdd, is_compute);
 }
 
 ALWAYS_INLINE static VkDescriptorSet
@@ -404,11 +404,11 @@ get_descriptor_set_lazy(struct zink_descriptor_pool *pool)
 }
 
 static bool
-populate_sets(struct zink_context *ctx, struct zink_program *pg, uint8_t *changed_sets, bool need_push, VkDescriptorSet *sets)
+populate_sets(struct zink_context *ctx, struct zink_batch_descriptor_data_lazy *bdd,
+              struct zink_program *pg, uint8_t *changed_sets, bool need_push, VkDescriptorSet *sets)
 {
-   struct zink_batch_state *bs = ctx->batch.state;
    if (need_push && !zink_screen(ctx->base.screen)->info.have_KHR_push_descriptor) {
-         struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd_lazy(bs)->push_pool[pg->is_compute], bs, pg->is_compute);
+         struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[pg->is_compute], bdd, pg->is_compute);
          sets[0] = get_descriptor_set_lazy(pool);
          if (!sets[0])
             return false;
@@ -416,7 +416,7 @@ populate_sets(struct zink_context *ctx, struct zink_program *pg, uint8_t *change
       sets[0] = VK_NULL_HANDLE;
    u_foreach_bit(type, *changed_sets) {
       if (pg->dd->layout_key[type]) {
-         struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bs, pg->is_compute);
+         struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bdd, pg->is_compute);
          sets[type + 1] = get_descriptor_set_lazy(pool);
       } else
          sets[type + 1] = ctx->dd->dummy_set;
@@ -439,37 +439,38 @@ zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    struct zink_batch *batch = &ctx->batch;
    struct zink_batch_state *bs = ctx->batch.state;
+   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
    struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
 
-   bool batch_changed = !bdd_lazy(bs)->pg[is_compute];
+   bool batch_changed = !bdd->pg[is_compute];
    if (batch_changed) {
       /* update all sets and bind null sets */
       dd_lazy(ctx)->state_changed[is_compute] = pg->dd->binding_usage;
       dd_lazy(ctx)->push_state_changed[is_compute] = !!pg->dd->push_usage;
    }
 
-   if (pg != bdd_lazy(bs)->pg[is_compute]) {
+   if (pg != bdd->pg[is_compute]) {
       /* if we don't already know that we have to update all sets,
        * check to see if any dsls changed
        *
        * also always update the dsl pointers on program change
        */
-       for (unsigned i = 0; i < ARRAY_SIZE(bdd_lazy(bs)->dsl[is_compute]); i++) {
+       for (unsigned i = 0; i < ARRAY_SIZE(bdd->dsl[is_compute]); i++) {
           /* push set is already detected, start at 1 */
-          if (bdd_lazy(bs)->dsl[is_compute][i] != pg->dsl[i + 1])
+          if (bdd->dsl[is_compute][i] != pg->dsl[i + 1])
              dd_lazy(ctx)->state_changed[is_compute] |= BITFIELD_BIT(i);
-          bdd_lazy(bs)->dsl[is_compute][i] = pg->dsl[i + 1];
+          bdd->dsl[is_compute][i] = pg->dsl[i + 1];
        }
-       dd_lazy(ctx)->push_state_changed[is_compute] |= bdd_lazy(bs)->push_usage[is_compute] != pg->dd->push_usage;
-       bdd_lazy(bs)->push_usage[is_compute] = pg->dd->push_usage;
+       dd_lazy(ctx)->push_state_changed[is_compute] |= bdd->push_usage[is_compute] != pg->dd->push_usage;
+       bdd->push_usage[is_compute] = pg->dd->push_usage;
    }
-   bdd_lazy(bs)->pg[is_compute] = pg;
+   bdd->pg[is_compute] = pg;
 
    VkDescriptorSet desc_sets[5];
    uint8_t changed_sets = pg->dd->binding_usage & dd_lazy(ctx)->state_changed[is_compute];
    bool need_push = pg->dd->push_usage &&
                     (dd_lazy(ctx)->push_state_changed[is_compute] || batch_changed);
-   if (!populate_sets(ctx, pg, &changed_sets, need_push, desc_sets)) {
+   if (!populate_sets(ctx, bdd, pg, &changed_sets, need_push, desc_sets)) {
       debug_printf("ZINK: couldn't get descriptor sets!\n");
       return;
    }
@@ -512,7 +513,7 @@ zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
       dd_lazy(ctx)->push_state_changed[is_compute] = false;
    }
    /* set again in case of flushing */
-   bdd_lazy(bs)->pg[is_compute] = pg;
+   bdd->pg[is_compute] = pg;
    ctx->dd->pg[is_compute] = pg;
 }
 
@@ -530,17 +531,18 @@ zink_batch_descriptor_deinit_lazy(struct zink_screen *screen, struct zink_batch_
 {
    if (!bs->dd)
       return;
+   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
    if (screen->info.have_KHR_descriptor_update_template) {
       for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
-         hash_table_foreach(&bdd_lazy(bs)->pools[i], entry) {
+         hash_table_foreach(&bdd->pools[i], entry) {
             struct zink_descriptor_pool *pool = (void*)entry->data;
             vkDestroyDescriptorPool(screen->dev, pool->pool, NULL);
          }
       }
-      if (bdd_lazy(bs)->push_pool[0])
-         vkDestroyDescriptorPool(screen->dev, bdd_lazy(bs)->push_pool[0]->pool, NULL);
-      if (bdd_lazy(bs)->push_pool[1])
-         vkDestroyDescriptorPool(screen->dev, bdd_lazy(bs)->push_pool[1]->pool, NULL);
+      if (bdd->push_pool[0])
+         vkDestroyDescriptorPool(screen->dev, bdd->push_pool[0]->pool, NULL);
+      if (bdd->push_pool[1])
+         vkDestroyDescriptorPool(screen->dev, bdd->push_pool[1]->pool, NULL);
    }
    ralloc_free(bs->dd);
 }
@@ -557,25 +559,26 @@ zink_batch_descriptor_reset_lazy(struct zink_screen *screen, struct zink_batch_s
 {
    if (!screen->info.have_KHR_descriptor_update_template)
       return;
+   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
-      hash_table_foreach(&bdd_lazy(bs)->pools[i], entry) {
+      hash_table_foreach(&bdd->pools[i], entry) {
          const struct zink_descriptor_layout_key *key = entry->key;
          struct zink_descriptor_pool *pool = (void*)entry->data;
          if (key->use_count)
             pool->set_idx = 0;
          else {
             pool_destroy(screen, pool);
-            _mesa_hash_table_remove(&bdd_lazy(bs)->pools[i], entry);
+            _mesa_hash_table_remove(&bdd->pools[i], entry);
          }
       }
    }
    for (unsigned i = 0; i < 2; i++) {
-      bdd_lazy(bs)->pg[i] = NULL;
-      if (bdd_lazy(bs)->push_pool[i])
-         bdd_lazy(bs)->push_pool[i]->set_idx = 0;
+      bdd->pg[i] = NULL;
+      if (bdd->push_pool[i])
+         bdd->push_pool[i]->set_idx = 0;
    }
-   while (util_dynarray_num_elements(&bdd_lazy(bs)->overflowed_pools, struct zink_descriptor_pool*)) {
-      struct zink_descriptor_pool *pool = util_dynarray_pop(&bdd_lazy(bs)->overflowed_pools, struct zink_descriptor_pool*);
+   while (util_dynarray_num_elements(&bdd->overflowed_pools, struct zink_descriptor_pool*)) {
+      struct zink_descriptor_pool *pool = util_dynarray_pop(&bdd->overflowed_pools, struct zink_descriptor_pool*);
       pool_destroy(screen, pool);
    }
 }
@@ -588,14 +591,15 @@ zink_batch_descriptor_init_lazy(struct zink_screen *screen, struct zink_batch_st
       return false;
    if (!screen->info.have_KHR_descriptor_update_template)
       return true;
+   struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
-      if (!_mesa_hash_table_init(&bdd_lazy(bs)->pools[i], bs->dd, _mesa_hash_pointer, _mesa_key_pointer_equal))
+      if (!_mesa_hash_table_init(&bdd->pools[i], bs->dd, _mesa_hash_pointer, _mesa_key_pointer_equal))
          return false;
    }
-   util_dynarray_init(&bdd_lazy(bs)->overflowed_pools, bs->dd);
+   util_dynarray_init(&bdd->overflowed_pools, bs->dd);
    if (!screen->info.have_KHR_push_descriptor) {
-      bdd_lazy(bs)->push_pool[0] = create_push_pool(screen, bs, false);
-      bdd_lazy(bs)->push_pool[1] = create_push_pool(screen, bs, true);
+      bdd->push_pool[0] = create_push_pool(screen, bdd, false);
+      bdd->push_pool[1] = create_push_pool(screen, bdd, true);
    }
    return true;
 }
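
As a self-contained illustration of the pattern applied here -- cast once,
then pass the typed pointer down -- consider this sketch (hypothetical names,
not Mesa code):

   #include <stdio.h>

   struct state { void *data; };     /* stands in for zink_batch_state */
   struct lazy_data { int pools; };  /* stands in for the lazy dd struct */

   /* the cast helper, analogous to bdd_lazy() */
   static struct lazy_data *get_lazy(struct state *s)
   {
      return (struct lazy_data *)s->data;
   }

   /* before: every access re-derives the typed pointer */
   static void update_before(struct state *s)
   {
      get_lazy(s)->pools++;
      printf("pools: %d\n", get_lazy(s)->pools);
   }

   /* after: the caller casts once and hands the result down */
   static void update_after(struct lazy_data *ld)
   {
      ld->pools++;
      printf("pools: %d\n", ld->pools);
   }

   int main(void)
   {
      struct lazy_data ld = { 0 };
      struct state s = { &ld };
      update_before(&s);
      update_after(get_lazy(&s));
      return 0;
   }

Functionally identical, but the "after" form is shorter at each use site and
makes each helper's real dependency (the lazy descriptor data, not the whole
batch state) explicit in its signature -- as in the create_push_pool() and
get_descriptor_pool_lazy() changes above.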


