Mesa (main): zink: use uint64_t for batch_id

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Thu Jun 16 00:46:41 UTC 2022


Module: Mesa
Branch: main
Commit: d301883aba5a60d8d748a82a5777aa0701bdcaca
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=d301883aba5a60d8d748a82a5777aa0701bdcaca

Author: Mike Blumenkrantz <michael.blumenkrantz at gmail.com>
Date:   Fri Mar 25 14:00:21 2022 -0400

zink: use uint64_t for batch_id

this maps directly to the vulkan api and allows removal of timeline
wrapping code

the consequence of this is a ~0.26% reduction in drawoverhead performance
on base cases (n=1000), but the simplification and deletions seem worth it

Reviewed-by: Adam Jackson <ajax at redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17045>
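
Context for the change: VkSemaphoreWaitInfo::pValues is an array of
uint64_t, so a 64-bit batch_id can be handed to the timeline wait
unmodified. A minimal free-standing sketch of that call; dev, sem and
wait_batch are placeholder names, not driver code:

   #include <vulkan/vulkan.h>

   /* requires Vulkan 1.2 timeline semaphores, which zink already uses */
   static VkResult
   wait_batch(VkDevice dev, VkSemaphore sem, uint64_t batch_id, uint64_t timeout)
   {
      VkSemaphoreWaitInfo wi = {0};
      wi.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
      wi.semaphoreCount = 1;
      wi.pSemaphores = &sem;
      wi.pValues = &batch_id; /* full 64-bit value, no wrap handling needed */
      return vkWaitSemaphores(dev, &wi, timeout);
   }

With a 32-bit id this was not possible: timeline signal values can never
decrease, so wrapping back past zero forced the driver to create a
replacement semaphore, which is the code deleted below.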

---

 src/gallium/drivers/zink/zink_batch.c   |  8 +-------
 src/gallium/drivers/zink/zink_context.c |  4 ++--
 src/gallium/drivers/zink/zink_context.h |  4 ++--
 src/gallium/drivers/zink/zink_fence.h   |  2 +-
 src/gallium/drivers/zink/zink_kopper.c  |  4 +++-
 src/gallium/drivers/zink/zink_screen.c  | 25 ++++---------------------
 src/gallium/drivers/zink/zink_screen.h  | 25 +++++++++++++------------
 7 files changed, 26 insertions(+), 46 deletions(-)

diff --git a/src/gallium/drivers/zink/zink_batch.c b/src/gallium/drivers/zink/zink_batch.c
index 514a4b77b01..22ee110cb68 100644
--- a/src/gallium/drivers/zink/zink_batch.c
+++ b/src/gallium/drivers/zink/zink_batch.c
@@ -346,16 +346,10 @@ submit_queue(void *data, void *gdata, int thread_index)
    VkSubmitInfo si[2] = {0};
    int num_si = 2;
    while (!bs->fence.batch_id)
-      bs->fence.batch_id = p_atomic_inc_return(&screen->curr_batch);
+      bs->fence.batch_id = (uint64_t)p_atomic_inc_return(&screen->curr_batch);
    bs->usage.usage = bs->fence.batch_id;
    bs->usage.unflushed = false;
 
-   if (screen->last_finished > bs->fence.batch_id && bs->fence.batch_id == 1) {
-      if (!zink_screen_init_semaphore(screen)) {
-         debug_printf("timeline init failed, things are about to go dramatically wrong.");
-      }
-   }
-
    uint64_t batch_id = bs->fence.batch_id;
    /* first submit is just for acquire waits since they have a separate array */
    si[0].sType = si[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
diff --git a/src/gallium/drivers/zink/zink_context.c b/src/gallium/drivers/zink/zink_context.c
index be5c81ddc11..36f304f5e88 100644
--- a/src/gallium/drivers/zink/zink_context.c
+++ b/src/gallium/drivers/zink/zink_context.c
@@ -3196,7 +3196,7 @@ zink_fence_wait(struct pipe_context *pctx)
 }
 
 void
-zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
+zink_wait_on_batch(struct zink_context *ctx, uint64_t batch_id)
 {
    struct zink_batch_state *bs;
    if (!batch_id) {
@@ -3212,7 +3212,7 @@ zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
 }
 
 bool
-zink_check_batch_completion(struct zink_context *ctx, uint32_t batch_id)
+zink_check_batch_completion(struct zink_context *ctx, uint64_t batch_id)
 {
    assert(ctx->batch.state);
    if (!batch_id)
diff --git a/src/gallium/drivers/zink/zink_context.h b/src/gallium/drivers/zink/zink_context.h
index 97587062cfb..627cd20e5d5 100644
--- a/src/gallium/drivers/zink/zink_context.h
+++ b/src/gallium/drivers/zink/zink_context.h
@@ -403,10 +403,10 @@ void
 zink_fence_wait(struct pipe_context *ctx);
 
 void
-zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id);
+zink_wait_on_batch(struct zink_context *ctx, uint64_t batch_id);
 
 bool
-zink_check_batch_completion(struct zink_context *ctx, uint32_t batch_id);
+zink_check_batch_completion(struct zink_context *ctx, uint64_t batch_id);
 
 void
 zink_flush_queue(struct zink_context *ctx);
diff --git a/src/gallium/drivers/zink/zink_fence.h b/src/gallium/drivers/zink/zink_fence.h
index 18567346826..8bb8a881dfc 100644
--- a/src/gallium/drivers/zink/zink_fence.h
+++ b/src/gallium/drivers/zink/zink_fence.h
@@ -50,7 +50,7 @@ struct zink_tc_fence {
 };
 
 struct zink_fence {
-   uint32_t batch_id;
+   uint64_t batch_id;
    bool submitted;
    bool completed;
 };
diff --git a/src/gallium/drivers/zink/zink_kopper.c b/src/gallium/drivers/zink/zink_kopper.c
index e6d5156d1d3..9a1b650be32 100644
--- a/src/gallium/drivers/zink/zink_kopper.c
+++ b/src/gallium/drivers/zink/zink_kopper.c
@@ -672,7 +672,9 @@ kopper_present(void *data, void *gdata, int thread_idx)
    }
    /* queue this wait semaphore for deletion on completion of the next batch */
    assert(screen->curr_batch > 0);
-   uint32_t next = screen->curr_batch + 1;
+   uint32_t next = (uint32_t)screen->curr_batch + 1;
+   /* handle overflow */
+   next = MAX2(next, 1);
    struct hash_entry *he = _mesa_hash_table_search(swapchain->presents, (void*)(uintptr_t)next);
    if (he)
       arr = he->data;
diff --git a/src/gallium/drivers/zink/zink_screen.c b/src/gallium/drivers/zink/zink_screen.c
index 65dd99a12eb..1934983f818 100644
--- a/src/gallium/drivers/zink/zink_screen.c
+++ b/src/gallium/drivers/zink/zink_screen.c
@@ -1259,8 +1259,6 @@ zink_destroy_screen(struct pipe_screen *pscreen)
 
    if (screen->sem)
       VKSCR(DestroySemaphore)(screen->dev, screen->sem, NULL);
-   if (screen->prev_sem)
-      VKSCR(DestroySemaphore)(screen->dev, screen->prev_sem, NULL);
 
    if (screen->fence)
       VKSCR(DestroyFence)(screen->dev, screen->fence, NULL);
@@ -1741,29 +1739,16 @@ zink_screen_init_semaphore(struct zink_screen *screen)
 {
    VkSemaphoreCreateInfo sci = {0};
    VkSemaphoreTypeCreateInfo tci = {0};
-   VkSemaphore sem;
    sci.pNext = &tci;
    sci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    tci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO;
    tci.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE;
 
-   if (VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &sem) == VK_SUCCESS) {
-      /* semaphore signal values can never decrease,
-       * so we need a new semaphore anytime we overflow
-       */
-      if (screen->prev_sem)
-         VKSCR(DestroySemaphore)(screen->dev, screen->prev_sem, NULL);
-      screen->prev_sem = screen->sem;
-      screen->sem = sem;
-      return true;
-   } else {
-      mesa_loge("ZINK: vkCreateSemaphore failed");
-   }
-   return false;
+   return VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &screen->sem) == VK_SUCCESS;
 }
 
 bool
-zink_screen_timeline_wait(struct zink_screen *screen, uint32_t batch_id, uint64_t timeout)
+zink_screen_timeline_wait(struct zink_screen *screen, uint64_t batch_id, uint64_t timeout)
 {
    VkSemaphoreWaitInfo wi = {0};
 
@@ -1772,10 +1757,8 @@ zink_screen_timeline_wait(struct zink_screen *screen, uint32_t batch_id, uint64_
 
    wi.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
    wi.semaphoreCount = 1;
-   /* handle batch_id overflow */
-   wi.pSemaphores = batch_id > screen->curr_batch ? &screen->prev_sem : &screen->sem;
-   uint64_t batch_id64 = batch_id;
-   wi.pValues = &batch_id64;
+   wi.pSemaphores = &screen->sem;
+   wi.pValues = &batch_id;
    bool success = false;
    if (screen->device_lost)
       return true;
diff --git a/src/gallium/drivers/zink/zink_screen.h b/src/gallium/drivers/zink/zink_screen.h
index b14aff9bdda..6912716018b 100644
--- a/src/gallium/drivers/zink/zink_screen.h
+++ b/src/gallium/drivers/zink/zink_screen.h
@@ -99,10 +99,9 @@ struct zink_screen {
 
    bool threaded;
    bool is_cpu;
-   uint32_t curr_batch; //the current batch id
+   uint64_t curr_batch; //the current batch id
    uint32_t last_finished; //this is racy but ultimately doesn't matter
    VkSemaphore sem;
-   VkSemaphore prev_sem;
    VkFence fence;
    struct util_queue flush_queue;
    struct zink_context *copy_context;
@@ -208,36 +207,38 @@ struct zink_screen {
 
 /* update last_finished to account for batch_id wrapping */
 static inline void
-zink_screen_update_last_finished(struct zink_screen *screen, uint32_t batch_id)
+zink_screen_update_last_finished(struct zink_screen *screen, uint64_t batch_id)
 {
+   const uint32_t check_id = (uint32_t)batch_id;
    /* last_finished may have wrapped */
    if (screen->last_finished < UINT_MAX / 2) {
       /* last_finished has wrapped, batch_id has not */
-      if (batch_id > UINT_MAX / 2)
+      if (check_id > UINT_MAX / 2)
          return;
-   } else if (batch_id < UINT_MAX / 2) {
+   } else if (check_id < UINT_MAX / 2) {
       /* batch_id has wrapped, last_finished has not */
-      screen->last_finished = batch_id;
+      screen->last_finished = check_id;
       return;
    }
    /* neither have wrapped */
-   screen->last_finished = MAX2(batch_id, screen->last_finished);
+   screen->last_finished = MAX2(check_id, screen->last_finished);
 }
 
 /* check a batch_id against last_finished while accounting for wrapping */
 static inline bool
-zink_screen_check_last_finished(struct zink_screen *screen, uint32_t batch_id)
+zink_screen_check_last_finished(struct zink_screen *screen, uint64_t batch_id)
 {
+   const uint32_t check_id = (uint32_t)batch_id;
    /* last_finished may have wrapped */
    if (screen->last_finished < UINT_MAX / 2) {
       /* last_finished has wrapped, batch_id has not */
-      if (batch_id > UINT_MAX / 2)
+      if (check_id > UINT_MAX / 2)
          return true;
-   } else if (batch_id < UINT_MAX / 2) {
+   } else if (check_id < UINT_MAX / 2) {
       /* batch_id has wrapped, last_finished has not */
       return false;
    }
-   return screen->last_finished >= batch_id;
+   return screen->last_finished >= check_id;
 }
 
 bool
@@ -281,7 +282,7 @@ VkFormat
 zink_get_format(struct zink_screen *screen, enum pipe_format format);
 
 bool
-zink_screen_timeline_wait(struct zink_screen *screen, uint32_t batch_id, uint64_t timeout);
+zink_screen_timeline_wait(struct zink_screen *screen, uint64_t batch_id, uint64_t timeout);
 
 bool
 zink_is_depth_format_supported(struct zink_screen *screen, VkFormat format);
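
The last_finished fast path above stays 32-bit, so it is the one place that
still reasons about wrapping: the 64-bit id is truncated and compared
wrap-aware. An illustration of that ordering test, rewritten free-standing
with placeholder names (check_finished is hypothetical; the logic mirrors
zink_screen_check_last_finished):

   #include <assert.h>
   #include <limits.h>
   #include <stdbool.h>
   #include <stdint.h>

   static bool
   check_finished(uint32_t last_finished, uint64_t batch_id)
   {
      const uint32_t check_id = (uint32_t)batch_id;
      if (last_finished < UINT_MAX / 2) {
         if (check_id > UINT_MAX / 2)
            return true;   /* last_finished wrapped past batch_id */
      } else if (check_id < UINT_MAX / 2) {
         return false;     /* batch_id wrapped, last_finished has not */
      }
      return last_finished >= check_id;
   }

   int
   main(void)
   {
      assert(check_finished(100, 50));             /* plain ordering */
      assert(!check_finished(100, 200));
      assert(check_finished(5, UINT32_MAX - 2));   /* last_finished wrapped */
      assert(!check_finished(UINT32_MAX - 2, (uint64_t)UINT32_MAX + 5));
      return 0;
   }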



More information about the mesa-commit mailing list