Mesa (main): venus: cache shmems

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Wed Dec 15 19:12:29 UTC 2021


Module: Mesa
Branch: main
Commit: 9c81de7df296fd3854c2a7cc3f2a994076b6a5f6
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=9c81de7df296fd3854c2a7cc3f2a994076b6a5f6

Author: Chia-I Wu <olvaffe at gmail.com>
Date:   Fri Dec 10 15:38:05 2021 -0800

venus: cache shmems

Shmems are allocated internally and are only for CPU access.  They can
be easily cached.

Venus has 4 sources of shmem allocations:

 - the ring buffer
 - the reply stream
 - the indirect submission upload cs
 - one cs for each vn_command_buffer

The first one is allocated only once.  The other three are reallocated
occasionally; how often depends on the workload.

Signed-off-by: Chia-I Wu <olvaffe at gmail.com>
Reviewed-by: Ryan Neph <ryanneph at google.com>
Reviewed-by: Yiwei Zhang <zzyiwei at chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14179>

---

 src/virtio/vulkan/meson.build            |   1 +
 src/virtio/vulkan/vn_common.h            |   1 +
 src/virtio/vulkan/vn_renderer.h          |   3 +
 src/virtio/vulkan/vn_renderer_internal.c | 178 +++++++++++++++++++++++++++++++
 src/virtio/vulkan/vn_renderer_internal.h |  57 ++++++++++
 src/virtio/vulkan/vn_renderer_virtgpu.c  |  32 +++++-
 src/virtio/vulkan/vn_renderer_vtest.c    |  32 +++++-
 7 files changed, 298 insertions(+), 6 deletions(-)
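
For reference, a condensed sketch (not part of the commit) of the pattern both
backends adopt in the diff below: shmem_create() consults the cache before
allocating, and shmem_destroy() parks the shmem in the cache, falling back to
the real free path only when the cache declines it.  The my_backend_*() names
are hypothetical placeholders for the corresponding virtgpu/vtest functions;
the cache API and VN_REFCOUNT_INIT are the ones used by this commit.

/* hypothetical backend wiring against the cache API added by this commit */
#include "vn_renderer_internal.h"

struct my_backend {
   struct vn_renderer base;
   struct vn_renderer_shmem_cache shmem_cache;
};

/* the backend's existing free and allocation paths (bodies omitted here);
 * the free path is registered as the cache's destroy callback at init:
 *    vn_renderer_shmem_cache_init(&backend->shmem_cache, &backend->base,
 *                                 my_backend_shmem_destroy_now);
 */
static void
my_backend_shmem_destroy_now(struct vn_renderer *renderer,
                             struct vn_renderer_shmem *shmem);
static struct vn_renderer_shmem *
my_backend_shmem_alloc(struct vn_renderer *renderer, size_t size);

static struct vn_renderer_shmem *
my_backend_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct my_backend *backend = (struct my_backend *)renderer;

   /* reuse a cached shmem of the same power-of-two size when possible */
   struct vn_renderer_shmem *shmem =
      vn_renderer_shmem_cache_get(&backend->shmem_cache, size);
   if (shmem) {
      shmem->refcount = VN_REFCOUNT_INIT(1);
      return shmem;
   }

   return my_backend_shmem_alloc(renderer, size);
}

static void
my_backend_shmem_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_shmem *shmem)
{
   struct my_backend *backend = (struct my_backend *)renderer;

   /* cache the shmem; free it only if the cache declines it (e.g. the
    * size is not a power of two or exceeds the largest bucket) */
   if (!vn_renderer_shmem_cache_add(&backend->shmem_cache, shmem))
      my_backend_shmem_destroy_now(renderer, shmem);
}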

diff --git a/src/virtio/vulkan/meson.build b/src/virtio/vulkan/meson.build
index 6c0d9cb32b5..adeb51e8e42 100644
--- a/src/virtio/vulkan/meson.build
+++ b/src/virtio/vulkan/meson.build
@@ -47,6 +47,7 @@ libvn_files = files(
   'vn_queue.c',
   'vn_render_pass.c',
   'vn_ring.c',
+  'vn_renderer_internal.c',
   'vn_renderer_util.c',
   'vn_renderer_virtgpu.c',
   'vn_renderer_vtest.c',
diff --git a/src/virtio/vulkan/vn_common.h b/src/virtio/vulkan/vn_common.h
index 5781da2d168..272311493a1 100644
--- a/src/virtio/vulkan/vn_common.h
+++ b/src/virtio/vulkan/vn_common.h
@@ -28,6 +28,7 @@
 #include "util/list.h"
 #include "util/macros.h"
 #include "util/os_time.h"
+#include "util/simple_mtx.h"
 #include "util/u_math.h"
 #include "util/xmlconfig.h"
 #include "vk_alloc.h"
diff --git a/src/virtio/vulkan/vn_renderer.h b/src/virtio/vulkan/vn_renderer.h
index 9118548bfeb..ea3cff5552e 100644
--- a/src/virtio/vulkan/vn_renderer.h
+++ b/src/virtio/vulkan/vn_renderer.h
@@ -14,6 +14,9 @@ struct vn_renderer_shmem {
    uint32_t res_id;
    size_t mmap_size; /* for internal use only (i.e., munmap) */
    void *mmap_ptr;
+
+   struct list_head cache_head;
+   int64_t cache_timestamp;
 };
 
 struct vn_renderer_bo {
diff --git a/src/virtio/vulkan/vn_renderer_internal.c b/src/virtio/vulkan/vn_renderer_internal.c
new file mode 100644
index 00000000000..4e5027a905b
--- /dev/null
+++ b/src/virtio/vulkan/vn_renderer_internal.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "vn_renderer_internal.h"
+
+/* 3 seconds */
+#define VN_RENDERER_SHMEM_CACHE_EXPIRACY (3ll * 1000 * 1000)
+
+void
+vn_renderer_shmem_cache_init(struct vn_renderer_shmem_cache *cache,
+                             struct vn_renderer *renderer,
+                             vn_renderer_shmem_cache_destroy_func destroy_func)
+{
+   /* cache->bucket_mask is 32-bit and u_bit_scan is used */
+   static_assert(ARRAY_SIZE(cache->buckets) <= 32, "");
+
+   cache->renderer = renderer;
+   cache->destroy_func = destroy_func;
+
+   simple_mtx_init(&cache->mutex, mtx_plain);
+
+   for (uint32_t i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
+      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[i];
+      list_inithead(&bucket->shmems);
+   }
+
+   cache->initialized = true;
+}
+
+void
+vn_renderer_shmem_cache_fini(struct vn_renderer_shmem_cache *cache)
+{
+   if (!cache->initialized)
+      return;
+
+   while (cache->bucket_mask) {
+      const int idx = u_bit_scan(&cache->bucket_mask);
+      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
+
+      list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
+                               &bucket->shmems, cache_head)
+         cache->destroy_func(cache->renderer, shmem);
+   }
+
+   simple_mtx_destroy(&cache->mutex);
+}
+
+static struct vn_renderer_shmem_bucket *
+choose_bucket(struct vn_renderer_shmem_cache *cache,
+              size_t size,
+              int *out_idx)
+{
+   assert(size);
+   if (unlikely(!util_is_power_of_two_or_zero64(size)))
+      return NULL;
+
+   const uint32_t idx = ffsll(size) - 1;
+   if (unlikely(idx >= ARRAY_SIZE(cache->buckets)))
+      return NULL;
+
+   *out_idx = idx;
+   return &cache->buckets[idx];
+}
+
+static void
+vn_renderer_shmem_cache_remove_expired_locked(
+   struct vn_renderer_shmem_cache *cache, int64_t now)
+{
+   uint32_t bucket_mask = cache->bucket_mask;
+   while (bucket_mask) {
+      const int idx = u_bit_scan(&bucket_mask);
+      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
+
+      assert(!list_is_empty(&bucket->shmems));
+      const struct vn_renderer_shmem *last_shmem = list_last_entry(
+         &bucket->shmems, struct vn_renderer_shmem, cache_head);
+
+      /* remove expired shmems but keep at least the last one */
+      list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
+                               &bucket->shmems, cache_head) {
+         if (shmem == last_shmem ||
+             now - shmem->cache_timestamp < VN_RENDERER_SHMEM_CACHE_EXPIRACY)
+            break;
+
+         list_del(&shmem->cache_head);
+         cache->destroy_func(cache->renderer, shmem);
+      }
+   }
+}
+
+bool
+vn_renderer_shmem_cache_add(struct vn_renderer_shmem_cache *cache,
+                            struct vn_renderer_shmem *shmem)
+{
+   assert(!vn_refcount_is_valid(&shmem->refcount));
+
+   int idx;
+   struct vn_renderer_shmem_bucket *bucket =
+      choose_bucket(cache, shmem->mmap_size, &idx);
+   if (!bucket)
+      return false;
+
+   const int64_t now = os_time_get();
+   shmem->cache_timestamp = now;
+
+   simple_mtx_lock(&cache->mutex);
+
+   vn_renderer_shmem_cache_remove_expired_locked(cache, now);
+
+   list_addtail(&shmem->cache_head, &bucket->shmems);
+   cache->bucket_mask |= 1 << idx;
+
+   simple_mtx_unlock(&cache->mutex);
+
+   return true;
+}
+
+struct vn_renderer_shmem *
+vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
+                            size_t size)
+{
+   int idx;
+   struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);
+   if (!bucket) {
+      simple_mtx_lock(&cache->mutex);
+      cache->debug.cache_skip_count++;
+      simple_mtx_unlock(&cache->mutex);
+      return NULL;
+   }
+
+   struct vn_renderer_shmem *shmem = NULL;
+
+   simple_mtx_lock(&cache->mutex);
+   if (cache->bucket_mask & (1 << idx)) {
+      assert(!list_is_empty(&bucket->shmems));
+      shmem = list_first_entry(&bucket->shmems, struct vn_renderer_shmem,
+                               cache_head);
+      list_del(&shmem->cache_head);
+
+      if (list_is_empty(&bucket->shmems))
+         cache->bucket_mask &= ~(1 << idx);
+
+      cache->debug.cache_hit_count++;
+   } else {
+      cache->debug.cache_miss_count++;
+   }
+   simple_mtx_unlock(&cache->mutex);
+
+   return shmem;
+}
+
+/* for debugging only */
+void
+vn_renderer_shmem_cache_debug_dump(struct vn_renderer_shmem_cache *cache)
+{
+   simple_mtx_lock(&cache->mutex);
+
+   vn_log(NULL, "dumping shmem cache");
+   vn_log(NULL, "  cache skip: %d", cache->debug.cache_skip_count);
+   vn_log(NULL, "  cache hit: %d", cache->debug.cache_hit_count);
+   vn_log(NULL, "  cache miss: %d", cache->debug.cache_miss_count);
+
+   uint32_t bucket_mask = cache->bucket_mask;
+   while (bucket_mask) {
+      const int idx = u_bit_scan(&bucket_mask);
+      const struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
+      uint32_t count = 0;
+      list_for_each_entry(struct vn_renderer_shmem, shmem, &bucket->shmems,
+                          cache_head)
+         count++;
+      if (count)
+         vn_log(NULL, "  buckets[%d]: %d shmems", idx, count);
+   }
+
+   simple_mtx_unlock(&cache->mutex);
+}
diff --git a/src/virtio/vulkan/vn_renderer_internal.h b/src/virtio/vulkan/vn_renderer_internal.h
new file mode 100644
index 00000000000..3589788b0b3
--- /dev/null
+++ b/src/virtio/vulkan/vn_renderer_internal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef VN_RENDERER_INTERNAL_H
+#define VN_RENDERER_INTERNAL_H
+
+#include "vn_renderer.h"
+
+typedef void (*vn_renderer_shmem_cache_destroy_func)(
+   struct vn_renderer *renderer, struct vn_renderer_shmem *shmem);
+
+struct vn_renderer_shmem_cache {
+   bool initialized;
+
+   struct vn_renderer *renderer;
+   vn_renderer_shmem_cache_destroy_func destroy_func;
+
+   simple_mtx_t mutex;
+
+   /* cache shmems up to 2^26 in size (see choose_bucket) */
+   struct vn_renderer_shmem_bucket {
+      struct list_head shmems;
+   } buckets[27];
+
+   /* which buckets have shmems */
+   uint32_t bucket_mask;
+
+   struct {
+      uint32_t cache_skip_count;
+      uint32_t cache_hit_count;
+      uint32_t cache_miss_count;
+   } debug;
+};
+
+void
+vn_renderer_shmem_cache_init(
+   struct vn_renderer_shmem_cache *cache,
+   struct vn_renderer *renderer,
+   vn_renderer_shmem_cache_destroy_func destroy_func);
+
+void
+vn_renderer_shmem_cache_fini(struct vn_renderer_shmem_cache *cache);
+
+bool
+vn_renderer_shmem_cache_add(struct vn_renderer_shmem_cache *cache,
+                            struct vn_renderer_shmem *shmem);
+
+struct vn_renderer_shmem *
+vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
+                            size_t size);
+
+void
+vn_renderer_shmem_cache_debug_dump(struct vn_renderer_shmem_cache *cache);
+
+#endif /* VN_RENDERER_INTERNAL_H */
diff --git a/src/virtio/vulkan/vn_renderer_virtgpu.c b/src/virtio/vulkan/vn_renderer_virtgpu.c
index 0b83f52a5ab..f8ea95b0f3b 100644
--- a/src/virtio/vulkan/vn_renderer_virtgpu.c
+++ b/src/virtio/vulkan/vn_renderer_virtgpu.c
@@ -17,7 +17,7 @@
 #define VIRGL_RENDERER_UNSTABLE_APIS
 #include "virtio-gpu/virglrenderer_hw.h"
 
-#include "vn_renderer.h"
+#include "vn_renderer_internal.h"
 
 /* XXX WIP kernel uapi */
 #ifndef VIRTGPU_PARAM_CONTEXT_INIT
@@ -111,6 +111,8 @@ struct virtgpu {
    struct util_sparse_array bo_array;
 
    mtx_t dma_buf_import_mutex;
+
+   struct vn_renderer_shmem_cache shmem_cache;
 };
 
 #ifdef SIMULATE_SYNCOBJ
@@ -1271,8 +1273,8 @@ virtgpu_bo_create_from_device_memory(
 }
 
 static void
-virtgpu_shmem_destroy(struct vn_renderer *renderer,
-                      struct vn_renderer_shmem *_shmem)
+virtgpu_shmem_destroy_now(struct vn_renderer *renderer,
+                          struct vn_renderer_shmem *_shmem)
 {
    struct virtgpu *gpu = (struct virtgpu *)renderer;
    struct virtgpu_shmem *shmem = (struct virtgpu_shmem *)_shmem;
@@ -1281,11 +1283,30 @@ virtgpu_shmem_destroy(struct vn_renderer *renderer,
    virtgpu_ioctl_gem_close(gpu, shmem->gem_handle);
 }
 
+static void
+virtgpu_shmem_destroy(struct vn_renderer *renderer,
+                      struct vn_renderer_shmem *shmem)
+{
+   struct virtgpu *gpu = (struct virtgpu *)renderer;
+
+   if (vn_renderer_shmem_cache_add(&gpu->shmem_cache, shmem))
+      return;
+
+   virtgpu_shmem_destroy_now(&gpu->base, shmem);
+}
+
 static struct vn_renderer_shmem *
 virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
 {
    struct virtgpu *gpu = (struct virtgpu *)renderer;
 
+   struct vn_renderer_shmem *cached_shmem =
+      vn_renderer_shmem_cache_get(&gpu->shmem_cache, size);
+   if (cached_shmem) {
+      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
+      return cached_shmem;
+   }
+
    uint32_t res_id;
    uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
       gpu, gpu->shmem_blob_mem, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
@@ -1381,6 +1402,8 @@ virtgpu_destroy(struct vn_renderer *renderer,
 {
    struct virtgpu *gpu = (struct virtgpu *)renderer;
 
+   vn_renderer_shmem_cache_fini(&gpu->shmem_cache);
+
    if (gpu->fd >= 0)
       close(gpu->fd);
 
@@ -1589,6 +1612,9 @@ virtgpu_init(struct virtgpu *gpu)
 
    virtgpu_init_shmem_blob_mem(gpu);
 
+   vn_renderer_shmem_cache_init(&gpu->shmem_cache, &gpu->base,
+                                virtgpu_shmem_destroy_now);
+
    gpu->base.ops.destroy = virtgpu_destroy;
    gpu->base.ops.get_info = virtgpu_get_info;
    gpu->base.ops.submit = virtgpu_submit;
diff --git a/src/virtio/vulkan/vn_renderer_vtest.c b/src/virtio/vulkan/vn_renderer_vtest.c
index ebee2f3b9a1..705e139df15 100644
--- a/src/virtio/vulkan/vn_renderer_vtest.c
+++ b/src/virtio/vulkan/vn_renderer_vtest.c
@@ -22,7 +22,7 @@
 #include "virtio-gpu/virglrenderer_hw.h"
 #include "vtest/vtest_protocol.h"
 
-#include "vn_renderer.h"
+#include "vn_renderer_internal.h"
 
 #define VTEST_PCI_VENDOR_ID 0x1af4
 #define VTEST_PCI_DEVICE_ID 0x1050
@@ -66,6 +66,8 @@ struct vtest {
 
    struct util_sparse_array shmem_array;
    struct util_sparse_array bo_array;
+
+   struct vn_renderer_shmem_cache shmem_cache;
 };
 
 static int
@@ -777,8 +779,8 @@ vtest_bo_create_from_device_memory(
 }
 
 static void
-vtest_shmem_destroy(struct vn_renderer *renderer,
-                    struct vn_renderer_shmem *_shmem)
+vtest_shmem_destroy_now(struct vn_renderer *renderer,
+                        struct vn_renderer_shmem *_shmem)
 {
    struct vtest *vtest = (struct vtest *)renderer;
    struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;
@@ -790,11 +792,30 @@ vtest_shmem_destroy(struct vn_renderer *renderer,
    mtx_unlock(&vtest->sock_mutex);
 }
 
+static void
+vtest_shmem_destroy(struct vn_renderer *renderer,
+                    struct vn_renderer_shmem *shmem)
+{
+   struct vtest *vtest = (struct vtest *)renderer;
+
+   if (vn_renderer_shmem_cache_add(&vtest->shmem_cache, shmem))
+      return;
+
+   vtest_shmem_destroy_now(&vtest->base, shmem);
+}
+
 static struct vn_renderer_shmem *
 vtest_shmem_create(struct vn_renderer *renderer, size_t size)
 {
    struct vtest *vtest = (struct vtest *)renderer;
 
+   struct vn_renderer_shmem *cached_shmem =
+      vn_renderer_shmem_cache_get(&vtest->shmem_cache, size);
+   if (cached_shmem) {
+      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
+      return cached_shmem;
+   }
+
    mtx_lock(&vtest->sock_mutex);
    int res_fd;
    uint32_t res_id = vtest_vcmd_resource_create_blob(
@@ -934,6 +955,8 @@ vtest_destroy(struct vn_renderer *renderer,
 {
    struct vtest *vtest = (struct vtest *)renderer;
 
+   vn_renderer_shmem_cache_fini(&vtest->shmem_cache);
+
    if (vtest->sock_fd >= 0) {
       shutdown(vtest->sock_fd, SHUT_RDWR);
       close(vtest->sock_fd);
@@ -1025,6 +1048,9 @@ vtest_init(struct vtest *vtest)
                               ? VCMD_BLOB_TYPE_HOST3D
                               : VCMD_BLOB_TYPE_GUEST;
 
+   vn_renderer_shmem_cache_init(&vtest->shmem_cache, &vtest->base,
+                                vtest_shmem_destroy_now);
+
    vtest_vcmd_context_init(vtest, vtest->capset.id);
 
    vtest->base.ops.destroy = vtest_destroy;
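
A note on the cache internals above (not from the commit message): shmems are
bucketed by size in choose_bucket(), which only caches power-of-two sizes up
to 2^26 bytes and picks bucket ffsll(size) - 1; on every cache_add(), entries
older than VN_RENDERER_SHMEM_CACHE_EXPIRACY (3,000,000 us of os_time_get()
time, i.e. 3 seconds) are freed, keeping at least the newest shmem per bucket.
A tiny standalone sketch of the bucket-index math, using __builtin_ffsll in
place of the ffsll() call in the commit:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   /* power-of-two sizes, as the cache requires */
   const uint64_t sizes[] = { 4096, 65536, UINT64_C(1) << 26 };
   for (unsigned i = 0; i < 3; i++) {
      const int idx = __builtin_ffsll(sizes[i]) - 1; /* 4096 -> bucket 12 */
      assert(idx < 27); /* sizes above 2^26 are not cached */
      printf("size %" PRIu64 " -> bucket %d\n", sizes[i], idx);
   }
   return 0;
}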


