[Mesa-dev] [PATCH 02/14] gallium/pipebuffer: add pb_slab utility

Nicolai Hähnle nhaehnle at gmail.com
Tue Sep 13 09:56:13 UTC 2016


From: Nicolai Hähnle <nicolai.haehnle at amd.com>

This is a simple framework for slab allocation out of buffers. It fits
into the buffer management scheme of the radeon and amdgpu winsyses,
where bufmgrs aren't used.

The utility knows about differently sized allocations and explicitly
manages the reclaim of allocations that still have pending fences. It
manages all the free lists itself, but never touches buffer objects
directly, relying on callbacks for that.
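
The expected division of labour looks roughly like this. Below is a
sketch of the winsys-side glue; all my_* names, NUM_HEAPS and
SLAB_BUFFER_SIZE are made up for illustration, and error handling is
omitted:

/* Illustrative winsys-side glue; all my_* names, NUM_HEAPS and
 * SLAB_BUFFER_SIZE are placeholders for this sketch, and error
 * handling is omitted. */

struct my_entry {
   struct pb_slab_entry base; /* must be first so we can cast back */
   unsigned offset;           /* byte offset into the slab's buffer */
   struct my_fence *fence;    /* fence of the last use, if any */
};

struct my_slab {
   struct pb_slab base;
   struct my_buffer *buffer;  /* backing buffer shared by all entries */
   struct my_entry *entries;
};

static struct pb_slab *
my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
              unsigned group_index)
{
   struct my_winsys *ws = priv;
   struct my_slab *slab = CALLOC_STRUCT(my_slab);
   unsigned num_entries = SLAB_BUFFER_SIZE / entry_size;
   unsigned i;

   slab->buffer = my_buffer_create(ws, SLAB_BUFFER_SIZE, heap);
   slab->entries = CALLOC(num_entries, sizeof(*slab->entries));
   slab->base.num_entries = num_entries;
   slab->base.num_free = num_entries;

   /* The callback fills in the free list and the per-entry
    * back-pointers; pb_slab_alloc links the slab itself into the
    * right group afterwards. */
   LIST_INITHEAD(&slab->base.free);
   for (i = 0; i < num_entries; ++i) {
      struct my_entry *e = &slab->entries[i];

      e->base.slab = &slab->base;
      e->base.group_index = group_index;
      e->offset = i * entry_size;
      LIST_ADDTAIL(&e->base.head, &slab->base.free);
   }

   return &slab->base;
}

static void
my_slab_free(void *priv, struct pb_slab *pslab)
{
   struct my_slab *slab = (struct my_slab *)pslab;

   my_buffer_destroy(slab->buffer);
   FREE(slab->entries);
   FREE(slab);
}

static bool
my_can_reclaim(void *priv, struct pb_slab_entry *pentry)
{
   struct my_entry *e = (struct my_entry *)pentry;

   return !e->fence || my_fence_signalled(e->fence);
}

and on the consumer side:

   /* Entries from 256 bytes (1 << 8) up to 1 MB (1 << 20): */
   pb_slabs_init(&ws->slabs, 8, 20, NUM_HEAPS, ws,
                 my_can_reclaim, my_slab_alloc, my_slab_free);

   /* Sub-allocate 4096 bytes from heap 0 (rounded up to 1 << 12): */
   struct my_entry *e =
      (struct my_entry *)pb_slab_alloc(&ws->slabs, 4096, 0);

   /* ... use the containing slab's buffer at e->offset, record a
    * fence in e->fence ... */

   pb_slab_free(&ws->slabs, &e->base); /* queued until the fence allows
                                        * reclaim */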
---
 src/gallium/auxiliary/Makefile.sources     |   2 +
 src/gallium/auxiliary/pipebuffer/pb_slab.c | 195 +++++++++++++++++++++++++++++
 src/gallium/auxiliary/pipebuffer/pb_slab.h | 115 +++++++++++++++++
 3 files changed, 312 insertions(+)
 create mode 100644 src/gallium/auxiliary/pipebuffer/pb_slab.c
 create mode 100644 src/gallium/auxiliary/pipebuffer/pb_slab.h

diff --git a/src/gallium/auxiliary/Makefile.sources b/src/gallium/auxiliary/Makefile.sources
index f8954c9..ed9eaa8 100644
--- a/src/gallium/auxiliary/Makefile.sources
+++ b/src/gallium/auxiliary/Makefile.sources
@@ -88,20 +88,22 @@ C_SOURCES := \
 	pipebuffer/pb_bufmgr_alt.c \
 	pipebuffer/pb_bufmgr_cache.c \
 	pipebuffer/pb_bufmgr_debug.c \
 	pipebuffer/pb_bufmgr.h \
 	pipebuffer/pb_bufmgr_mm.c \
 	pipebuffer/pb_bufmgr_ondemand.c \
 	pipebuffer/pb_bufmgr_pool.c \
 	pipebuffer/pb_bufmgr_slab.c \
 	pipebuffer/pb_cache.c \
 	pipebuffer/pb_cache.h \
+	pipebuffer/pb_slab.c \
+	pipebuffer/pb_slab.h \
 	pipebuffer/pb_validate.c \
 	pipebuffer/pb_validate.h \
 	postprocess/filters.h \
 	postprocess/postprocess.h \
 	postprocess/pp_celshade.c \
 	postprocess/pp_celshade.h \
 	postprocess/pp_colors.c \
 	postprocess/pp_colors.h \
 	postprocess/pp_filters.h \
 	postprocess/pp_init.c \
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.c b/src/gallium/auxiliary/pipebuffer/pb_slab.c
new file mode 100644
index 0000000..e419446
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pb_slab.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+static void
+pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
+{
+   struct pb_slab *slab = entry->slab;
+
+   LIST_DEL(&entry->head); /* remove from reclaim list */
+   LIST_ADD(&entry->head, &slab->free);
+   slab->num_free++;
+
+   /* Add slab to the group's list if it isn't already linked. */
+   if (!slab->head.next) {
+      struct pb_slab_group *group = &slabs->groups[entry->group_index];
+      LIST_ADDTAIL(&slab->head, &group->slabs);
+   }
+
+   if (slab->num_free >= slab->num_entries) {
+      LIST_DEL(&slab->head);
+      slabs->slab_free(slabs->priv, slab);
+   }
+}
+
+static void
+pb_slabs_reclaim_locked(struct pb_slabs *slabs)
+{
+   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+      struct pb_slab_entry *entry =
+         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+
+      if (!slabs->can_reclaim(slabs->priv, entry))
+         break;
+
+      pb_slab_reclaim(slabs, entry);
+   }
+}
+
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
+{
+   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
+   unsigned group_index;
+   struct pb_slab_group *group;
+   struct pb_slab *slab;
+   struct pb_slab_entry *entry;
+
+   assert(order < slabs->min_order + slabs->num_orders);
+   assert(heap < slabs->num_heaps);
+
+   group_index = heap * slabs->num_orders + (order - slabs->min_order);
+   group = &slabs->groups[group_index];
+
+   pipe_mutex_lock(slabs->mutex);
+
+   /* If there is no candidate slab at all, or the first slab has no free
+    * entries, try reclaiming entries.
+    */
+   if (LIST_IS_EMPTY(&group->slabs) ||
+       LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+      pb_slabs_reclaim_locked(slabs);
+
+   /* Remove slabs without free entries. */
+   while (!LIST_IS_EMPTY(&group->slabs)) {
+      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
+      if (!LIST_IS_EMPTY(&slab->free))
+         break;
+
+      LIST_DEL(&slab->head);
+   }
+
+   if (LIST_IS_EMPTY(&group->slabs)) {
+      /* Drop the mutex temporarily to prevent a deadlock where the allocation
+       * calls back into slab functions (most likely to happen for
+       * pb_slab_reclaim if memory is low).
+       *
+       * There's a chance that racing threads will end up allocating multiple
+       * slabs for the same group, but that doesn't hurt correctness.
+       */
+      pipe_mutex_unlock(slabs->mutex);
+      slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
+      if (!slab)
+         return NULL;
+      pipe_mutex_lock(slabs->mutex);
+
+      LIST_ADD(&slab->head, &group->slabs);
+   }
+
+   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
+   LIST_DEL(&entry->head);
+   slab->num_free--;
+
+   pipe_mutex_unlock(slabs->mutex);
+
+   return entry;
+}
+
+void
+pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
+{
+   pipe_mutex_lock(slabs->mutex);
+   LIST_ADDTAIL(&entry->head, &slabs->reclaim);
+   pipe_mutex_unlock(slabs->mutex);
+}
+
+void
+pb_slabs_reclaim(struct pb_slabs *slabs)
+{
+   pipe_mutex_lock(slabs->mutex);
+   pb_slabs_reclaim_locked(slabs);
+   pipe_mutex_unlock(slabs->mutex);
+}
+
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+              unsigned min_order, unsigned max_order,
+              unsigned num_heaps,
+              void *priv,
+              slab_can_reclaim_fn *can_reclaim,
+              slab_alloc_fn *slab_alloc,
+              slab_free_fn *slab_free)
+{
+   unsigned num_groups;
+   unsigned i;
+
+   assert(min_order <= max_order);
+   assert(max_order < sizeof(unsigned) * 8 - 1);
+
+   slabs->min_order = min_order;
+   slabs->num_orders = max_order - min_order + 1;
+   slabs->num_heaps = num_heaps;
+
+   slabs->priv = priv;
+   slabs->can_reclaim = can_reclaim;
+   slabs->slab_alloc = slab_alloc;
+   slabs->slab_free = slab_free;
+
+   LIST_INITHEAD(&slabs->reclaim);
+
+   num_groups = slabs->num_orders * slabs->num_heaps;
+   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
+   if (!slabs->groups)
+      return false;
+
+   for (i = 0; i < num_groups; ++i) {
+      struct pb_slab_group *group = &slabs->groups[i];
+      LIST_INITHEAD(&group->slabs);
+   }
+
+   pipe_mutex_init(slabs->mutex);
+
+   return true;
+}
+
+void
+pb_slabs_deinit(struct pb_slabs *slabs)
+{
+   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+      struct pb_slab_entry *entry =
+         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
+      pb_slab_reclaim(slabs, entry);
+   }
+
+   FREE(slabs->groups);
+   pipe_mutex_destroy(slabs->mutex);
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_slab.h b/src/gallium/auxiliary/pipebuffer/pb_slab.h
new file mode 100644
index 0000000..1d5cfdc
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_slab.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef PB_SLAB_H
+#define PB_SLAB_H
+
+#include "pb_buffer.h"
+#include "util/list.h"
+#include "os/os_thread.h"
+
+struct pb_slab;
+struct pb_slabs;
+
+struct pb_slab_entry
+{
+   struct list_head head;
+   struct pb_slab *slab; /* the slab that contains this buffer */
+   unsigned group_index;
+};
+
+struct pb_slab
+{
+   struct list_head head;
+
+   struct list_head free;
+   unsigned num_free;
+   unsigned num_entries;
+};
+
+typedef struct pb_slab *(slab_alloc_fn)(void *priv,
+                                        unsigned heap,
+                                        unsigned entry_size,
+                                        unsigned group_index);
+typedef void (slab_free_fn)(void *priv, struct pb_slab *);
+typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
+
+struct pb_slab_group
+{
+   /* Slabs with allocation candidates. Typically, slabs in this list should
+    * have some free entries.
+    *
+    * However, when the head becomes full we purposefully keep it around
+    * until the next allocation attempt, at which time we try a reclaim.
+    * The intention is to keep serving allocations from the same slab as long
+    * as possible for better locality.
+    *
+    * Due to a race in new slab allocation, additional slabs in this list
+    * can be fully allocated as well.
+    */
+   struct list_head slabs;
+};
+
+struct pb_slabs
+{
+   pipe_mutex mutex;
+
+   unsigned min_order;
+   unsigned num_orders;
+   unsigned num_heaps;
+
+   struct pb_slab_group *groups;
+   struct list_head reclaim; /* list of buffers waiting to be reclaimed */
+
+   void *priv;
+   slab_can_reclaim_fn *can_reclaim;
+   slab_alloc_fn *slab_alloc;
+   slab_free_fn *slab_free;
+};
+
+struct pb_slab_entry *
+pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
+
+void
+pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);
+
+void
+pb_slabs_reclaim(struct pb_slabs *slabs);
+
+bool
+pb_slabs_init(struct pb_slabs *slabs,
+              unsigned min_order, unsigned max_order,
+              unsigned num_heaps,
+              void *priv,
+              slab_can_reclaim_fn *can_reclaim,
+              slab_alloc_fn *slab_alloc,
+              slab_free_fn *slab_free);
+
+void
+pb_slabs_deinit(struct pb_slabs *slabs);
+
+#endif
-- 
2.7.4


