[Mesa-dev] [PATCH v2] util/slab: re-design to allow migration between pools (v2)

Marek Olšák maraeo at gmail.com
Fri Sep 30 12:13:10 UTC 2016


intptr_t reads and writes aren't guaranteed to be atomic, and the
p_atomic_set and p_atomic_read macros don't do anything to make them
atomic. See:

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
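
For comparison, here's a sketch of what actually-atomic accesses might look
like with C11 atomics (assuming the owner field were declared _Atomic; this
is not what u_atomic.h provides today):

#include <stdatomic.h>

_Atomic intptr_t owner;

/* guaranteed atomic, with explicit ordering */
atomic_store_explicit(&owner, (intptr_t)page | 1, memory_order_release);
intptr_t v = atomic_load_explicit(&owner, memory_order_acquire);

Aligned pointer-sized accesses happen to be atomic on most hardware we care
about, but with plain loads and stores the compiler is still free to tear or
reorder them.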

Marek

On Wed, Sep 28, 2016 at 9:47 AM, Nicolai Hähnle <nhaehnle at gmail.com> wrote:
> From: Nicolai Hähnle <nicolai.haehnle at amd.com>
>
> This is basically a re-write of the slab allocator into a design where
> multiple child pools are linked to a parent pool. The intention is that
> every (GL, pipe) context has its own child pool, while the corresponding
> parent pool is held by the winsys or screen, or possibly the GL share group.
>
> The fast path is still used when objects are freed by the same child pool
> that allocated them. However, it is now also possible to free an object in a
> different pool, as long as both pools belong to the same parent. Objects also
> survive the destruction of the (child) pool from which they were allocated.
>
> The slow path will return freed objects to the child pool from which they
> were originally allocated. If that child pool was destroyed, the corresponding
> page is considered an orphan and will be freed once all objects in it have
> been freed.
>
> This allocation pattern is required for pipe_transfers that correspond to
> (GL) buffer object mappings when the mapping is created in one context
> which is later destroyed while other contexts of the same share group live
> on -- see the bug report referenced below.
>
> Note that individual drivers do need to migrate to the new interface in
> order to benefit from this and to fix the bug.
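>
> For reference, a rough before/after sketch of the migration ("screen" and
> "ctx" stand in for whatever driver objects own the pools):
>
>    /* old interface: */
>    slab_create(&pool, size, num);
>    ptr = slab_alloc_mt(&pool);
>    slab_free_mt(&pool, ptr);
>    slab_destroy(&pool);
>
>    /* new interface: */
>    slab_create_parent(&screen->pool, size, num);   /* once, in the screen */
>    slab_create_child(&ctx->pool, &screen->pool);   /* once per context */
>    ptr = slab_alloc(&ctx->pool);
>    slab_free(&ctx->pool, ptr);   /* or any child of the same parent */
>    slab_destroy_child(&ctx->pool);
>    slab_destroy_parent(&screen->pool);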
>
> v2: use singly-linked lists everywhere
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=97894
> ---
>  src/util/slab.c | 285 +++++++++++++++++++++++++++++++++++++++++---------------
>  src/util/slab.h |  62 +++++++++---
>  2 files changed, 258 insertions(+), 89 deletions(-)
>
> diff --git a/src/util/slab.c b/src/util/slab.c
> index af75152..35540bd 100644
> --- a/src/util/slab.c
> +++ b/src/util/slab.c
> @@ -16,166 +16,303 @@
>   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
>   * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
>   * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
>   * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
>   * USE OR OTHER DEALINGS IN THE SOFTWARE. */
>
>  #include "slab.h"
>  #include "macros.h"
> -#include "simple_list.h"
> +#include "u_atomic.h"
>  #include <stdint.h>
>  #include <stdbool.h>
>  #include <string.h>
>
>  #define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))
>
> +#define SLAB_MAGIC_ALLOCATED 0xcafe4321
> +#define SLAB_MAGIC_FREE 0x7ee01234
> +
>  #ifdef DEBUG
> -#define SLAB_MAGIC 0xcafe4321
> -#define SET_MAGIC(element)   (element)->magic = SLAB_MAGIC
> -#define CHECK_MAGIC(element) assert((element)->magic == SLAB_MAGIC)
> +#define SET_MAGIC(element, value)   (element)->magic = (value)
> +#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
>  #else
> -#define SET_MAGIC(element)
> -#define CHECK_MAGIC(element)
> +#define SET_MAGIC(element, value)
> +#define CHECK_MAGIC(element, value)
>  #endif
>
>  /* One array element within a big buffer. */
>  struct slab_element_header {
> -   /* The next free element. */
> -   struct slab_element_header *next_free;
> +   /* The next element in the free or migrated list. */
> +   struct slab_element_header *next;
> +
> +   /* This is either
> +    * - a pointer to the child pool to which this element belongs, or
> +    * - a pointer to the orphaned page of the element, with the least
> +    *   significant bit set to 1.
> +    */
> +   intptr_t owner;
>
>  #ifdef DEBUG
> -   /* Use intptr_t to keep the header aligned to a pointer size. */
>     intptr_t magic;
>  #endif
>  };
>
> +/* The page is an array of allocations in one block. */
> +struct slab_page_header {
> +   union {
> +      /* Next page in the same child pool. */
> +      struct slab_page_header *next;
> +
> +      /* Number of remaining, non-freed elements (for orphaned pages). */
> +      unsigned num_remaining;
> +   } u;
> +   /* Memory after the last member is dedicated to the page itself.
> +    * The allocated size is always larger than this structure.
> +    */
> +};
> +
> +
>  static struct slab_element_header *
> -slab_get_element(struct slab_mempool *pool,
> +slab_get_element(struct slab_parent_pool *parent,
>                   struct slab_page_header *page, unsigned index)
>  {
>     return (struct slab_element_header*)
> -          ((uint8_t*)&page[1] + (pool->element_size * index));
> +          ((uint8_t*)&page[1] + (parent->element_size * index));
> +}
> +
> +/* The given object/element belongs to an orphaned page (i.e. the owning child
> + * pool has been destroyed). Mark the element as freed and free the whole page
> + * when no elements are left in it.
> + */
> +static void
> +slab_free_orphaned(struct slab_element_header *elt)
> +{
> +   struct slab_page_header *page;
> +
> +   assert(elt->owner & 1);
> +
> +   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
> +   if (!p_atomic_dec_return(&page->u.num_remaining))
> +      free(page);
> +}
> +
> +/**
> + * Create a parent pool for the allocation of same-sized objects.
> + *
> + * \param item_size     Size of one object.
> + * \param num_items     Number of objects to allocate at once.
> + */
> +void
> +slab_create_parent(struct slab_parent_pool *parent,
> +                   unsigned item_size,
> +                   unsigned num_items)
> +{
> +   mtx_init(&parent->mutex, mtx_plain);
> +   parent->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
> +                                sizeof(intptr_t));
> +   parent->num_elements = num_items;
> +}
> +
> +void
> +slab_destroy_parent(struct slab_parent_pool *parent)
> +{
> +   mtx_destroy(&parent->mutex);
> +}
> +
> +/**
> + * Create a child pool linked to the given parent.
> + */
> +void slab_create_child(struct slab_child_pool *pool,
> +                       struct slab_parent_pool *parent)
> +{
> +   pool->parent = parent;
> +   pool->pages = NULL;
> +   pool->free = NULL;
> +   pool->migrated = NULL;
> +}
> +
> +/**
> + * Destroy the child pool.
> + *
> + * Pages associated with the pool will be orphaned. They are eventually freed
> + * when all objects in them are freed.
> + */
> +void slab_destroy_child(struct slab_child_pool *pool)
> +{
> +   mtx_lock(&pool->parent->mutex);
> +
> +   while (pool->pages) {
> +      struct slab_page_header *page = pool->pages;
> +      pool->pages = page->u.next;
> +      page->u.num_remaining = pool->parent->num_elements;
> +
> +      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
> +         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
> +         p_atomic_set(&elt->owner, (intptr_t)page | 1);
> +      }
> +   }
> +
> +   while (pool->migrated) {
> +      struct slab_element_header *elt = pool->migrated;
> +      pool->migrated = elt->next;
> +      slab_free_orphaned(elt);
> +   }
> +
> +   mtx_unlock(&pool->parent->mutex);
> +
> +   while (pool->free) {
> +      struct slab_element_header *elt = pool->free;
> +      pool->free = elt->next;
> +      slab_free_orphaned(elt);
> +   }
> +
> +   /* Guard against use-after-free. */
> +   pool->parent = NULL;
>  }
>
>  static bool
> -slab_add_new_page(struct slab_mempool *pool)
> +slab_add_new_page(struct slab_child_pool *pool)
>  {
>     struct slab_page_header *page;
> -   struct slab_element_header *element;
>     unsigned i;
>
>     page = malloc(sizeof(struct slab_page_header) +
> -                 pool->num_elements * pool->element_size);
> +                 pool->parent->num_elements * pool->parent->element_size);
>     if (!page)
>        return false;
>
> -   if (!pool->list.prev)
> -      make_empty_list(&pool->list);
> -
> -   insert_at_tail(&pool->list, page);
> +   for (i = 0; i < pool->parent->num_elements; ++i) {
> +      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
> +      elt->owner = (intptr_t)pool;
> +      assert(!(elt->owner & 1));
>
> -   /* Mark all elements as free. */
> -   for (i = 0; i < pool->num_elements-1; i++) {
> -      element = slab_get_element(pool, page, i);
> -      element->next_free = slab_get_element(pool, page, i + 1);
> -      SET_MAGIC(element);
> +      elt->next = pool->free;
> +      pool->free = elt;
> +      SET_MAGIC(elt, SLAB_MAGIC_FREE);
>     }
>
> -   element = slab_get_element(pool, page, pool->num_elements - 1);
> -   element->next_free = pool->first_free;
> -   SET_MAGIC(element);
> -   pool->first_free = slab_get_element(pool, page, 0);
> +   page->u.next = pool->pages;
> +   pool->pages = page;
> +
>     return true;
>  }
>
>  /**
> - * Allocate an object from the slab. Single-threaded (no mutex).
> + * Allocate an object from the child pool. Single-threaded (i.e. the caller
> + * must ensure that no operation happens on the same child pool in another
> + * thread).
>   */
>  void *
> -slab_alloc_st(struct slab_mempool *pool)
> +slab_alloc(struct slab_child_pool *pool)
>  {
> -   struct slab_element_header *element;
> +   struct slab_element_header *elt;
>
> -   /* Allocate a new page. */
> -   if (!pool->first_free &&
> -       !slab_add_new_page(pool))
> -      return NULL;
> +   if (!pool->free) {
> +      /* First, collect elements that belong to us but were freed from a
> +       * different child pool.
> +       */
> +      mtx_lock(&pool->parent->mutex);
> +      pool->free = pool->migrated;
> +      pool->migrated = NULL;
> +      mtx_unlock(&pool->parent->mutex);
> +
> +      /* Now allocate a new page. */
> +      if (!pool->free && !slab_add_new_page(pool))
> +         return NULL;
> +   }
>
> -   element = pool->first_free;
> -   CHECK_MAGIC(element);
> -   pool->first_free = element->next_free;
> -   return &element[1];
> +   elt = pool->free;
> +   pool->free = elt->next;
> +
> +   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
> +   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
> +
> +   return &elt[1];
>  }
>
>  /**
> - * Free an object allocated from the slab. Single-threaded (no mutex).
> + * Free an object allocated from the slab. Single-threaded (i.e. the caller
> + * must ensure that no operation happens on the same child pool in another
> + * thread).
> + *
> + * Freeing an object in a different child pool from the one where it was
> + * allocated is allowed, as long as the pools belong to the same parent. No
> + * additional locking is required in this case.
>   */
> -void
> -slab_free_st(struct slab_mempool *pool, void *ptr)
> +void slab_free(struct slab_child_pool *pool, void *ptr)
>  {
> -   struct slab_element_header *element =
> -      ((struct slab_element_header*)ptr - 1);
> +   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
> +   intptr_t owner_int;
> +
> +   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
> +   SET_MAGIC(elt, SLAB_MAGIC_FREE);
> +
> +   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
> +      /* This is the simple case: The caller guarantees that we can safely
> +       * access the free list.
> +       */
> +      elt->next = pool->free;
> +      pool->free = elt;
> +      return;
> +   }
>
> -   CHECK_MAGIC(element);
> -   element->next_free = pool->first_free;
> -   pool->first_free = element;
> +   /* The slow case: migration or an orphaned page. */
> +   mtx_lock(&pool->parent->mutex);
> +
> +   /* Note: we _must_ re-read elt->owner here because the owning child pool
> +    * may have been destroyed by another thread in the meantime.
> +    */
> +   owner_int = p_atomic_read(&elt->owner);
> +
> +   if (!(owner_int & 1)) {
> +      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
> +      elt->next = owner->migrated;
> +      owner->migrated = elt;
> +      mtx_unlock(&pool->parent->mutex);
> +   } else {
> +      mtx_unlock(&pool->parent->mutex);
> +
> +      slab_free_orphaned(elt);
> +   }
>  }
>
>  /**
> - * Allocate an object from the slab. Thread-safe.
> + * Allocate an object from the slab. Single-threaded (no mutex).
>   */
>  void *
> -slab_alloc_mt(struct slab_mempool *pool)
> +slab_alloc_st(struct slab_mempool *pool)
>  {
> -   void *mem;
> -
> -   mtx_lock(&pool->mutex);
> -   mem = slab_alloc_st(pool);
> -   mtx_unlock(&pool->mutex);
> -   return mem;
> +   return slab_alloc(&pool->child);
>  }
>
>  /**
> - * Free an object allocated from the slab. Thread-safe.
> + * Free an object allocated from the slab. Single-threaded (no mutex).
>   */
>  void
> -slab_free_mt(struct slab_mempool *pool, void *ptr)
> +slab_free_st(struct slab_mempool *pool, void *ptr)
>  {
> -   mtx_lock(&pool->mutex);
> -   slab_free_st(pool, ptr);
> -   mtx_unlock(&pool->mutex);
> +   slab_free(&pool->child, ptr);
>  }
>
>  void
>  slab_destroy(struct slab_mempool *pool)
>  {
> -   struct slab_page_header *page, *temp;
> -
> -   if (pool->list.next) {
> -      foreach_s(page, temp, &pool->list) {
> -         remove_from_list(page);
> -         free(page);
> -      }
> -   }
> -
> -   mtx_destroy(&pool->mutex);
> +   slab_destroy_child(&pool->child);
> +   slab_destroy_parent(&pool->parent);
>  }
>
>  /**
>   * Create an allocator for same-sized objects.
>   *
>   * \param item_size     Size of one object.
>   * \param num_items     Number of objects to allocate at once.
>   */
>  void
>  slab_create(struct slab_mempool *pool,
>              unsigned item_size,
>              unsigned num_items)
>  {
> -   mtx_init(&pool->mutex, mtx_plain);
> -   pool->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
> -                              sizeof(intptr_t));
> -   pool->num_elements = num_items;
> -   pool->first_free = NULL;
> +   slab_create_parent(&pool->parent, item_size, num_items);
> +   slab_create_child(&pool->child, &pool->parent);
>  }
> diff --git a/src/util/slab.h b/src/util/slab.h
> index 9d13f6a..e83f8ec 100644
> --- a/src/util/slab.h
> +++ b/src/util/slab.h
> @@ -16,47 +16,79 @@
>   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
>   * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
>   * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
>   * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
>   * USE OR OTHER DEALINGS IN THE SOFTWARE. */
>
>  /**
>   * Slab allocator for equally sized memory allocations.
> - * The thread-safe path ("*_mt" functions) is usually slower than malloc/free.
> - * The single-threaded path ("*_st" functions) is faster than malloc/free.
> + *
> + * Objects are allocated from "child" pools that are connected to a "parent"
> + * pool.
> + *
> + * Calls to slab_alloc/slab_free for the same child pool must not occur from
> + * multiple threads simultaneously.
> + *
> + * Allocations obtained from one child pool should usually be freed in the
> + * same child pool. Freeing an allocation in a different child pool associated
> + * with the same parent is allowed (and requires no locking by the caller), but
> + * it is discouraged because it incurs a performance penalty.
> + *
> + * For convenience and to ease the transition, there is also a set of wrapper
> + * functions around a single parent-child pair.
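> + *
> + * For example, given two child pools A and B attached to the same parent
> + * (names illustrative), the following is legal:
> + *
> + *    void *ptr = slab_alloc(&A);
> + *    slab_free(&B, ptr);    - allowed, but slower than freeing via A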
>   */
>
>  #ifndef SLAB_H
>  #define SLAB_H
>
>  #include "c11/threads.h"
>
> -/* The page is an array of allocations in one block. */
> -struct slab_page_header {
> -   /* The header (linked-list pointers). */
> -   struct slab_page_header *prev, *next;
> +struct slab_element_header;
> +struct slab_page_header;
> +
> +struct slab_parent_pool {
> +   mtx_t mutex;
> +   unsigned element_size;
> +   unsigned num_elements;
> +};
> +
> +struct slab_child_pool {
> +   struct slab_parent_pool *parent;
> +
> +   struct slab_page_header *pages;
> +
> +   /* Free elements. */
> +   struct slab_element_header *free;
>
> -   /* Memory after the last member is dedicated to the page itself.
> -    * The allocated size is always larger than this structure.
> +   /* Elements that are owned by this pool but were freed with a different
> +    * pool as the argument to slab_free.
> +    *
> +    * This list is protected by the parent mutex.
>      */
> +   struct slab_element_header *migrated;
>  };
>
> +void slab_create_parent(struct slab_parent_pool *parent,
> +                        unsigned item_size,
> +                        unsigned num_items);
> +void slab_destroy_parent(struct slab_parent_pool *parent);
> +void slab_create_child(struct slab_child_pool *pool,
> +                       struct slab_parent_pool *parent);
> +void slab_destroy_child(struct slab_child_pool *pool);
> +void *slab_alloc(struct slab_child_pool *pool);
> +void slab_free(struct slab_child_pool *pool, void *ptr);
> +
>  struct slab_mempool {
> -   mtx_t mutex;
> -   unsigned element_size;
> -   unsigned num_elements;
> -   struct slab_element_header *first_free;
> -   struct slab_page_header list;
> +   struct slab_parent_pool parent;
> +   struct slab_child_pool child;
>  };
>
>  void slab_create(struct slab_mempool *pool,
>                   unsigned item_size,
>                   unsigned num_items);
>  void slab_destroy(struct slab_mempool *pool);
>  void *slab_alloc_st(struct slab_mempool *pool);
>  void slab_free_st(struct slab_mempool *pool, void *ptr);
> -void *slab_alloc_mt(struct slab_mempool *pool);
> -void slab_free_mt(struct slab_mempool *pool, void *ptr);
>
>  #endif
> --
> 2.7.4
>
> _______________________________________________
> mesa-dev mailing list
> mesa-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/mesa-dev

