[Libreoffice-commits] core.git: sal/Library_sal.mk sal/rtl

Tor Lillqvist <tml at iki.fi>
Fri Feb 22 08:52:24 PST 2013


 sal/Library_sal.mk              |   48 
 sal/rtl/alloc_arena.cxx         | 1334 +++++++++++++++++++++++++
 sal/rtl/alloc_arena.hxx         |  120 ++
 sal/rtl/alloc_cache.cxx         | 1707 ++++++++++++++++++++++++++++++++
 sal/rtl/alloc_cache.hxx         |  162 +++
 sal/rtl/alloc_fini.cxx          |  112 ++
 sal/rtl/alloc_global.cxx        |  383 +++++++
 sal/rtl/alloc_impl.hxx          |  235 ++++
 sal/rtl/bootstrap.cxx           | 1077 ++++++++++++++++++++
 sal/rtl/byteseq.cxx             |  258 ++++
 sal/rtl/cipher.cxx              | 1351 +++++++++++++++++++++++++
 sal/rtl/cmdargs.cxx             |  101 +
 sal/rtl/crc.cxx                 |  155 ++
 sal/rtl/digest.cxx              | 2133 ++++++++++++++++++++++++++++++++++++++++
 sal/rtl/hash.cxx                |  231 ++++
 sal/rtl/hash.hxx                |   34 
 sal/rtl/locale.cxx              |  322 ++++++
 sal/rtl/logfile.cxx             |  243 ++++
 sal/rtl/math.cxx                | 1259 +++++++++++++++++++++++
 sal/rtl/random.cxx              |  328 ++++++
 sal/rtl/rtl_process.cxx         |   53 
 sal/rtl/source/alloc_arena.cxx  | 1334 -------------------------
 sal/rtl/source/alloc_arena.hxx  |  120 --
 sal/rtl/source/alloc_cache.cxx  | 1707 --------------------------------
 sal/rtl/source/alloc_cache.hxx  |  162 ---
 sal/rtl/source/alloc_fini.cxx   |  112 --
 sal/rtl/source/alloc_global.cxx |  383 -------
 sal/rtl/source/alloc_impl.hxx   |  235 ----
 sal/rtl/source/bootstrap.cxx    | 1077 --------------------
 sal/rtl/source/byteseq.cxx      |  258 ----
 sal/rtl/source/cipher.cxx       | 1351 -------------------------
 sal/rtl/source/cmdargs.cxx      |  101 -
 sal/rtl/source/crc.cxx          |  155 --
 sal/rtl/source/digest.cxx       | 2133 ----------------------------------------
 sal/rtl/source/hash.cxx         |  231 ----
 sal/rtl/source/hash.hxx         |   34 
 sal/rtl/source/locale.cxx       |  322 ------
 sal/rtl/source/logfile.cxx      |  243 ----
 sal/rtl/source/math.cxx         | 1259 -----------------------
 sal/rtl/source/random.cxx       |  328 ------
 sal/rtl/source/rtl_process.cxx  |   53 
 sal/rtl/source/strbuf.cxx       |  162 ---
 sal/rtl/source/strimp.cxx       |   52 
 sal/rtl/source/strimp.hxx       |   54 -
 sal/rtl/source/string.cxx       |  381 -------
 sal/rtl/source/strtmpl.cxx      | 1775 ---------------------------------
 sal/rtl/source/surrogates.hxx   |   44 
 sal/rtl/source/unload.cxx       |  457 --------
 sal/rtl/source/uri.cxx          |  793 --------------
 sal/rtl/source/ustrbuf.cxx      |  236 ----
 sal/rtl/source/ustring.cxx      | 1202 ----------------------
 sal/rtl/source/uuid.cxx         |  172 ---
 sal/rtl/strbuf.cxx              |  162 +++
 sal/rtl/strimp.cxx              |   52 
 sal/rtl/strimp.hxx              |   54 +
 sal/rtl/string.cxx              |  381 +++++++
 sal/rtl/strtmpl.cxx             | 1775 +++++++++++++++++++++++++++++++++
 sal/rtl/surrogates.hxx          |   44 
 sal/rtl/unload.cxx              |  457 ++++++++
 sal/rtl/uri.cxx                 |  793 ++++++++++++++
 sal/rtl/ustrbuf.cxx             |  236 ++++
 sal/rtl/ustring.cxx             | 1202 ++++++++++++++++++++++
 sal/rtl/uuid.cxx                |  172 +++
 63 files changed, 16950 insertions(+), 16950 deletions(-)

New commits:
commit 393609f3f702bf5cf34e1495f32e78b75e3d5f17
Author: Tor Lillqvist <tml at iki.fi>
Date:   Fri Feb 22 18:49:53 2013 +0200

    Bin pointless single-child directory level
    
    Change-Id: I03de46fb6c095bb176fd25fc5f803be6d2d89bcf

diff --git a/sal/Library_sal.mk b/sal/Library_sal.mk
index 0bd28fa..1d88460 100644
--- a/sal/Library_sal.mk
+++ b/sal/Library_sal.mk
@@ -106,30 +106,30 @@ $(eval $(call gb_Library_add_exception_objects,sal,\
 	sal/osl/all/log  \
 	sal/osl/all/trace  \
 	sal/osl/all/utility \
-	sal/rtl/source/alloc_arena \
-	sal/rtl/source/alloc_cache \
-	sal/rtl/source/alloc_fini \
-	sal/rtl/source/alloc_global \
-	sal/rtl/source/bootstrap \
-	sal/rtl/source/byteseq \
-	sal/rtl/source/cipher \
-	sal/rtl/source/cmdargs \
-	sal/rtl/source/crc \
-	sal/rtl/source/digest \
-	sal/rtl/source/hash \
-	sal/rtl/source/locale \
-	sal/rtl/source/logfile \
-	sal/rtl/source/math \
-	sal/rtl/source/random \
-	sal/rtl/source/rtl_process \
-	sal/rtl/source/strbuf \
-	sal/rtl/source/strimp \
-	sal/rtl/source/string \
-	sal/rtl/source/unload \
-	sal/rtl/source/uri \
-	sal/rtl/source/ustrbuf \
-	sal/rtl/source/ustring \
-	sal/rtl/source/uuid \
+	sal/rtl/alloc_arena \
+	sal/rtl/alloc_cache \
+	sal/rtl/alloc_fini \
+	sal/rtl/alloc_global \
+	sal/rtl/bootstrap \
+	sal/rtl/byteseq \
+	sal/rtl/cipher \
+	sal/rtl/cmdargs \
+	sal/rtl/crc \
+	sal/rtl/digest \
+	sal/rtl/hash \
+	sal/rtl/locale \
+	sal/rtl/logfile \
+	sal/rtl/math \
+	sal/rtl/random \
+	sal/rtl/rtl_process \
+	sal/rtl/strbuf \
+	sal/rtl/strimp \
+	sal/rtl/string \
+	sal/rtl/unload \
+	sal/rtl/uri \
+	sal/rtl/ustrbuf \
+	sal/rtl/ustring \
+	sal/rtl/uuid \
 	sal/textenc/converter \
 	sal/textenc/convertsimple \
     sal/textenc/handleundefinedunicodetotextchar \
diff --git a/sal/rtl/alloc_arena.cxx b/sal/rtl/alloc_arena.cxx
new file mode 100644
index 0000000..663239f
--- /dev/null
+++ b/sal/rtl/alloc_arena.cxx
@@ -0,0 +1,1334 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/*
+ * This file is part of the LibreOffice project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ * This file incorporates work covered by the following license notice:
+ *
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *   contributor license agreements. See the NOTICE file distributed
+ *   with this work for additional information regarding copyright
+ *   ownership. The ASF licenses this file to you under the Apache
+ *   License, Version 2.0 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
+ */
+
+#define _BSD_SOURCE 1 /* sys/mman.h: MAP_ANON */
+#include "alloc_arena.hxx"
+
+#include "alloc_impl.hxx"
+#include "internal/rtllifecycle.h"
+#include "sal/macros.h"
+#include "osl/diagnose.h"
+
+#include <cassert>
+#include <string.h>
+#include <stdio.h>
+
+extern AllocMode alloc_mode;
+
+/* ================================================================= *
+ *
+ * arena internals.
+ *
+ * ================================================================= */
+
+/** g_arena_list
+ *  @internal
+ */
+struct rtl_arena_list_st
+{
+    rtl_memory_lock_type m_lock;
+    rtl_arena_type       m_arena_head;
+};
+
+static rtl_arena_list_st g_arena_list;
+
+
+/** gp_arena_arena
+ *  provided for arena_type allocations, and hash_table resizing.
+ *
+ *  @internal
+ */
+static rtl_arena_type * gp_arena_arena = 0;
+
+
+/** gp_machdep_arena
+ *
+ *  Low level virtual memory (pseudo) arena
+ *  (platform dependent implementation)
+ *
+ *  @internal
+ */
+static rtl_arena_type * gp_machdep_arena = 0;
+
+
+static void *
+SAL_CALL rtl_machdep_alloc (
+    rtl_arena_type * pArena,
+    sal_Size *       pSize
+);
+
+static void
+SAL_CALL rtl_machdep_free (
+    rtl_arena_type * pArena,
+    void *           pAddr,
+    sal_Size         nSize
+);
+
+static sal_Size
+rtl_machdep_pagesize();
+
+
+/** gp_default_arena
+ */
+rtl_arena_type * gp_default_arena = 0;
+
+
+/* ================================================================= */
+
+/** rtl_arena_segment_constructor()
+ */
+static int
+rtl_arena_segment_constructor (void * obj)
+{
+    rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);
+
+    QUEUE_START_NAMED(segment, s);
+    QUEUE_START_NAMED(segment, f);
+
+    return (1);
+}
+
+
+/** rtl_arena_segment_destructor()
+ */
+static void
+rtl_arena_segment_destructor (void * obj)
+{
+    rtl_arena_segment_type * segment = static_cast< rtl_arena_segment_type * >(
+        obj);
+    assert(QUEUE_STARTED_NAMED(segment, s));
+    assert(QUEUE_STARTED_NAMED(segment, f));
+    (void) segment; // avoid warnings
+}
+
+/* ================================================================= */
+
+/** rtl_arena_segment_populate()
+ *
+ *  @precond  arena->m_lock acquired.
+ */
+static int
+rtl_arena_segment_populate (
+    rtl_arena_type * arena
+)
+{
+    rtl_arena_segment_type *span;
+    sal_Size                size = rtl_machdep_pagesize();
+
+    span = static_cast< rtl_arena_segment_type * >(
+        rtl_machdep_alloc(gp_machdep_arena, &size));
+    if (span != 0)
+    {
+        rtl_arena_segment_type *first, *last, *head;
+        sal_Size                count = size / sizeof(rtl_arena_segment_type);
+
+        /* insert onto reserve span list */
+        QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
+        QUEUE_START_NAMED(span, f);
+        span->m_addr = (sal_uIntPtr)(span);
+        span->m_size = size;
+        span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
+
+        /* insert onto reserve list */
+        head  = &(arena->m_segment_reserve_head);
+        for (first = span + 1, last = span + count; first < last; ++first)
+        {
+            QUEUE_INSERT_TAIL_NAMED(head, first, s);
+            QUEUE_START_NAMED(first, f);
+            first->m_addr = 0;
+            first->m_size = 0;
+            first->m_type = 0;
+        }
+    }
+    return (span != 0);
+}
+
+
+/** rtl_arena_segment_get()
+ *
+ *  @precond  arena->m_lock acquired.
+ *  @precond  (*ppSegment == 0)
+ */
+static RTL_MEMORY_INLINE void
+rtl_arena_segment_get (
+    rtl_arena_type *          arena,
+    rtl_arena_segment_type ** ppSegment
+)
+{
+    rtl_arena_segment_type * head;
+
+    assert(*ppSegment == 0);
+
+    head = &(arena->m_segment_reserve_head);
+    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
+    {
+        (*ppSegment) = head->m_snext;
+        QUEUE_REMOVE_NAMED((*ppSegment), s);
+    }
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_arena_segment_get)
+#endif
+
+
+/** rtl_arena_segment_put()
+ *
+ *  @precond  arena->m_lock acquired.
+ *  @postcond (*ppSegment == 0)
+ */
+static RTL_MEMORY_INLINE void
+rtl_arena_segment_put (
+    rtl_arena_type *          arena,
+    rtl_arena_segment_type ** ppSegment
+)
+{
+    rtl_arena_segment_type * head;
+
+    assert(QUEUE_STARTED_NAMED((*ppSegment), s));
+    assert(QUEUE_STARTED_NAMED((*ppSegment), f));
+
+    (*ppSegment)->m_addr = 0;
+    (*ppSegment)->m_size = 0;
+
+    assert((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
+    (*ppSegment)->m_type = 0;
+
+    /* keep as reserve */
+    head = &(arena->m_segment_reserve_head);
+    QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);
+
+    /* clear */
+    (*ppSegment) = 0;
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_arena_segment_put)
+#endif
+
+/* ================================================================= */
+
+/** rtl_arena_freelist_insert()
+ *
+ *  @precond arena->m_lock acquired.
+ */
+static RTL_MEMORY_INLINE void
+rtl_arena_freelist_insert (
+    rtl_arena_type *         arena,
+    rtl_arena_segment_type * segment
+)
+{
+    rtl_arena_segment_type * head;
+
+    head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
+    QUEUE_INSERT_TAIL_NAMED(head, segment, f);
+
+    arena->m_freelist_bitmap |= head->m_size;
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_arena_freelist_insert)
+#endif /* __SUNPRO_C */
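+
+/* Note: the freelists are power-of-two size classes. A segment of size s goes
+ * onto m_freelist_head[highbit(s) - 1], and m_freelist_bitmap keeps the bit
+ * head->m_size (the class' power of two) set while that list is non-empty, so
+ * rtl_arena_segment_alloc() can locate a fitting class via the bitmap.
+ */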
+
+
+/** rtl_arena_freelist_remove()
+ *
+ *  @precond arena->m_lock acquired.
+ */
+static RTL_MEMORY_INLINE void
+rtl_arena_freelist_remove (
+    rtl_arena_type *         arena,
+    rtl_arena_segment_type * segment
+)
+{
+    if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
+        (segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
+    {
+        rtl_arena_segment_type * head;
+
+        head = segment->m_fprev;
+        assert(arena->m_freelist_bitmap & head->m_size);
+        arena->m_freelist_bitmap ^= head->m_size;
+    }
+    QUEUE_REMOVE_NAMED(segment, f);
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_arena_freelist_remove)
+#endif /* __SUNPRO_C */
+
+
+/* ================================================================= */
+
+/** RTL_ARENA_HASH_INDEX()
+ */
+#define RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
+     ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))
+
+#define RTL_ARENA_HASH_INDEX(arena, addr) \
+    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
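+
+/* For illustration only (example values, not taken from these sources): with
+ * m_hash_shift == 6 and m_quantum_shift == 12, an address a maps to bucket
+ * ((a + (a >> 6) + (a >> 12)) >> 12) & (m_hash_size - 1), i.e. the bits below
+ * the quantum are discarded and higher address bits are folded in.
+ */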
+
+/** rtl_arena_hash_rescale()
+ *
+ * @precond arena->m_lock released.
+ */
+static void
+rtl_arena_hash_rescale (
+    rtl_arena_type * arena,
+    sal_Size         new_size
+)
+{
+    rtl_arena_segment_type ** new_table;
+    sal_Size                  new_bytes;
+
+    new_bytes = new_size * sizeof(rtl_arena_segment_type*);
+    new_table = (rtl_arena_segment_type **)rtl_arena_alloc (gp_arena_arena, &new_bytes);
+
+    if (new_table != 0)
+    {
+        rtl_arena_segment_type ** old_table;
+        sal_Size                  old_size, i;
+
+        memset (new_table, 0, new_bytes);
+
+        RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
+
+        old_table = arena->m_hash_table;
+        old_size  = arena->m_hash_size;
+
+        // SAL_INFO(
+        //  "sal.rtl",
+        //  "rtl_arena_hash_rescale(" << arena->m_name << "): nseg: "
+        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free) << " (ave: "
+        //      << ((arena->m_stats.m_alloc - arena->m_stats.m_free)
+        //          >> arena->m_hash_shift)
+        //      << "), frees: " << arena->m_stats.m_free << " [old_size: "
+        //      << old_size << ", new_size: " << new_size << ']');
+
+        arena->m_hash_table = new_table;
+        arena->m_hash_size  = new_size;
+        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
+
+        for (i = 0; i < old_size; i++)
+        {
+            rtl_arena_segment_type * curr = old_table[i];
+            while (curr != 0)
+            {
+                rtl_arena_segment_type  * next = curr->m_fnext;
+                rtl_arena_segment_type ** head;
+
+                head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
+                curr->m_fnext = (*head);
+                (*head) = curr;
+
+                curr = next;
+            }
+            old_table[i] = 0;
+        }
+
+        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+
+        if (old_table != arena->m_hash_table_0)
+        {
+            sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
+            rtl_arena_free (gp_arena_arena, old_table, old_bytes);
+        }
+    }
+}
+
+
+/** rtl_arena_hash_insert()
+ *  ...and update stats.
+ */
+static RTL_MEMORY_INLINE void
+rtl_arena_hash_insert (
+    rtl_arena_type *         arena,
+    rtl_arena_segment_type * segment
+)
+{
+    rtl_arena_segment_type ** ppSegment;
+
+    ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);
+
+    segment->m_fnext = (*ppSegment);
+    (*ppSegment) = segment;
+
+    arena->m_stats.m_alloc     += 1;
+    arena->m_stats.m_mem_alloc += segment->m_size;
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_arena_hash_insert)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_arena_hash_remove()
+ *  ...and update stats.
+ */
+static rtl_arena_segment_type *
+rtl_arena_hash_remove (
+    rtl_arena_type * arena,
+    sal_uIntPtr      addr,
+    sal_Size         size
+)
+{
+    rtl_arena_segment_type *segment, **segpp;
+    sal_Size lookups = 0;
+
+    segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
+    while ((segment = *segpp) != 0)
+    {
+        if (segment->m_addr == addr)
+        {
+            *segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
+            break;
+        }
+
+        /* update lookup miss stats */
+        lookups += 1;
+        segpp = &(segment->m_fnext);
+    }
+
+    assert(segment != 0); // bad free
+    if (segment != 0)
+    {
+        assert(segment->m_size == size);
+        (void) size; // avoid warnings
+
+        arena->m_stats.m_free      += 1;
+        arena->m_stats.m_mem_alloc -= segment->m_size;
+
+        if (lookups > 1)
+        {
+            sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
+            if (nseg > 4 * arena->m_hash_size)
+            {
+                if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
+                {
+                    sal_Size ave = nseg >> arena->m_hash_shift;
+                    sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);
+
+                    arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
+                    RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+                    rtl_arena_hash_rescale (arena, new_size);
+                    RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
+                    arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
+                }
+            }
+        }
+    }
+
+    return (segment);
+}
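+
+/* Note: the hash table is rescaled only after a lookup chain longer than one,
+ * and only once the number of live segments exceeds four times the number of
+ * buckets; RTL_ARENA_FLAG_RESCALE prevents re-entering the rescale while the
+ * arena lock is dropped for the reallocation.
+ */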
+
+/* ================================================================= */
+
+/** rtl_arena_segment_alloc()
+ *  allocate (and remove) segment from freelist
+ *
+ *  @precond arena->m_lock acquired
+ *  @precond (*ppSegment == 0)
+ */
+static int
+rtl_arena_segment_alloc (
+    rtl_arena_type *          arena,
+    sal_Size                  size,
+    rtl_arena_segment_type ** ppSegment
+)
+{
+    int index = 0;
+
+    assert(*ppSegment == 0);
+    if (!RTL_MEMORY_ISP2(size))
+    {
+        int msb = highbit(size);
+        if (RTL_ARENA_FREELIST_SIZE == sal::static_int_cast< size_t >(msb))
+        {
+            /* highest possible freelist: fall back to first fit */
+            rtl_arena_segment_type *head, *segment;
+
+            head = &(arena->m_freelist_head[msb - 1]);
+            for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
+            {
+                if (segment->m_size >= size)
+                {
+                    /* allocate first fit segment */
+                    (*ppSegment) = segment;
+                    break;
+                }
+            }
+            goto dequeue_and_leave;
+        }
+
+        /* roundup to next power of 2 */
+        size = (1UL << msb);
+    }
+
+    index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
+    if (index > 0)
+    {
+        /* instant fit: allocate first free segment */
+        rtl_arena_segment_type *head;
+
+        head = &(arena->m_freelist_head[index - 1]);
+        (*ppSegment) = head->m_fnext;
+        assert((*ppSegment) != head);
+    }
+
+dequeue_and_leave:
+    if (*ppSegment != 0)
+    {
+        /* remove from freelist */
+        rtl_arena_freelist_remove (arena, (*ppSegment));
+    }
+    return (*ppSegment != 0);
+}
+
+
+/** rtl_arena_segment_create()
+ *  import new (span) segment from source arena
+ *
+ *  @precond arena->m_lock acquired
+ *  @precond (*ppSegment == 0)
+ */
+static int
+rtl_arena_segment_create (
+    rtl_arena_type *          arena,
+    sal_Size                  size,
+    rtl_arena_segment_type ** ppSegment
+)
+{
+    assert((*ppSegment) == 0);
+    if (arena->m_source_alloc != 0)
+    {
+        rtl_arena_segment_get (arena, ppSegment);
+        if (*ppSegment != 0)
+        {
+            rtl_arena_segment_type * span = 0;
+            rtl_arena_segment_get (arena, &span);
+            if (span != 0)
+            {
+                /* import new span from source arena */
+                RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+
+                span->m_size = size;
+                span->m_addr = (sal_uIntPtr)(arena->m_source_alloc)(
+                    arena->m_source_arena, &(span->m_size));
+
+                RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
+                if (span->m_addr != 0)
+                {
+                    /* insert onto segment list, update stats */
+                    span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
+                    QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
+                    arena->m_stats.m_mem_total += span->m_size;
+
+                    (*ppSegment)->m_addr = span->m_addr;
+                    (*ppSegment)->m_size = span->m_size;
+                    (*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
+                    QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);
+
+                    /* report success */
+                    return (1);
+                }
+                rtl_arena_segment_put (arena, &span);
+            }
+            rtl_arena_segment_put (arena, ppSegment);
+        }
+    }
+    return (0);
+}
+
+
+/** rtl_arena_segment_coalesce()
+ *  mark as free and join with adjacent free segment(s)
+ *
+ *  @precond arena->m_lock acquired
+ *  @precond segment marked 'used'
+ */
+static void
+rtl_arena_segment_coalesce (
+    rtl_arena_type *         arena,
+    rtl_arena_segment_type * segment
+)
+{
+    rtl_arena_segment_type *next, *prev;
+
+    /* mark segment free */
+    assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
+    segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
+
+    /* try to merge w/ next segment */
+    next = segment->m_snext;
+    if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
+    {
+        assert(segment->m_addr + segment->m_size == next->m_addr);
+        segment->m_size += next->m_size;
+
+        /* remove from freelist */
+        rtl_arena_freelist_remove (arena, next);
+
+        /* remove from segment list */
+        QUEUE_REMOVE_NAMED(next, s);
+
+        /* release segment descriptor */
+        rtl_arena_segment_put (arena, &next);
+    }
+
+    /* try to merge w/ prev segment */
+    prev = segment->m_sprev;
+    if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
+    {
+        assert(prev->m_addr + prev->m_size == segment->m_addr);
+        segment->m_addr  = prev->m_addr;
+        segment->m_size += prev->m_size;
+
+        /* remove from freelist */
+        rtl_arena_freelist_remove (arena, prev);
+
+        /* remove from segment list */
+        QUEUE_REMOVE_NAMED(prev, s);
+
+        /* release segment descriptor */
+        rtl_arena_segment_put (arena, &prev);
+    }
+}
+
+/* ================================================================= */
+
+/** rtl_arena_constructor()
+ */
+static void
+rtl_arena_constructor (void * obj)
+{
+    rtl_arena_type * arena = (rtl_arena_type*)(obj);
+    rtl_arena_segment_type * head;
+    size_t i;
+
+    memset (arena, 0, sizeof(rtl_arena_type));
+
+    QUEUE_START_NAMED(arena, arena_);
+
+    (void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));
+
+    head = &(arena->m_segment_reserve_span_head);
+    rtl_arena_segment_constructor (head);
+    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
+
+    head = &(arena->m_segment_reserve_head);
+    rtl_arena_segment_constructor (head);
+    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
+
+    head = &(arena->m_segment_head);
+    rtl_arena_segment_constructor (head);
+    head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
+
+    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
+    {
+        head = &(arena->m_freelist_head[i]);
+        rtl_arena_segment_constructor (head);
+
+        head->m_size = (1UL << i);
+        head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
+    }
+
+    arena->m_hash_table = arena->m_hash_table_0;
+    arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
+    arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
+}
+
+
+/** rtl_arena_destructor()
+ */
+static void
+rtl_arena_destructor (void * obj)
+{
+    rtl_arena_type * arena = (rtl_arena_type*)(obj);
+    rtl_arena_segment_type * head;
+    size_t i;
+
+    assert(QUEUE_STARTED_NAMED(arena, arena_));
+
+    RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));
+
+    head = &(arena->m_segment_reserve_span_head);
+    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
+    rtl_arena_segment_destructor (head);
+
+    head = &(arena->m_segment_reserve_head);
+    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
+    rtl_arena_segment_destructor (head);
+
+    head = &(arena->m_segment_head);
+    assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
+    rtl_arena_segment_destructor (head);
+
+    for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
+    {
+        head = &(arena->m_freelist_head[i]);
+
+        assert(head->m_size == (1UL << i));
+        assert(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
+
+        rtl_arena_segment_destructor (head);
+    }
+
+    assert(arena->m_hash_table == arena->m_hash_table_0);
+    assert(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
+    assert(
+        arena->m_hash_shift ==
+        sal::static_int_cast< unsigned >(highbit(arena->m_hash_size) - 1));
+}
+
+/* ================================================================= */
+
+/** rtl_arena_activate()
+ */
+static rtl_arena_type *
+rtl_arena_activate (
+    rtl_arena_type *   arena,
+    const char *       name,
+    sal_Size           quantum,
+    sal_Size           quantum_cache_max,
+    rtl_arena_type *   source_arena,
+    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
+    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
+)
+{
+    assert(arena != 0);
+    if (arena != 0)
+    {
+        (void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);
+
+        if (!RTL_MEMORY_ISP2(quantum))
+        {
+            /* roundup to next power of 2 */
+            quantum = (1UL << highbit(quantum));
+        }
+        quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);
+
+        arena->m_quantum = quantum;
+        arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
+        arena->m_qcache_max = quantum_cache_max;
+
+        arena->m_source_arena = source_arena;
+        arena->m_source_alloc = source_alloc;
+        arena->m_source_free  = source_free;
+
+        if (arena->m_qcache_max > 0)
+        {
+            char namebuf[RTL_ARENA_NAME_LENGTH + 1];
+            int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
+
+            sal_Size size = n * sizeof(rtl_cache_type*);
+            arena->m_qcache_ptr = (rtl_cache_type**)rtl_arena_alloc (gp_arena_arena, &size);
+            if (!(arena->m_qcache_ptr))
+            {
+                /* out of memory */
+                return (0);
+            }
+            for (i = 1; i <= n; i++)
+            {
+                size = i * arena->m_quantum;
+                (void) snprintf (namebuf, sizeof(namebuf), "%s_%lu", arena->m_name, size);
+                arena->m_qcache_ptr[i - 1] = rtl_cache_create(namebuf, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
+            }
+        }
+
+        /* insert into arena list */
+        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
+        QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
+        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
+    }
+    return (arena);
+}
+
+/** rtl_arena_deactivate()
+ */
+static void
+rtl_arena_deactivate (
+    rtl_arena_type * arena
+)
+{
+    rtl_arena_segment_type * head, * segment;
+
+    /* remove from arena list */
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
+    QUEUE_REMOVE_NAMED(arena, arena_);
+    RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
+
+    /* cleanup quantum cache(s) */
+    if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
+    {
+        int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
+        for (i = 1; i <= n; i++)
+        {
+            if (arena->m_qcache_ptr[i - 1] != 0)
+            {
+                rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
+                arena->m_qcache_ptr[i - 1] = 0;
+            }
+        }
+        rtl_arena_free (
+            gp_arena_arena,
+            arena->m_qcache_ptr,
+            n * sizeof(rtl_cache_type*));
+
+        arena->m_qcache_ptr = 0;
+    }
+
+    /* check for leaked segments */
+    // SAL_INFO(
+    //  "sal.rtl",
+    //  "rtl_arena_deactivate(" << arena->m_name << "): allocs: "
+    //      << arena->m_stats.m_alloc << ", frees: " << arena->m_stats.m_free
+    //      << "; total: " << arena->m_stats.m_mem_total << ", used: "
+    //      << arena->m_stats.m_mem_alloc);
+    if (arena->m_stats.m_alloc > arena->m_stats.m_free)
+    {
+        sal_Size i, n;
+
+        // SAL_INFO(
+        //  "sal.rtl",
+        //  "rtl_arena_deactivate(" << arena->m_name << "): cleaning up "
+        //      << (arena->m_stats.m_alloc - arena->m_stats.m_free)
+        //      << " leaked segment(s) [" << arena->m_stats.m_mem_alloc
+        //      << " bytes]");
+
+        /* cleanup still used segment(s) */
+        for (i = 0, n = arena->m_hash_size; i < n; i++)
+        {
+            while ((segment = arena->m_hash_table[i]) != 0)
+            {
+                /* pop from hash table */
+                arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
+
+                /* coalesce w/ adjacent free segment(s) */
+                rtl_arena_segment_coalesce (arena, segment);
+
+                /* insert onto freelist */
+                rtl_arena_freelist_insert (arena, segment);
+            }
+        }
+    }
+
+    /* cleanup hash table */
+    if (arena->m_hash_table != arena->m_hash_table_0)
+    {
+        rtl_arena_free (
+            gp_arena_arena,
+            arena->m_hash_table,
+            arena->m_hash_size * sizeof(rtl_arena_segment_type*));
+
+        arena->m_hash_table = arena->m_hash_table_0;
+        arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
+        arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
+    }
+
+    /* cleanup segment list */
+    head = &(arena->m_segment_head);
+    for (segment = head->m_snext; segment != head; segment = head->m_snext)
+    {
+        if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
+        {
+            /* remove from freelist */
+            rtl_arena_freelist_remove (arena, segment);
+        }
+        else
+        {
+            /* can have only free and span segments here */
+            assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
+        }
+
+        /* remove from segment list */
+        QUEUE_REMOVE_NAMED(segment, s);
+
+        /* release segment descriptor */
+        rtl_arena_segment_put (arena, &segment);
+    }
+
+    /* cleanup segment reserve list */
+    head = &(arena->m_segment_reserve_head);
+    for (segment = head->m_snext; segment != head; segment = head->m_snext)
+    {
+        /* remove from segment list */
+        QUEUE_REMOVE_NAMED(segment, s);
+    }
+
+    /* cleanup segment reserve span(s) */
+    head = &(arena->m_segment_reserve_span_head);
+    for (segment = head->m_snext; segment != head; segment = head->m_snext)
+    {
+        /* can have only span segments here */
+        assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
+
+        /* remove from segment list */
+        QUEUE_REMOVE_NAMED(segment, s);
+
+        /* return span to g_machdep_arena */
+        rtl_machdep_free (gp_machdep_arena, (void*)(segment->m_addr), segment->m_size);
+    }
+}
+
+/* ================================================================= *
+ *
+ * arena implementation.
+ *
+ * ================================================================= */
+
+/** rtl_arena_create()
+ */
+rtl_arena_type *
+SAL_CALL rtl_arena_create (
+    const char *       name,
+    sal_Size           quantum,
+    sal_Size           quantum_cache_max,
+    rtl_arena_type *   source_arena,
+    void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
+    void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
+    SAL_UNUSED_PARAMETER int
+) SAL_THROW_EXTERN_C()
+{
+    rtl_arena_type * result = 0;
+    sal_Size         size   = sizeof(rtl_arena_type);
+
+try_alloc:
+    result = (rtl_arena_type*)rtl_arena_alloc (gp_arena_arena, &size);
+    if (result != 0)
+    {
+        rtl_arena_type * arena = result;
+        rtl_arena_constructor (arena);
+
+        if (!source_arena)
+        {
+            assert(gp_default_arena != 0);
+            source_arena = gp_default_arena;
+        }
+
+        result = rtl_arena_activate (
+            arena,
+            name,
+            quantum,
+            quantum_cache_max,
+            source_arena,
+            source_alloc,
+            source_free
+        );
+
+        if (result == 0)
+        {
+            rtl_arena_deactivate (arena);
+            rtl_arena_destructor (arena);
+            rtl_arena_free (gp_arena_arena, arena, size);
+        }
+    }
+    else if (gp_arena_arena == 0)
+    {
+        ensureArenaSingleton();
+        if (gp_arena_arena)
+        {
+            /* try again */
+            goto try_alloc;
+        }
+    }
+    return (result);
+}
+
+/** rtl_arena_destroy()
+ */
+void
+SAL_CALL rtl_arena_destroy (
+    rtl_arena_type * arena
+) SAL_THROW_EXTERN_C()
+{
+    if (arena != 0)
+    {
+        rtl_arena_deactivate (arena);
+        rtl_arena_destructor (arena);
+        rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
+    }
+}
+
+/** rtl_arena_alloc()
+ */
+void *
+SAL_CALL rtl_arena_alloc (
+    rtl_arena_type * arena,
+    sal_Size *       pSize
+) SAL_THROW_EXTERN_C()
+{
+    void * addr = 0;
+
+    if ((arena != 0) && (pSize != 0))
+    {
+        sal_Size size;
+
+        if (alloc_mode == AMode_SYSTEM)
+            return rtl_allocateMemory(*pSize);
+
+        size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
+        if (size > arena->m_qcache_max)
+        {
+            /* allocate from segment list */
+            rtl_arena_segment_type *segment = 0;
+
+            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
+            if (rtl_arena_segment_alloc (arena, size, &segment) ||
+                rtl_arena_segment_create(arena, size, &segment)    )
+            {
+                /* shrink to fit */
+                sal_Size oversize;
+
+                /* mark segment used */
+                assert(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
+                segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;
+
+                /* resize */
+                assert(segment->m_size >= size);
+                oversize = segment->m_size - size;
+                if ((oversize >= arena->m_quantum) && (oversize >= arena->m_qcache_max))
+                {
+                    rtl_arena_segment_type * remainder = 0;
+                    rtl_arena_segment_get (arena, &remainder);
+                    if (remainder != 0)
+                    {
+                        segment->m_size = size;
+
+                        remainder->m_addr = segment->m_addr + segment->m_size;
+                        remainder->m_size = oversize;
+                        remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
+                        QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);
+
+                        rtl_arena_freelist_insert (arena, remainder);
+                    }
+                }
+
+                rtl_arena_hash_insert (arena, segment);
+
+                (*pSize) = segment->m_size;
+                addr = (void*)(segment->m_addr);
+            }
+            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+        }
+        else if (size > 0)
+        {
+            /* allocate from quantum cache(s) */
+            int index = (size >> arena->m_quantum_shift) - 1;
+            assert(arena->m_qcache_ptr[index] != 0);
+
+            addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
+            if (addr != 0)
+                (*pSize) = size;
+        }
+    }
+    return (addr);
+}
+
+/** rtl_arena_free()
+ */
+void
+SAL_CALL rtl_arena_free (
+    rtl_arena_type * arena,
+    void *           addr,
+    sal_Size         size
+) SAL_THROW_EXTERN_C()
+{
+    if (arena != 0)
+    {
+        if (alloc_mode == AMode_SYSTEM)
+        {
+            rtl_freeMemory(addr);
+            return;
+        }
+
+        size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
+        if (size > arena->m_qcache_max)
+        {
+            /* free to segment list */
+            rtl_arena_segment_type * segment;
+
+            RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
+
+            segment = rtl_arena_hash_remove (arena, (sal_uIntPtr)(addr), size);
+            if (segment != 0)
+            {
+                rtl_arena_segment_type *next, *prev;
+
+                /* coalesce w/ adjacent free segment(s) */
+                rtl_arena_segment_coalesce (arena, segment);
+
+                /* determine (new) next and prev segment */
+                next = segment->m_snext, prev = segment->m_sprev;
+
+                /* entire span free when prev is a span, and next is either a span or a list head */
+                if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
+                    ((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)  ||
+                     (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
+                {
+                    assert(
+                        prev->m_addr == segment->m_addr
+                        && prev->m_size == segment->m_size);
+
+                    if (arena->m_source_free)
+                    {
+                        addr = (void*)(prev->m_addr);
+                        size = prev->m_size;
+
+                        /* remove from segment list */
+                        QUEUE_REMOVE_NAMED(segment, s);
+
+                        /* release segment descriptor */
+                        rtl_arena_segment_put (arena, &segment);
+
+                        /* remove from segment list */
+                        QUEUE_REMOVE_NAMED(prev, s);
+
+                        /* release (span) segment descriptor */
+                        rtl_arena_segment_put (arena, &prev);
+
+                        /* update stats, return span to source arena */
+                        arena->m_stats.m_mem_total -= size;
+                        RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+
+                        (arena->m_source_free)(arena->m_source_arena, addr, size);
+                        return;
+                    }
+                }
+
+                /* insert onto freelist */
+                rtl_arena_freelist_insert (arena, segment);
+            }
+
+            RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
+        }
+        else if (size > 0)
+        {
+            /* free to quantum cache(s) */
+            int index = (size >> arena->m_quantum_shift) - 1;
+            assert(arena->m_qcache_ptr[index] != 0);
+
+            rtl_cache_free (arena->m_qcache_ptr[index], addr);
+        }
+    }
+}
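+
+/* Illustrative use of the public interface above (a minimal sketch only; the
+ * arena name and sizes are made up, error handling is omitted):
+ *
+ *   sal_Size         size  = 100;
+ *   rtl_arena_type * arena = rtl_arena_create (
+ *       "example_arena", 8, 0, 0, rtl_arena_alloc, rtl_arena_free, 0);
+ *   void * p = rtl_arena_alloc (arena, &size);  // size is rounded up
+ *   rtl_arena_free (arena, p, size);            // free with the updated size
+ *   rtl_arena_destroy (arena);
+ */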
+
+/* ================================================================= *
+ *
+ * machdep internals.
+ *
+ * ================================================================= */
+
+#if defined(SAL_UNX)
+#include <sys/mman.h>
+#elif defined(SAL_W32)
+#define MAP_FAILED 0
+#endif /* SAL_UNX || SAL_W32 */
+
+/** rtl_machdep_alloc()
+ */
+static void *
+SAL_CALL rtl_machdep_alloc (
+    rtl_arena_type * pArena,
+    sal_Size *       pSize
+)
+{
+    void *   addr;
+    sal_Size size = (*pSize);
+
+    assert(pArena == gp_machdep_arena);
+
+#if defined(SOLARIS) && defined(SPARC)
+    /* see @ mmap(2) man pages */
+    size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
+    if (size > (4 << 20))
+        size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
+    else if (size > (512 << 10))
+        size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
+    else
+        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
+    size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
+#else
+    /* default allocation granularity */
+    if(pArena->m_quantum < (64 << 10))
+    {
+        size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
+    }
+    else
+    {
+        size = RTL_MEMORY_P2ROUNDUP(size, pArena->m_quantum);
+    }
+#endif
+
+#if defined(SAL_UNX)
+    addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+#elif defined(SAL_W32)
+    addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
+#endif /* (SAL_UNX || SAL_W32) */
+
+    if (addr != MAP_FAILED)
+    {
+        pArena->m_stats.m_alloc += 1;
+        pArena->m_stats.m_mem_total += size;
+        pArena->m_stats.m_mem_alloc += size;
+
+        (*pSize) = size;
+        return (addr);
+    }
+    return (NULL);
+}
+
+/** rtl_machdep_free()
+ */
+static void
+SAL_CALL rtl_machdep_free (
+    rtl_arena_type * pArena,
+    void *           pAddr,
+    sal_Size         nSize
+)
+{
+    assert(pArena == gp_machdep_arena);
+
+    pArena->m_stats.m_free += 1;
+    pArena->m_stats.m_mem_total -= nSize;
+    pArena->m_stats.m_mem_alloc -= nSize;
+
+#if defined(SAL_UNX)
+    (void) munmap(pAddr, nSize);
+#elif defined(SAL_W32)
+    (void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
+#endif /* (SAL_UNX || SAL_W32) */
+}
+
+/** rtl_machdep_pagesize()
+ */
+static sal_Size
+rtl_machdep_pagesize()
+{
+#if defined(SAL_UNX)
+#if defined(FREEBSD) || defined(NETBSD) || defined(DRAGONFLY)
+    return ((sal_Size)getpagesize());
+#else  /* POSIX */
+    return ((sal_Size)sysconf(_SC_PAGESIZE));
+#endif /* xBSD || POSIX */
+#elif defined(SAL_W32)
+    SYSTEM_INFO info;
+    GetSystemInfo (&info);
+    return ((sal_Size)(info.dwPageSize));
+#endif /* (SAL_UNX || SAL_W32) */
+}
+
+/* ================================================================= *
+ *
+ * arena initialization.
+ *
+ * ================================================================= */
+
+void
+rtl_arena_init()
+{
+    {
+        /* list of arenas */
+        RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
+        rtl_arena_constructor (&(g_arena_list.m_arena_head));
+    }
+    {
+        /* machdep (pseudo) arena */
+        static rtl_arena_type g_machdep_arena;
+
+        assert(gp_machdep_arena == 0);
+        rtl_arena_constructor (&g_machdep_arena);
+
+        gp_machdep_arena = rtl_arena_activate (
+            &g_machdep_arena,
+            "rtl_machdep_arena",
+            rtl_machdep_pagesize(),
+            0,       /* no quantum caching */
+            0, 0, 0  /* no source */
+        );
+        assert(gp_machdep_arena != 0);
+    }
+    {
+        /* default arena */
+        static rtl_arena_type g_default_arena;
+
+        assert(gp_default_arena == 0);
+        rtl_arena_constructor (&g_default_arena);
+
+        gp_default_arena = rtl_arena_activate (
+            &g_default_arena,
+            "rtl_default_arena",
+            rtl_machdep_pagesize(),
+            0,                 /* no quantum caching */
+            gp_machdep_arena,  /* source */
+            rtl_machdep_alloc,
+            rtl_machdep_free
+        );
+        assert(gp_default_arena != 0);
+    }
+    {
+        /* arena internal arena */
+        static rtl_arena_type g_arena_arena;
+
+        assert(gp_arena_arena == 0);
+        rtl_arena_constructor (&g_arena_arena);
+
+        gp_arena_arena = rtl_arena_activate (
+            &g_arena_arena,
+            "rtl_arena_internal_arena",
+            64,                /* quantum */
+            0,                 /* no quantum caching */
+            gp_default_arena,  /* source */
+            rtl_arena_alloc,
+            rtl_arena_free
+        );
+        assert(gp_arena_arena != 0);
+    }
+    // SAL_INFO("sal.rtl", "rtl_arena_init completed");
+}
+
+/* ================================================================= */
+
+void
+rtl_arena_fini()
+{
+    if (gp_arena_arena != 0)
+    {
+        rtl_arena_type * arena, * head;
+
+        RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
+        head = &(g_arena_list.m_arena_head);
+
+        for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
+        {
+            // SAL_INFO(
+            //  "sal.rtl",
+            //  "rtl_arena_fini(" << arena->m_name << "): allocs: "
+            //      << arena->m_stats.m_alloc << ", frees: "
+            //      << arena->m_stats.m_free << "; total: "
+            //      << arena->m_stats.m_mem_total << ", used: "
+            //      << arena->m_stats.m_mem_alloc);
+        }
+        RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
+    }
+    // SAL_INFO("sal.rtl", "rtl_arena_fini completed");
+}
+
+/* ================================================================= */
+
+/* vim:set shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/sal/rtl/alloc_arena.hxx b/sal/rtl/alloc_arena.hxx
new file mode 100644
index 0000000..282d9f2
--- /dev/null
+++ b/sal/rtl/alloc_arena.hxx
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/*
+ * This file is part of the LibreOffice project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ * This file incorporates work covered by the following license notice:
+ *
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *   contributor license agreements. See the NOTICE file distributed
+ *   with this work for additional information regarding copyright
+ *   ownership. The ASF licenses this file to you under the Apache
+ *   License, Version 2.0 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
+ */
+
+#ifndef INCLUDED_RTL_ALLOC_ARENA_HXX
+#define INCLUDED_RTL_ALLOC_ARENA_HXX
+
+#include "sal/types.h"
+#include "rtl/alloc.h"
+#include "alloc_impl.hxx"
+
+/** rtl_arena_stat_type
+ *  @internal
+ */
+struct rtl_arena_stat_type
+{
+    sal_uInt64 m_alloc;
+    sal_uInt64 m_free;
+
+    sal_Size   m_mem_total;
+    sal_Size   m_mem_alloc;
+};
+
+
+/** rtl_arena_segment_type
+ *  @internal
+ */
+#define RTL_ARENA_SEGMENT_TYPE_HEAD ((sal_Size)(0x01))
+#define RTL_ARENA_SEGMENT_TYPE_SPAN ((sal_Size)(0x02))
+#define RTL_ARENA_SEGMENT_TYPE_FREE ((sal_Size)(0x04))
+#define RTL_ARENA_SEGMENT_TYPE_USED ((sal_Size)(0x08))
+
+struct rtl_arena_segment_type
+{
+    /* segment list linkage */
+    rtl_arena_segment_type * m_snext;
+    rtl_arena_segment_type * m_sprev;
+
+    /* free/used list linkage */
+    rtl_arena_segment_type * m_fnext;
+    rtl_arena_segment_type * m_fprev;
+
+    /* segment description */
+    sal_uIntPtr         m_addr;
+    sal_Size            m_size;
+    sal_Size            m_type;
+};
+
+
+/** rtl_arena_type
+ *  @internal
+ */
+#define RTL_ARENA_FREELIST_SIZE (sizeof(void*) * 8)
+#define RTL_ARENA_HASH_SIZE     64
+
+#define RTL_ARENA_FLAG_RESCALE  1 /* within hash rescale operation */
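+
+/* Note: RTL_ARENA_FREELIST_SIZE provides one power-of-two size class per
+ * address bit (32 or 64); RTL_ARENA_HASH_SIZE is the size of the embedded
+ * hash table (m_hash_table_0) used before any rescale.
+ */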
+
+struct rtl_arena_st
+{
+    /* linkage */
+    rtl_arena_type *          m_arena_next;
+    rtl_arena_type *          m_arena_prev;
+
+    /* properties */
+    char                      m_name[RTL_ARENA_NAME_LENGTH + 1];
+    long                      m_flags;
+
+    rtl_memory_lock_type      m_lock;
+    rtl_arena_stat_type       m_stats;
+
+    rtl_arena_type *          m_source_arena;
+    void * (SAL_CALL * m_source_alloc)(rtl_arena_type *, sal_Size *);
+    void   (SAL_CALL * m_source_free) (rtl_arena_type *, void *, sal_Size);
+
+    sal_Size                  m_quantum;
+    sal_Size                  m_quantum_shift; /* log2(m_quantum) */
+
+    rtl_arena_segment_type    m_segment_reserve_span_head;
+    rtl_arena_segment_type    m_segment_reserve_head;
+
+    rtl_arena_segment_type    m_segment_head;
+
+    rtl_arena_segment_type    m_freelist_head[RTL_ARENA_FREELIST_SIZE];
+    sal_Size                  m_freelist_bitmap;
+
+    rtl_arena_segment_type ** m_hash_table;
+    rtl_arena_segment_type *  m_hash_table_0[RTL_ARENA_HASH_SIZE];
+    sal_Size                  m_hash_size;  /* m_hash_mask + 1   */
+    sal_Size                  m_hash_shift; /* log2(m_hash_size) */
+
+    sal_Size                  m_qcache_max;
+    rtl_cache_type **         m_qcache_ptr;
+};
+
+
+/** gp_default_arena
+ *  default arena with pagesize quantum
+ *
+ *  @internal
+ */
+extern rtl_arena_type * gp_default_arena;
+
+#endif /* INCLUDED_RTL_ALLOC_ARENA_HXX */
+
+/* vim:set shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/sal/rtl/alloc_cache.cxx b/sal/rtl/alloc_cache.cxx
new file mode 100644
index 0000000..745c0a8
--- /dev/null
+++ b/sal/rtl/alloc_cache.cxx
@@ -0,0 +1,1707 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/*
+ * This file is part of the LibreOffice project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ * This file incorporates work covered by the following license notice:
+ *
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *   contributor license agreements. See the NOTICE file distributed
+ *   with this work for additional information regarding copyright
+ *   ownership. The ASF licenses this file to you under the Apache
+ *   License, Version 2.0 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
+ */
+
+#include "alloc_cache.hxx"
+#include "alloc_impl.hxx"
+#include "alloc_arena.hxx"
+#include "internal/rtllifecycle.h"
+#include "sal/macros.h"
+#include "osl/diagnose.h"
+
+#include <cassert>
+#include <string.h>
+#include <stdio.h>
+
+extern AllocMode alloc_mode;
+
+/* ================================================================= *
+ *
+ * cache internals.
+ *
+ * ================================================================= */
+
+/** g_cache_list
+ *  @internal
+ */
+struct rtl_cache_list_st
+{
+    rtl_memory_lock_type m_lock;
+    rtl_cache_type       m_cache_head;
+
+#if defined(SAL_UNX)
+    pthread_t            m_update_thread;
+    pthread_cond_t       m_update_cond;
+#elif defined(SAL_W32)
+    HANDLE               m_update_thread;
+    HANDLE               m_update_cond;
+#endif /* SAL_UNX || SAL_W32 */
+    int                  m_update_done;
+};
+
+static rtl_cache_list_st g_cache_list;
+
+
+/** gp_cache_arena
+ *  provided for cache_type allocations, and hash_table resizing.
+ *
+ *  @internal
+ */
+static rtl_arena_type * gp_cache_arena = 0;
+
+
+/** gp_cache_magazine_cache
+ *  @internal
+ */
+static rtl_cache_type * gp_cache_magazine_cache = 0;
+
+
+/** gp_cache_slab_cache
+ *  @internal
+ */
+static rtl_cache_type * gp_cache_slab_cache = 0;
+
+
+/** gp_cache_bufctl_cache
+ *  @internal
+ */
+static rtl_cache_type * gp_cache_bufctl_cache = 0;
+
+
+/* ================================================================= */
+
+/** RTL_CACHE_HASH_INDEX()
+ */
+#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
+     ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))
+
+#define RTL_CACHE_HASH_INDEX(cache, addr) \
+    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
+
+
+/** rtl_cache_hash_rescale()
+ */
+static void
+rtl_cache_hash_rescale (
+    rtl_cache_type * cache,
+    sal_Size         new_size
+)
+{
+    rtl_cache_bufctl_type ** new_table;
+    sal_Size                 new_bytes;
+
+    new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
+    new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);
+
+    if (new_table != 0)
+    {
+        rtl_cache_bufctl_type ** old_table;
+        sal_Size                 old_size, i;
+
+        memset (new_table, 0, new_bytes);
+
+        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
+
+        old_table = cache->m_hash_table;
+        old_size  = cache->m_hash_size;
+
+        // SAL_INFO(
+        //  "sal.rtl",
+        //  "rtl_cache_hash_rescale(" << cache->m_name << "): nbuf: "
+        //      << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
+        //      << " (ave: "
+        //      << ((cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
+        //          >> cache->m_hash_shift)
+        //      << "), frees: " << cache->m_slab_stats.m_free << " [old_size: "
+        //      << old_size << ", new_size: " << new_size << ']');
+
+        cache->m_hash_table = new_table;
+        cache->m_hash_size  = new_size;
+        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
+
+        for (i = 0; i < old_size; i++)
+        {
+            rtl_cache_bufctl_type * curr = old_table[i];
+            while (curr != 0)
+            {
+                rtl_cache_bufctl_type  * next = curr->m_next;
+                rtl_cache_bufctl_type ** head;
+
+                head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
+                curr->m_next = (*head);
+                (*head) = curr;
+
+                curr = next;
+            }
+            old_table[i] = 0;
+        }
+
+        RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+
+        if (old_table != cache->m_hash_table_0)
+        {
+            sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
+            rtl_arena_free (gp_cache_arena, old_table, old_bytes);
+        }
+    }
+}
+
+/** rtl_cache_hash_insert()
+ */
+static RTL_MEMORY_INLINE sal_uIntPtr
+rtl_cache_hash_insert (
+    rtl_cache_type *        cache,
+    rtl_cache_bufctl_type * bufctl
+)
+{
+    rtl_cache_bufctl_type ** ppHead;
+
+    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);
+
+    bufctl->m_next = (*ppHead);
+    (*ppHead) = bufctl;
+
+    return (bufctl->m_addr);
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_cache_hash_insert)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_cache_hash_remove()
+ */
+static rtl_cache_bufctl_type *
+rtl_cache_hash_remove (
+    rtl_cache_type * cache,
+    sal_uIntPtr      addr
+)
+{
+    rtl_cache_bufctl_type ** ppHead;
+    rtl_cache_bufctl_type  * bufctl;
+    sal_Size                 lookups = 0;
+
+    ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
+    while ((bufctl = *ppHead) != 0)
+    {
+        if (bufctl->m_addr == addr)
+        {
+            *ppHead = bufctl->m_next, bufctl->m_next = 0;
+            break;
+        }
+
+        lookups += 1;
+        ppHead = &(bufctl->m_next);
+    }
+
+    assert(bufctl != 0); // bad free
+
+    if (lookups > 1)
+    {
+        sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
+        if (nbuf > 4 * cache->m_hash_size)
+        {
+            if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
+            {
+                sal_Size ave = nbuf >> cache->m_hash_shift;
+                sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);
+
+                cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
+                RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+                rtl_cache_hash_rescale (cache, new_size);
+                RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
+                cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
+            }
+        }
+    }
+
+    return (bufctl);
+}
+
+/* ================================================================= */
+
+/** RTL_CACHE_SLAB()
+ */
+#define RTL_CACHE_SLAB(addr, size) \
+    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
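+
+/* Note: for caches without RTL_CACHE_FEATURE_HASH the slab header is embedded
+ * in the slab memory itself: RTL_CACHE_SLAB() yields a pointer one
+ * rtl_cache_slab_type below RTL_MEMORY_P2END(addr, size), i.e. the header
+ * occupies the tail of the slab.
+ */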
+
+
+/** rtl_cache_slab_constructor()
+ */
+static int
+rtl_cache_slab_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
+{
+    rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);
+
+    QUEUE_START_NAMED(slab, slab_);
+    slab->m_ntypes = 0;
+
+    return (1);
+}
+
+
+/** rtl_cache_slab_destructor()
+ */
+static void
+rtl_cache_slab_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
+{
+    rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
+    assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
+    assert(slab->m_ntypes == 0); // assure no longer referenced
+    (void) slab; // avoid warnings
+}
+
+
+/** rtl_cache_slab_create()
+ *
+ *  @precond cache->m_slab_lock released.
+ */
+static rtl_cache_slab_type *
+rtl_cache_slab_create (
+    rtl_cache_type * cache
+)
+{
+    rtl_cache_slab_type * slab = 0;
+    void *                addr;
+    sal_Size              size;
+
+    size = cache->m_slab_size;
+    addr = rtl_arena_alloc (cache->m_source, &size);
+    if (addr != 0)
+    {
+        assert(size >= cache->m_slab_size);
+
+        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+        {
+            /* allocate slab struct from slab cache */
+            assert(cache != gp_cache_slab_cache);
+            slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
+        }
+        else
+        {
+            /* construct embedded slab struct */
+            slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
+            (void) rtl_cache_slab_constructor (slab, 0);
+        }
+        if (slab != 0)
+        {
+            slab->m_data = (sal_uIntPtr)(addr);
+
+            /* dynamic freelist initialization */
+            slab->m_bp = slab->m_data;
+            slab->m_sp = 0;
+        }
+        else
+        {
+            rtl_arena_free (cache->m_source, addr, size);
+        }
+    }
+    return (slab);
+}
+
+
+/** rtl_cache_slab_destroy()
+ *
+ *  @precond cache->m_slab_lock released.
+ */
+static void
+rtl_cache_slab_destroy (
+    rtl_cache_type *      cache,
+    rtl_cache_slab_type * slab
+)
+{
+    void *   addr   = (void*)(slab->m_data);
+    sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;
+
+    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+    {
+        /* cleanup bufctl(s) for free buffer(s) */
+        sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
+        for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
+        {
+            rtl_cache_bufctl_type * bufctl = slab->m_sp;
+
+            /* pop from freelist */
+            slab->m_sp = bufctl->m_next, bufctl->m_next = 0;
+
+            /* return bufctl struct to bufctl cache */
+            rtl_cache_free (gp_cache_bufctl_cache, bufctl);
+        }
+        assert(ntypes == 0);
+
+        /* return slab struct to slab cache */
+        rtl_cache_free (gp_cache_slab_cache, slab);
+    }
+    else
+    {
+        /* destruct embedded slab struct */
+        rtl_cache_slab_destructor (slab, 0);
+    }
+
+    if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
+    {
+        /* free memory */
+        rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
+    }
+}
+
+
+/** rtl_cache_slab_populate()
+ *
+ *  @precond cache->m_slab_lock acquired.
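+ *
+ *  Each new slab gets a small 'color' offset (a multiple of the object
+ *  alignment, wrapping at m_ncolor_max) added to its buffer start, so
+ *  that buffers at the same index in different slabs do not all hit
+ *  the same cache lines.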
+ */
+static int
+rtl_cache_slab_populate (
+    rtl_cache_type * cache
+)
+{
+    rtl_cache_slab_type * slab;
+
+    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+    slab = rtl_cache_slab_create (cache);
+    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
+    if (slab != 0)
+    {
+        /* update buffer start addr w/ current color */
+        slab->m_bp += cache->m_ncolor;
+
+        /* update color for next slab */
+        cache->m_ncolor += cache->m_type_align;
+        if (cache->m_ncolor > cache->m_ncolor_max)
+            cache->m_ncolor = 0;
+
+        /* update stats */
+        cache->m_slab_stats.m_mem_total += cache->m_slab_size;
+
+        /* insert onto 'free' queue */
+        QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
+    }
+    return (slab != 0);
+}
+
+/* ================================================================= */
+
+/** rtl_cache_slab_alloc()
+ *
+ *  Allocate a buffer from slab layer; used by magazine layer.
+ */
+static void *
+rtl_cache_slab_alloc (
+    rtl_cache_type * cache
+)
+{
+    void                * addr = 0;
+    rtl_cache_slab_type * head;
+
+    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
+
+    head = &(cache->m_free_head);
+    if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
+    {
+        rtl_cache_slab_type   * slab;
+        rtl_cache_bufctl_type * bufctl;
+
+        slab = head->m_slab_next;
+        assert(slab->m_ntypes < cache->m_ntypes);
+
+        if (slab->m_sp == 0)
+        {
+            /* initialize bufctl w/ current 'slab->m_bp' */
+            assert(slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
+            if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+            {
+                /* allocate bufctl */
+                assert(cache != gp_cache_bufctl_cache);
+                bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
+                if (bufctl == 0)
+                {
+                    /* out of memory */
+                    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+                    return (0);
+                }
+
+                bufctl->m_addr = slab->m_bp;
+                bufctl->m_slab = (sal_uIntPtr)(slab);
+            }
+            else
+            {
+                /* embedded bufctl */
+                bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
+            }
+            bufctl->m_next = 0;
+
+            /* update 'slab->m_bp' to next free buffer */
+            slab->m_bp += cache->m_type_size;
+
+            /* assign bufctl to freelist */
+            slab->m_sp = bufctl;
+        }
+
+        /* pop front */
+        bufctl = slab->m_sp;
+        slab->m_sp = bufctl->m_next;
+
+        /* increment usage, check for full slab */
+        if ((slab->m_ntypes += 1) == cache->m_ntypes)
+        {
+            /* remove from 'free' queue */
+            QUEUE_REMOVE_NAMED(slab, slab_);
+
+            /* insert onto 'used' queue (tail) */
+            QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
+        }
+
+        /* update stats */
+        cache->m_slab_stats.m_alloc     += 1;
+        cache->m_slab_stats.m_mem_alloc += cache->m_type_size;
+
+        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+            addr = (void*)rtl_cache_hash_insert (cache, bufctl);
+        else
+            addr = bufctl;
+    }
+
+    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+    return (addr);
+}
+
+
+/** rtl_cache_slab_free()
+ *
+ *  Return a buffer to slab layer; used by magazine layer.
+ */
+static void
+rtl_cache_slab_free (
+    rtl_cache_type * cache,
+    void *           addr
+)
+{
+    rtl_cache_bufctl_type * bufctl;
+    rtl_cache_slab_type   * slab;
+
+    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
+
+    /* determine slab from addr */
+    if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+    {
+        bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
+        slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
+    }
+    else
+    {
+        /* embedded slab struct */
+        bufctl = (rtl_cache_bufctl_type*)(addr);
+        slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
+    }
+
+    if (slab != 0)
+    {
+        /* check for full slab */
+        if (slab->m_ntypes == cache->m_ntypes)
+        {
+            /* remove from 'used' queue */
+            QUEUE_REMOVE_NAMED(slab, slab_);
+
+            /* insert onto 'free' queue (head) */
+            QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
+        }
+
+        /* push front */
+        bufctl->m_next = slab->m_sp;
+        slab->m_sp = bufctl;
+
+        /* update stats */
+        cache->m_slab_stats.m_free      += 1;
+        cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;
+
+        /* decrement usage, check for empty slab */
+        if ((slab->m_ntypes -= 1) == 0)
+        {
+            /* remove from 'free' queue */
+            QUEUE_REMOVE_NAMED(slab, slab_);
+
+            /* update stats */
+            cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
+
+            /* free 'empty' slab */
+            RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+            rtl_cache_slab_destroy (cache, slab);
+            return;
+        }
+    }
+
+    RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
+}
+
+/* ================================================================= */
+
+/** rtl_cache_magazine_constructor()
+ */
+static int
+rtl_cache_magazine_constructor (void * obj, SAL_UNUSED_PARAMETER void *)
+{
+    rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
+    /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
+
+    mag->m_mag_next = 0;
+    mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
+    mag->m_mag_used = 0;
+
+    return (1);
+}
+
+
+/** rtl_cache_magazine_destructor()
+ */
+static void
+rtl_cache_magazine_destructor (void * obj, SAL_UNUSED_PARAMETER void *)
+{
+    rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(
+        obj);
+    assert(mag->m_mag_next == 0); // assure removed from queue(s)
+    assert(mag->m_mag_used == 0); // assure no longer referenced
+    (void) mag; // avoid warnings
+}
+
+
+/** rtl_cache_magazine_clear()
+ */
+static void
+rtl_cache_magazine_clear (
+    rtl_cache_type *          cache,
+    rtl_cache_magazine_type * mag
+)
+{
+    for (; mag->m_mag_used > 0; --mag->m_mag_used)
+    {
+        void * obj = mag->m_objects[mag->m_mag_used - 1];
+        mag->m_objects[mag->m_mag_used - 1] = 0;
+
+        if (cache->m_destructor != 0)
+        {
+            /* destruct object */
+            (cache->m_destructor)(obj, cache->m_userarg);
+        }
+
+        /* return buffer to slab layer */
+        rtl_cache_slab_free (cache, obj);
+    }
+}
+
+/* ================================================================= */
+
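+/* Depot layer: per-cache lists of full and empty magazines
+   (m_depot_full / m_depot_empty).  The cpu layer exchanges its loaded
+   magazines here, so most allocations and frees are satisfied without
+   touching the slab layer. */
+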
+/** rtl_cache_depot_enqueue()
+ *
+ *  @precond cache->m_depot_lock acquired.
+ */
+static RTL_MEMORY_INLINE void
+rtl_cache_depot_enqueue (
+    rtl_cache_depot_type *    depot,
+    rtl_cache_magazine_type * mag
+)
+{
+    /* enqueue empty magazine */
+    mag->m_mag_next = depot->m_mag_next;
+    depot->m_mag_next = mag;
+
+    /* update depot stats */
+    depot->m_mag_count++;
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_cache_depot_enqueue)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_cache_depot_dequeue()
+ *
+ *  @precond cache->m_depot_lock acquired.
+ */
+static RTL_MEMORY_INLINE rtl_cache_magazine_type *
+rtl_cache_depot_dequeue (
+    rtl_cache_depot_type * depot
+)
+{
+    rtl_cache_magazine_type * mag = 0;
+    if (depot->m_mag_count > 0)
+    {
+        /* dequeue magazine */
+        assert(depot->m_mag_next != 0);
+
+        mag = depot->m_mag_next;
+        depot->m_mag_next = mag->m_mag_next;
+        mag->m_mag_next = 0;
+
+        /* update depot stats */
+        depot->m_mag_count--;
+        if(depot->m_curr_min > depot->m_mag_count)
+        {
+            depot->m_curr_min = depot->m_mag_count;
+        }
+    }
+    return (mag);
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_cache_depot_dequeue)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_cache_depot_exchange_alloc()
+ *
+ *  @precond cache->m_depot_lock acquired.
+ */
+static RTL_MEMORY_INLINE rtl_cache_magazine_type *
+rtl_cache_depot_exchange_alloc (
+    rtl_cache_type *          cache,
+    rtl_cache_magazine_type * empty
+)
+{
+    rtl_cache_magazine_type * full;
+
+    assert((empty == 0) || (empty->m_mag_used == 0));
+
+    /* dequeue full magazine */
+    full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
+    if ((full != 0) && (empty != 0))
+    {
+        /* enqueue empty magazine */
+        rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
+    }
+
+    assert((full == 0) || (full->m_mag_used > 0));
+
+    return (full);
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_cache_depot_exchange_alloc)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_cache_depot_exchange_free()
+ *
+ *  @precond cache->m_depot_lock acquired.
+ */
+static RTL_MEMORY_INLINE rtl_cache_magazine_type *
+rtl_cache_depot_exchange_free (
+    rtl_cache_type *          cache,
+    rtl_cache_magazine_type * full
+)
+{
+    rtl_cache_magazine_type * empty;
+
+    assert((full == 0) || (full->m_mag_used > 0));
+
+    /* dequeue empty magazine */
+    empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
+    if ((empty != 0) && (full != 0))
+    {
+        /* enqueue full magazine */
+        rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
+    }
+
+    assert((empty == 0) || (empty->m_mag_used == 0));
+
+    return (empty);
+}
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#pragma inline(rtl_cache_depot_exchange_free)
+#endif /* __SUNPRO_C */
+
+
+/** rtl_cache_depot_populate()
+ *
+ *  @precond cache->m_depot_lock acquired.
+ */
+static int
+rtl_cache_depot_populate (
+    rtl_cache_type * cache
+)
+{
+    rtl_cache_magazine_type * empty = 0;
+
+    if (cache->m_magazine_cache != 0)
+    {
+        /* allocate new empty magazine */
+        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+        empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
+        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
+        if (empty != 0)
+        {
+            /* enqueue (new) empty magazine */
+            rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
+        }
+    }
+    return (empty != 0);
+}
+
+/* ================================================================= */
+
+/** rtl_cache_constructor()
+ */
+static int
+rtl_cache_constructor (void * obj)
+{
+    rtl_cache_type * cache = (rtl_cache_type*)(obj);
+
+    memset (cache, 0, sizeof(rtl_cache_type));
+
+    /* linkage */
+    QUEUE_START_NAMED(cache, cache_);
+
+    /* slab layer */
+    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));
+
+    QUEUE_START_NAMED(&(cache->m_free_head), slab_);
+    QUEUE_START_NAMED(&(cache->m_used_head), slab_);
+
+    cache->m_hash_table = cache->m_hash_table_0;
+    cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
+    cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
+
+    /* depot layer */
+    (void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));
+
+    return (1);
+}
+
+/** rtl_cache_destructor()
+ */
+static void
+rtl_cache_destructor (void * obj)
+{
+    rtl_cache_type * cache = (rtl_cache_type*)(obj);
+
+    /* linkage */
+    assert(QUEUE_STARTED_NAMED(cache, cache_));
+
+    /* slab layer */
+    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));
+
+    assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
+    assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));
+
+    assert(cache->m_hash_table == cache->m_hash_table_0);
+    assert(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
+    assert(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));
+
+    /* depot layer */
+    (void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
+}
+
+/* ================================================================= */
+
+/** rtl_cache_activate()
+ */
+static rtl_cache_type *
+rtl_cache_activate (
+    rtl_cache_type * cache,
+    const char *     name,
+    size_t           objsize,
+    size_t           objalign,
+    int  (SAL_CALL * constructor)(void * obj, void * userarg),
+    void (SAL_CALL * destructor) (void * obj, void * userarg),
+    void (SAL_CALL * reclaim)    (void * userarg),
+    void *           userarg,
+    rtl_arena_type * source,
+    int              flags
+)
+{
+    assert(cache != 0);
+    if (cache != 0)
+    {
+        sal_Size slabsize;
+
+        snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);
+
+        /* ensure minimum size (embedded bufctl linkage) */
+        if(objsize < sizeof(rtl_cache_bufctl_type*))
+        {
+            objsize = sizeof(rtl_cache_bufctl_type*);
+        }
+
+        if (objalign == 0)
+        {
+            /* determine default alignment */
+            if (objsize >= RTL_MEMORY_ALIGNMENT_8)
+                objalign = RTL_MEMORY_ALIGNMENT_8;
+            else
+                objalign = RTL_MEMORY_ALIGNMENT_4;
+        }
+        else
+        {
+            /* ensure minimum alignment */
+            if(objalign < RTL_MEMORY_ALIGNMENT_4)
+            {
+                objalign = RTL_MEMORY_ALIGNMENT_4;
+            }
+        }
+        assert(RTL_MEMORY_ISP2(objalign));
+
+        cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
+        cache->m_type_align = objalign;
+        cache->m_type_shift = highbit(cache->m_type_size) - 1;
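+        /* illustrative example: objsize 20 with default alignment 8
+           yields m_type_size 24, m_type_align 8, m_type_shift 4 */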
+
+        cache->m_constructor = constructor;
+        cache->m_destructor  = destructor;
+        cache->m_reclaim     = reclaim;
+        cache->m_userarg     = userarg;
+
+        /* slab layer */
+        cache->m_source = source;
+
+        slabsize = source->m_quantum; /* minimum slab size */
+        if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
+        {
+            /* next power of 2 above 3 * qcache_max */
+            if(slabsize < (1UL << highbit(3 * source->m_qcache_max)))
+            {
+                slabsize = (1UL << highbit(3 * source->m_qcache_max));
+            }
+        }
+        else
+        {
+            /* waste at most 1/8 of slab */
+            if(slabsize < cache->m_type_size * 8)
+            {
+                slabsize = cache->m_type_size * 8;
+            }
+        }
+
+        slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
+        if (!RTL_MEMORY_ISP2(slabsize))
+            slabsize = 1UL << highbit(slabsize);
+        cache->m_slab_size = slabsize;
+
+        if (cache->m_slab_size > source->m_quantum)
+        {
+            assert(gp_cache_slab_cache != 0);
+            assert(gp_cache_bufctl_cache != 0);
+
+            cache->m_features  |= RTL_CACHE_FEATURE_HASH;
+            cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
+            cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
+        }
+        else
+        {
+            /* embedded slab struct */
+            cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
+            cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
+        }
+
+        assert(cache->m_ntypes > 0);
+        cache->m_ncolor = 0;
+
+        if (flags & RTL_CACHE_FLAG_BULKDESTROY)
+        {
+            /* allow bulk slab delete upon cache deactivation */
+            cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
+        }
+
+        /* magazine layer */
+        if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
+        {
+            assert(gp_cache_magazine_cache != 0);
+            cache->m_magazine_cache = gp_cache_magazine_cache;
+        }
+
+        /* insert into cache list */
+        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+        QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
+        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+    }
+    return (cache);
+}
+
+/** rtl_cache_deactivate()
+ */
+static void
+rtl_cache_deactivate (
+    rtl_cache_type * cache
+)
+{
+    /* remove from cache list */
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    int active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
+    QUEUE_REMOVE_NAMED(cache, cache_);
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+
+    assert(active); // orphaned cache
+    (void)active;
+
+    /* cleanup magazine layer */
+    if (cache->m_magazine_cache != 0)
+    {
+        rtl_cache_type *          mag_cache;
+        rtl_cache_magazine_type * mag;
+
+        /* prevent recursion */
+        mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;
+
+        /* cleanup cpu layer */
+        if ((mag = cache->m_cpu_curr) != 0)
+        {
+            cache->m_cpu_curr = 0;
+            rtl_cache_magazine_clear (cache, mag);
+            rtl_cache_free (mag_cache, mag);
+        }
+        if ((mag = cache->m_cpu_prev) != 0)
+        {
+            cache->m_cpu_prev = 0;
+            rtl_cache_magazine_clear (cache, mag);
+            rtl_cache_free (mag_cache, mag);
+        }
+
+        /* cleanup depot layer */
+        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
+        {
+            rtl_cache_magazine_clear (cache, mag);
+            rtl_cache_free (mag_cache, mag);
+        }
+        while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
+        {
+            rtl_cache_magazine_clear (cache, mag);
+            rtl_cache_free (mag_cache, mag);
+        }
+    }
+
+    // SAL_INFO(
+    //  "sal.rtl",
+    //  "rtl_cache_deactivate(" << cache->m_name << "): [slab]: allocs: "
+    //      << cache->m_slab_stats.m_alloc << ", frees: "
+    //      << cache->m_slab_stats.m_free << "; total: "
+    //      << cache->m_slab_stats.m_mem_total << ", used: "
+    //      << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
+    //      << cache->m_cpu_stats.m_alloc << ", frees: "
+    //      << cache->m_cpu_stats.m_free << "; [total]: allocs: "
+    //      << (cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc)
+    //      << ", frees: "
+    //      << (cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free));
+
+    /* cleanup slab layer */
+    if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
+    {
+        // SAL_INFO(
+        //  "sal.rtl",
+        //  "rtl_cache_deactivate(" << cache->m_name << "): cleaning up "
+        //      << (cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free)
+        //      << " leaked buffer(s) [" << cache->m_slab_stats.m_mem_alloc
+        //      << " bytes] [" << cache->m_slab_stats.m_mem_total << " total]");
+
+        if (cache->m_features & RTL_CACHE_FEATURE_HASH)
+        {
+            /* cleanup bufctl(s) for leaking buffer(s) */
+            sal_Size i, n = cache->m_hash_size;
+            for (i = 0; i < n; i++)
+            {
+                rtl_cache_bufctl_type * bufctl;
+                while ((bufctl = cache->m_hash_table[i]) != 0)
+                {
+                    /* pop from hash table */
+                    cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;
+
+                    /* return to bufctl cache */
+                    rtl_cache_free (gp_cache_bufctl_cache, bufctl);
+                }
+            }
+        }
+        {
+            /* force cleanup of remaining slabs */
+            rtl_cache_slab_type *head, *slab;
+
+            head = &(cache->m_used_head);
+            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
+            {
+                /* remove from 'used' queue */
+                QUEUE_REMOVE_NAMED(slab, slab_);
+
+                /* update stats */
+                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
+
+                /* free slab */
+                rtl_cache_slab_destroy (cache, slab);
+            }
+
+            head = &(cache->m_free_head);
+            for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
+            {
+                /* remove from 'free' queue */
+                QUEUE_REMOVE_NAMED(slab, slab_);
+
+                /* update stats */
+                cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
+
+                /* free slab */
+                rtl_cache_slab_destroy (cache, slab);
+            }
+        }
+    }
+
+    if (cache->m_hash_table != cache->m_hash_table_0)
+    {
+        rtl_arena_free (
+            gp_cache_arena,
+            cache->m_hash_table,
+            cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));
+
+        cache->m_hash_table = cache->m_hash_table_0;
+        cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
+        cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
+    }
+}
+
+/* ================================================================= *
+ *
+ * cache implementation.
+ *
+ * ================================================================= */
+
+/** rtl_cache_create()
+ */
+rtl_cache_type *
+SAL_CALL rtl_cache_create (
+    const char *     name,
+    sal_Size         objsize,
+    sal_Size         objalign,
+    int  (SAL_CALL * constructor)(void * obj, void * userarg),
+    void (SAL_CALL * destructor) (void * obj, void * userarg),
+    void (SAL_CALL * reclaim)    (void * userarg),
+    void *           userarg,
+    rtl_arena_type * source,
+    int              flags
+) SAL_THROW_EXTERN_C()
+{
+    rtl_cache_type * result = 0;
+    sal_Size         size   = sizeof(rtl_cache_type);
+
+try_alloc:
+    result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
+    if (result != 0)
+    {
+        rtl_cache_type * cache = result;
+        (void) rtl_cache_constructor (cache);
+
+        if (!source)
+        {
+            /* use default arena */
+            assert(gp_default_arena != 0);
+            source = gp_default_arena;
+        }
+
+        result = rtl_cache_activate (
+            cache,
+            name,
+            objsize,
+            objalign,
+            constructor,
+            destructor,
+            reclaim,
+            userarg,
+            source,
+            flags
+        );
+
+        if (result == 0)
+        {
+            /* activation failed */
+            rtl_cache_deactivate (cache);
+            rtl_cache_destructor (cache);
+            rtl_arena_free (gp_cache_arena, cache, size);
+        }
+    }
+    else if (gp_cache_arena == 0)
+    {
+        ensureCacheSingleton();
+        if (gp_cache_arena)
+        {
+            /* try again */
+            goto try_alloc;
+        }
+    }
+    return (result);
+}
+
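+/* Typical use (illustrative sketch only; 'MyRecord' is a made-up example
+ * type, not part of this code): create a cache for a fixed-size type,
+ * allocate and free objects from it, destroy it on shutdown.
+ *
+ *   rtl_cache_type * cache = rtl_cache_create (
+ *       "my_record_cache", sizeof(MyRecord), 0,
+ *       0, 0, 0, 0,       // constructor, destructor, reclaim, userarg
+ *       0,                // source: default arena
+ *       0);               // flags
+ *
+ *   MyRecord * p = (MyRecord*)rtl_cache_alloc (cache);
+ *   ...
+ *   rtl_cache_free (cache, p);
+ *   rtl_cache_destroy (cache);
+ */
+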
+/** rtl_cache_destroy()
+ */
+void SAL_CALL rtl_cache_destroy (
+    rtl_cache_type * cache
+) SAL_THROW_EXTERN_C()
+{
+    if (cache != 0)
+    {
+        rtl_cache_deactivate (cache);
+        rtl_cache_destructor (cache);
+        rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
+    }
+}
+
+/** rtl_cache_alloc()
+ */
+void *
+SAL_CALL rtl_cache_alloc (
+    rtl_cache_type * cache
+) SAL_THROW_EXTERN_C()
+{
+    void * obj = 0;
+
+    if (cache == 0)
+        return (0);
+
+    if (alloc_mode == AMode_SYSTEM)
+    {
+        obj = rtl_allocateMemory(cache->m_type_size);
+        if ((obj != 0) && (cache->m_constructor != 0))
+        {
+            if (!((cache->m_constructor)(obj, cache->m_userarg)))
+            {
+                /* construction failure */
+                rtl_freeMemory(obj), obj = 0;
+            }
+        }
+        return obj;
+    }
+
+    RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
+    if (cache->m_cpu_curr != 0)
+    {
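+        /* cpu layer fast path: (1) pop from the loaded magazine,
+           (2) swap loaded and previous magazines, (3) trade the exhausted
+           magazine for a full one from the depot; otherwise fall through
+           to the slab layer */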
+        for (;;)
+        {
+            /* take object from magazine layer */
+            rtl_cache_magazine_type *curr, *prev, *temp;
+
+            curr = cache->m_cpu_curr;
+            if ((curr != 0) && (curr->m_mag_used > 0))
+            {
+                obj = curr->m_objects[--curr->m_mag_used];
+                cache->m_cpu_stats.m_alloc += 1;
+                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+
+                return (obj);
+            }
+
+            prev = cache->m_cpu_prev;
+            if ((prev != 0) && (prev->m_mag_used > 0))
+            {
+                temp = cache->m_cpu_curr;
+                cache->m_cpu_curr = cache->m_cpu_prev;
+                cache->m_cpu_prev = temp;
+
+                continue;
+            }
+
+            temp = rtl_cache_depot_exchange_alloc (cache, prev);
+            if (temp != 0)
+            {
+                cache->m_cpu_prev = cache->m_cpu_curr;
+                cache->m_cpu_curr = temp;
+
+                continue;
+            }
+
+            /* no full magazine: fall through to slab layer */
+            break;
+        }
+    }
+    RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+
+    /* alloc buffer from slab layer */
+    obj = rtl_cache_slab_alloc (cache);
+    if ((obj != 0) && (cache->m_constructor != 0))
+    {
+        /* construct object */
+        if (!((cache->m_constructor)(obj, cache->m_userarg)))
+        {
+            /* construction failure */
+            rtl_cache_slab_free (cache, obj), obj = 0;
+        }
+    }
+    return (obj);
+}
+
+/** rtl_cache_free()
+ */
+void
+SAL_CALL rtl_cache_free (
+    rtl_cache_type * cache,
+    void *           obj
+) SAL_THROW_EXTERN_C()
+{
+    if ((obj != 0) && (cache != 0))
+    {
+        if (alloc_mode == AMode_SYSTEM)
+        {
+            if (cache->m_destructor != 0)
+            {
+                /* destruct object */
+                (cache->m_destructor)(obj, cache->m_userarg);
+            }
+            rtl_freeMemory(obj);
+            return;
+        }
+
+        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
+
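+        /* cpu layer fast path, mirroring rtl_cache_alloc(): (1) push into
+           the loaded magazine, (2) swap loaded and previous magazines,
+           (3) trade the full magazine for an empty one from the depot,
+           (4) allocate a fresh empty magazine; otherwise fall through to
+           the slab layer */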
+        for (;;)
+        {
+            /* return object to magazine layer */
+            rtl_cache_magazine_type *curr, *prev, *temp;
+
+            curr = cache->m_cpu_curr;
+            if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
+            {
+                curr->m_objects[curr->m_mag_used++] = obj;
+                cache->m_cpu_stats.m_free += 1;
+                RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+
+                return;
+            }
+
+            prev = cache->m_cpu_prev;
+            if ((prev != 0) && (prev->m_mag_used == 0))
+            {
+                temp = cache->m_cpu_curr;
+                cache->m_cpu_curr = cache->m_cpu_prev;
+                cache->m_cpu_prev = temp;
+
+                continue;
+            }
+
+            temp = rtl_cache_depot_exchange_free (cache, prev);
+            if (temp != 0)
+            {
+                cache->m_cpu_prev = cache->m_cpu_curr;
+                cache->m_cpu_curr = temp;
+
+                continue;
+            }
+
+            if (rtl_cache_depot_populate(cache) != 0)
+            {
+                continue;
+            }
+
+            /* no empty magazine: fall through to slab layer */
+            break;
+        }
+
+        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+
+        /* no space for constructed object in magazine layer */
+        if (cache->m_destructor != 0)
+        {
+            /* destruct object */
+            (cache->m_destructor)(obj, cache->m_userarg);
+        }
+
+        /* return buffer to slab layer */
+        rtl_cache_slab_free (cache, obj);
+    }
+}
+
+/* ================================================================= *
+ *
+ * cache wsupdate (machdep) internals.
+ *
+ * ================================================================= */
+
+/** rtl_cache_wsupdate_init()
+ *
+ *  @precond g_cache_list.m_lock initialized
+ */
+static void
+rtl_cache_wsupdate_init();
+
+
+/** rtl_cache_wsupdate_wait()
+ *
+ *  @precond g_cache_list.m_lock acquired
+ */
+static void
+rtl_cache_wsupdate_wait (
+    unsigned int seconds
+);
+
+/** rtl_cache_wsupdate_fini()
+ *
+ */
+static void
+rtl_cache_wsupdate_fini();
+
+/* ================================================================= */
+
+#if defined(SAL_UNX)
+
+#include <sys/time.h>
+
+static void *
+rtl_cache_wsupdate_all (void * arg);
+
+static void
+rtl_cache_wsupdate_init()
+{
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    g_cache_list.m_update_done = 0;
+    (void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
+    if (pthread_create (
+            &(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
+    {
+        /* failure */
+        g_cache_list.m_update_thread = (pthread_t)(0);
+    }
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+}
+
+static void
+rtl_cache_wsupdate_wait (unsigned int seconds)
+{
+    if (seconds > 0)
+    {
+        timeval  now;
+        timespec wakeup;
+
+        gettimeofday(&now, 0);
+        wakeup.tv_sec  = now.tv_sec + (seconds);
+        wakeup.tv_nsec = now.tv_usec * 1000;
+
+        (void) pthread_cond_timedwait (
+            &(g_cache_list.m_update_cond),
+            &(g_cache_list.m_lock),
+            &wakeup);
+    }
+}
+
+static void
+rtl_cache_wsupdate_fini()
+{
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    g_cache_list.m_update_done = 1;
+    pthread_cond_signal (&(g_cache_list.m_update_cond));
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+
+    if (g_cache_list.m_update_thread != (pthread_t)(0))
+        pthread_join (g_cache_list.m_update_thread, NULL);
+}
+
+/* ================================================================= */
+
+#elif defined(SAL_W32)
+
+static DWORD WINAPI
+rtl_cache_wsupdate_all (void * arg);
+
+static void
+rtl_cache_wsupdate_init()
+{
+    DWORD dwThreadId;
+
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    g_cache_list.m_update_done = 0;
+    g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);
+
+    g_cache_list.m_update_thread =
+        CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+}
+
+static void
+rtl_cache_wsupdate_wait (unsigned int seconds)
+{
+    if (seconds > 0)
+    {
+        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+        WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
+        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    }
+}
+
+static void
+rtl_cache_wsupdate_fini()
+{
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    g_cache_list.m_update_done = 1;
+    SetEvent (g_cache_list.m_update_cond);
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+
+    WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
+}
+
+#endif /* SAL_UNX || SAL_W32 */
+
+/* ================================================================= */
+
+/** rtl_cache_depot_wsupdate()
+ *  update depot stats and purge excess magazines.
+ *
+ *  @precond cache->m_depot_lock acquired
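+ *
+ *  The purge count is the smaller of the depot's minimum population
+ *  over the current and the previous update interval, i.e. only
+ *  magazines that remained in the depot throughout the last two
+ *  intervals are released.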
+ */
+static void
+rtl_cache_depot_wsupdate (
+    rtl_cache_type *       cache,
+    rtl_cache_depot_type * depot
+)
+{
+    sal_Size npurge;
+
+    depot->m_prev_min = depot->m_curr_min;
+    depot->m_curr_min = depot->m_mag_count;
+
+    npurge = depot->m_curr_min < depot->m_prev_min ? depot->m_curr_min : depot->m_prev_min;
+    for (; npurge > 0; npurge--)
+    {
+        rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
+        if (mag != 0)
+        {
+            RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+            rtl_cache_magazine_clear (cache, mag);
+            rtl_cache_free (cache->m_magazine_cache, mag);
+            RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
+        }
+    }
+}
+
+/** rtl_cache_wsupdate()
+ *
+ *  @precond cache->m_depot_lock released
+ */
+static void
+rtl_cache_wsupdate (
+    rtl_cache_type * cache
+)
+{
+    if (cache->m_magazine_cache != 0)
+    {
+        RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
+
+        // SAL_INFO(
+        //  "sal.rtl",
+        //  "rtl_cache_wsupdate(" << cache->m_name
+        //      << ") [depot: count, curr_min, prev_min] full: "
+        //      << cache->m_depot_full.m_mag_count << ", "
+        //      << cache->m_depot_full.m_curr_min << ", "
+        //      << cache->m_depot_full.m_prev_min << "; empty: "
+        //      << cache->m_depot_empty.m_mag_count << ", "
+        //      << cache->m_depot_empty.m_curr_min << ", "
+        //      << cache->m_depot_empty.m_prev_min);
+
+        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
+        rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));
+
+        RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
+    }
+}
+
+/** rtl_cache_wsupdate_all()
+ *
+ */
+#if defined(SAL_UNX)
+static void *
+#elif defined(SAL_W32)
+static DWORD WINAPI
+#endif /* SAL_UNX || SAL_W32 */
+rtl_cache_wsupdate_all (void * arg)
+{
+    unsigned int seconds = sal::static_int_cast< unsigned int >(
+        reinterpret_cast< sal_uIntPtr >(arg));
+
+    RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+    while (!g_cache_list.m_update_done)
+    {
+        rtl_cache_wsupdate_wait (seconds);
+        if (!g_cache_list.m_update_done)
+        {
+            rtl_cache_type * head, * cache;
+
+            head = &(g_cache_list.m_cache_head);
+            for (cache  = head->m_cache_next;
+                 cache != head;
+                 cache  = cache->m_cache_next)
+            {
+                rtl_cache_wsupdate (cache);
+            }
+        }
+    }
+    RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+
+    return (0);
+}
+
+/* ================================================================= *
+ *
+ * cache initialization.
+ *
+ * ================================================================= */
+
+void
+rtl_cache_init()
+{
+    {
+        /* list of caches */
+        RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
+        (void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
+    }
+    {
+        /* cache: internal arena */
+        assert(gp_cache_arena == 0);
+
+        gp_cache_arena = rtl_arena_create (
+            "rtl_cache_internal_arena",
+            64,   /* quantum */
+            0,    /* no quantum caching */
+            NULL, /* default source */
+            rtl_arena_alloc,
+            rtl_arena_free,
+            0     /* flags */
+        );
+        assert(gp_cache_arena != 0);
+
+        /* check 'gp_default_arena' initialization */
+        assert(gp_default_arena != 0);
+    }
+    {
+        /* cache: magazine cache */
+        static rtl_cache_type g_cache_magazine_cache;
+
+        assert(gp_cache_magazine_cache == 0);
+        (void) rtl_cache_constructor (&g_cache_magazine_cache);
+
+        gp_cache_magazine_cache = rtl_cache_activate (
+            &g_cache_magazine_cache,
+            "rtl_cache_magazine_cache",
+            sizeof(rtl_cache_magazine_type), /* objsize  */
+            0,                               /* objalign */
+            rtl_cache_magazine_constructor,
+            rtl_cache_magazine_destructor,
+            0, /* reclaim */
+            0, /* userarg: NYI */
+            gp_default_arena, /* source */
+            RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
+        );
+        assert(gp_cache_magazine_cache != 0);
+
+        /* activate magazine layer */
+        g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
+    }
+    {
+        /* cache: slab (struct) cache */
+        static rtl_cache_type g_cache_slab_cache;
+
+        assert(gp_cache_slab_cache == 0);
+        (void) rtl_cache_constructor (&g_cache_slab_cache);
+
+        gp_cache_slab_cache = rtl_cache_activate (
+            &g_cache_slab_cache,
+            "rtl_cache_slab_cache",
+            sizeof(rtl_cache_slab_type), /* objsize  */
+            0,                           /* objalign */
+            rtl_cache_slab_constructor,
+            rtl_cache_slab_destructor,
+            0,                           /* reclaim */
+            0,                           /* userarg: none */
+            gp_default_arena,            /* source */
+            0                            /* flags: none */
+        );
+        assert(gp_cache_slab_cache != 0);
+    }
+    {
+        /* cache: bufctl cache */
+        static rtl_cache_type g_cache_bufctl_cache;
+
+        assert(gp_cache_bufctl_cache == 0);
+        (void) rtl_cache_constructor (&g_cache_bufctl_cache);
+
+        gp_cache_bufctl_cache = rtl_cache_activate (
+            &g_cache_bufctl_cache,
+            "rtl_cache_bufctl_cache",
+            sizeof(rtl_cache_bufctl_type), /* objsize */
+            0,                             /* objalign  */
+            0,                /* constructor */
+            0,                /* destructor */
+            0,                /* reclaim */
+            0,                /* userarg */
+            gp_default_arena, /* source */
+            0                 /* flags: none */
+        );
+        assert(gp_cache_bufctl_cache != 0);
+    }
+
+    rtl_cache_wsupdate_init();
+    // SAL_INFO("sal.rtl", "rtl_cache_init completed");
+}
+
+/* ================================================================= */
+
+void
+rtl_cache_fini()
+{
+    if (gp_cache_arena != 0)
+    {
+        rtl_cache_type * cache, * head;
+
+        rtl_cache_wsupdate_fini();
+
+        if (gp_cache_bufctl_cache != 0)
+        {
+            cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
+            rtl_cache_deactivate (cache);
+            rtl_cache_destructor (cache);
+        }
+        if (gp_cache_slab_cache != 0)
+        {
+            cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
+            rtl_cache_deactivate (cache);
+            rtl_cache_destructor (cache);
+        }
+        if (gp_cache_magazine_cache != 0)
+        {
+            cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
+            rtl_cache_deactivate (cache);
+            rtl_cache_destructor (cache);
+        }
+        if (gp_cache_arena != 0)
+        {
+            rtl_arena_destroy (gp_cache_arena);
+            gp_cache_arena = 0;
+        }
+
+        RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
+        head = &(g_cache_list.m_cache_head);
+        for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
+        {
+            // SAL_INFO(
+            //  "sal.rtl",
+            //  "rtl_cache_fini(" << cache->m_name << ") [slab]: allocs: "
+            //      << cache->m_slab_stats.m_alloc << ", frees: "
+            //      << cache->m_slab_stats.m_free << "; total: "
+            //      << cache->m_slab_stats.m_mem_total << ", used: "
+            //      << cache->m_slab_stats.m_mem_alloc << "; [cpu]: allocs: "
+            //      << cache->m_cpu_stats.m_alloc << ", frees: "
+            //      << cache->m_cpu_stats.m_free  << "; [total]: allocs: "
+            //      << (cache->m_slab_stats.m_alloc
+            //          + cache->m_cpu_stats.m_alloc)
+            //      << ", frees: "
+            //      << (cache->m_slab_stats.m_free
+            //          + cache->m_cpu_stats.m_free));
+        }
+        RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+    }
+    // SAL_INFO("sal.rtl", "rtl_cache_fini completed");
+}
+
+/* vim:set shiftwidth=4 softtabstop=4 expandtab: */
diff --git a/sal/rtl/alloc_cache.hxx b/sal/rtl/alloc_cache.hxx
new file mode 100644
index 0000000..877ec82
--- /dev/null
+++ b/sal/rtl/alloc_cache.hxx
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/*
+ * This file is part of the LibreOffice project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ * This file incorporates work covered by the following license notice:
+ *
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *   contributor license agreements. See the NOTICE file distributed
+ *   with this work for additional information regarding copyright
+ *   ownership. The ASF licenses this file to you under the Apache
+ *   License, Version 2.0 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
+ */
+
+#ifndef INCLUDED_RTL_ALLOC_CACHE_HXX
+#define INCLUDED_RTL_ALLOC_CACHE_HXX
+
+#include "sal/types.h"
+#include "rtl/alloc.h"
+#include "alloc_impl.hxx"
+
+/** rtl_cache_stat_type
+ *  @internal
+ */
+struct rtl_cache_stat_type
+{
+    sal_uInt64 m_alloc;
+    sal_uInt64 m_free;
+
+    sal_Size   m_mem_total;
+    sal_Size   m_mem_alloc;
+};
+
+
+/** rtl_cache_bufctl_type
+ *  @internal
+ */
+struct rtl_cache_bufctl_type
+{
+    rtl_cache_bufctl_type * m_next; /* linkage */
+
+    sal_uIntPtr             m_addr; /* buffer address  */
+    sal_uIntPtr             m_slab; /* parent slab address */
+};
+
+
+/** rtl_cache_slab_type
+ *  @internal
+ */
+struct rtl_cache_slab_type
+{
+    rtl_cache_slab_type *   m_slab_next; /* slab linkage */
+    rtl_cache_slab_type *   m_slab_prev; /* slab linkage */
+
+    sal_Size                m_ntypes;    /* number of buffers used */
+    sal_uIntPtr             m_data;      /* buffer start addr */
+
+    sal_uIntPtr             m_bp;        /* free buffer linkage 'base pointer'  */
+    rtl_cache_bufctl_type * m_sp;        /* free buffer linkage 'stack pointer' */
+};
+
+
+/** rtl_cache_magazine_type
+ *  @internal
+ */
+#define RTL_CACHE_MAGAZINE_SIZE 61
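+/* 61 slots plus the three header fields make a magazine exactly 256 bytes
+   on ILP32 resp. 512 bytes on LP64; presumably chosen so magazines pack
+   into power-of-two allocations. */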
+
+struct rtl_cache_magazine_type
+{
+    rtl_cache_magazine_type * m_mag_next; /* depot linkage */
+
+    sal_Size                  m_mag_size;
+    sal_Size                  m_mag_used;
+
+    void *                    m_objects[RTL_CACHE_MAGAZINE_SIZE];
+};
+
+
+/** rtl_cache_depot_type
+ *  @internal
+ */
+struct rtl_cache_depot_type
+{
+    /* magazine list */
+    rtl_cache_magazine_type * m_mag_next;  /* linkage */
+    sal_Size                  m_mag_count; /* count */
+
+    /* working set parameters */
+    sal_Size                  m_curr_min;
+    sal_Size                  m_prev_min;
+};
+
+
+/** rtl_cache_type
+ *  @internal
+ */
+#define RTL_CACHE_HASH_SIZE        8
+
+#define RTL_CACHE_FEATURE_HASH        1
+#define RTL_CACHE_FEATURE_BULKDESTROY 2
+#define RTL_CACHE_FEATURE_RESCALE     4 /* within hash rescale operation */
+
+struct rtl_cache_st
+{
+    /* linkage */
+    rtl_cache_type *          m_cache_next;
+    rtl_cache_type *          m_cache_prev;
+
+    /* properties */
+    char                      m_name[RTL_CACHE_NAME_LENGTH + 1];
+    long                      m_features;
+
+    sal_Size                  m_type_size;   /* const */
+    sal_Size                  m_type_align;  /* const */
+    sal_Size                  m_type_shift;  /* log2(m_type_size); const */
+
+    int  (SAL_CALL * m_constructor)(void * obj, void * userarg); /* const */
+    void (SAL_CALL * m_destructor) (void * obj, void * userarg); /* const */
+    void (SAL_CALL * m_reclaim)    (void * userarg);             /* const */
+    void *                    m_userarg;
+
+    /* slab layer */
+    rtl_memory_lock_type      m_slab_lock;
+    rtl_cache_stat_type       m_slab_stats;
+
+    rtl_arena_type *          m_source;     /* slab supplier; const */
+    sal_Size                  m_slab_size;  /* const */
+    sal_Size                  m_ntypes;     /* number of buffers per slab; const */
+    sal_Size                  m_ncolor;     /* next slab color */
+    sal_Size                  m_ncolor_max; /* max. slab color */
+
+    rtl_cache_slab_type       m_free_head;
+    rtl_cache_slab_type       m_used_head;
+
+    rtl_cache_bufctl_type **  m_hash_table;
+    rtl_cache_bufctl_type *   m_hash_table_0[RTL_CACHE_HASH_SIZE];
+    sal_Size                  m_hash_size;  /* number of buckets (power of 2) */
+    sal_Size                  m_hash_shift; /* log2(m_hash_size) */
+
+    /* depot layer */
+    rtl_memory_lock_type      m_depot_lock;
+
+    rtl_cache_depot_type      m_depot_empty;
+    rtl_cache_depot_type      m_depot_full;
+
+    rtl_cache_type *          m_magazine_cache; /* magazine supplier; const */
+
+    /* cpu layer */
+    rtl_cache_magazine_type * m_cpu_curr;
+    rtl_cache_magazine_type * m_cpu_prev;
+
+    rtl_cache_stat_type       m_cpu_stats;
+};
+

... etc. - the rest is truncated

