[Spice-devel] [PATCH qxl-wddm-dod v2 12/25] Rename mspace.c to mspace.cpp
Sameeh Jubran
sameeh at daynix.com
Tue Sep 6 10:28:59 UTC 2016
On Mon, Sep 5, 2016 at 9:26 PM, Frediano Ziglio <fziglio at redhat.com> wrote:
> Why was the rationale removed?
>
> By mistake, I'll add it back in the next version.
> By the way, this was a workaround for a VS bug; get Visual Studio 2015
> Update 1 (actually we are at Update 3).
>
True, however Microsoft recommends using the Visual C++ compiler profile, as
it is different from the C compiler profile.
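
For reference, if keeping the .c name were preferred, the file could in
principle be forced through the C++ front end from the project file instead
of being renamed. A minimal sketch using the standard MSVC /TP / CompileAs
settings (illustrative only, not part of this patch):

  <!-- qxldod.vcxproj: compile a single .c file as C++ (equivalent to cl /TP) -->
  <ItemGroup>
    <ClCompile Include="mspace.c">
      <CompileAs>CompileAsCpp</CompileAs>
    </ClCompile>
  </ItemGroup>

Renaming the file to .cpp, as this patch does, achieves the same effect
without per-file project settings.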
>
> Nack
>
> Frediano
>
> >
> > Signed-off-by: Sameeh Jubran <sameeh at daynix.com>
> > ---
> > qxldod/mspace.c | 2437
> > -----------------------------------------
> > qxldod/mspace.cpp | 2437
> > +++++++++++++++++++++++++++++++++++++++++
> > qxldod/qxldod.vcxproj | 2 +-
> > qxldod/qxldod.vcxproj.filters | 2 +-
> > 4 files changed, 2439 insertions(+), 2439 deletions(-)
> > delete mode 100755 qxldod/mspace.c
> > create mode 100755 qxldod/mspace.cpp
> >
> > diff --git a/qxldod/mspace.c b/qxldod/mspace.c
> > deleted file mode 100755
> > index d0ba123..0000000
> > --- a/qxldod/mspace.c
> > +++ /dev/null
> > @@ -1,2437 +0,0 @@
> > -// based on dlmalloc from Doug Lea
> > -
> > -
> > -// quote from the Doug Lea original file
> > - /*
> > - This is a version (aka dlmalloc) of malloc/free/realloc written by
> > - Doug Lea and released to the public domain, as explained at
> > - http://creativecommons.org/licenses/publicdomain. Send questions,
> > - comments, complaints, performance data, etc to dl at cs.oswego.edu
> > -
> > - * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
> > -
> > - Note: There may be an updated version of this malloc obtainable at
> > - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
> > - Check before installing!
> > - */
> > -
> > -
> > -#include <ntddk.h>
> > -
> > -#include "mspace.h"
> > -
> > -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
> > -
> > -#define MALLOC_ALIGNMENT ((size_t)8U)
> > -#define USE_LOCKS 0
> > -#define malloc_getpagesize ((size_t)4096U)
> > -#define DEFAULT_GRANULARITY malloc_getpagesize
> > -#define MAX_SIZE_T (~(size_t)0)
> > -#define MALLOC_FAILURE_ACTION
> > -#define MALLINFO_FIELD_TYPE size_t
> > -#define FOOTERS 0
> > -#define INSECURE 0
> > -#define PROCEED_ON_ERROR 0
> > -#define DEBUG 0
> > -#define ABORT_ON_ASSERT_FAILURE 1
> > -#define ABORT(user_data) abort_func(user_data)
> > -#define USE_BUILTIN_FFS 0
> > -#define USE_DEV_RANDOM 0
> > -#define PRINT(params) print_func params
> > -
> > -
> > -#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)
> > -#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)
> > -
> > -
> > -#define M_GRANULARITY (-1)
> > -
> > -void default_abort_func(void *user_data)
> > -{
> > - for (;;);
> > -}
> > -
> > -void default_print_func(void *user_data, char *format, ...)
> > -{
> > -}
> > -
> > -static mspace_abort_t abort_func = default_abort_func;
> > -static mspace_print_t print_func = default_print_func;
> > -
> > -void mspace_set_abort_func(mspace_abort_t f)
> > -{
> > - abort_func = f;
> > -}
> > -
> > -void mspace_set_print_func(mspace_print_t f)
> > -{
> > - print_func = f;
> > -}
> > -
> > -/* ------------------------ Mallinfo declarations ------------------------ */
> > -
> > -#if !NO_MALLINFO
> > -/*
> > - This version of malloc supports the standard SVID/XPG mallinfo
> > - routine that returns a struct containing usage properties and
> > - statistics. It should work on any system that has a
> > - /usr/include/malloc.h defining struct mallinfo. The main
> > - declaration needed is the mallinfo struct that is returned (by-copy)
> > - by mallinfo(). The mallinfo struct contains a bunch of fields that
> > - are not even meaningful in this version of malloc. These fields are
> > - instead filled by mallinfo() with other numbers that might be of
> > - interest.
> > -
> > - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
> > - /usr/include/malloc.h file that includes a declaration of struct
> > - mallinfo. If so, it is included; else a compliant version is
> > - declared below. These must be precisely the same for mallinfo() to
> > - work. The original SVID version of this struct, defined on most
> > - systems with mallinfo, declares all fields as ints. But some others
> > - define as unsigned long. If your system defines the fields using a
> > - type of different width than listed here, you MUST #include your
> > - system version and #define HAVE_USR_INCLUDE_MALLOC_H.
> > -*/
> > -
> > -/* #define HAVE_USR_INCLUDE_MALLOC_H */
> > -
> > -
> > -struct mallinfo {
> > - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
> > - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
> > - MALLINFO_FIELD_TYPE smblks; /* always 0 */
> > - MALLINFO_FIELD_TYPE hblks; /* always 0 */
> > - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
> > - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
> > - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
> > - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
> > - MALLINFO_FIELD_TYPE fordblks; /* total free space */
> > - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
> > -};
> > -
> > -#endif /* NO_MALLINFO */
> > -
> > -
> > -
> > -#ifdef DEBUG
> > -#if ABORT_ON_ASSERT_FAILURE
> > -#define assert(user_data, x) if(!(x)) ABORT(user_data)
> > -#else /* ABORT_ON_ASSERT_FAILURE */
> > -#include <assert.h>
> > -#endif /* ABORT_ON_ASSERT_FAILURE */
> > -#else /* DEBUG */
> > -#define assert(user_data, x)
> > -#endif /* DEBUG */
> > -
> > -/* ------------------- size_t and alignment properties -------------------- */
> > -
> > -/* The byte and bit size of a size_t */
> > -#define SIZE_T_SIZE (sizeof(size_t))
> > -#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
> > -
> > -/* Some constants coerced to size_t */
> > -/* Annoying but necessary to avoid errors on some platforms */
> > -#define SIZE_T_ZERO ((size_t)0)
> > -#define SIZE_T_ONE ((size_t)1)
> > -#define SIZE_T_TWO ((size_t)2)
> > -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
> > -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
> > -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
> > -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
> > -
> > -/* The bit mask value corresponding to MALLOC_ALIGNMENT */
> > -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
> > -
> > -/* True if address a has acceptable alignment */
> > -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
> > -
> > -/* the number of bytes to offset an address to align it */
> > -#define align_offset(A)\
> > - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
> > - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
> > -
> > -/* --------------------------- Lock preliminaries ------------------------ */
> > -
> > -#if USE_LOCKS
> > -
> > -/*
> > - When locks are defined, there are up to two global locks:
> > -
> > - * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
> > - MORECORE. In many cases sys_alloc requires two calls, that should
> > - not be interleaved with calls by other threads. This does not
> > - protect against direct calls to MORECORE by other threads not
> > - using this lock, so there is still code to cope the best we can on
> > - interference.
> > -
> > - * magic_init_mutex ensures that mparams.magic and other
> > - unique mparams values are initialized only once.
> > -*/
> > -
> > -
> > -#define USE_LOCK_BIT (2U)
> > -#else /* USE_LOCKS */
> > -#define USE_LOCK_BIT (0U)
> > -#define INITIAL_LOCK(l)
> > -#endif /* USE_LOCKS */
> > -
> > -#if USE_LOCKS
> > -#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
> > -#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
> > -#else /* USE_LOCKS */
> > -#define ACQUIRE_MAGIC_INIT_LOCK()
> > -#define RELEASE_MAGIC_INIT_LOCK()
> > -#endif /* USE_LOCKS */
> > -
> > -
> > -
> > -/* ----------------------- Chunk representations ------------------------ */
> > -
> > -/*
> > - (The following includes lightly edited explanations by Colin Plumb.)
> > -
> > - The malloc_chunk declaration below is misleading (but accurate and
> > - necessary). It declares a "view" into memory allowing access to
> > - necessary fields at known offsets from a given base.
> > -
> > - Chunks of memory are maintained using a `boundary tag' method as
> > - originally described by Knuth. (See the paper by Paul Wilson
> > - ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
> > - techniques.) Sizes of free chunks are stored both in the front of
> > - each chunk and at the end. This makes consolidating fragmented
> > - chunks into bigger chunks fast. The head fields also hold bits
> > - representing whether chunks are free or in use.
> > -
> > - Here are some pictures to make it clearer. They are "exploded" to
> > - show that the state of a chunk can be thought of as extending from
> > - the high 31 bits of the head field of its header through the
> > - prev_foot and PINUSE_BIT bit of the following chunk header.
> > -
> > - A chunk that's in use looks like:
> > -
> > -  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          | Size of previous chunk (if P = 1)                             |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > -        | Size of this chunk                                         1| +-+
> > -    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          |                                                               |
> > -          +-                                                             -+
> > -          |                                                               |
> > -          +-                                                             -+
> > -          |                                                               :
> > -          +-      size - sizeof(size_t) available payload bytes          -+
> > -          :                                                               |
> > -  chunk-> +-                                                             -+
> > -          |                                                               |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
> > -        | Size of next chunk (may or may not be in use)                | +-+
> > -    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > - And if it's free, it looks like this:
> > -
> > -  chunk-> +-                                                             -+
> > -          | User payload (must be in use, or we would have merged!)      |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > -        | Size of this chunk                                         0| +-+
> > -    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          | Next pointer                                                  |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          | Prev pointer                                                  |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          |                                                               :
> > -          +-      size - sizeof(struct chunk) unused bytes               -+
> > -          :                                                               |
> > -  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          | Size of this chunk                                            |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
> > -        | Size of next chunk (must be in use, or we would have merged) | +-+
> > -    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -          |                                                               :
> > -          +- User payload                                                -+
> > -          :                                                               |
> > -          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -                                                                        |0|
> > -                                                                        +-+
> > - Note that since we always merge adjacent free chunks, the chunks
> > - adjacent to a free chunk must be in use.
> > -
> > - Given a pointer to a chunk (which can be derived trivially from the
> > - payload pointer) we can, in O(1) time, find out whether the adjacent
> > - chunks are free, and if so, unlink them from the lists that they
> > - are on and merge them with the current chunk.
> > -
> > - Chunks always begin on even word boundaries, so the mem portion
> > - (which is returned to the user) is also on an even word boundary, and
> > - thus at least double-word aligned.
> > -
> > - The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
> > - chunk size (which is always a multiple of two words), is an in-use
> > - bit for the *previous* chunk. If that bit is *clear*, then the
> > - word before the current chunk size contains the previous chunk
> > - size, and can be used to find the front of the previous chunk.
> > - The very first chunk allocated always has this bit set, preventing
> > - access to non-existent (or non-owned) memory. If pinuse is set for
> > - any given chunk, then you CANNOT determine the size of the
> > - previous chunk, and might even get a memory addressing fault when
> > - trying to do so.
> > -
> > - The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
> > - the chunk size redundantly records whether the current chunk is
> > - inuse. This redundancy enables usage checks within free and realloc,
> > - and reduces indirection when freeing and consolidating chunks.
> > -
> > - Each freshly allocated chunk must have both cinuse and pinuse set.
> > - That is, each allocated chunk borders either a previously allocated
> > - and still in-use chunk, or the base of its memory arena. This is
> > - ensured by making all allocations from the `lowest' part of any
> > - found chunk. Further, no free chunk physically borders another one,
> > - so each free chunk is known to be preceded and followed by either
> > - inuse chunks or the ends of memory.
> > -
> > - Note that the `foot' of the current chunk is actually represented
> > - as the prev_foot of the NEXT chunk. This makes it easier to
> > - deal with alignments etc but can be very confusing when trying
> > - to extend or adapt this code.
> > -
> > - The exceptions to all this are
> > -
> > - 1. The special chunk `top' is the top-most available chunk (i.e.,
> > - the one bordering the end of available memory). It is treated
> > - specially. Top is never included in any bin, is used only if
> > - no other chunk is available, and is released back to the
> > - system if it is very large (see M_TRIM_THRESHOLD). In effect,
> > - the top chunk is treated as larger (and thus less well
> > - fitting) than any other available chunk. The top chunk
> > - doesn't update its trailing size field since there is no next
> > - contiguous chunk that would have to index off it. However,
> > - space is still allocated for it (TOP_FOOT_SIZE) to enable
> > - separation or merging when space is extended.
> > -
> > - 2. Chunks allocated via mmap, which have the lowest-order bit
> > - (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
> > - PINUSE_BIT in their head fields. Because they are allocated
> > - one-by-one, each must carry its own prev_foot field, which is
> > - also used to hold the offset this chunk has within its mmapped
> > - region, which is needed to preserve alignment. Each mmapped
> > - chunk is trailed by the first two fields of a fake next-chunk
> > - for sake of usage checks.
> > -
> > -*/
> > -
> > -struct malloc_chunk {
> > - size_t prev_foot; /* Size of previous chunk (if free). */
> > - size_t head; /* Size and inuse bits. */
> > - struct malloc_chunk* fd; /* double links -- used only if free. */
> > - struct malloc_chunk* bk;
> > -};
> > -
> > -typedef struct malloc_chunk mchunk;
> > -typedef struct malloc_chunk* mchunkptr;
> > -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
> > -typedef unsigned int bindex_t; /* Described below */
> > -typedef unsigned int binmap_t; /* Described below */
> > -typedef unsigned int flag_t; /* The type of various bit flag sets */
> > -
> > -
> > -/* ------------------- Chunks sizes and alignments ----------------------- */
> > -
> > -#define MCHUNK_SIZE (sizeof(mchunk))
> > -
> > -#if FOOTERS
> > -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
> > -#else /* FOOTERS */
> > -#define CHUNK_OVERHEAD (SIZE_T_SIZE)
> > -#endif /* FOOTERS */
> > -
> > -/* The smallest size we can malloc is an aligned minimal chunk */
> > -#define MIN_CHUNK_SIZE\
> > - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > -
> > -/* conversion from malloc headers to user pointers, and back */
> > -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
> > -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
> > -/* chunk associated with aligned address A */
> > -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
> > -
> > -/* Bounds on request (not chunk) sizes. */
> > -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
> > -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
> > -
> > -/* pad request bytes into a usable size */
> > -#define pad_request(req) \
> > - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > -
> > -/* pad request, checking for minimum (but not maximum) */
> > -#define request2size(req) \
> > - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
> > -
> > -/* ------------------ Operations on head and foot fields ----------------- */
> > -
> > -/*
> > - The head field of a chunk is or'ed with PINUSE_BIT when previous
> > - adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
> > - use. If the chunk was obtained with mmap, the prev_foot field has
> > - IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
> > - mmapped region to the base of the chunk.
> > -*/
> > -
> > -#define PINUSE_BIT (SIZE_T_ONE)
> > -#define CINUSE_BIT (SIZE_T_TWO)
> > -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
> > -
> > -/* Head value for fenceposts */
> > -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
> > -
> > -/* extraction of fields from head words */
> > -#define cinuse(p) ((p)->head & CINUSE_BIT)
> > -#define pinuse(p) ((p)->head & PINUSE_BIT)
> > -#define chunksize(p) ((p)->head & ~(INUSE_BITS))
> > -
> > -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
> > -#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
> > -
> > -/* Treat space at ptr +/- offset as a chunk */
> > -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
> > -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
> > -
> > -/* Ptr to next or previous physical malloc_chunk. */
> > -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
> > -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
> > -
> > -/* extract next chunk's pinuse bit */
> > -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
> > -
> > -/* Get/set size at footer */
> > -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
> > -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
> > -
> > -/* Set size, pinuse bit, and foot */
> > -#define set_size_and_pinuse_of_free_chunk(p, s)\
> > - ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
> > -
> > -/* Set size, pinuse bit, foot, and clear next pinuse */
> > -#define set_free_with_pinuse(p, s, n)\
> > - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
> > -
> > -/* Get the internal overhead associated with chunk p */
> > -#define overhead_for(p) CHUNK_OVERHEAD
> > -
> > -/* Return true if malloced space is not necessarily cleared */
> > -#define calloc_must_clear(p) (1)
> > -
> > -
> > -/* ---------------------- Overlaid data structures ----------------------- */
> > -
> > -/*
> > - When chunks are not in use, they are treated as nodes of either
> > - lists or trees.
> > -
> > - "Small" chunks are stored in circular doubly-linked lists, and look
> > - like this:
> > -
> > -     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Size of previous chunk                                        |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -     `head:' | Size of chunk, in bytes                                     |P|
> > -       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Forward pointer to next chunk in list                         |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Back pointer to previous chunk in list                        |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Unused space (may be 0 bytes long)                            .
> > -             .                                                               .
> > -             .                                                               |
> > - nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -     `foot:' | Size of chunk, in bytes                                       |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > - Larger chunks are kept in a form of bitwise digital trees (aka
> > - tries) keyed on chunksizes. Because malloc_tree_chunks are only for
> > - free chunks greater than 256 bytes, their size doesn't impose any
> > - constraints on user chunk sizes. Each node looks like:
> > -
> > -     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Size of previous chunk                                        |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -     `head:' | Size of chunk, in bytes                                     |P|
> > -       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Forward pointer to next chunk of same size                    |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Back pointer to previous chunk of same size                   |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Pointer to left child (child[0])                              |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Pointer to right child (child[1])                             |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Pointer to parent                                             |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | bin index of this chunk                                       |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -             | Unused space                                                  .
> > -             .                                                               |
> > - nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -     `foot:' | Size of chunk, in bytes                                       |
> > -             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > - Each tree holding treenodes is a tree of unique chunk sizes. Chunks
> > - of the same size are arranged in a circularly-linked list, with only
> > - the oldest chunk (the next to be used, in our FIFO ordering)
> > - actually in the tree. (Tree members are distinguished by a non-null
> > - parent pointer.) If a chunk with the same size as an existing node
> > - is inserted, it is linked off the existing node using pointers that
> > - work in the same way as fd/bk pointers of small chunks.
> > -
> > - Each tree contains a power of 2 sized range of chunk sizes (the
> > - smallest is 0x100 <= x < 0x180), which is divided in half at each
> > - tree level, with the chunks in the smaller half of the range (0x100
> > - <= x < 0x140 for the top node) in the left subtree and the larger
> > - half (0x140 <= x < 0x180) in the right subtree. This is, of course,
> > - done by inspecting individual bits.
> > -
> > - Using these rules, each node's left subtree contains all smaller
> > - sizes than its right subtree. However, the node at the root of each
> > - subtree has no particular ordering relationship to either. (The
> > - dividing line between the subtree sizes is based on trie relation.)
> > - If we remove the last chunk of a given size from the interior of the
> > - tree, we need to replace it with a leaf node. The tree ordering
> > - rules permit a node to be replaced by any leaf below it.
> > -
> > - The smallest chunk in a tree (a common operation in a best-fit
> > - allocator) can be found by walking a path to the leftmost leaf in
> > - the tree. Unlike a usual binary tree, where we follow left child
> > - pointers until we reach a null, here we follow the right child
> > - pointer any time the left one is null, until we reach a leaf with
> > - both child pointers null. The smallest chunk in the tree will be
> > - somewhere along that path.
> > -
> > - The worst case number of steps to add, find, or remove a node is
> > - bounded by the number of bits differentiating chunks within
> > - bins. Under current bin calculations, this ranges from 6 up to 21
> > - (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
> > - is of course much better.
> > -*/
> > -
> > -struct malloc_tree_chunk {
> > - /* The first four fields must be compatible with malloc_chunk */
> > - size_t prev_foot;
> > - size_t head;
> > - struct malloc_tree_chunk* fd;
> > - struct malloc_tree_chunk* bk;
> > -
> > - struct malloc_tree_chunk* child[2];
> > - struct malloc_tree_chunk* parent;
> > - bindex_t index;
> > -};
> > -
> > -typedef struct malloc_tree_chunk tchunk;
> > -typedef struct malloc_tree_chunk* tchunkptr;
> > -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees
> */
> > -
> > -/* A little helper macro for trees */
> > -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
> > -
> > -/* ----------------------------- Segments -------------------------------- */
> > -
> > -/*
> > - Each malloc space may include non-contiguous segments, held in a
> > - list headed by an embedded malloc_segment record representing the
> > - top-most space. Segments also include flags holding properties of
> > - the space. Large chunks that are directly allocated by mmap are not
> > - included in this list. They are instead independently created and
> > - destroyed without otherwise keeping track of them.
> > -
> > - Segment management mainly comes into play for spaces allocated by
> > - MMAP. Any call to MMAP might or might not return memory that is
> > - adjacent to an existing segment. MORECORE normally contiguously
> > - extends the current space, so this space is almost always adjacent,
> > - which is simpler and faster to deal with. (This is why MORECORE is
> > - used preferentially to MMAP when both are available -- see
> > - sys_alloc.) When allocating using MMAP, we don't use any of the
> > - hinting mechanisms (inconsistently) supported in various
> > - implementations of unix mmap, or distinguish reserving from
> > - committing memory. Instead, we just ask for space, and exploit
> > - contiguity when we get it. It is probably possible to do
> > - better than this on some systems, but no general scheme seems
> > - to be significantly better.
> > -
> > - Management entails a simpler variant of the consolidation scheme
> > - used for chunks to reduce fragmentation -- new adjacent memory is
> > - normally prepended or appended to an existing segment. However,
> > - there are limitations compared to chunk consolidation that mostly
> > - reflect the fact that segment processing is relatively infrequent
> > - (occurring only when getting memory from system) and that we
> > - don't expect to have huge numbers of segments:
> > -
> > - * Segments are not indexed, so traversal requires linear scans. (It
> > - would be possible to index these, but is not worth the extra
> > - overhead and complexity for most programs on most platforms.)
> > - * New segments are only appended to old ones when holding top-most
> > - memory; if they cannot be prepended to others, they are held in
> > - different segments.
> > -
> > - Except for the top-most segment of an mstate, each segment record
> > - is kept at the tail of its segment. Segments are added by pushing
> > - segment records onto the list headed by &mstate.seg for the
> > - containing mstate.
> > -
> > - Segment flags control allocation/merge/deallocation policies:
> > - * If EXTERN_BIT set, then we did not allocate this segment,
> > - and so should not try to deallocate or merge with others.
> > - (This currently holds only for the initial segment passed
> > - into create_mspace_with_base.)
> > - * If IS_MMAPPED_BIT set, the segment may be merged with
> > - other surrounding mmapped segments and trimmed/de-allocated
> > - using munmap.
> > - * If neither bit is set, then the segment was obtained using
> > - MORECORE so can be merged with surrounding MORECORE'd segments
> > - and deallocated/trimmed using MORECORE with negative arguments.
> > -*/
> > -
> > -struct malloc_segment {
> > - char* base; /* base address */
> > - size_t size; /* allocated size */
> > - struct malloc_segment* next; /* ptr to next segment */
> > -};
> > -
> > -typedef struct malloc_segment msegment;
> > -typedef struct malloc_segment* msegmentptr;
> > -
> > -/* ---------------------------- malloc_state ----------------------------- */
> > -
> > -/*
> > - A malloc_state holds all of the bookkeeping for a space.
> > - The main fields are:
> > -
> > - Top
> > - The topmost chunk of the currently active segment. Its size is
> > - cached in topsize. The actual size of topmost space is
> > - topsize+TOP_FOOT_SIZE, which includes space reserved for adding
> > - fenceposts and segment records if necessary when getting more
> > - space from the system. The size at which to autotrim top is
> > - cached from mparams in trim_check, except that it is disabled if
> > - an autotrim fails.
> > -
> > - Designated victim (dv)
> > - This is the preferred chunk for servicing small requests that
> > - don't have exact fits. It is normally the chunk split off most
> > - recently to service another small request. Its size is cached in
> > - dvsize. The link fields of this chunk are not maintained since it
> > - is not kept in a bin.
> > -
> > - SmallBins
> > - An array of bin headers for free chunks. These bins hold chunks
> > - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
> > - chunks of all the same size, spaced 8 bytes apart. To simplify
> > - use in double-linked lists, each bin header acts as a malloc_chunk
> > - pointing to the real first node, if it exists (else pointing to
> > - itself). This avoids special-casing for headers. But to avoid
> > - waste, we allocate only the fd/bk pointers of bins, and then use
> > - repositioning tricks to treat these as the fields of a chunk.
> > -
> > - TreeBins
> > - Treebins are pointers to the roots of trees holding a range of
> > - sizes. There are 2 equally spaced treebins for each power of two
> > - from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
> > - larger.
> > -
> > - Bin maps
> > - There is one bit map for small bins ("smallmap") and one for
> > - treebins ("treemap"). Each bin sets its bit when non-empty, and
> > - clears the bit when empty. Bit operations are then used to avoid
> > - bin-by-bin searching -- nearly all "search" is done without ever
> > - looking at bins that won't be selected. The bit maps
> > - conservatively use 32 bits per map word, even if on 64bit system.
> > - For a good description of some of the bit-based techniques used
> > - here, see Henry S. Warren Jr's book "Hacker's Delight" (and
> > - supplement at http://hackersdelight.org/). Many of these are
> > - intended to reduce the branchiness of paths through malloc etc, as
> > - well as to reduce the number of memory locations read or written.
> > -
> > - Segments
> > - A list of segments headed by an embedded malloc_segment record
> > - representing the initial space.
> > -
> > - Address check support
> > - The least_addr field is the least address ever obtained from
> > - MORECORE or MMAP. Attempted frees and reallocs of any address less
> > - than this are trapped (unless INSECURE is defined).
> > -
> > - Magic tag
> > - A cross-check field that should always hold same value as mparams.magic.
> > -
> > - Flags
> > - Bits recording whether to use MMAP, locks, or contiguous MORECORE
> > -
> > - Statistics
> > - Each space keeps track of current and maximum system memory
> > - obtained via MORECORE or MMAP.
> > -
> > - Locking
> > - If USE_LOCKS is defined, the "mutex" lock is acquired and released
> > - around every public call using this mspace.
> > -*/
> > -
> > -/* Bin types, widths and sizes */
> > -#define NSMALLBINS (32U)
> > -#define NTREEBINS (32U)
> > -#define SMALLBIN_SHIFT (3U)
> > -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
> > -#define TREEBIN_SHIFT (8U)
> > -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
> > -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
> > -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
> > -
> > -struct malloc_state {
> > - binmap_t smallmap;
> > - binmap_t treemap;
> > - size_t dvsize;
> > - size_t topsize;
> > - char* least_addr;
> > - mchunkptr dv;
> > - mchunkptr top;
> > - size_t magic;
> > - mchunkptr smallbins[(NSMALLBINS+1)*2];
> > - tbinptr treebins[NTREEBINS];
> > - size_t footprint;
> > - size_t max_footprint;
> > - flag_t mflags;
> > - void *user_data;
> > -#if USE_LOCKS
> > - MLOCK_T mutex; /* locate lock among fields that rarely change */
> > -#endif /* USE_LOCKS */
> > - msegment seg;
> > -};
> > -
> > -typedef struct malloc_state* mstate;
> > -
> > -/* ------------- Global malloc_state and malloc_params ------------------- */
> > -
> > -/*
> > - malloc_params holds global properties, including those that can be
> > - dynamically set using mallopt. There is a single instance, mparams,
> > - initialized in init_mparams.
> > -*/
> > -
> > -struct malloc_params {
> > - size_t magic;
> > - size_t page_size;
> > - size_t granularity;
> > - flag_t default_mflags;
> > -};
> > -
> > -static struct malloc_params mparams;
> > -
> > -/* The global malloc_state used for all non-"mspace" calls */
> > -//static struct malloc_state _gm_;
> > -//#define gm (&_gm_)
> > -//#define is_global(M) ((M) == &_gm_)
> > -#define is_initialized(M) ((M)->top != 0)
> > -
> > -/* -------------------------- system alloc setup ------------------------- */
> > -
> > -/* Operations on mflags */
> > -
> > -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
> > -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
> > -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
> > -
> > -#define set_lock(M,L)\
> > - ((M)->mflags = (L)?\
> > - ((M)->mflags | USE_LOCK_BIT) :\
> > - ((M)->mflags & ~USE_LOCK_BIT))
> > -
> > -/* page-align a size */
> > -#define page_align(S)\
> > - (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
> > -
> > -/* granularity-align a size */
> > -#define granularity_align(S)\
> > - (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
> > -
> > -#define is_page_aligned(S)\
> > - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
> > -#define is_granularity_aligned(S)\
> > - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
> > -
> > -/* True if segment S holds address A */
> > -#define segment_holds(S, A)\
> > - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
> > -
> > -/* Return segment holding given address */
> > -static msegmentptr segment_holding(mstate m, char* addr) {
> > - msegmentptr sp = &m->seg;
> > - for (;;) {
> > - if (addr >= sp->base && addr < sp->base + sp->size)
> > - return sp;
> > - if ((sp = sp->next) == 0)
> > - return 0;
> > - }
> > -}
> > -
> > -/* Return true if segment contains a segment link */
> > -static int has_segment_link(mstate m, msegmentptr ss) {
> > - msegmentptr sp = &m->seg;
> > - for (;;) {
> > - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
> > - return 1;
> > - if ((sp = sp->next) == 0)
> > - return 0;
> > - }
> > -}
> > -
> > -
> > -
> > -/*
> > - TOP_FOOT_SIZE is padding at the end of a segment, including space
> > - that may be needed to place segment records and fenceposts when new
> > - noncontiguous segments are added.
> > -*/
> > -#define TOP_FOOT_SIZE\
> > - (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
> > -
> > -
> > -/* ------------------------------- Hooks -------------------------------- */
> > -
> > -/*
> > - PREACTION should be defined to return 0 on success, and nonzero on
> > - failure. If you are not using locking, you can redefine these to do
> > - anything you like.
> > -*/
> > -
> > -#if USE_LOCKS
> > -
> > -/* Ensure locks are initialized */
> > -#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
> > -
> > -#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
> > -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
> > -#else /* USE_LOCKS */
> > -
> > -#ifndef PREACTION
> > -#define PREACTION(M) (0)
> > -#endif /* PREACTION */
> > -
> > -#ifndef POSTACTION
> > -#define POSTACTION(M)
> > -#endif /* POSTACTION */
> > -
> > -#endif /* USE_LOCKS */
> > -
> > -/*
> > - CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
> > - USAGE_ERROR_ACTION is triggered on detected bad frees and
> > - reallocs. The argument p is an address that might have triggered the
> > - fault. It is ignored by the two predefined actions, but might be
> > - useful in custom actions that try to help diagnose errors.
> > -*/
> > -
> > -#if PROCEED_ON_ERROR
> > -
> > -/* A count of the number of corruption errors causing resets */
> > -int malloc_corruption_error_count;
> > -
> > -/* default corruption action */
> > -static void reset_on_error(mstate m);
> > -
> > -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
> > -#define USAGE_ERROR_ACTION(m, p)
> > -
> > -#else /* PROCEED_ON_ERROR */
> > -
> > -#ifndef CORRUPTION_ERROR_ACTION
> > -#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
> > -#endif /* CORRUPTION_ERROR_ACTION */
> > -
> > -#ifndef USAGE_ERROR_ACTION
> > -#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
> > -#endif /* USAGE_ERROR_ACTION */
> > -
> > -#endif /* PROCEED_ON_ERROR */
> > -
> > -/* -------------------------- Debugging setup ---------------------------- */
> > -
> > -#if ! DEBUG
> > -
> > -#define check_free_chunk(M,P)
> > -#define check_inuse_chunk(M,P)
> > -#define check_malloced_chunk(M,P,N)
> > -#define check_malloc_state(M)
> > -#define check_top_chunk(M,P)
> > -
> > -#else /* DEBUG */
> > -#define check_free_chunk(M,P) do_check_free_chunk(M,P)
> > -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
> > -#define check_top_chunk(M,P) do_check_top_chunk(M,P)
> > -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
> > -#define check_malloc_state(M) do_check_malloc_state(M)
> > -
> > -static void do_check_any_chunk(mstate m, mchunkptr p);
> > -static void do_check_top_chunk(mstate m, mchunkptr p);
> > -static void do_check_inuse_chunk(mstate m, mchunkptr p);
> > -static void do_check_free_chunk(mstate m, mchunkptr p);
> > -static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
> > -static void do_check_tree(mstate m, tchunkptr t);
> > -static void do_check_treebin(mstate m, bindex_t i);
> > -static void do_check_smallbin(mstate m, bindex_t i);
> > -static void do_check_malloc_state(mstate m);
> > -static int bin_find(mstate m, mchunkptr x);
> > -static size_t traverse_and_check(mstate m);
> > -#endif /* DEBUG */
> > -
> > -/* ---------------------------- Indexing Bins ---------------------------- */
> > -
> > -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
> > -#define small_index(s) ((s) >> SMALLBIN_SHIFT)
> > -#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
> > -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
> > -
> > -/* addressing by index. See above about smallbin repositioning */
> > -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
> > -#define treebin_at(M,i) (&((M)->treebins[i]))
> > -
> > -/* assign tree index for size S to variable I */
> > -#if defined(__GNUC__) && defined(i386)
> > -#define compute_tree_index(S, I)\
> > -{\
> > - size_t X = S >> TREEBIN_SHIFT;\
> > - if (X == 0)\
> > - I = 0;\
> > - else if (X > 0xFFFF)\
> > - I = NTREEBINS-1;\
> > - else {\
> > - unsigned int K;\
> > - __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
> > - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
> > - }\
> > -}
> > -#else /* GNUC */
> > -#define compute_tree_index(S, I)\
> > -{\
> > - size_t X = S >> TREEBIN_SHIFT;\
> > - if (X == 0)\
> > - I = 0;\
> > - else if (X > 0xFFFF)\
> > - I = NTREEBINS-1;\
> > - else {\
> > - unsigned int Y = (unsigned int)X;\
> > - unsigned int N = ((Y - 0x100) >> 16) & 8;\
> > - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
> > - N += K;\
> > - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
> > - K = 14 - N + ((Y <<= K) >> 15);\
> > - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
> > - }\
> > -}
> > -#endif /* GNUC */
> > -
> > -/* Bit representing maximum resolved size in a treebin at i */
> > -#define bit_for_tree_index(i) \
> > - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
> > -
> > -/* Shift placing maximum resolved bit in a treebin at i as sign bit */
> > -#define leftshift_for_tree_index(i) \
> > - ((i == NTREEBINS-1)? 0 : \
> > - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
> > -
> > -/* The size of the smallest chunk held in bin with index i */
> > -#define minsize_for_tree_index(i) \
> > - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
> > - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
> > -
> > -/* ------------------------ Operations on bin maps ----------------------- */
> > -
> > -/* bit corresponding to given index */
> > -#define idx2bit(i) ((binmap_t)(1) << (i))
> > -
> > -/* Mark/Clear bits with given index */
> > -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
> > -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
> > -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
> > -
> > -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
> > -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
> > -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
> > -
> > -/* index corresponding to given bit */
> > -
> > -#if defined(__GNUC__) && defined(i386)
> > -#define compute_bit2idx(X, I)\
> > -{\
> > - unsigned int J;\
> > - __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
> > - I = (bindex_t)J;\
> > -}
> > -
> > -#else /* GNUC */
> > -#if USE_BUILTIN_FFS
> > -#define compute_bit2idx(X, I) I = ffs(X)-1
> > -
> > -#else /* USE_BUILTIN_FFS */
> > -#define compute_bit2idx(X, I)\
> > -{\
> > - unsigned int Y = X - 1;\
> > - unsigned int K = Y >> (16-4) & 16;\
> > - unsigned int N = K; Y >>= K;\
> > - N += K = Y >> (8-3) & 8; Y >>= K;\
> > - N += K = Y >> (4-2) & 4; Y >>= K;\
> > - N += K = Y >> (2-1) & 2; Y >>= K;\
> > - N += K = Y >> (1-0) & 1; Y >>= K;\
> > - I = (bindex_t)(N + Y);\
> > -}
> > -#endif /* USE_BUILTIN_FFS */
> > -#endif /* GNUC */
> > -
> > -/* isolate the least set bit of a bitmap */
> > -#define least_bit(x) ((x) & -(x))
> > -
> > -/* mask with all bits to left of least bit of x on */
> > -#define left_bits(x) ((x<<1) | -(x<<1))
> > -
> > -/* mask with all bits to left of or equal to least bit of x on */
> > -#define same_or_left_bits(x) ((x) | -(x))
> > -
> > -
> > -/* ----------------------- Runtime Check Support ------------------------- */
> > -
> > -/*
> > - For security, the main invariant is that malloc/free/etc never
> > - writes to a static address other than malloc_state, unless static
> > - malloc_state itself has been corrupted, which cannot occur via
> > - malloc (because of these checks). In essence this means that we
> > - believe all pointers, sizes, maps etc held in malloc_state, but
> > - check all of those linked or offsetted from other embedded data
> > - structures. These checks are interspersed with main code in a way
> > - that tends to minimize their run-time cost.
> > -
> > - When FOOTERS is defined, in addition to range checking, we also
> > - verify footer fields of inuse chunks, which can be used to guarantee
> > - that the mstate controlling malloc/free is intact. This is a
> > - streamlined version of the approach described by William Robertson
> > - et al in "Run-time Detection of Heap-based Overflows" LISA'03
> > - http://www.usenix.org/events/lisa03/tech/robertson.html The footer
> > - of an inuse chunk holds the xor of its mstate and a random seed,
> > - that is checked upon calls to free() and realloc(). This is
> > - (probabilistically) unguessable from outside the program, but can be
> > - computed by any code successfully malloc'ing any chunk, so does not
> > - itself provide protection against code that has already broken
> > - security through some other means. Unlike Robertson et al, we
> > - always dynamically check addresses of all offset chunks (previous,
> > - next, etc). This turns out to be cheaper than relying on hashes.
> > -*/
> > -
> > -#if !INSECURE
> > -/* Check if address a is at least as high as any from MORECORE or MMAP */
> > -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
> > -/* Check if address of next chunk n is higher than base chunk p */
> > -#define ok_next(p, n) ((char*)(p) < (char*)(n))
> > -/* Check if p has its cinuse bit on */
> > -#define ok_cinuse(p) cinuse(p)
> > -/* Check if p has its pinuse bit on */
> > -#define ok_pinuse(p) pinuse(p)
> > -
> > -#else /* !INSECURE */
> > -#define ok_address(M, a) (1)
> > -#define ok_next(b, n) (1)
> > -#define ok_cinuse(p) (1)
> > -#define ok_pinuse(p) (1)
> > -#endif /* !INSECURE */
> > -
> > -#if (FOOTERS && !INSECURE)
> > -/* Check if (alleged) mstate m has expected magic field */
> > -#define ok_magic(M) ((M)->magic == mparams.magic)
> > -#else /* (FOOTERS && !INSECURE) */
> > -#define ok_magic(M) (1)
> > -#endif /* (FOOTERS && !INSECURE) */
> > -
> > -
> > -/* In gcc, use __builtin_expect to minimize impact of checks */
> > -#if !INSECURE
> > -#if defined(__GNUC__) && __GNUC__ >= 3
> > -#define RTCHECK(e) __builtin_expect(e, 1)
> > -#else /* GNUC */
> > -#define RTCHECK(e) (e)
> > -#endif /* GNUC */
> > -#else /* !INSECURE */
> > -#define RTCHECK(e) (1)
> > -#endif /* !INSECURE */
> > -
> > -/* macros to set up inuse chunks with or without footers */
> > -
> > -#if !FOOTERS
> > -
> > -#define mark_inuse_foot(M,p,s)
> > -
> > -/* Set cinuse bit and pinuse bit of next chunk */
> > -#define set_inuse(M,p,s)\
> > - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > -
> > -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
> > -#define set_inuse_and_pinuse(M,p,s)\
> > - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > -
> > -/* Set size, cinuse and pinuse bit of this chunk */
> > -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
> > -
> > -#else /* FOOTERS */
> > -
> > -/* Set foot of inuse chunk to be xor of mstate and seed */
> > -#define mark_inuse_foot(M,p,s)\
> > - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
> > -
> > -#define get_mstate_for(p)\
> > - ((mstate)(((mchunkptr)((char*)(p) +\
> > - (chunksize(p))))->prev_foot ^ mparams.magic))
> > -
> > -#define set_inuse(M,p,s)\
> > - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
> > - mark_inuse_foot(M,p,s))
> > -
> > -#define set_inuse_and_pinuse(M,p,s)\
> > - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
> > - mark_inuse_foot(M,p,s))
> > -
> > -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > - mark_inuse_foot(M, p, s))
> > -
> > -#endif /* !FOOTERS */
> > -
> > -/* ---------------------------- setting mparams -------------------------- */
> > -
> > -/* Initialize mparams */
> > -static int init_mparams(void) {
> > - if (mparams.page_size == 0) {
> > - size_t s;
> > -
> > - mparams.default_mflags = USE_LOCK_BIT;
> > -
> > -#if (FOOTERS && !INSECURE)
> > - {
> > -#if USE_DEV_RANDOM
> > - int fd;
> > - unsigned char buf[sizeof(size_t)];
> > - /* Try to use /dev/urandom, else fall back on using time */
> > - if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
> > - read(fd, buf, sizeof(buf)) == sizeof(buf)) {
> > - s = *((size_t *) buf);
> > - close(fd);
> > - }
> > - else
> > -#endif /* USE_DEV_RANDOM */
> > - s = (size_t)(time(0) ^ (size_t)0x55555555U);
> > -
> > - s |= (size_t)8U; /* ensure nonzero */
> > - s &= ~(size_t)7U; /* improve chances of fault for bad values */
> > -
> > - }
> > -#else /* (FOOTERS && !INSECURE) */
> > - s = (size_t)0x58585858U;
> > -#endif /* (FOOTERS && !INSECURE) */
> > - ACQUIRE_MAGIC_INIT_LOCK();
> > - if (mparams.magic == 0) {
> > - mparams.magic = s;
> > - /* Set up lock for main malloc area */
> > - //INITIAL_LOCK(&gm->mutex);
> > - //gm->mflags = mparams.default_mflags;
> > - }
> > - RELEASE_MAGIC_INIT_LOCK();
> > -
> > -
> > - mparams.page_size = malloc_getpagesize;
> > - mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
> > - DEFAULT_GRANULARITY : mparams.page_size);
> > -
> > - /* Sanity-check configuration:
> > - size_t must be unsigned and as wide as pointer type.
> > - ints must be at least 4 bytes.
> > - alignment must be at least 8.
> > - Alignment, min chunk size, and page size must all be powers of 2.
> > - */
> > - if ((sizeof(size_t) != sizeof(char*)) ||
> > - (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
> > - (sizeof(int) < 4) ||
> > - (MALLOC_ALIGNMENT < (size_t)8U) ||
> > - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
> > - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
> > - ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
> > - ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
> > - ABORT(NULL);
> > - }
> > - return 0;
> > -}
> > -
> > -/* support for mallopt */
> > -static int change_mparam(int param_number, int value) {
> > - size_t val = (size_t)value;
> > - init_mparams();
> > - switch(param_number) {
> > - case M_GRANULARITY:
> > - if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
> > - mparams.granularity = val;
> > - return 1;
> > - }
> > - else
> > - return 0;
> > - default:
> > - return 0;
> > - }
> > -}
> > -
> > -#if DEBUG
> > -/* ------------------------- Debugging Support --------------------------- */
> > -
> > -/* Check properties of any chunk, whether free, inuse, mmapped etc */
> > -static void do_check_any_chunk(mstate m, mchunkptr p) {
> > - assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
> > - assert(m->user_data, ok_address(m, p));
> > -}
> > -
> > -/* Check properties of top chunk */
> > -static void do_check_top_chunk(mstate m, mchunkptr p) {
> > - msegmentptr sp = segment_holding(m, (char*)p);
> > - size_t sz = chunksize(p);
> > - assert(m->user_data, sp != 0);
> > - assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
> > - assert(m->user_data, ok_address(m, p));
> > - assert(m->user_data, sz == m->topsize);
> > - assert(m->user_data, sz > 0);
> > - assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
> > - assert(m->user_data, pinuse(p));
> > - assert(m->user_data, !next_pinuse(p));
> > -}
> > -
> > -/* Check properties of inuse chunks */
> > -static void do_check_inuse_chunk(mstate m, mchunkptr p) {
> > - do_check_any_chunk(m, p);
> > - assert(m->user_data, cinuse(p));
> > - assert(m->user_data, next_pinuse(p));
> > - /* If not pinuse, previous chunk has OK offset */
> > - assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
> > -}
> > -
> > -/* Check properties of free chunks */
> > -static void do_check_free_chunk(mstate m, mchunkptr p) {
> > - size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > - mchunkptr next = chunk_plus_offset(p, sz);
> > - do_check_any_chunk(m, p);
> > - assert(m->user_data, !cinuse(p));
> > - assert(m->user_data, !next_pinuse(p));
> > - if (p != m->dv && p != m->top) {
> > - if (sz >= MIN_CHUNK_SIZE) {
> > - assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > - assert(m->user_data, is_aligned(chunk2mem(p)));
> > - assert(m->user_data, next->prev_foot == sz);
> > - assert(m->user_data, pinuse(p));
> > - assert(m->user_data, next == m->top || cinuse(next));
> > - assert(m->user_data, p->fd->bk == p);
> > - assert(m->user_data, p->bk->fd == p);
> > - }
> > - else /* markers are always of size SIZE_T_SIZE */
> > - assert(m->user_data, sz == SIZE_T_SIZE);
> > - }
> > -}
> > -
> > -/* Check properties of malloced chunks at the point they are malloced */
> > -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
> > - if (mem != 0) {
> > - mchunkptr p = mem2chunk(mem);
> > - size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > - do_check_inuse_chunk(m, p);
> > - assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > - assert(m->user_data, sz >= MIN_CHUNK_SIZE);
> > - assert(m->user_data, sz >= s);
> > - /* size is less than MIN_CHUNK_SIZE more than request */
> > - assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
> > - }
> > -}
> > -
> > -/* Check a tree and its subtrees. */
> > -static void do_check_tree(mstate m, tchunkptr t) {
> > - tchunkptr head = 0;
> > - tchunkptr u = t;
> > - bindex_t tindex = t->index;
> > - size_t tsize = chunksize(t);
> > - bindex_t idx;
> > - compute_tree_index(tsize, idx);
> > - assert(m->user_data, tindex == idx);
> > - assert(m->user_data, tsize >= MIN_LARGE_SIZE);
> > - assert(m->user_data, tsize >= minsize_for_tree_index(idx));
> > - assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
> > -
> > - do { /* traverse through chain of same-sized nodes */
> > - do_check_any_chunk(m, ((mchunkptr)u));
> > - assert(m->user_data, u->index == tindex);
> > - assert(m->user_data, chunksize(u) == tsize);
> > - assert(m->user_data, !cinuse(u));
> > - assert(m->user_data, !next_pinuse(u));
> > - assert(m->user_data, u->fd->bk == u);
> > - assert(m->user_data, u->bk->fd == u);
> > - if (u->parent == 0) {
> > - assert(m->user_data, u->child[0] == 0);
> > - assert(m->user_data, u->child[1] == 0);
> > - }
> > - else {
> > - assert(m->user_data, head == 0); /* only one node on chain has parent */
> > - head = u;
> > - assert(m->user_data, u->parent != u);
> > - assert(m->user_data, u->parent->child[0] == u ||
> > - u->parent->child[1] == u ||
> > - *((tbinptr*)(u->parent)) == u);
> > - if (u->child[0] != 0) {
> > - assert(m->user_data, u->child[0]->parent == u);
> > - assert(m->user_data, u->child[0] != u);
> > - do_check_tree(m, u->child[0]);
> > - }
> > - if (u->child[1] != 0) {
> > - assert(m->user_data, u->child[1]->parent == u);
> > - assert(m->user_data, u->child[1] != u);
> > - do_check_tree(m, u->child[1]);
> > - }
> > - if (u->child[0] != 0 && u->child[1] != 0) {
> > - assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1]));
> > - }
> > - }
> > - u = u->fd;
> > - } while (u != t);
> > - assert(m->user_data, head != 0);
> > -}
> > -
> > -/* Check all the chunks in a treebin. */
> > -static void do_check_treebin(mstate m, bindex_t i) {
> > - tbinptr* tb = treebin_at(m, i);
> > - tchunkptr t = *tb;
> > - int empty = (m->treemap & (1U << i)) == 0;
> > - if (t == 0)
> > - assert(m->user_data, empty);
> > - if (!empty)
> > - do_check_tree(m, t);
> > -}
> > -
> > -/* Check all the chunks in a smallbin. */
> > -static void do_check_smallbin(mstate m, bindex_t i) {
> > - sbinptr b = smallbin_at(m, i);
> > - mchunkptr p = b->bk;
> > - unsigned int empty = (m->smallmap & (1U << i)) == 0;
> > - if (p == b)
> > - assert(m->user_data, empty);
> > - if (!empty) {
> > - for (; p != b; p = p->bk) {
> > - size_t size = chunksize(p);
> > - mchunkptr q;
> > - /* each chunk claims to be free */
> > - do_check_free_chunk(m, p);
> > - /* chunk belongs in bin */
> > - assert(m->user_data, small_index(size) == i);
> > - assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));
> > - /* chunk is followed by an inuse chunk */
> > - q = next_chunk(p);
> > - if (q->head != FENCEPOST_HEAD)
> > - do_check_inuse_chunk(m, q);
> > - }
> > - }
> > -}
> > -
> > -/* Find x in a bin. Used in other check functions. */
> > -static int bin_find(mstate m, mchunkptr x) {
> > - size_t size = chunksize(x);
> > - if (is_small(size)) {
> > - bindex_t sidx = small_index(size);
> > - sbinptr b = smallbin_at(m, sidx);
> > - if (smallmap_is_marked(m, sidx)) {
> > - mchunkptr p = b;
> > - do {
> > - if (p == x)
> > - return 1;
> > - } while ((p = p->fd) != b);
> > - }
> > - }
> > - else {
> > - bindex_t tidx;
> > - compute_tree_index(size, tidx);
> > - if (treemap_is_marked(m, tidx)) {
> > - tchunkptr t = *treebin_at(m, tidx);
> > - size_t sizebits = size << leftshift_for_tree_index(tidx);
> > - while (t != 0 && chunksize(t) != size) {
> > - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > - sizebits <<= 1;
> > - }
> > - if (t != 0) {
> > - tchunkptr u = t;
> > - do {
> > - if (u == (tchunkptr)x)
> > - return 1;
> > - } while ((u = u->fd) != t);
> > - }
> > - }
> > - }
> > - return 0;
> > -}
> > -
> > -/* Traverse each chunk and check it; return total */
> > -static size_t traverse_and_check(mstate m) {
> > - size_t sum = 0;
> > - if (is_initialized(m)) {
> > - msegmentptr s = &m->seg;
> > - sum += m->topsize + TOP_FOOT_SIZE;
> > - while (s != 0) {
> > - mchunkptr q = align_as_chunk(s->base);
> > - mchunkptr lastq = 0;
> > - assert(m->user_data, pinuse(q));
> > - while (segment_holds(s, q) &&
> > - q != m->top && q->head != FENCEPOST_HEAD) {
> > - sum += chunksize(q);
> > - if (cinuse(q)) {
> > - assert(m->user_data, !bin_find(m, q));
> > - do_check_inuse_chunk(m, q);
> > - }
> > - else {
> > - assert(m->user_data, q == m->dv || bin_find(m, q));
> > - assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
> > - do_check_free_chunk(m, q);
> > - }
> > - lastq = q;
> > - q = next_chunk(q);
> > - }
> > - s = s->next;
> > - }
> > - }
> > - return sum;
> > -}
> > -
> > -/* Check all properties of malloc_state. */
> > -static void do_check_malloc_state(mstate m) {
> > - bindex_t i;
> > - size_t total;
> > - /* check bins */
> > - for (i = 0; i < NSMALLBINS; ++i)
> > - do_check_smallbin(m, i);
> > - for (i = 0; i < NTREEBINS; ++i)
> > - do_check_treebin(m, i);
> > -
> > - if (m->dvsize != 0) { /* check dv chunk */
> > - do_check_any_chunk(m, m->dv);
> > - assert(m->user_data, m->dvsize == chunksize(m->dv));
> > - assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
> > - assert(m->user_data, bin_find(m, m->dv) == 0);
> > - }
> > -
> > - if (m->top != 0) { /* check top chunk */
> > - do_check_top_chunk(m, m->top);
> > - assert(m->user_data, m->topsize == chunksize(m->top));
> > - assert(m->user_data, m->topsize > 0);
> > - assert(m->user_data, bin_find(m, m->top) == 0);
> > - }
> > -
> > - total = traverse_and_check(m);
> > - assert(m->user_data, total <= m->footprint);
> > - assert(m->user_data, m->footprint <= m->max_footprint);
> > -}
> > -#endif /* DEBUG */
> > -
> > -/* ----------------------------- statistics ------------------------------ */
> > -
> > -#if !NO_MALLINFO
> > -static struct mallinfo internal_mallinfo(mstate m) {
> > - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
> > - if (!PREACTION(m)) {
> > - check_malloc_state(m);
> > - if (is_initialized(m)) {
> > - size_t nfree = SIZE_T_ONE; /* top always free */
> > - size_t mfree = m->topsize + TOP_FOOT_SIZE;
> > - size_t sum = mfree;
> > - msegmentptr s = &m->seg;
> > - while (s != 0) {
> > - mchunkptr q = align_as_chunk(s->base);
> > - while (segment_holds(s, q) &&
> > - q != m->top && q->head != FENCEPOST_HEAD) {
> > - size_t sz = chunksize(q);
> > - sum += sz;
> > - if (!cinuse(q)) {
> > - mfree += sz;
> > - ++nfree;
> > - }
> > - q = next_chunk(q);
> > - }
> > - s = s->next;
> > - }
> > -
> > - nm.arena = sum;
> > - nm.ordblks = nfree;
> > - nm.hblkhd = m->footprint - sum;
> > - nm.usmblks = m->max_footprint;
> > - nm.uordblks = m->footprint - mfree;
> > - nm.fordblks = mfree;
> > - nm.keepcost = m->topsize;
> > - }
> > -
> > - POSTACTION(m);
> > - }
> > - return nm;
> > -}
> > -#endif /* !NO_MALLINFO */
> > -
> > -static void internal_malloc_stats(mstate m) {
> > - if (!PREACTION(m)) {
> > - size_t maxfp = 0;
> > - size_t fp = 0;
> > - size_t used = 0;
> > - check_malloc_state(m);
> > - if (is_initialized(m)) {
> > - msegmentptr s = &m->seg;
> > - maxfp = m->max_footprint;
> > - fp = m->footprint;
> > - used = fp - (m->topsize + TOP_FOOT_SIZE);
> > -
> > - while (s != 0) {
> > - mchunkptr q = align_as_chunk(s->base);
> > - while (segment_holds(s, q) &&
> > - q != m->top && q->head != FENCEPOST_HEAD) {
> > - if (!cinuse(q))
> > - used -= chunksize(q);
> > - q = next_chunk(q);
> > - }
> > - s = s->next;
> > - }
> > - }
> > -
> > - PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned long)(maxfp)));
> > - PRINT((m->user_data, "system bytes = %10lu\n", (unsigned long)(fp)));
> > - PRINT((m->user_data, "in use bytes = %10lu\n", (unsigned long)(used)));
> > -
> > - POSTACTION(m);
> > - }
> > -}
> > -
> > -/* ----------------------- Operations on smallbins ----------------------- */
> > -
> > -/*
> > - Various forms of linking and unlinking are defined as macros. Even
> > - the ones for trees, which are very long but have very short typical
> > - paths. This is ugly but reduces reliance on inlining support of
> > - compilers.
> > -*/
> > -
> > -/* Link a free chunk into a smallbin */
> > -#define insert_small_chunk(M, P, S) {\
> > - bindex_t I = small_index(S);\
> > - mchunkptr B = smallbin_at(M, I);\
> > - mchunkptr F = B;\
> > - assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
> > - if (!smallmap_is_marked(M, I))\
> > - mark_smallmap(M, I);\
> > - else if (RTCHECK(ok_address(M, B->fd)))\
> > - F = B->fd;\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - B->fd = P;\
> > - F->bk = P;\
> > - P->fd = F;\
> > - P->bk = B;\
> > -}
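
(Reference note, not part of the patch: insert_small_chunk above is the usual
front-of-list insertion on a circular doubly linked bin. A standalone C sketch
with a made-up `node' type, runnable on its own:)

    #include <stdio.h>

    struct node { struct node *fd, *bk; };

    /* insert p right after the bin header b, i.e. at the front of the bin */
    static void insert_after_head(struct node *b, struct node *p)
    {
        struct node *f = b->fd;    /* old first element (b itself if empty) */
        b->fd = p;
        f->bk = p;
        p->fd = f;
        p->bk = b;
    }

    int main(void)
    {
        struct node bin, a;
        bin.fd = bin.bk = &bin;    /* empty circular bin, as init_bins sets up */
        insert_after_head(&bin, &a);
        printf("%d\n", bin.fd == &a && a.fd == &bin && a.bk == &bin);  /* prints 1 */
        return 0;
    }
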
> > -
> > -/* Unlink a chunk from a smallbin */
> > -#define unlink_small_chunk(M, P, S) {\
> > - mchunkptr F = P->fd;\
> > - mchunkptr B = P->bk;\
> > - bindex_t I = small_index(S);\
> > - assert((M)->user_data, P != B);\
> > - assert((M)->user_data, P != F);\
> > - assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > - if (F == B)\
> > - clear_smallmap(M, I);\
> > - else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
> > - (B == smallbin_at(M,I) || ok_address(M, B)))) {\
> > - F->bk = B;\
> > - B->fd = F;\
> > - }\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > -}
> > -
> > -/* Unlink the first chunk from a smallbin */
> > -#define unlink_first_small_chunk(M, B, P, I) {\
> > - mchunkptr F = P->fd;\
> > - assert((M)->user_data, P != B);\
> > - assert((M)->user_data, P != F);\
> > - assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > - if (B == F)\
> > - clear_smallmap(M, I);\
> > - else if (RTCHECK(ok_address(M, F))) {\
> > - B->fd = F;\
> > - F->bk = B;\
> > - }\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > -}
> > -
> > -/* Replace dv node, binning the old one */
> > -/* Used only when dvsize known to be small */
> > -#define replace_dv(M, P, S) {\
> > - size_t DVS = M->dvsize;\
> > - if (DVS != 0) {\
> > - mchunkptr DV = M->dv;\
> > - assert((M)->user_data, is_small(DVS));\
> > - insert_small_chunk(M, DV, DVS);\
> > - }\
> > - M->dvsize = S;\
> > - M->dv = P;\
> > -}
> > -
> > -
> > -/* ------------------------- Operations on trees ------------------------- */
> > -
> > -/* Insert chunk into tree */
> > -#define insert_large_chunk(M, X, S) {\
> > - tbinptr* H;\
> > - bindex_t I;\
> > - compute_tree_index(S, I);\
> > - H = treebin_at(M, I);\
> > - X->index = I;\
> > - X->child[0] = X->child[1] = 0;\
> > - if (!treemap_is_marked(M, I)) {\
> > - mark_treemap(M, I);\
> > - *H = X;\
> > - X->parent = (tchunkptr)H;\
> > - X->fd = X->bk = X;\
> > - }\
> > - else {\
> > - tchunkptr T = *H;\
> > - size_t K = S << leftshift_for_tree_index(I);\
> > - for (;;) {\
> > - if (chunksize(T) != S) {\
> > - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
> > - K <<= 1;\
> > - if (*C != 0)\
> > - T = *C;\
> > - else if (RTCHECK(ok_address(M, C))) {\
> > - *C = X;\
> > - X->parent = T;\
> > - X->fd = X->bk = X;\
> > - break;\
> > - }\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - break;\
> > - }\
> > - }\
> > - else {\
> > - tchunkptr F = T->fd;\
> > - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
> > - T->fd = F->bk = X;\
> > - X->fd = F;\
> > - X->bk = T;\
> > - X->parent = 0;\
> > - break;\
> > - }\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - break;\
> > - }\
> > - }\
> > - }\
> > - }\
> > -}
> > -
> > -/*
> > - Unlink steps:
> > -
> > - 1. If x is a chained node, unlink it from its same-sized fd/bk links
> > - and choose its bk node as its replacement.
> > - 2. If x was the last node of its size, but not a leaf node, it must
> > - be replaced with a leaf node (not merely one with an open left or
> > - right), to make sure that lefts and rights of descendents
> > - correspond properly to bit masks. We use the rightmost descendent
> > - of x. We could use any other leaf, but this is easy to locate and
> > - tends to counteract removal of leftmosts elsewhere, and so keeps
> > - paths shorter than minimally guaranteed. This doesn't loop much
> > - because on average a node in a tree is near the bottom.
> > - 3. If x is the base of a chain (i.e., has parent links) relink
> > - x's parent and children to x's replacement (or null if none).
> > -*/
> > -
> > -#define unlink_large_chunk(M, X) {\
> > - tchunkptr XP = X->parent;\
> > - tchunkptr R;\
> > - if (X->bk != X) {\
> > - tchunkptr F = X->fd;\
> > - R = X->bk;\
> > - if (RTCHECK(ok_address(M, F))) {\
> > - F->bk = R;\
> > - R->fd = F;\
> > - }\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - }\
> > - else {\
> > - tchunkptr* RP;\
> > - if (((R = *(RP = &(X->child[1]))) != 0) ||\
> > - ((R = *(RP = &(X->child[0]))) != 0)) {\
> > - tchunkptr* CP;\
> > - while ((*(CP = &(R->child[1])) != 0) ||\
> > - (*(CP = &(R->child[0])) != 0)) {\
> > - R = *(RP = CP);\
> > - }\
> > - if (RTCHECK(ok_address(M, RP)))\
> > - *RP = 0;\
> > - else {\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - }\
> > - }\
> > - if (XP != 0) {\
> > - tbinptr* H = treebin_at(M, X->index);\
> > - if (X == *H) {\
> > - if ((*H = R) == 0) \
> > - clear_treemap(M, X->index);\
> > - }\
> > - else if (RTCHECK(ok_address(M, XP))) {\
> > - if (XP->child[0] == X) \
> > - XP->child[0] = R;\
> > - else \
> > - XP->child[1] = R;\
> > - }\
> > - else\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - if (R != 0) {\
> > - if (RTCHECK(ok_address(M, R))) {\
> > - tchunkptr C0, C1;\
> > - R->parent = XP;\
> > - if ((C0 = X->child[0]) != 0) {\
> > - if (RTCHECK(ok_address(M, C0))) {\
> > - R->child[0] = C0;\
> > - C0->parent = R;\
> > - }\
> > - else\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - if ((C1 = X->child[1]) != 0) {\
> > - if (RTCHECK(ok_address(M, C1))) {\
> > - R->child[1] = C1;\
> > - C1->parent = R;\
> > - }\
> > - else\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - }\
> > - else\
> > - CORRUPTION_ERROR_ACTION(M);\
> > - }\
> > - }\
> > -}
> > -
> > -/* Relays to large vs small bin operations */
> > -
> > -#define insert_chunk(M, P, S)\
> > - if (is_small(S)) insert_small_chunk(M, P, S)\
> > - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
> > -
> > -#define unlink_chunk(M, P, S)\
> > - if (is_small(S)) unlink_small_chunk(M, P, S)\
> > - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
> > -
> > -
> > -/* Relays to internal calls to malloc/free from realloc, memalign etc */
> > -
> > -#define internal_malloc(m, b) mspace_malloc(m, b)
> > -#define internal_free(m, mem) mspace_free(m,mem);
> > -
> > -
> > -/* -------------------------- mspace management -------------------------- */
> > -
> > -/* Initialize top chunk and its size */
> > -static void init_top(mstate m, mchunkptr p, size_t psize) {
> > - /* Ensure alignment */
> > - size_t offset = align_offset(chunk2mem(p));
> > - p = (mchunkptr)((char*)p + offset);
> > - psize -= offset;
> > -
> > - m->top = p;
> > - m->topsize = psize;
> > - p->head = psize | PINUSE_BIT;
> > - /* set size of fake trailing chunk holding overhead space only once */
> > - chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
> > -}
> > -
> > -/* Initialize bins for a new mstate that is otherwise zeroed out */
> > -static void init_bins(mstate m) {
> > - /* Establish circular links for smallbins */
> > - bindex_t i;
> > - for (i = 0; i < NSMALLBINS; ++i) {
> > - sbinptr bin = smallbin_at(m,i);
> > - bin->fd = bin->bk = bin;
> > - }
> > -}
> > -
> > -#if PROCEED_ON_ERROR
> > -
> > -/* default corruption action */
> > -static void reset_on_error(mstate m) {
> > - int i;
> > - ++malloc_corruption_error_count;
> > - /* Reinitialize fields to forget about all memory */
> > - m->smallbins = m->treebins = 0;
> > - m->dvsize = m->topsize = 0;
> > - m->seg.base = 0;
> > - m->seg.size = 0;
> > - m->seg.next = 0;
> > - m->top = m->dv = 0;
> > - for (i = 0; i < NTREEBINS; ++i)
> > - *treebin_at(m, i) = 0;
> > - init_bins(m);
> > -}
> > -#endif /* PROCEED_ON_ERROR */
> > -
> > -/* Allocate chunk and prepend remainder with chunk in successor base. */
> > -static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
> > - size_t nb) {
> > - mchunkptr p = align_as_chunk(newbase);
> > - mchunkptr oldfirst = align_as_chunk(oldbase);
> > - size_t psize = (char*)oldfirst - (char*)p;
> > - mchunkptr q = chunk_plus_offset(p, nb);
> > - size_t qsize = psize - nb;
> > - set_size_and_pinuse_of_inuse_chunk(m, p, nb);
> > -
> > - assert(m->user_data, (char*)oldfirst > (char*)q);
> > - assert(m->user_data, pinuse(oldfirst));
> > - assert(m->user_data, qsize >= MIN_CHUNK_SIZE);
> > -
> > - /* consolidate remainder with first chunk of old base */
> > - if (oldfirst == m->top) {
> > - size_t tsize = m->topsize += qsize;
> > - m->top = q;
> > - q->head = tsize | PINUSE_BIT;
> > - check_top_chunk(m, q);
> > - }
> > - else if (oldfirst == m->dv) {
> > - size_t dsize = m->dvsize += qsize;
> > - m->dv = q;
> > - set_size_and_pinuse_of_free_chunk(q, dsize);
> > - }
> > - else {
> > - if (!cinuse(oldfirst)) {
> > - size_t nsize = chunksize(oldfirst);
> > - unlink_chunk(m, oldfirst, nsize);
> > - oldfirst = chunk_plus_offset(oldfirst, nsize);
> > - qsize += nsize;
> > - }
> > - set_free_with_pinuse(q, qsize, oldfirst);
> > - insert_chunk(m, q, qsize);
> > - check_free_chunk(m, q);
> > - }
> > -
> > - check_malloced_chunk(m, chunk2mem(p), nb);
> > - return chunk2mem(p);
> > -}
> > -
> > -/* -------------------------- System allocation -------------------------- */
> > -
> > -/* Get memory from system using MORECORE or MMAP */
> > -static void* sys_alloc(mstate m, size_t nb) {
> > - MALLOC_FAILURE_ACTION;
> > - return 0;
> > -}
> > -
> > -/* ---------------------------- malloc support --------------------------- */
> > -
> > -/* allocate a large request from the best fitting chunk in a treebin */
> > -static void* tmalloc_large(mstate m, size_t nb) {
> > - tchunkptr v = 0;
> > - size_t rsize = -nb; /* Unsigned negation */
> > - tchunkptr t;
> > - bindex_t idx;
> > - compute_tree_index(nb, idx);
> > -
> > - if ((t = *treebin_at(m, idx)) != 0) {
> > - /* Traverse tree for this bin looking for node with size == nb */
> > - size_t sizebits = nb << leftshift_for_tree_index(idx);
> > - tchunkptr rst = 0; /* The deepest untaken right subtree */
> > - for (;;) {
> > - tchunkptr rt;
> > - size_t trem = chunksize(t) - nb;
> > - if (trem < rsize) {
> > - v = t;
> > - if ((rsize = trem) == 0)
> > - break;
> > - }
> > - rt = t->child[1];
> > - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > - if (rt != 0 && rt != t)
> > - rst = rt;
> > - if (t == 0) {
> > - t = rst; /* set t to least subtree holding sizes > nb */
> > - break;
> > - }
> > - sizebits <<= 1;
> > - }
> > - }
> > -
> > - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
> > - binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
> > - if (leftbits != 0) {
> > - bindex_t i;
> > - binmap_t leastbit = least_bit(leftbits);
> > - compute_bit2idx(leastbit, i);
> > - t = *treebin_at(m, i);
> > - }
> > - }
> > -
> > - while (t != 0) { /* find smallest of tree or subtree */
> > - size_t trem = chunksize(t) - nb;
> > - if (trem < rsize) {
> > - rsize = trem;
> > - v = t;
> > - }
> > - t = leftmost_child(t);
> > - }
> > -
> > - /* If dv is a better fit, return 0 so malloc will use it */
> > - if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
> > - if (RTCHECK(ok_address(m, v))) { /* split */
> > - mchunkptr r = chunk_plus_offset(v, nb);
> > - assert(m->user_data, chunksize(v) == rsize + nb);
> > - if (RTCHECK(ok_next(v, r))) {
> > - unlink_large_chunk(m, v);
> > - if (rsize < MIN_CHUNK_SIZE)
> > - set_inuse_and_pinuse(m, v, (rsize + nb));
> > - else {
> > - set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > - set_size_and_pinuse_of_free_chunk(r, rsize);
> > - insert_chunk(m, r, rsize);
> > - }
> > - return chunk2mem(v);
> > - }
> > - }
> > - CORRUPTION_ERROR_ACTION(m);
> > - }
> > - return 0;
> > -}
> > -
> > -/* allocate a small request from the best fitting chunk in a treebin */
> > -static void* tmalloc_small(mstate m, size_t nb) {
> > - tchunkptr t, v;
> > - size_t rsize;
> > - bindex_t i;
> > - binmap_t leastbit = least_bit(m->treemap);
> > - compute_bit2idx(leastbit, i);
> > -
> > - v = t = *treebin_at(m, i);
> > - rsize = chunksize(t) - nb;
> > -
> > - while ((t = leftmost_child(t)) != 0) {
> > - size_t trem = chunksize(t) - nb;
> > - if (trem < rsize) {
> > - rsize = trem;
> > - v = t;
> > - }
> > - }
> > -
> > - if (RTCHECK(ok_address(m, v))) {
> > - mchunkptr r = chunk_plus_offset(v, nb);
> > - assert(m->user_data, chunksize(v) == rsize + nb);
> > - if (RTCHECK(ok_next(v, r))) {
> > - unlink_large_chunk(m, v);
> > - if (rsize < MIN_CHUNK_SIZE)
> > - set_inuse_and_pinuse(m, v, (rsize + nb));
> > - else {
> > - set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > - set_size_and_pinuse_of_free_chunk(r, rsize);
> > - replace_dv(m, r, rsize);
> > - }
> > - return chunk2mem(v);
> > - }
> > - }
> > -
> > - CORRUPTION_ERROR_ACTION(m);
> > - return 0;
> > -}
> > -
> > -/* --------------------------- realloc support --------------------------- */
> > -
> > -static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
> > - if (bytes >= MAX_REQUEST) {
> > - MALLOC_FAILURE_ACTION;
> > - return 0;
> > - }
> > - if (!PREACTION(m)) {
> > - mchunkptr oldp = mem2chunk(oldmem);
> > - size_t oldsize = chunksize(oldp);
> > - mchunkptr next = chunk_plus_offset(oldp, oldsize);
> > - mchunkptr newp = 0;
> > - void* extra = 0;
> > -
> > - /* Try to either shrink or extend into top. Else malloc-copy-free */
> > -
> > - if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
> > - ok_next(oldp, next) && ok_pinuse(next))) {
> > - size_t nb = request2size(bytes);
> > - if (oldsize >= nb) { /* already big enough */
> > - size_t rsize = oldsize - nb;
> > - newp = oldp;
> > - if (rsize >= MIN_CHUNK_SIZE) {
> > - mchunkptr remainder = chunk_plus_offset(newp, nb);
> > - set_inuse(m, newp, nb);
> > - set_inuse(m, remainder, rsize);
> > - extra = chunk2mem(remainder);
> > - }
> > - }
> > - else if (next == m->top && oldsize + m->topsize > nb) {
> > - /* Expand into top */
> > - size_t newsize = oldsize + m->topsize;
> > - size_t newtopsize = newsize - nb;
> > - mchunkptr newtop = chunk_plus_offset(oldp, nb);
> > - set_inuse(m, oldp, nb);
> > - newtop->head = newtopsize |PINUSE_BIT;
> > - m->top = newtop;
> > - m->topsize = newtopsize;
> > - newp = oldp;
> > - }
> > - }
> > - else {
> > - USAGE_ERROR_ACTION(m, oldmem);
> > - POSTACTION(m);
> > - return 0;
> > - }
> > -
> > - POSTACTION(m);
> > -
> > - if (newp != 0) {
> > - if (extra != 0) {
> > - internal_free(m, extra);
> > - }
> > - check_inuse_chunk(m, newp);
> > - return chunk2mem(newp);
> > - }
> > - else {
> > - void* newmem = internal_malloc(m, bytes);
> > - if (newmem != 0) {
> > - size_t oc = oldsize - overhead_for(oldp);
> > - MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
> > - internal_free(m, oldmem);
> > - }
> > - return newmem;
> > - }
> > - }
> > - return 0;
> > -}
> > -
> > -/* --------------------------- memalign support -------------------------- */
> > -
> > -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
> > - if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
> > - return internal_malloc(m, bytes);
> > - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
> > - alignment = MIN_CHUNK_SIZE;
> > - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
> > - size_t a = MALLOC_ALIGNMENT << 1;
> > - while (a < alignment) a <<= 1;
> > - alignment = a;
> > - }
> > -
> > - if (bytes >= MAX_REQUEST - alignment) {
> > - if (m != 0) { /* Test isn't needed but avoids compiler warning */
> > - MALLOC_FAILURE_ACTION;
> > - }
> > - }
> > - else {
> > - size_t nb = request2size(bytes);
> > - size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
> > - char* mem = (char*)internal_malloc(m, req);
> > - if (mem != 0) {
> > - void* leader = 0;
> > - void* trailer = 0;
> > - mchunkptr p = mem2chunk(mem);
> > -
> > - if (PREACTION(m)) return 0;
> > - if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
> > - /*
> > - Find an aligned spot inside chunk. Since we need to give
> > - back leading space in a chunk of at least MIN_CHUNK_SIZE, if
> > - the first calculation places us at a spot with less than
> > - MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
> > - We've allocated enough total room so that this is always
> > - possible.
> > - */
> > - char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
> > - alignment -
> > - SIZE_T_ONE)) &
> > - -alignment));
> > - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
> > - br : br+alignment;
> > - mchunkptr newp = (mchunkptr)pos;
> > - size_t leadsize = pos - (char*)(p);
> > - size_t newsize = chunksize(p) - leadsize;
> > -
> > - /* Otherwise, give back leader, use the rest */
> > - set_inuse(m, newp, newsize);
> > - set_inuse(m, p, leadsize);
> > - leader = chunk2mem(p);
> > -
> > - p = newp;
> > - }
> > -
> > - assert(m->user_data, chunksize(p) >= nb);
> > - assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
> > - check_inuse_chunk(m, p);
> > - POSTACTION(m);
> > - if (leader != 0) {
> > - internal_free(m, leader);
> > - }
> > - if (trailer != 0) {
> > - internal_free(m, trailer);
> > - }
> > - return chunk2mem(p);
> > - }
> > - }
> > - return 0;
> > -}
> > -
> > -/* ----------------------------- user mspaces ---------------------------- */
> > -
> > -static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {
> > - size_t msize = pad_request(sizeof(struct malloc_state));
> > - mchunkptr mn;
> > - mchunkptr msp = align_as_chunk(tbase);
> > - mstate m = (mstate)(chunk2mem(msp));
> > - MEMCLEAR(m, msize);
> > - INITIAL_LOCK(&m->mutex);
> > - msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
> > - m->seg.base = m->least_addr = tbase;
> > - m->seg.size = m->footprint = m->max_footprint = tsize;
> > - m->magic = mparams.magic;
> > - m->mflags = mparams.default_mflags;
> > - m->user_data = user_data;
> > - init_bins(m);
> > - mn = next_chunk(mem2chunk(m));
> > - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
> > - check_top_chunk(m, m->top);
> > - return m;
> > -}
> > -
> > -mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) {
> > - mstate m = 0;
> > - size_t msize = pad_request(sizeof(struct malloc_state));
> > - init_mparams(); /* Ensure pagesize etc initialized */
> > -
> > - if (capacity > msize + TOP_FOOT_SIZE &&
> > - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
> > - m = init_user_mstate((char*)base, capacity, user_data);
> > - set_lock(m, locked);
> > - }
> > - return (mspace)m;
> > -}
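
(Reference note, not part of the patch: the expected call sequence for this
API, assuming mspace.h declares these entry points; the buffer name and size
below are made up.)

    #include "mspace.h"

    static char heap_buf[64 * 1024];

    static void example(void)
    {
        mspace ms = create_mspace_with_base(heap_buf, sizeof(heap_buf),
                                            0 /* not locked */, NULL /* user_data */);
        void *p = mspace_malloc(ms, 128);
        /* ... use p ... */
        mspace_free(ms, p);
    }
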
> > -
> > -/*
> > - mspace versions of routines are near-clones of the global
> > - versions. This is not so nice but better than the alternatives.
> > -*/
> > -
> > -
> > -void* mspace_malloc(mspace msp, size_t bytes) {
> > - mstate ms = (mstate)msp;
> > - if (!ok_magic(ms)) {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - return 0;
> > - }
> > - if (!PREACTION(ms)) {
> > - void* mem;
> > - size_t nb;
> > - if (bytes <= MAX_SMALL_REQUEST) {
> > - bindex_t idx;
> > - binmap_t smallbits;
> > - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
> > - idx = small_index(nb);
> > - smallbits = ms->smallmap >> idx;
> > -
> > - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
> > - mchunkptr b, p;
> > - idx += ~smallbits & 1; /* Uses next bin if idx empty */
> > - b = smallbin_at(ms, idx);
> > - p = b->fd;
> > - assert(ms->user_data, chunksize(p) == small_index2size(idx));
> > - unlink_first_small_chunk(ms, b, p, idx);
> > - set_inuse_and_pinuse(ms, p, small_index2size(idx));
> > - mem = chunk2mem(p);
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > -
> > - else if (nb > ms->dvsize) {
> > - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
> > - mchunkptr b, p, r;
> > - size_t rsize;
> > - bindex_t i;
> > - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
> > - binmap_t leastbit = least_bit(leftbits);
> > - compute_bit2idx(leastbit, i);
> > - b = smallbin_at(ms, i);
> > - p = b->fd;
> > - assert(ms->user_data, chunksize(p) == small_index2size(i));
> > - unlink_first_small_chunk(ms, b, p, i);
> > - rsize = small_index2size(i) - nb;
> > - /* Fit here cannot be remainderless if 4byte sizes */
> > - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
> > - set_inuse_and_pinuse(ms, p, small_index2size(i));
> > - else {
> > - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > - r = chunk_plus_offset(p, nb);
> > - set_size_and_pinuse_of_free_chunk(r, rsize);
> > - replace_dv(ms, r, rsize);
> > - }
> > - mem = chunk2mem(p);
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > -
> > - else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > - }
> > - }
> > - else if (bytes >= MAX_REQUEST)
> > - nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
> > - else {
> > - nb = pad_request(bytes);
> > - if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > - }
> > -
> > - if (nb <= ms->dvsize) {
> > - size_t rsize = ms->dvsize - nb;
> > - mchunkptr p = ms->dv;
> > - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
> > - mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
> > - ms->dvsize = rsize;
> > - set_size_and_pinuse_of_free_chunk(r, rsize);
> > - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > - }
> > - else { /* exhaust dv */
> > - size_t dvs = ms->dvsize;
> > - ms->dvsize = 0;
> > - ms->dv = 0;
> > - set_inuse_and_pinuse(ms, p, dvs);
> > - }
> > - mem = chunk2mem(p);
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > -
> > - else if (nb < ms->topsize) { /* Split top */
> > - size_t rsize = ms->topsize -= nb;
> > - mchunkptr p = ms->top;
> > - mchunkptr r = ms->top = chunk_plus_offset(p, nb);
> > - r->head = rsize | PINUSE_BIT;
> > - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > - mem = chunk2mem(p);
> > - check_top_chunk(ms, ms->top);
> > - check_malloced_chunk(ms, mem, nb);
> > - goto postaction;
> > - }
> > -
> > - mem = sys_alloc(ms, nb);
> > -
> > - postaction:
> > - POSTACTION(ms);
> > - return mem;
> > - }
> > -
> > - return 0;
> > -}
> > -
> > -void mspace_free(mspace msp, void* mem) {
> > - if (mem != 0) {
> > - mchunkptr p = mem2chunk(mem);
> > -#if FOOTERS
> > - mstate fm = get_mstate_for(p);
> > -#else /* FOOTERS */
> > - mstate fm = (mstate)msp;
> > -#endif /* FOOTERS */
> > - if (!ok_magic(fm)) {
> > - USAGE_ERROR_ACTION(fm, p);
> > - return;
> > - }
> > - if (!PREACTION(fm)) {
> > - check_inuse_chunk(fm, p);
> > - if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
> > - size_t psize = chunksize(p);
> > - mchunkptr next = chunk_plus_offset(p, psize);
> > - if (!pinuse(p)) {
> > - size_t prevsize = p->prev_foot;
> > -
> > - mchunkptr prev = chunk_minus_offset(p, prevsize);
> > - psize += prevsize;
> > - p = prev;
> > - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
> > - if (p != fm->dv) {
> > - unlink_chunk(fm, p, prevsize);
> > - }
> > - else if ((next->head & INUSE_BITS) == INUSE_BITS) {
> > - fm->dvsize = psize;
> > - set_free_with_pinuse(p, psize, next);
> > - goto postaction;
> > - }
> > - }
> > - else
> > - goto erroraction;
> > - }
> > -
> > - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
> > - if (!cinuse(next)) { /* consolidate forward */
> > - if (next == fm->top) {
> > - size_t tsize = fm->topsize += psize;
> > - fm->top = p;
> > - p->head = tsize | PINUSE_BIT;
> > - if (p == fm->dv) {
> > - fm->dv = 0;
> > - fm->dvsize = 0;
> > - }
> > - goto postaction;
> > - }
> > - else if (next == fm->dv) {
> > - size_t dsize = fm->dvsize += psize;
> > - fm->dv = p;
> > - set_size_and_pinuse_of_free_chunk(p, dsize);
> > - goto postaction;
> > - }
> > - else {
> > - size_t nsize = chunksize(next);
> > - psize += nsize;
> > - unlink_chunk(fm, next, nsize);
> > - set_size_and_pinuse_of_free_chunk(p, psize);
> > - if (p == fm->dv) {
> > - fm->dvsize = psize;
> > - goto postaction;
> > - }
> > - }
> > - }
> > - else
> > - set_free_with_pinuse(p, psize, next);
> > - insert_chunk(fm, p, psize);
> > - check_free_chunk(fm, p);
> > - goto postaction;
> > - }
> > - }
> > - erroraction:
> > - USAGE_ERROR_ACTION(fm, p);
> > - postaction:
> > - POSTACTION(fm);
> > - }
> > - }
> > -}
> > -
> > -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
> > - void* mem;
> > - size_t req = 0;
> > - mstate ms = (mstate)msp;
> > - if (!ok_magic(ms)) {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - return 0;
> > - }
> > - if (n_elements != 0) {
> > - req = n_elements * elem_size;
> > - if (((n_elements | elem_size) & ~(size_t)0xffff) &&
> > - (req / n_elements != elem_size))
> > - req = MAX_SIZE_T; /* force downstream failure on overflow */
> > - }
> > - mem = internal_malloc(ms, req);
> > - if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
> > - MEMCLEAR(mem, req);
> > - return mem;
> > -}
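
(Aside, not part of the patch: the `req / n_elements != elem_size' test above
is the classic divide-back check for multiplication overflow. Standalone
sketch:)

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t n   = (size_t)1 << (sizeof(size_t) * 8 - 4);
        size_t sz  = 32;
        size_t req = n * sz;                    /* wraps around */
        if (req / n != sz)
            printf("overflow detected, allocation would be forced to fail\n");
        return 0;
    }
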
> > -
> > -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
> > - if (oldmem == 0)
> > - return mspace_malloc(msp, bytes);
> > -#ifdef REALLOC_ZERO_BYTES_FREES
> > - if (bytes == 0) {
> > - mspace_free(msp, oldmem);
> > - return 0;
> > - }
> > -#endif /* REALLOC_ZERO_BYTES_FREES */
> > - else {
> > -#if FOOTERS
> > - mchunkptr p = mem2chunk(oldmem);
> > - mstate ms = get_mstate_for(p);
> > -#else /* FOOTERS */
> > - mstate ms = (mstate)msp;
> > -#endif /* FOOTERS */
> > - if (!ok_magic(ms)) {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - return 0;
> > - }
> > - return internal_realloc(ms, oldmem, bytes);
> > - }
> > -}
> > -
> > -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
> > - mstate ms = (mstate)msp;
> > - if (!ok_magic(ms)) {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - return 0;
> > - }
> > - return internal_memalign(ms, alignment, bytes);
> > -}
> > -
> > -void mspace_malloc_stats(mspace msp) {
> > - mstate ms = (mstate)msp;
> > - if (ok_magic(ms)) {
> > - internal_malloc_stats(ms);
> > - }
> > - else {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - }
> > -}
> > -
> > -size_t mspace_footprint(mspace msp) {
> > - size_t result;
> > - mstate ms = (mstate)msp;
> > - if (ok_magic(ms)) {
> > - result = ms->footprint;
> > - } else {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - }
> > - return result;
> > -}
> > -
> > -
> > -size_t mspace_max_footprint(mspace msp) {
> > - size_t result;
> > - mstate ms = (mstate)msp;
> > - if (ok_magic(ms)) {
> > - result = ms->max_footprint;
> > - } else {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - }
> > - return result;
> > -}
> > -
> > -
> > -#if !NO_MALLINFO
> > -struct mallinfo mspace_mallinfo(mspace msp) {
> > - mstate ms = (mstate)msp;
> > - if (!ok_magic(ms)) {
> > - USAGE_ERROR_ACTION(ms,ms);
> > - }
> > - return internal_mallinfo(ms);
> > -}
> > -#endif /* NO_MALLINFO */
> > -
> > -int mspace_mallopt(int param_number, int value) {
> > - return change_mparam(param_number, value);
> > -}
> > -
> > diff --git a/qxldod/mspace.cpp b/qxldod/mspace.cpp
> > new file mode 100755
> > index 0000000..d0ba123
> > --- /dev/null
> > +++ b/qxldod/mspace.cpp
> > @@ -0,0 +1,2437 @@
> > +// based on dlmalloc from Doug Lea
> > +
> > +
> > +// quote from the Doug Lea original file
> > + /*
> > + This is a version (aka dlmalloc) of malloc/free/realloc written by
> > + Doug Lea and released to the public domain, as explained at
> > + http://creativecommons.org/licenses/publicdomain. Send questions,
> > + comments, complaints, performance data, etc to dl at cs.oswego.edu
> > +
> > + * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
> > +
> > + Note: There may be an updated version of this malloc obtainable at
> > + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
> > + Check before installing!
> > + */
> > +
> > +
> > +#include <ntddk.h>
> > +
> > +#include "mspace.h"
> > +
> > +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
> > +
> > +#define MALLOC_ALIGNMENT ((size_t)8U)
> > +#define USE_LOCKS 0
> > +#define malloc_getpagesize ((size_t)4096U)
> > +#define DEFAULT_GRANULARITY malloc_getpagesize
> > +#define MAX_SIZE_T (~(size_t)0)
> > +#define MALLOC_FAILURE_ACTION
> > +#define MALLINFO_FIELD_TYPE size_t
> > +#define FOOTERS 0
> > +#define INSECURE 0
> > +#define PROCEED_ON_ERROR 0
> > +#define DEBUG 0
> > +#define ABORT_ON_ASSERT_FAILURE 1
> > +#define ABORT(user_data) abort_func(user_data)
> > +#define USE_BUILTIN_FFS 0
> > +#define USE_DEV_RANDOM 0
> > +#define PRINT(params) print_func params
> > +
> > +
> > +#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)
> > +#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)
> > +
> > +
> > +#define M_GRANULARITY (-1)
> > +
> > +void default_abort_func(void *user_data)
> > +{
> > + for (;;);
> > +}
> > +
> > +void default_print_func(void *user_data, char *format, ...)
> > +{
> > +}
> > +
> > +static mspace_abort_t abort_func = default_abort_func;
> > +static mspace_print_t print_func = default_print_func;
> > +
> > +void mspace_set_abort_func(mspace_abort_t f)
> > +{
> > + abort_func = f;
> > +}
> > +
> > +void mspace_set_print_func(mspace_print_t f)
> > +{
> > + print_func = f;
> > +}
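
(Reference note, not part of the patch: how a caller would hook these up;
my_abort and my_print are made-up names and their bodies are placeholders.)

    static void my_abort(void *user_data)
    {
        /* e.g. bugcheck or log-and-spin; the default hook just spins */
        for (;;);
    }

    static void my_print(void *user_data, char *format, ...)
    {
        /* forward to the driver's tracing facility here */
    }

    static void install_mspace_hooks(void)
    {
        mspace_set_abort_func(my_abort);
        mspace_set_print_func(my_print);
    }
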
> > +
> > +/* ------------------------ Mallinfo declarations ------------------------ */
> > +
> > +#if !NO_MALLINFO
> > +/*
> > + This version of malloc supports the standard SVID/XPG mallinfo
> > + routine that returns a struct containing usage properties and
> > + statistics. It should work on any system that has a
> > + /usr/include/malloc.h defining struct mallinfo. The main
> > + declaration needed is the mallinfo struct that is returned (by-copy)
> > + by mallinfo(). The mallinfo struct contains a bunch of fields that
> > + are not even meaningful in this version of malloc. These fields
> > + are instead filled by mallinfo() with other numbers that might be of
> > + interest.
> > +
> > + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
> > + /usr/include/malloc.h file that includes a declaration of struct
> > + mallinfo. If so, it is included; else a compliant version is
> > + declared below. These must be precisely the same for mallinfo() to
> > + work. The original SVID version of this struct, defined on most
> > + systems with mallinfo, declares all fields as ints. But some others
> > + define as unsigned long. If your system defines the fields using a
> > + type of different width than listed here, you MUST #include your
> > + system version and #define HAVE_USR_INCLUDE_MALLOC_H.
> > +*/
> > +
> > +/* #define HAVE_USR_INCLUDE_MALLOC_H */
> > +
> > +
> > +struct mallinfo {
> > + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
> > + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
> > + MALLINFO_FIELD_TYPE smblks; /* always 0 */
> > + MALLINFO_FIELD_TYPE hblks; /* always 0 */
> > + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
> > + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
> > + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
> > + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
> > + MALLINFO_FIELD_TYPE fordblks; /* total free space */
> > + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
> > +};
> > +
> > +#endif /* NO_MALLINFO */
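
(Aside, not part of the patch: a helper a caller could build on top of this
struct; the function name is made up, and it assumes NO_MALLINFO stays
undefined and that mspace_mallinfo, defined later in this file, is visible.)

    static size_t mspace_free_bytes(mspace msp)
    {
        struct mallinfo mi = mspace_mallinfo(msp);
        return mi.fordblks;                 /* total free space in this mspace */
    }
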
> > +
> > +
> > +
> > +#ifdef DEBUG
> > +#if ABORT_ON_ASSERT_FAILURE
> > +#define assert(user_data, x) if(!(x)) ABORT(user_data)
> > +#else /* ABORT_ON_ASSERT_FAILURE */
> > +#include <assert.h>
> > +#endif /* ABORT_ON_ASSERT_FAILURE */
> > +#else /* DEBUG */
> > +#define assert(user_data, x)
> > +#endif /* DEBUG */
> > +
> > +/* ------------------- size_t and alignment properties -------------------- */
> > +
> > +/* The byte and bit size of a size_t */
> > +#define SIZE_T_SIZE (sizeof(size_t))
> > +#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
> > +
> > +/* Some constants coerced to size_t */
> > +/* Annoying but necessary to avoid errors on some platforms */
> > +#define SIZE_T_ZERO ((size_t)0)
> > +#define SIZE_T_ONE ((size_t)1)
> > +#define SIZE_T_TWO ((size_t)2)
> > +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
> > +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
> > +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
> > +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
> > +
> > +/* The bit mask value corresponding to MALLOC_ALIGNMENT */
> > +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
> > +
> > +/* True if address a has acceptable alignment */
> > +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
> > +
> > +/* the number of bytes to offset an address to align it */
> > +#define align_offset(A)\
> > + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
> > + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
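
(Aside, not part of the patch: what align_offset evaluates to with
MALLOC_ALIGNMENT == 8, re-derived as a standalone program.)

    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN     ((size_t)8U)
    #define MASK      (ALIGN - 1)
    #define OFFSET(a) ((((size_t)(a) & MASK) == 0) ? 0 : \
                       ((ALIGN - ((size_t)(a) & MASK)) & MASK))

    int main(void)
    {
        printf("%u\n", (unsigned)OFFSET(0x1003));   /* 5: next boundary is 0x1008 */
        printf("%u\n", (unsigned)OFFSET(0x1008));   /* 0: already aligned */
        return 0;
    }
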
> > +
> > +/* --------------------------- Lock preliminaries ------------------------ */
> > +
> > +#if USE_LOCKS
> > +
> > +/*
> > + When locks are defined, there are up to two global locks:
> > +
> > + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
> > + MORECORE. In many cases sys_alloc requires two calls, that should
> > + not be interleaved with calls by other threads. This does not
> > + protect against direct calls to MORECORE by other threads not
> > + using this lock, so there is still code to cope the best we can on
> > + interference.
> > +
> > + * magic_init_mutex ensures that mparams.magic and other
> > + unique mparams values are initialized only once.
> > +*/
> > +
> > +
> > +#define USE_LOCK_BIT (2U)
> > +#else /* USE_LOCKS */
> > +#define USE_LOCK_BIT (0U)
> > +#define INITIAL_LOCK(l)
> > +#endif /* USE_LOCKS */
> > +
> > +#if USE_LOCKS
> > +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
> > +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
> > +#else /* USE_LOCKS */
> > +#define ACQUIRE_MAGIC_INIT_LOCK()
> > +#define RELEASE_MAGIC_INIT_LOCK()
> > +#endif /* USE_LOCKS */
> > +
> > +
> > +
> > +/* ----------------------- Chunk representations ------------------------ */
> > +
> > +/*
> > + (The following includes lightly edited explanations by Colin Plumb.)
> > +
> > + The malloc_chunk declaration below is misleading (but accurate and
> > + necessary). It declares a "view" into memory allowing access to
> > + necessary fields at known offsets from a given base.
> > +
> > + Chunks of memory are maintained using a `boundary tag' method as
> > + originally described by Knuth. (See the paper by Paul Wilson
> > + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
> > + techniques.) Sizes of free chunks are stored both in the front of
> > + each chunk and at the end. This makes consolidating fragmented
> > + chunks into bigger chunks fast. The head fields also hold bits
> > + representing whether chunks are free or in use.
> > +
> > + Here are some pictures to make it clearer. They are "exploded" to
> > + show that the state of a chunk can be thought of as extending from
> > + the high 31 bits of the head field of its header through the
> > + prev_foot and PINUSE_BIT bit of the following chunk header.
> > +
> > + A chunk that's in use looks like:
> > +
> > + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > + | Size of previous chunk (if P = 1)
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> |P|
> > + | Size of this chunk
> 1| +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > + |
> |
> > + +-
> -+
> > + |
> |
> > + +-
> -+
> > + |
> :
> > + +- size - sizeof(size_t) available payload bytes
> -+
> > + :
> |
> > + chunk-> +-
> -+
> > + |
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> |1|
> > + | Size of next chunk (may or may not be in use) |
> +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > +
> > + And if it's free, it looks like this:
> > +
> > + chunk-> +-
> -+
> > + | User payload (must be in use, or we would have merged!)
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> |P|
> > + | Size of this chunk
> 0| +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > + | Next pointer
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + | Prev pointer
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + |
> :
> > + +- size - sizeof(struct chunk) unused bytes
> -+
> > + :
> |
> > + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > + | Size of this chunk
> |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> |0|
> > + | Size of next chunk (must be in use, or we would have merged)|
> +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
> +-+-+
> > + | :
> > + +- User payload -+
> > + : |
> > + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> -+-+-+
> > + |0|
> > + +-+
> > + Note that since we always merge adjacent free chunks, the chunks
> > + adjacent to a free chunk must be in use.
> > +
> > + Given a pointer to a chunk (which can be derived trivially from the
> > + payload pointer) we can, in O(1) time, find out whether the adjacent
> > + chunks are free, and if so, unlink them from the lists that they
> > + are on and merge them with the current chunk.
> > +
> > + Chunks always begin on even word boundaries, so the mem portion
> > + (which is returned to the user) is also on an even word boundary, and
> > + thus at least double-word aligned.
> > +
> > + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
> > + chunk size (which is always a multiple of two words), is an in-use
> > + bit for the *previous* chunk. If that bit is *clear*, then the
> > + word before the current chunk size contains the previous chunk
> > + size, and can be used to find the front of the previous chunk.
> > + The very first chunk allocated always has this bit set, preventing
> > + access to non-existent (or non-owned) memory. If pinuse is set for
> > + any given chunk, then you CANNOT determine the size of the
> > + previous chunk, and might even get a memory addressing fault when
> > + trying to do so.
> > +
> > + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
> > + the chunk size redundantly records whether the current chunk is
> > + inuse. This redundancy enables usage checks within free and realloc,
> > + and reduces indirection when freeing and consolidating chunks.
> > +
> > + Each freshly allocated chunk must have both cinuse and pinuse set.
> > + That is, each allocated chunk borders either a previously allocated
> > + and still in-use chunk, or the base of its memory arena. This is
> > + ensured by making all allocations from the `lowest' part of any
> > + found chunk. Further, no free chunk physically borders another one,
> > + so each free chunk is known to be preceded and followed by either
> > + inuse chunks or the ends of memory.
> > +
> > + Note that the `foot' of the current chunk is actually represented
> > + as the prev_foot of the NEXT chunk. This makes it easier to
> > + deal with alignments etc but can be very confusing when trying
> > + to extend or adapt this code.
> > +
> > + The exceptions to all this are
> > +
> > + 1. The special chunk `top' is the top-most available chunk (i.e.,
> > + the one bordering the end of available memory). It is treated
> > + specially. Top is never included in any bin, is used only if
> > + no other chunk is available, and is released back to the
> > + system if it is very large (see M_TRIM_THRESHOLD). In effect,
> > + the top chunk is treated as larger (and thus less well
> > + fitting) than any other available chunk. The top chunk
> > + doesn't update its trailing size field since there is no next
> > + contiguous chunk that would have to index off it. However,
> > + space is still allocated for it (TOP_FOOT_SIZE) to enable
> > + separation or merging when space is extended.
> > +
> > + 2. Chunks allocated via mmap, which have the lowest-order bit
> > + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
> > + PINUSE_BIT in their head fields. Because they are allocated
> > + one-by-one, each must carry its own prev_foot field, which is
> > + also used to hold the offset this chunk has within its mmapped
> > + region, which is needed to preserve alignment. Each mmapped
> > + chunk is trailed by the first two fields of a fake next-chunk
> > + for sake of usage checks.
> > +
> > +*/
> > +
> > +struct malloc_chunk {
> > + size_t prev_foot; /* Size of previous chunk (if free). */
> > + size_t head; /* Size and inuse bits. */
> > + struct malloc_chunk* fd; /* double links -- used only if free. */
> > + struct malloc_chunk* bk;
> > +};
> > +
> > +typedef struct malloc_chunk mchunk;
> > +typedef struct malloc_chunk* mchunkptr;
> > +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
> > +typedef unsigned int bindex_t; /* Described below */
> > +typedef unsigned int binmap_t; /* Described below */
> > +typedef unsigned int flag_t; /* The type of various bit flag sets */
> > +
> > +
> > +/* ------------------- Chunks sizes and alignments ----------------------- */
> > +
> > +#define MCHUNK_SIZE (sizeof(mchunk))
> > +
> > +#if FOOTERS
> > +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
> > +#else /* FOOTERS */
> > +#define CHUNK_OVERHEAD (SIZE_T_SIZE)
> > +#endif /* FOOTERS */
> > +
> > +/* The smallest size we can malloc is an aligned minimal chunk */
> > +#define MIN_CHUNK_SIZE\
> > + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > +
> > +/* conversion from malloc headers to user pointers, and back */
> > +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
> > +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
> > +/* chunk associated with aligned address A */
> > +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
> > +
> > +/* Bounds on request (not chunk) sizes. */
> > +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
> > +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
> > +
> > +/* pad request bytes into a usable size */
> > +#define pad_request(req) \
> > + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > +
> > +/* pad request, checking for minimum (but not maximum) */
> > +#define request2size(req) \
> > + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
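
(Aside, not part of the patch: with FOOTERS == 0 the per-chunk overhead is one
size_t, so on a 32-bit build pad_request(13) is (13 + 4 + 7) & ~7 == 24, and
request2size bumps any request below MIN_REQUEST up to MIN_CHUNK_SIZE.)
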
> > +
> > +/* ------------------ Operations on head and foot fields ----------------- */
> > +
> > +/*
> > + The head field of a chunk is or'ed with PINUSE_BIT when previous
> > + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
> > + use. If the chunk was obtained with mmap, the prev_foot field has
> > + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
> > + mmapped region to the base of the chunk.
> > +*/
> > +
> > +#define PINUSE_BIT (SIZE_T_ONE)
> > +#define CINUSE_BIT (SIZE_T_TWO)
> > +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
> > +
> > +/* Head value for fenceposts */
> > +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
> > +
> > +/* extraction of fields from head words */
> > +#define cinuse(p) ((p)->head & CINUSE_BIT)
> > +#define pinuse(p) ((p)->head & PINUSE_BIT)
> > +#define chunksize(p) ((p)->head & ~(INUSE_BITS))
> > +
> > +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
> > +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
> > +
> > +/* Treat space at ptr +/- offset as a chunk */
> > +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
> > +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
> > +
> > +/* Ptr to next or previous physical malloc_chunk. */
> > +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
> > +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
> > +
> > +/* extract next chunk's pinuse bit */
> > +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
> > +
> > +/* Get/set size at footer */
> > +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
> > +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
> > +
> > +/* Set size, pinuse bit, and foot */
> > +#define set_size_and_pinuse_of_free_chunk(p, s)\
> > + ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
> > +
> > +/* Set size, pinuse bit, foot, and clear next pinuse */
> > +#define set_free_with_pinuse(p, s, n)\
> > + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
> > +
> > +/* Get the internal overhead associated with chunk p */
> > +#define overhead_for(p) CHUNK_OVERHEAD
> > +
> > +/* Return true if malloced space is not necessarily cleared */
> > +#define calloc_must_clear(p) (1)
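
(Aside, not part of the patch: a standalone illustration of how the head word
packs the chunk size together with the PINUSE/CINUSE flag bits.)

    #include <stdio.h>
    #include <stddef.h>

    #define PINUSE ((size_t)1)
    #define CINUSE ((size_t)2)
    #define INUSE  (PINUSE | CINUSE)

    int main(void)
    {
        size_t head = (size_t)32 | PINUSE | CINUSE;           /* 32-byte chunk, both in use */
        printf("size   = %u\n", (unsigned)(head & ~INUSE));   /* 32 */
        printf("cinuse = %u\n", (unsigned)(head & CINUSE));   /* 2: this chunk in use */
        printf("pinuse = %u\n", (unsigned)(head & PINUSE));   /* 1: previous chunk in use */
        return 0;
    }
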
> > +
> > +
> > +/* ---------------------- Overlaid data structures ----------------------- */
> > +
> > +/*
> > + When chunks are not in use, they are treated as nodes of either
> > + lists or trees.
> > +
> > + "Small" chunks are stored in circular doubly-linked lists, and look
> > + like this:
> > +
> > + chunk->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Size of previous chunk
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + `head:' | Size of chunk, in bytes
> > |P|
> > + mem->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Forward pointer to next chunk in list
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Back pointer to previous chunk in list
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Unused space (may be 0 bytes long)
> > .
> > + .
> > .
> > + .
> > |
> > +nextchunk->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + `foot:' | Size of chunk, in bytes
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +
> > + Larger chunks are kept in a form of bitwise digital trees (aka
> > + tries) keyed on chunksizes. Because malloc_tree_chunks are only for
> > + free chunks greater than 256 bytes, their size doesn't impose any
> > + constraints on user chunk sizes. Each node looks like:
> > +
> > + chunk->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Size of previous chunk
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + `head:' | Size of chunk, in bytes
> > |P|
> > + mem->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Forward pointer to next chunk of same size
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Back pointer to previous chunk of same size
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Pointer to left child (child[0])
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Pointer to right child (child[1])
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Pointer to parent
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | bin index of this chunk
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + | Unused space
> > .
> > + .
> > |
> > +nextchunk->
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > + `foot:' | Size of chunk, in bytes
> > |
> > +
> > +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +
> > + Each tree holding treenodes is a tree of unique chunk sizes. Chunks
> > + of the same size are arranged in a circularly-linked list, with only
> > + the oldest chunk (the next to be used, in our FIFO ordering)
> > + actually in the tree. (Tree members are distinguished by a non-null
> > + parent pointer.) If a chunk with the same size as an existing node
> > + is inserted, it is linked off the existing node using pointers that
> > + work in the same way as fd/bk pointers of small chunks.
> > +
> > + Each tree contains a power of 2 sized range of chunk sizes (the
> > + smallest is 0x100 <= x < 0x180), which is divided in half at each
> > + tree level, with the chunks in the smaller half of the range (0x100
> > + <= x < 0x140 for the top node) in the left subtree and the larger
> > + half (0x140 <= x < 0x180) in the right subtree. This is, of course,
> > + done by inspecting individual bits.
> > +
> > + Using these rules, each node's left subtree contains all smaller
> > + sizes than its right subtree. However, the node at the root of each
> > + subtree has no particular ordering relationship to either. (The
> > + dividing line between the subtree sizes is based on trie relation.)
> > + If we remove the last chunk of a given size from the interior of the
> > + tree, we need to replace it with a leaf node. The tree ordering
> > + rules permit a node to be replaced by any leaf below it.
> > +
> > + The smallest chunk in a tree (a common operation in a best-fit
> > + allocator) can be found by walking a path to the leftmost leaf in
> > + the tree. Unlike a usual binary tree, where we follow left child
> > + pointers until we reach a null, here we follow the right child
> > + pointer any time the left one is null, until we reach a leaf with
> > + both child pointers null. The smallest chunk in the tree will be
> > + somewhere along that path.
> > +
> > + The worst case number of steps to add, find, or remove a node is
> > + bounded by the number of bits differentiating chunks within
> > + bins. Under current bin calculations, this ranges from 6 up to 21
> > + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
> > + is of course much better.
> > +*/
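
(Aside, not part of the patch: the per-level child selection the tree macros
below rely on; each level consumes the next lower size bit. The shift here
assumes, purely for illustration, a bin whose deciding bits start at bit 6.)

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t   size     = 0x130;   /* a chunk in the 0x100..0x180 range */
        unsigned bits     = (unsigned)(sizeof(size_t) * 8);
        size_t   sizebits = size << (bits - 7);
        int      depth;

        for (depth = 0; depth < 3; ++depth) {
            int dir = (int)((sizebits >> (bits - 1)) & 1);
            printf("depth %d -> child[%d]\n", depth, dir);   /* 0, then 1, then 1 */
            sizebits <<= 1;
        }
        return 0;
    }
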
> > +
> > +struct malloc_tree_chunk {
> > + /* The first four fields must be compatible with malloc_chunk */
> > + size_t prev_foot;
> > + size_t head;
> > + struct malloc_tree_chunk* fd;
> > + struct malloc_tree_chunk* bk;
> > +
> > + struct malloc_tree_chunk* child[2];
> > + struct malloc_tree_chunk* parent;
> > + bindex_t index;
> > +};
> > +
> > +typedef struct malloc_tree_chunk tchunk;
> > +typedef struct malloc_tree_chunk* tchunkptr;
> > +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
> > +
> > +/* A little helper macro for trees */
> > +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
> > +
> > +/* ----------------------------- Segments -------------------------------- */
> > +
> > +/*
> > + Each malloc space may include non-contiguous segments, held in a
> > + list headed by an embedded malloc_segment record representing the
> > + top-most space. Segments also include flags holding properties of
> > + the space. Large chunks that are directly allocated by mmap are not
> > + included in this list. They are instead independently created and
> > + destroyed without otherwise keeping track of them.
> > +
> > + Segment management mainly comes into play for spaces allocated by
> > + MMAP. Any call to MMAP might or might not return memory that is
> > + adjacent to an existing segment. MORECORE normally contiguously
> > + extends the current space, so this space is almost always adjacent,
> > + which is simpler and faster to deal with. (This is why MORECORE is
> > + used preferentially to MMAP when both are available -- see
> > + sys_alloc.) When allocating using MMAP, we don't use any of the
> > + hinting mechanisms (inconsistently) supported in various
> > + implementations of unix mmap, or distinguish reserving from
> > + committing memory. Instead, we just ask for space, and exploit
> > + contiguity when we get it. It is probably possible to do
> > + better than this on some systems, but no general scheme seems
> > + to be significantly better.
> > +
> > + Management entails a simpler variant of the consolidation scheme
> > + used for chunks to reduce fragmentation -- new adjacent memory is
> > + normally prepended or appended to an existing segment. However,
> > + there are limitations compared to chunk consolidation that mostly
> > + reflect the fact that segment processing is relatively infrequent
> > + (occurring only when getting memory from system) and that we
> > + don't expect to have huge numbers of segments:
> > +
> > + * Segments are not indexed, so traversal requires linear scans. (It
> > + would be possible to index these, but is not worth the extra
> > + overhead and complexity for most programs on most platforms.)
> > + * New segments are only appended to old ones when holding top-most
> > + memory; if they cannot be prepended to others, they are held in
> > + different segments.
> > +
> > + Except for the top-most segment of an mstate, each segment record
> > + is kept at the tail of its segment. Segments are added by pushing
> > + segment records onto the list headed by &mstate.seg for the
> > + containing mstate.
> > +
> > + Segment flags control allocation/merge/deallocation policies:
> > + * If EXTERN_BIT set, then we did not allocate this segment,
> > + and so should not try to deallocate or merge with others.
> > + (This currently holds only for the initial segment passed
> > + into create_mspace_with_base.)
> > + * If IS_MMAPPED_BIT set, the segment may be merged with
> > + other surrounding mmapped segments and trimmed/de-allocated
> > + using munmap.
> > + * If neither bit is set, then the segment was obtained using
> > + MORECORE so can be merged with surrounding MORECORE'd segments
> > + and deallocated/trimmed using MORECORE with negative arguments.
> > +*/
> > +
> > +struct malloc_segment {
> > + char* base; /* base address */
> > + size_t size; /* allocated size */
> > + struct malloc_segment* next; /* ptr to next segment */
> > +};
> > +
> > +typedef struct malloc_segment msegment;
> > +typedef struct malloc_segment* msegmentptr;
> > +
> > +/* ---------------------------- malloc_state
> -----------------------------
> > */
> > +
> > +/*
> > + A malloc_state holds all of the bookkeeping for a space.
> > + The main fields are:
> > +
> > + Top
> > + The topmost chunk of the currently active segment. Its size is
> > + cached in topsize. The actual size of topmost space is
> > + topsize+TOP_FOOT_SIZE, which includes space reserved for adding
> > + fenceposts and segment records if necessary when getting more
> > + space from the system. The size at which to autotrim top is
> > + cached from mparams in trim_check, except that it is disabled if
> > + an autotrim fails.
> > +
> > + Designated victim (dv)
> > + This is the preferred chunk for servicing small requests that
> > + don't have exact fits. It is normally the chunk split off most
> > + recently to service another small request. Its size is cached in
> > + dvsize. The link fields of this chunk are not maintained since it
> > + is not kept in a bin.
> > +
> > + SmallBins
> > + An array of bin headers for free chunks. These bins hold chunks
> > + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
> > + chunks of all the same size, spaced 8 bytes apart. To simplify
> > + use in double-linked lists, each bin header acts as a malloc_chunk
> > + pointing to the real first node, if it exists (else pointing to
> > + itself). This avoids special-casing for headers. But to avoid
> > + waste, we allocate only the fd/bk pointers of bins, and then use
> > + repositioning tricks to treat these as the fields of a chunk.
> > +
> > + TreeBins
> > + Treebins are pointers to the roots of trees holding a range of
> > + sizes. There are 2 equally spaced treebins for each power of two
> > +      from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
> > + larger.
> > +
> > + Bin maps
> > + There is one bit map for small bins ("smallmap") and one for
> > +      treebins ("treemap"). Each bin sets its bit when non-empty, and
> > + clears the bit when empty. Bit operations are then used to avoid
> > + bin-by-bin searching -- nearly all "search" is done without ever
> > + looking at bins that won't be selected. The bit maps
> > +      conservatively use 32 bits per map word, even on a 64-bit system.
> > + For a good description of some of the bit-based techniques used
> > + here, see Henry S. Warren Jr's book "Hacker's Delight" (and
> > + supplement at http://hackersdelight.org/). Many of these are
> > + intended to reduce the branchiness of paths through malloc etc, as
> > + well as to reduce the number of memory locations read or written.
> > +
> > + Segments
> > + A list of segments headed by an embedded malloc_segment record
> > + representing the initial space.
> > +
> > + Address check support
> > + The least_addr field is the least address ever obtained from
> > + MORECORE or MMAP. Attempted frees and reallocs of any address less
> > + than this are trapped (unless INSECURE is defined).
> > +
> > + Magic tag
> > +     A cross-check field that should always hold the same value as
> mparams.magic.
> > +
> > + Flags
> > + Bits recording whether to use MMAP, locks, or contiguous MORECORE
> > +
> > + Statistics
> > + Each space keeps track of current and maximum system memory
> > + obtained via MORECORE or MMAP.
> > +
> > + Locking
> > + If USE_LOCKS is defined, the "mutex" lock is acquired and released
> > + around every public call using this mspace.
> > +*/
> > +
> > +/* Bin types, widths and sizes */
> > +#define NSMALLBINS (32U)
> > +#define NTREEBINS (32U)
> > +#define SMALLBIN_SHIFT (3U)
> > +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
> > +#define TREEBIN_SHIFT (8U)
> > +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
> > +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
> > +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK -
> > CHUNK_OVERHEAD)
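
To put numbers on the boundary (widths from the definitions above; the
overheads are assumed from the 32-bit, FOOTERS-disabled configuration set
earlier in this file): SMALLBIN_WIDTH is 8 and MIN_LARGE_SIZE is 1 << 8 = 256,
so smallbins serve chunk sizes below 256 bytes and treebins everything at or
above that. With CHUNK_ALIGN_MASK = 7 and CHUNK_OVERHEAD = sizeof(size_t) = 4,
MAX_SMALL_REQUEST works out to 255 - 7 - 4 = 244 bytes of user payload.
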
> > +
> > +struct malloc_state {
> > + binmap_t smallmap;
> > + binmap_t treemap;
> > + size_t dvsize;
> > + size_t topsize;
> > + char* least_addr;
> > + mchunkptr dv;
> > + mchunkptr top;
> > + size_t magic;
> > + mchunkptr smallbins[(NSMALLBINS+1)*2];
> > + tbinptr treebins[NTREEBINS];
> > + size_t footprint;
> > + size_t max_footprint;
> > + flag_t mflags;
> > + void *user_data;
> > +#if USE_LOCKS
> > + MLOCK_T mutex; /* locate lock among fields that rarely change
> */
> > +#endif /* USE_LOCKS */
> > + msegment seg;
> > +};
> > +
> > +typedef struct malloc_state* mstate;
> > +
> > +/* ------------- Global malloc_state and malloc_params
> -------------------
> > */
> > +
> > +/*
> > + malloc_params holds global properties, including those that can be
> > + dynamically set using mallopt. There is a single instance, mparams,
> > + initialized in init_mparams.
> > +*/
> > +
> > +struct malloc_params {
> > + size_t magic;
> > + size_t page_size;
> > + size_t granularity;
> > + flag_t default_mflags;
> > +};
> > +
> > +static struct malloc_params mparams;
> > +
> > +/* The global malloc_state used for all non-"mspace" calls */
> > +//static struct malloc_state _gm_;
> > +//#define gm (&_gm_)
> > +//#define is_global(M) ((M) == &_gm_)
> > +#define is_initialized(M) ((M)->top != 0)
> > +
> > +/* -------------------------- system alloc setup
> -------------------------
> > */
> > +
> > +/* Operations on mflags */
> > +
> > +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
> > +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
> > +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
> > +
> > +#define set_lock(M,L)\
> > + ((M)->mflags = (L)?\
> > + ((M)->mflags | USE_LOCK_BIT) :\
> > + ((M)->mflags & ~USE_LOCK_BIT))
> > +
> > +/* page-align a size */
> > +#define page_align(S)\
> > + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
> > +
> > +/* granularity-align a size */
> > +#define granularity_align(S)\
> > + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
> > +
> > +#define is_page_aligned(S)\
> > + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
> > +#define is_granularity_aligned(S)\
> > + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
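
A quick worked example of the rounding macros, assuming the 4096-byte page
size installed by init_mparams() below: page_align(1) = (1 + 4096) & ~4095 =
4096, while page_align(4096) = 8192 -- as written, a value that is already
page-aligned is still bumped to the next boundary. granularity_align() behaves
the same way with mparams.granularity.
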
> > +
> > +/* True if segment S holds address A */
> > +#define segment_holds(S, A)\
> > + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
> > +
> > +/* Return segment holding given address */
> > +static msegmentptr segment_holding(mstate m, char* addr) {
> > + msegmentptr sp = &m->seg;
> > + for (;;) {
> > + if (addr >= sp->base && addr < sp->base + sp->size)
> > + return sp;
> > + if ((sp = sp->next) == 0)
> > + return 0;
> > + }
> > +}
> > +
> > +/* Return true if segment contains a segment link */
> > +static int has_segment_link(mstate m, msegmentptr ss) {
> > + msegmentptr sp = &m->seg;
> > + for (;;) {
> > + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
> > + return 1;
> > + if ((sp = sp->next) == 0)
> > + return 0;
> > + }
> > +}
> > +
> > +
> > +
> > +/*
> > + TOP_FOOT_SIZE is padding at the end of a segment, including space
> > + that may be needed to place segment records and fenceposts when new
> > + noncontiguous segments are added.
> > +*/
> > +#define TOP_FOOT_SIZE\
> > + (align_offset(chunk2mem(0))+pad_request(sizeof(struct
> > malloc_segment))+MIN_CHUNK_SIZE)
> > +
> > +
> > +/* ------------------------------- Hooks
> --------------------------------
> > */
> > +
> > +/*
> > + PREACTION should be defined to return 0 on success, and nonzero on
> > + failure. If you are not using locking, you can redefine these to do
> > + anything you like.
> > +*/
> > +
> > +#if USE_LOCKS
> > +
> > +/* Ensure locks are initialized */
> > +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
> > +
> > +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))?
> > ACQUIRE_LOCK(&(M)->mutex) : 0)
> > +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
> > +#else /* USE_LOCKS */
> > +
> > +#ifndef PREACTION
> > +#define PREACTION(M) (0)
> > +#endif /* PREACTION */
> > +
> > +#ifndef POSTACTION
> > +#define POSTACTION(M)
> > +#endif /* POSTACTION */
> > +
> > +#endif /* USE_LOCKS */
> > +
> > +/*
> > + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
> > + USAGE_ERROR_ACTION is triggered on detected bad frees and
> > + reallocs. The argument p is an address that might have triggered the
> > + fault. It is ignored by the two predefined actions, but might be
> > + useful in custom actions that try to help diagnose errors.
> > +*/
> > +
> > +#if PROCEED_ON_ERROR
> > +
> > +/* A count of the number of corruption errors causing resets */
> > +int malloc_corruption_error_count;
> > +
> > +/* default corruption action */
> > +static void reset_on_error(mstate m);
> > +
> > +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
> > +#define USAGE_ERROR_ACTION(m, p)
> > +
> > +#else /* PROCEED_ON_ERROR */
> > +
> > +#ifndef CORRUPTION_ERROR_ACTION
> > +#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
> > +#endif /* CORRUPTION_ERROR_ACTION */
> > +
> > +#ifndef USAGE_ERROR_ACTION
> > +#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
> > +#endif /* USAGE_ERROR_ACTION */
> > +
> > +#endif /* PROCEED_ON_ERROR */
> > +
> > +/* -------------------------- Debugging setup
> ----------------------------
> > */
> > +
> > +#if ! DEBUG
> > +
> > +#define check_free_chunk(M,P)
> > +#define check_inuse_chunk(M,P)
> > +#define check_malloced_chunk(M,P,N)
> > +#define check_malloc_state(M)
> > +#define check_top_chunk(M,P)
> > +
> > +#else /* DEBUG */
> > +#define check_free_chunk(M,P) do_check_free_chunk(M,P)
> > +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
> > +#define check_top_chunk(M,P) do_check_top_chunk(M,P)
> > +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
> > +#define check_malloc_state(M) do_check_malloc_state(M)
> > +
> > +static void do_check_any_chunk(mstate m, mchunkptr p);
> > +static void do_check_top_chunk(mstate m, mchunkptr p);
> > +static void do_check_inuse_chunk(mstate m, mchunkptr p);
> > +static void do_check_free_chunk(mstate m, mchunkptr p);
> > +static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
> > +static void do_check_tree(mstate m, tchunkptr t);
> > +static void do_check_treebin(mstate m, bindex_t i);
> > +static void do_check_smallbin(mstate m, bindex_t i);
> > +static void do_check_malloc_state(mstate m);
> > +static int bin_find(mstate m, mchunkptr x);
> > +static size_t traverse_and_check(mstate m);
> > +#endif /* DEBUG */
> > +
> > +/* ---------------------------- Indexing Bins
> ----------------------------
> > */
> > +
> > +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
> > +#define small_index(s) ((s) >> SMALLBIN_SHIFT)
> > +#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
> > +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
> > +
> > +/* addressing by index. See above about smallbin repositioning */
> > +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smal
> lbins[(i)<<1])))
> > +#define treebin_at(M,i) (&((M)->treebins[i]))
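
Reading the pointer arithmetic in smallbin_at(): the returned address is
treated as a malloc_chunk, so its fd and bk members land on array slots
(i<<1)+2 and (i<<1)+3, while the notional prev_foot/head words alias the
previous bin's pointers and are never read as sizes. That is the
"repositioning trick" referred to in the malloc_state comment, and it is why
smallbins[] is declared with (NSMALLBINS+1)*2 entries even though only two
pointers per bin are ever used.
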
> > +
> > +/* assign tree index for size S to variable I */
> > +#if defined(__GNUC__) && defined(i386)
> > +#define compute_tree_index(S, I)\
> > +{\
> > + size_t X = S >> TREEBIN_SHIFT;\
> > + if (X == 0)\
> > + I = 0;\
> > + else if (X > 0xFFFF)\
> > + I = NTREEBINS-1;\
> > + else {\
> > + unsigned int K;\
> > + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
> > + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
> > + }\
> > +}
> > +#else /* GNUC */
> > +#define compute_tree_index(S, I)\
> > +{\
> > + size_t X = S >> TREEBIN_SHIFT;\
> > + if (X == 0)\
> > + I = 0;\
> > + else if (X > 0xFFFF)\
> > + I = NTREEBINS-1;\
> > + else {\
> > + unsigned int Y = (unsigned int)X;\
> > + unsigned int N = ((Y - 0x100) >> 16) & 8;\
> > + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
> > + N += K;\
> > + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
> > + K = 14 - N + ((Y <<= K) >> 15);\
> > + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
> > + }\
> > +}
> > +#endif /* GNUC */
> > +
> > +/* Bit representing maximum resolved size in a treebin at i */
> > +#define bit_for_tree_index(i) \
> > + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT
> - 2)
> > +
> > +/* Shift placing maximum resolved bit in a treebin at i as sign bit */
> > +#define leftshift_for_tree_index(i) \
> > + ((i == NTREEBINS-1)? 0 : \
> > + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
> > +
> > +/* The size of the smallest chunk held in bin with index i */
> > +#define minsize_for_tree_index(i) \
> > + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
> > + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
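
Because the index mapping is easy to misread, here is a standalone sketch (not
from the file; the constants are copied from the definitions above) that
repackages the portable compute_tree_index() path as a plain function so it
can be checked on a host compiler. A 768-byte chunk, for instance, lands in
treebin 3, which by minsize_for_tree_index() covers sizes 768 up to (but not
including) 1024:

    #include <stdio.h>
    #include <stddef.h>

    #define TREEBIN_SHIFT 8U
    #define NTREEBINS     32U

    /* Same bit-twiddling as the portable compute_tree_index() above. */
    static unsigned tree_index_for(size_t S)
    {
        size_t X = S >> TREEBIN_SHIFT;
        if (X == 0)
            return 0;
        if (X > 0xFFFF)
            return NTREEBINS - 1;
        {
            unsigned int Y = (unsigned int)X;
            unsigned int N = ((Y - 0x100) >> 16) & 8;
            unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
            N += K;
            N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
            K = 14 - N + ((Y <<= K) >> 15);
            return (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1))) & 1);
        }
    }

    int main(void)
    {
        printf("%u\n", tree_index_for(768)); /* prints 3 */
        return 0;
    }
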
> > +
> > +/* ------------------------ Operations on bin maps
> -----------------------
> > */
> > +
> > +/* bit corresponding to given index */
> > +#define idx2bit(i) ((binmap_t)(1) << (i))
> > +
> > +/* Mark/Clear bits with given index */
> > +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
> > +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
> > +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
> > +
> > +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
> > +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
> > +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
> > +
> > +/* index corresponding to given bit */
> > +
> > +#if defined(__GNUC__) && defined(i386)
> > +#define compute_bit2idx(X, I)\
> > +{\
> > + unsigned int J;\
> > + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
> > + I = (bindex_t)J;\
> > +}
> > +
> > +#else /* GNUC */
> > +#if USE_BUILTIN_FFS
> > +#define compute_bit2idx(X, I) I = ffs(X)-1
> > +
> > +#else /* USE_BUILTIN_FFS */
> > +#define compute_bit2idx(X, I)\
> > +{\
> > + unsigned int Y = X - 1;\
> > + unsigned int K = Y >> (16-4) & 16;\
> > + unsigned int N = K; Y >>= K;\
> > + N += K = Y >> (8-3) & 8; Y >>= K;\
> > + N += K = Y >> (4-2) & 4; Y >>= K;\
> > + N += K = Y >> (2-1) & 2; Y >>= K;\
> > + N += K = Y >> (1-0) & 1; Y >>= K;\
> > + I = (bindex_t)(N + Y);\
> > +}
> > +#endif /* USE_BUILTIN_FFS */
> > +#endif /* GNUC */
> > +
> > +/* isolate the least set bit of a bitmap */
> > +#define least_bit(x) ((x) & -(x))
> > +
> > +/* mask with all bits to left of least bit of x on */
> > +#define left_bits(x) ((x<<1) | -(x<<1))
> > +
> > +/* mask with all bits to left of or equal to least bit of x on */
> > +#define same_or_left_bits(x) ((x) | -(x))
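
For illustration, the way these masks cooperate when the allocator hunts for
the next non-empty bin can be sketched standalone (not from the file; the loop
at the end is just a readable stand-in for compute_bit2idx()):

    #include <stdio.h>

    typedef unsigned int binmap_t;

    #define idx2bit(i)    ((binmap_t)(1) << (i))
    #define least_bit(x)  ((x) & -(x))
    #define left_bits(x)  ((x<<1) | -(x<<1))

    /* First marked bin strictly above idx, or -1 if none. */
    static int next_marked_bin(binmap_t map, unsigned idx)
    {
        binmap_t above = left_bits(idx2bit(idx)) & map;
        binmap_t bit;
        unsigned i = 0;
        if (above == 0)
            return -1;
        bit = least_bit(above);
        while ((bit >> i) != 1)
            ++i;
        return (int)i;
    }

    int main(void)
    {
        binmap_t map = idx2bit(3) | idx2bit(9) | idx2bit(20);
        printf("%d\n", next_marked_bin(map, 3)); /* prints 9 */
        return 0;
    }
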
> > +
> > +
> > +/* ----------------------- Runtime Check Support
> -------------------------
> > */
> > +
> > +/*
> > + For security, the main invariant is that malloc/free/etc never
> > + writes to a static address other than malloc_state, unless static
> > + malloc_state itself has been corrupted, which cannot occur via
> > + malloc (because of these checks). In essence this means that we
> > + believe all pointers, sizes, maps etc held in malloc_state, but
> > + check all of those linked or offsetted from other embedded data
> > + structures. These checks are interspersed with main code in a way
> > + that tends to minimize their run-time cost.
> > +
> > + When FOOTERS is defined, in addition to range checking, we also
> > +  verify footer fields of inuse chunks, which can be used to guarantee
> > + that the mstate controlling malloc/free is intact. This is a
> > + streamlined version of the approach described by William Robertson
> > + et al in "Run-time Detection of Heap-based Overflows" LISA'03
> > + http://www.usenix.org/events/lisa03/tech/robertson.html The footer
> > + of an inuse chunk holds the xor of its mstate and a random seed,
> > + that is checked upon calls to free() and realloc(). This is
> > +  (probabilistically) unguessable from outside the program, but can be
> > + computed by any code successfully malloc'ing any chunk, so does not
> > + itself provide protection against code that has already broken
> > + security through some other means. Unlike Robertson et al, we
> > + always dynamically check addresses of all offset chunks (previous,
> > + next, etc). This turns out to be cheaper than relying on hashes.
> > +*/
> > +
> > +#if !INSECURE
> > +/* Check if address a is at least as high as any from MORECORE or MMAP
> */
> > +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
> > +/* Check if address of next chunk n is higher than base chunk p */
> > +#define ok_next(p, n) ((char*)(p) < (char*)(n))
> > +/* Check if p has its cinuse bit on */
> > +#define ok_cinuse(p) cinuse(p)
> > +/* Check if p has its pinuse bit on */
> > +#define ok_pinuse(p) pinuse(p)
> > +
> > +#else /* !INSECURE */
> > +#define ok_address(M, a) (1)
> > +#define ok_next(b, n) (1)
> > +#define ok_cinuse(p) (1)
> > +#define ok_pinuse(p) (1)
> > +#endif /* !INSECURE */
> > +
> > +#if (FOOTERS && !INSECURE)
> > +/* Check if (alleged) mstate m has expected magic field */
> > +#define ok_magic(M) ((M)->magic == mparams.magic)
> > +#else /* (FOOTERS && !INSECURE) */
> > +#define ok_magic(M) (1)
> > +#endif /* (FOOTERS && !INSECURE) */
> > +
> > +
> > +/* In gcc, use __builtin_expect to minimize impact of checks */
> > +#if !INSECURE
> > +#if defined(__GNUC__) && __GNUC__ >= 3
> > +#define RTCHECK(e) __builtin_expect(e, 1)
> > +#else /* GNUC */
> > +#define RTCHECK(e) (e)
> > +#endif /* GNUC */
> > +#else /* !INSECURE */
> > +#define RTCHECK(e) (1)
> > +#endif /* !INSECURE */
> > +
> > +/* macros to set up inuse chunks with or without footers */
> > +
> > +#if !FOOTERS
> > +
> > +#define mark_inuse_foot(M,p,s)
> > +
> > +/* Set cinuse bit and pinuse bit of next chunk */
> > +#define set_inuse(M,p,s)\
> > + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > +
> > +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
> > +#define set_inuse_and_pinuse(M,p,s)\
> > + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > +
> > +/* Set size, cinuse and pinuse bit of this chunk */
> > +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
> > +
> > +#else /* FOOTERS */
> > +
> > +/* Set foot of inuse chunk to be xor of mstate and seed */
> > +#define mark_inuse_foot(M,p,s)\
> > + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^
> > mparams.magic))
> > +
> > +#define get_mstate_for(p)\
> > + ((mstate)(((mchunkptr)((char*)(p) +\
> > + (chunksize(p))))->prev_foot ^ mparams.magic))
> > +
> > +#define set_inuse(M,p,s)\
> > + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
> > + mark_inuse_foot(M,p,s))
> > +
> > +#define set_inuse_and_pinuse(M,p,s)\
> > + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
> > + mark_inuse_foot(M,p,s))
> > +
> > +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > + mark_inuse_foot(M, p, s))
> > +
> > +#endif /* !FOOTERS */
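
FOOTERS is 0 in this build, so the footer variants above compile out. When the
option is enabled, the footer of each inuse chunk stores
(size_t)mstate ^ mparams.magic; xoring with the magic again in
get_mstate_for() on free()/realloc() recovers the owning mstate, and a pointer
from a corrupted or foreign space then fails the ok_magic() check, as
described in the runtime-check comment earlier.
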
> > +
> > +/* ---------------------------- setting mparams
> --------------------------
> > */
> > +
> > +/* Initialize mparams */
> > +static int init_mparams(void) {
> > + if (mparams.page_size == 0) {
> > + size_t s;
> > +
> > + mparams.default_mflags = USE_LOCK_BIT;
> > +
> > +#if (FOOTERS && !INSECURE)
> > + {
> > +#if USE_DEV_RANDOM
> > + int fd;
> > + unsigned char buf[sizeof(size_t)];
> > + /* Try to use /dev/urandom, else fall back on using time */
> > + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
> > + read(fd, buf, sizeof(buf)) == sizeof(buf)) {
> > + s = *((size_t *) buf);
> > + close(fd);
> > + }
> > + else
> > +#endif /* USE_DEV_RANDOM */
> > + s = (size_t)(time(0) ^ (size_t)0x55555555U);
> > +
> > + s |= (size_t)8U; /* ensure nonzero */
> > + s &= ~(size_t)7U; /* improve chances of fault for bad values */
> > +
> > + }
> > +#else /* (FOOTERS && !INSECURE) */
> > + s = (size_t)0x58585858U;
> > +#endif /* (FOOTERS && !INSECURE) */
> > + ACQUIRE_MAGIC_INIT_LOCK();
> > + if (mparams.magic == 0) {
> > + mparams.magic = s;
> > + /* Set up lock for main malloc area */
> > + //INITIAL_LOCK(&gm->mutex);
> > + //gm->mflags = mparams.default_mflags;
> > + }
> > + RELEASE_MAGIC_INIT_LOCK();
> > +
> > +
> > + mparams.page_size = malloc_getpagesize;
> > + mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
> > + DEFAULT_GRANULARITY : mparams.page_size);
> > +
> > + /* Sanity-check configuration:
> > + size_t must be unsigned and as wide as pointer type.
> > + ints must be at least 4 bytes.
> > + alignment must be at least 8.
> > + Alignment, min chunk size, and page size must all be powers of 2.
> > + */
> > + if ((sizeof(size_t) != sizeof(char*)) ||
> > + (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
> > + (sizeof(int) < 4) ||
> > + (MALLOC_ALIGNMENT < (size_t)8U) ||
> > + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) !=
> 0) ||
> > + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0)
> ||
> > + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) !=
> 0) ||
> > + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) !=
> 0))
> > + ABORT(NULL);
> > + }
> > + return 0;
> > +}
> > +
> > +/* support for mallopt */
> > +static int change_mparam(int param_number, int value) {
> > + size_t val = (size_t)value;
> > + init_mparams();
> > + switch(param_number) {
> > + case M_GRANULARITY:
> > + if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
> > + mparams.granularity = val;
> > + return 1;
> > + }
> > + else
> > + return 0;
> > + default:
> > + return 0;
> > + }
> > +}
> > +
> > +#if DEBUG
> > +/* ------------------------- Debugging Support
> ---------------------------
> > */
> > +
> > +/* Check properties of any chunk, whether free, inuse, mmapped etc */
> > +static void do_check_any_chunk(mstate m, mchunkptr p) {
> > + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==
> > FENCEPOST_HEAD));
> > + assert(m->user_data, ok_address(m, p));
> > +}
> > +
> > +/* Check properties of top chunk */
> > +static void do_check_top_chunk(mstate m, mchunkptr p) {
> > + msegmentptr sp = segment_holding(m, (char*)p);
> > + size_t sz = chunksize(p);
> > + assert(m->user_data, sp != 0);
> > + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==
> > FENCEPOST_HEAD));
> > + assert(m->user_data, ok_address(m, p));
> > + assert(m->user_data, sz == m->topsize);
> > + assert(m->user_data, sz > 0);
> > + assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) -
> > TOP_FOOT_SIZE);
> > + assert(m->user_data, pinuse(p));
> > + assert(m->user_data, !next_pinuse(p));
> > +}
> > +
> > +/* Check properties of inuse chunks */
> > +static void do_check_inuse_chunk(mstate m, mchunkptr p) {
> > + do_check_any_chunk(m, p);
> > + assert(m->user_data, cinuse(p));
> > + assert(m->user_data, next_pinuse(p));
> > + /* If not pinuse, previous chunk has OK offset */
> > + assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
> > +}
> > +
> > +/* Check properties of free chunks */
> > +static void do_check_free_chunk(mstate m, mchunkptr p) {
> > + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > + mchunkptr next = chunk_plus_offset(p, sz);
> > + do_check_any_chunk(m, p);
> > + assert(m->user_data, !cinuse(p));
> > + assert(m->user_data, !next_pinuse(p));
> > + if (p != m->dv && p != m->top) {
> > + if (sz >= MIN_CHUNK_SIZE) {
> > + assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > + assert(m->user_data, is_aligned(chunk2mem(p)));
> > + assert(m->user_data, next->prev_foot == sz);
> > + assert(m->user_data, pinuse(p));
> > + assert(m->user_data, next == m->top || cinuse(next));
> > + assert(m->user_data, p->fd->bk == p);
> > + assert(m->user_data, p->bk->fd == p);
> > + }
> > + else /* markers are always of size SIZE_T_SIZE */
> > + assert(m->user_data, sz == SIZE_T_SIZE);
> > + }
> > +}
> > +
> > +/* Check properties of malloced chunks at the point they are malloced */
> > +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
> > + if (mem != 0) {
> > + mchunkptr p = mem2chunk(mem);
> > + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > + do_check_inuse_chunk(m, p);
> > + assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > + assert(m->user_data, sz >= MIN_CHUNK_SIZE);
> > + assert(m->user_data, sz >= s);
> > + /* size is less than MIN_CHUNK_SIZE more than request */
> > + assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
> > + }
> > +}
> > +
> > +/* Check a tree and its subtrees. */
> > +static void do_check_tree(mstate m, tchunkptr t) {
> > + tchunkptr head = 0;
> > + tchunkptr u = t;
> > + bindex_t tindex = t->index;
> > + size_t tsize = chunksize(t);
> > + bindex_t idx;
> > + compute_tree_index(tsize, idx);
> > + assert(m->user_data, tindex == idx);
> > + assert(m->user_data, tsize >= MIN_LARGE_SIZE);
> > + assert(m->user_data, tsize >= minsize_for_tree_index(idx));
> > + assert(m->user_data, (idx == NTREEBINS-1) || (tsize <
> > minsize_for_tree_index((idx+1))));
> > +
> > + do { /* traverse through chain of same-sized nodes */
> > + do_check_any_chunk(m, ((mchunkptr)u));
> > + assert(m->user_data, u->index == tindex);
> > + assert(m->user_data, chunksize(u) == tsize);
> > + assert(m->user_data, !cinuse(u));
> > + assert(m->user_data, !next_pinuse(u));
> > + assert(m->user_data, u->fd->bk == u);
> > + assert(m->user_data, u->bk->fd == u);
> > + if (u->parent == 0) {
> > + assert(m->user_data, u->child[0] == 0);
> > + assert(m->user_data, u->child[1] == 0);
> > + }
> > + else {
> > + assert(m->user_data, head == 0); /* only one node on chain has
> parent
> > */
> > + head = u;
> > + assert(m->user_data, u->parent != u);
> > + assert(m->user_data, u->parent->child[0] == u ||
> > + u->parent->child[1] == u ||
> > + *((tbinptr*)(u->parent)) == u);
> > + if (u->child[0] != 0) {
> > + assert(m->user_data, u->child[0]->parent == u);
> > + assert(m->user_data, u->child[0] != u);
> > + do_check_tree(m, u->child[0]);
> > + }
> > + if (u->child[1] != 0) {
> > + assert(m->user_data, u->child[1]->parent == u);
> > + assert(m->user_data, u->child[1] != u);
> > + do_check_tree(m, u->child[1]);
> > + }
> > + if (u->child[0] != 0 && u->child[1] != 0) {
> > + assert(m->user_data, chunksize(u->child[0]) <
> > chunksize(u->child[1]));
> > + }
> > + }
> > + u = u->fd;
> > + } while (u != t);
> > + assert(m->user_data, head != 0);
> > +}
> > +
> > +/* Check all the chunks in a treebin. */
> > +static void do_check_treebin(mstate m, bindex_t i) {
> > + tbinptr* tb = treebin_at(m, i);
> > + tchunkptr t = *tb;
> > + int empty = (m->treemap & (1U << i)) == 0;
> > + if (t == 0)
> > + assert(m->user_data, empty);
> > + if (!empty)
> > + do_check_tree(m, t);
> > +}
> > +
> > +/* Check all the chunks in a smallbin. */
> > +static void do_check_smallbin(mstate m, bindex_t i) {
> > + sbinptr b = smallbin_at(m, i);
> > + mchunkptr p = b->bk;
> > + unsigned int empty = (m->smallmap & (1U << i)) == 0;
> > + if (p == b)
> > + assert(m->user_data, empty);
> > + if (!empty) {
> > + for (; p != b; p = p->bk) {
> > + size_t size = chunksize(p);
> > + mchunkptr q;
> > + /* each chunk claims to be free */
> > + do_check_free_chunk(m, p);
> > + /* chunk belongs in bin */
> > + assert(m->user_data, small_index(size) == i);
> > + assert(m->user_data, p->bk == b || chunksize(p->bk) ==
> chunksize(p));
> > + /* chunk is followed by an inuse chunk */
> > + q = next_chunk(p);
> > + if (q->head != FENCEPOST_HEAD)
> > + do_check_inuse_chunk(m, q);
> > + }
> > + }
> > +}
> > +
> > +/* Find x in a bin. Used in other check functions. */
> > +static int bin_find(mstate m, mchunkptr x) {
> > + size_t size = chunksize(x);
> > + if (is_small(size)) {
> > + bindex_t sidx = small_index(size);
> > + sbinptr b = smallbin_at(m, sidx);
> > + if (smallmap_is_marked(m, sidx)) {
> > + mchunkptr p = b;
> > + do {
> > + if (p == x)
> > + return 1;
> > + } while ((p = p->fd) != b);
> > + }
> > + }
> > + else {
> > + bindex_t tidx;
> > + compute_tree_index(size, tidx);
> > + if (treemap_is_marked(m, tidx)) {
> > + tchunkptr t = *treebin_at(m, tidx);
> > + size_t sizebits = size << leftshift_for_tree_index(tidx);
> > + while (t != 0 && chunksize(t) != size) {
> > + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > + sizebits <<= 1;
> > + }
> > + if (t != 0) {
> > + tchunkptr u = t;
> > + do {
> > + if (u == (tchunkptr)x)
> > + return 1;
> > + } while ((u = u->fd) != t);
> > + }
> > + }
> > + }
> > + return 0;
> > +}
> > +
> > +/* Traverse each chunk and check it; return total */
> > +static size_t traverse_and_check(mstate m) {
> > + size_t sum = 0;
> > + if (is_initialized(m)) {
> > + msegmentptr s = &m->seg;
> > + sum += m->topsize + TOP_FOOT_SIZE;
> > + while (s != 0) {
> > + mchunkptr q = align_as_chunk(s->base);
> > + mchunkptr lastq = 0;
> > + assert(m->user_data, pinuse(q));
> > + while (segment_holds(s, q) &&
> > + q != m->top && q->head != FENCEPOST_HEAD) {
> > + sum += chunksize(q);
> > + if (cinuse(q)) {
> > + assert(m->user_data, !bin_find(m, q));
> > + do_check_inuse_chunk(m, q);
> > + }
> > + else {
> > + assert(m->user_data, q == m->dv || bin_find(m, q));
> > + assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2
> > consecutive free */
> > + do_check_free_chunk(m, q);
> > + }
> > + lastq = q;
> > + q = next_chunk(q);
> > + }
> > + s = s->next;
> > + }
> > + }
> > + return sum;
> > +}
> > +
> > +/* Check all properties of malloc_state. */
> > +static void do_check_malloc_state(mstate m) {
> > + bindex_t i;
> > + size_t total;
> > + /* check bins */
> > + for (i = 0; i < NSMALLBINS; ++i)
> > + do_check_smallbin(m, i);
> > + for (i = 0; i < NTREEBINS; ++i)
> > + do_check_treebin(m, i);
> > +
> > + if (m->dvsize != 0) { /* check dv chunk */
> > + do_check_any_chunk(m, m->dv);
> > + assert(m->user_data, m->dvsize == chunksize(m->dv));
> > + assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
> > + assert(m->user_data, bin_find(m, m->dv) == 0);
> > + }
> > +
> > + if (m->top != 0) { /* check top chunk */
> > + do_check_top_chunk(m, m->top);
> > + assert(m->user_data, m->topsize == chunksize(m->top));
> > + assert(m->user_data, m->topsize > 0);
> > + assert(m->user_data, bin_find(m, m->top) == 0);
> > + }
> > +
> > + total = traverse_and_check(m);
> > + assert(m->user_data, total <= m->footprint);
> > + assert(m->user_data, m->footprint <= m->max_footprint);
> > +}
> > +#endif /* DEBUG */
> > +
> > +/* ----------------------------- statistics
> ------------------------------
> > */
> > +
> > +#if !NO_MALLINFO
> > +static struct mallinfo internal_mallinfo(mstate m) {
> > + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
> > + if (!PREACTION(m)) {
> > + check_malloc_state(m);
> > + if (is_initialized(m)) {
> > + size_t nfree = SIZE_T_ONE; /* top always free */
> > + size_t mfree = m->topsize + TOP_FOOT_SIZE;
> > + size_t sum = mfree;
> > + msegmentptr s = &m->seg;
> > + while (s != 0) {
> > + mchunkptr q = align_as_chunk(s->base);
> > + while (segment_holds(s, q) &&
> > + q != m->top && q->head != FENCEPOST_HEAD) {
> > + size_t sz = chunksize(q);
> > + sum += sz;
> > + if (!cinuse(q)) {
> > + mfree += sz;
> > + ++nfree;
> > + }
> > + q = next_chunk(q);
> > + }
> > + s = s->next;
> > + }
> > +
> > + nm.arena = sum;
> > + nm.ordblks = nfree;
> > + nm.hblkhd = m->footprint - sum;
> > + nm.usmblks = m->max_footprint;
> > + nm.uordblks = m->footprint - mfree;
> > + nm.fordblks = mfree;
> > + nm.keepcost = m->topsize;
> > + }
> > +
> > + POSTACTION(m);
> > + }
> > + return nm;
> > +}
> > +#endif /* !NO_MALLINFO */
> > +
> > +static void internal_malloc_stats(mstate m) {
> > + if (!PREACTION(m)) {
> > + size_t maxfp = 0;
> > + size_t fp = 0;
> > + size_t used = 0;
> > + check_malloc_state(m);
> > + if (is_initialized(m)) {
> > + msegmentptr s = &m->seg;
> > + maxfp = m->max_footprint;
> > + fp = m->footprint;
> > + used = fp - (m->topsize + TOP_FOOT_SIZE);
> > +
> > + while (s != 0) {
> > + mchunkptr q = align_as_chunk(s->base);
> > + while (segment_holds(s, q) &&
> > + q != m->top && q->head != FENCEPOST_HEAD) {
> > + if (!cinuse(q))
> > + used -= chunksize(q);
> > + q = next_chunk(q);
> > + }
> > + s = s->next;
> > + }
> > + }
> > +
> > + PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned
> > long)(maxfp)));
> > + PRINT((m->user_data, "system bytes = %10lu\n", (unsigned
> > long)(fp)));
> > + PRINT((m->user_data, "in use bytes = %10lu\n", (unsigned
> > long)(used)));
> > +
> > + POSTACTION(m);
> > + }
> > +}
> > +
> > +/* ----------------------- Operations on smallbins
> -----------------------
> > */
> > +
> > +/*
> > + Various forms of linking and unlinking are defined as macros. Even
> > + the ones for trees, which are very long but have very short typical
> > + paths. This is ugly but reduces reliance on inlining support of
> > + compilers.
> > +*/
> > +
> > +/* Link a free chunk into a smallbin */
> > +#define insert_small_chunk(M, P, S) {\
> > + bindex_t I = small_index(S);\
> > + mchunkptr B = smallbin_at(M, I);\
> > + mchunkptr F = B;\
> > + assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
> > + if (!smallmap_is_marked(M, I))\
> > + mark_smallmap(M, I);\
> > + else if (RTCHECK(ok_address(M, B->fd)))\
> > + F = B->fd;\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + B->fd = P;\
> > + F->bk = P;\
> > + P->fd = F;\
> > + P->bk = B;\
> > +}
> > +
> > +/* Unlink a chunk from a smallbin */
> > +#define unlink_small_chunk(M, P, S) {\
> > + mchunkptr F = P->fd;\
> > + mchunkptr B = P->bk;\
> > + bindex_t I = small_index(S);\
> > + assert((M)->user_data, P != B);\
> > + assert((M)->user_data, P != F);\
> > + assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > + if (F == B)\
> > + clear_smallmap(M, I);\
> > + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
> > + (B == smallbin_at(M,I) || ok_address(M, B)))) {\
> > + F->bk = B;\
> > + B->fd = F;\
> > + }\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > +}
> > +
> > +/* Unlink the first chunk from a smallbin */
> > +#define unlink_first_small_chunk(M, B, P, I) {\
> > + mchunkptr F = P->fd;\
> > + assert((M)->user_data, P != B);\
> > + assert((M)->user_data, P != F);\
> > + assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > + if (B == F)\
> > + clear_smallmap(M, I);\
> > + else if (RTCHECK(ok_address(M, F))) {\
> > + B->fd = F;\
> > + F->bk = B;\
> > + }\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > +}
> > +
> > +/* Replace dv node, binning the old one */
> > +/* Used only when dvsize known to be small */
> > +#define replace_dv(M, P, S) {\
> > + size_t DVS = M->dvsize;\
> > + if (DVS != 0) {\
> > + mchunkptr DV = M->dv;\
> > + assert((M)->user_data, is_small(DVS));\
> > + insert_small_chunk(M, DV, DVS);\
> > + }\
> > + M->dvsize = S;\
> > + M->dv = P;\
> > +}
> > +
> > +
> > +/* ------------------------- Operations on trees
> -------------------------
> > */
> > +
> > +/* Insert chunk into tree */
> > +#define insert_large_chunk(M, X, S) {\
> > + tbinptr* H;\
> > + bindex_t I;\
> > + compute_tree_index(S, I);\
> > + H = treebin_at(M, I);\
> > + X->index = I;\
> > + X->child[0] = X->child[1] = 0;\
> > + if (!treemap_is_marked(M, I)) {\
> > + mark_treemap(M, I);\
> > + *H = X;\
> > + X->parent = (tchunkptr)H;\
> > + X->fd = X->bk = X;\
> > + }\
> > + else {\
> > + tchunkptr T = *H;\
> > + size_t K = S << leftshift_for_tree_index(I);\
> > + for (;;) {\
> > + if (chunksize(T) != S) {\
> > + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) &
> 1]);\
> > + K <<= 1;\
> > + if (*C != 0)\
> > + T = *C;\
> > + else if (RTCHECK(ok_address(M, C))) {\
> > + *C = X;\
> > + X->parent = T;\
> > + X->fd = X->bk = X;\
> > + break;\
> > + }\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + break;\
> > + }\
> > + }\
> > + else {\
> > + tchunkptr F = T->fd;\
> > + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
> > + T->fd = F->bk = X;\
> > + X->fd = F;\
> > + X->bk = T;\
> > + X->parent = 0;\
> > + break;\
> > + }\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + break;\
> > + }\
> > + }\
> > + }\
> > + }\
> > +}
> > +
> > +/*
> > + Unlink steps:
> > +
> > + 1. If x is a chained node, unlink it from its same-sized fd/bk links
> > + and choose its bk node as its replacement.
> > + 2. If x was the last node of its size, but not a leaf node, it must
> > + be replaced with a leaf node (not merely one with an open left or
> > + right), to make sure that lefts and rights of descendents
> > + correspond properly to bit masks. We use the rightmost descendent
> > + of x. We could use any other leaf, but this is easy to locate and
> > + tends to counteract removal of leftmosts elsewhere, and so keeps
> > + paths shorter than minimally guaranteed. This doesn't loop much
> > + because on average a node in a tree is near the bottom.
> > + 3. If x is the base of a chain (i.e., has parent links) relink
> > + x's parent and children to x's replacement (or null if none).
> > +*/
> > +
> > +#define unlink_large_chunk(M, X) {\
> > + tchunkptr XP = X->parent;\
> > + tchunkptr R;\
> > + if (X->bk != X) {\
> > + tchunkptr F = X->fd;\
> > + R = X->bk;\
> > + if (RTCHECK(ok_address(M, F))) {\
> > + F->bk = R;\
> > + R->fd = F;\
> > + }\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + }\
> > + else {\
> > + tchunkptr* RP;\
> > + if (((R = *(RP = &(X->child[1]))) != 0) ||\
> > + ((R = *(RP = &(X->child[0]))) != 0)) {\
> > + tchunkptr* CP;\
> > + while ((*(CP = &(R->child[1])) != 0) ||\
> > + (*(CP = &(R->child[0])) != 0)) {\
> > + R = *(RP = CP);\
> > + }\
> > + if (RTCHECK(ok_address(M, RP)))\
> > + *RP = 0;\
> > + else {\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + }\
> > + }\
> > + if (XP != 0) {\
> > + tbinptr* H = treebin_at(M, X->index);\
> > + if (X == *H) {\
> > + if ((*H = R) == 0) \
> > + clear_treemap(M, X->index);\
> > + }\
> > + else if (RTCHECK(ok_address(M, XP))) {\
> > + if (XP->child[0] == X) \
> > + XP->child[0] = R;\
> > + else \
> > + XP->child[1] = R;\
> > + }\
> > + else\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + if (R != 0) {\
> > + if (RTCHECK(ok_address(M, R))) {\
> > + tchunkptr C0, C1;\
> > + R->parent = XP;\
> > + if ((C0 = X->child[0]) != 0) {\
> > + if (RTCHECK(ok_address(M, C0))) {\
> > + R->child[0] = C0;\
> > + C0->parent = R;\
> > + }\
> > + else\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + if ((C1 = X->child[1]) != 0) {\
> > + if (RTCHECK(ok_address(M, C1))) {\
> > + R->child[1] = C1;\
> > + C1->parent = R;\
> > + }\
> > + else\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + }\
> > + else\
> > + CORRUPTION_ERROR_ACTION(M);\
> > + }\
> > + }\
> > +}
> > +
> > +/* Relays to large vs small bin operations */
> > +
> > +#define insert_chunk(M, P, S)\
> > + if (is_small(S)) insert_small_chunk(M, P, S)\
> > + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
> > +
> > +#define unlink_chunk(M, P, S)\
> > + if (is_small(S)) unlink_small_chunk(M, P, S)\
> > + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
> > +
> > +
> > +/* Relays to internal calls to malloc/free from realloc, memalign etc */
> > +
> > +#define internal_malloc(m, b) mspace_malloc(m, b)
> > +#define internal_free(m, mem) mspace_free(m,mem);
> > +
> > +
> > +/* -------------------------- mspace management
> --------------------------
> > */
> > +
> > +/* Initialize top chunk and its size */
> > +static void init_top(mstate m, mchunkptr p, size_t psize) {
> > + /* Ensure alignment */
> > + size_t offset = align_offset(chunk2mem(p));
> > + p = (mchunkptr)((char*)p + offset);
> > + psize -= offset;
> > +
> > + m->top = p;
> > + m->topsize = psize;
> > + p->head = psize | PINUSE_BIT;
> > + /* set size of fake trailing chunk holding overhead space only once */
> > + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
> > +}
> > +
> > +/* Initialize bins for a new mstate that is otherwise zeroed out */
> > +static void init_bins(mstate m) {
> > + /* Establish circular links for smallbins */
> > + bindex_t i;
> > + for (i = 0; i < NSMALLBINS; ++i) {
> > + sbinptr bin = smallbin_at(m,i);
> > + bin->fd = bin->bk = bin;
> > + }
> > +}
> > +
> > +#if PROCEED_ON_ERROR
> > +
> > +/* default corruption action */
> > +static void reset_on_error(mstate m) {
> > + int i;
> > + ++malloc_corruption_error_count;
> > + /* Reinitialize fields to forget about all memory */
> > + m->smallbins = m->treebins = 0;
> > + m->dvsize = m->topsize = 0;
> > + m->seg.base = 0;
> > + m->seg.size = 0;
> > + m->seg.next = 0;
> > + m->top = m->dv = 0;
> > + for (i = 0; i < NTREEBINS; ++i)
> > + *treebin_at(m, i) = 0;
> > + init_bins(m);
> > +}
> > +#endif /* PROCEED_ON_ERROR */
> > +
> > +/* Allocate chunk and prepend remainder with chunk in successor base. */
> > +static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
> > + size_t nb) {
> > + mchunkptr p = align_as_chunk(newbase);
> > + mchunkptr oldfirst = align_as_chunk(oldbase);
> > + size_t psize = (char*)oldfirst - (char*)p;
> > + mchunkptr q = chunk_plus_offset(p, nb);
> > + size_t qsize = psize - nb;
> > + set_size_and_pinuse_of_inuse_chunk(m, p, nb);
> > +
> > + assert(m->user_data, (char*)oldfirst > (char*)q);
> > + assert(m->user_data, pinuse(oldfirst));
> > + assert(m->user_data, qsize >= MIN_CHUNK_SIZE);
> > +
> > + /* consolidate remainder with first chunk of old base */
> > + if (oldfirst == m->top) {
> > + size_t tsize = m->topsize += qsize;
> > + m->top = q;
> > + q->head = tsize | PINUSE_BIT;
> > + check_top_chunk(m, q);
> > + }
> > + else if (oldfirst == m->dv) {
> > + size_t dsize = m->dvsize += qsize;
> > + m->dv = q;
> > + set_size_and_pinuse_of_free_chunk(q, dsize);
> > + }
> > + else {
> > + if (!cinuse(oldfirst)) {
> > + size_t nsize = chunksize(oldfirst);
> > + unlink_chunk(m, oldfirst, nsize);
> > + oldfirst = chunk_plus_offset(oldfirst, nsize);
> > + qsize += nsize;
> > + }
> > + set_free_with_pinuse(q, qsize, oldfirst);
> > + insert_chunk(m, q, qsize);
> > + check_free_chunk(m, q);
> > + }
> > +
> > + check_malloced_chunk(m, chunk2mem(p), nb);
> > + return chunk2mem(p);
> > +}
> > +
> > +/* -------------------------- System allocation
> --------------------------
> > */
> > +
> > +/* Get memory from system using MORECORE or MMAP */
> > +static void* sys_alloc(mstate m, size_t nb) {
> > + MALLOC_FAILURE_ACTION;
> > + return 0;
> > +}
> > +
> > +/* ---------------------------- malloc support
> ---------------------------
> > */
> > +
> > +/* allocate a large request from the best fitting chunk in a treebin */
> > +static void* tmalloc_large(mstate m, size_t nb) {
> > + tchunkptr v = 0;
> > + size_t rsize = -nb; /* Unsigned negation */
> > + tchunkptr t;
> > + bindex_t idx;
> > + compute_tree_index(nb, idx);
> > +
> > + if ((t = *treebin_at(m, idx)) != 0) {
> > + /* Traverse tree for this bin looking for node with size == nb */
> > + size_t sizebits = nb << leftshift_for_tree_index(idx);
> > + tchunkptr rst = 0; /* The deepest untaken right subtree */
> > + for (;;) {
> > + tchunkptr rt;
> > + size_t trem = chunksize(t) - nb;
> > + if (trem < rsize) {
> > + v = t;
> > + if ((rsize = trem) == 0)
> > + break;
> > + }
> > + rt = t->child[1];
> > + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > + if (rt != 0 && rt != t)
> > + rst = rt;
> > + if (t == 0) {
> > + t = rst; /* set t to least subtree holding sizes > nb */
> > + break;
> > + }
> > + sizebits <<= 1;
> > + }
> > + }
> > +
> > + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
> > + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
> > + if (leftbits != 0) {
> > + bindex_t i;
> > + binmap_t leastbit = least_bit(leftbits);
> > + compute_bit2idx(leastbit, i);
> > + t = *treebin_at(m, i);
> > + }
> > + }
> > +
> > + while (t != 0) { /* find smallest of tree or subtree */
> > + size_t trem = chunksize(t) - nb;
> > + if (trem < rsize) {
> > + rsize = trem;
> > + v = t;
> > + }
> > + t = leftmost_child(t);
> > + }
> > +
> > + /* If dv is a better fit, return 0 so malloc will use it */
> > + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
> > + if (RTCHECK(ok_address(m, v))) { /* split */
> > + mchunkptr r = chunk_plus_offset(v, nb);
> > + assert(m->user_data, chunksize(v) == rsize + nb);
> > + if (RTCHECK(ok_next(v, r))) {
> > + unlink_large_chunk(m, v);
> > + if (rsize < MIN_CHUNK_SIZE)
> > + set_inuse_and_pinuse(m, v, (rsize + nb));
> > + else {
> > + set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > + set_size_and_pinuse_of_free_chunk(r, rsize);
> > + insert_chunk(m, r, rsize);
> > + }
> > + return chunk2mem(v);
> > + }
> > + }
> > + CORRUPTION_ERROR_ACTION(m);
> > + }
> > + return 0;
> > +}
> > +
> > +/* allocate a small request from the best fitting chunk in a treebin */
> > +static void* tmalloc_small(mstate m, size_t nb) {
> > + tchunkptr t, v;
> > + size_t rsize;
> > + bindex_t i;
> > + binmap_t leastbit = least_bit(m->treemap);
> > + compute_bit2idx(leastbit, i);
> > +
> > + v = t = *treebin_at(m, i);
> > + rsize = chunksize(t) - nb;
> > +
> > + while ((t = leftmost_child(t)) != 0) {
> > + size_t trem = chunksize(t) - nb;
> > + if (trem < rsize) {
> > + rsize = trem;
> > + v = t;
> > + }
> > + }
> > +
> > + if (RTCHECK(ok_address(m, v))) {
> > + mchunkptr r = chunk_plus_offset(v, nb);
> > + assert(m->user_data, chunksize(v) == rsize + nb);
> > + if (RTCHECK(ok_next(v, r))) {
> > + unlink_large_chunk(m, v);
> > + if (rsize < MIN_CHUNK_SIZE)
> > + set_inuse_and_pinuse(m, v, (rsize + nb));
> > + else {
> > + set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > + set_size_and_pinuse_of_free_chunk(r, rsize);
> > + replace_dv(m, r, rsize);
> > + }
> > + return chunk2mem(v);
> > + }
> > + }
> > +
> > + CORRUPTION_ERROR_ACTION(m);
> > + return 0;
> > +}
> > +
> > +/* --------------------------- realloc support
> ---------------------------
> > */
> > +
> > +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
> > + if (bytes >= MAX_REQUEST) {
> > + MALLOC_FAILURE_ACTION;
> > + return 0;
> > + }
> > + if (!PREACTION(m)) {
> > + mchunkptr oldp = mem2chunk(oldmem);
> > + size_t oldsize = chunksize(oldp);
> > + mchunkptr next = chunk_plus_offset(oldp, oldsize);
> > + mchunkptr newp = 0;
> > + void* extra = 0;
> > +
> > + /* Try to either shrink or extend into top. Else malloc-copy-free */
> > +
> > + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
> > + ok_next(oldp, next) && ok_pinuse(next))) {
> > + size_t nb = request2size(bytes);
> > + if (oldsize >= nb) { /* already big enough */
> > + size_t rsize = oldsize - nb;
> > + newp = oldp;
> > + if (rsize >= MIN_CHUNK_SIZE) {
> > + mchunkptr remainder = chunk_plus_offset(newp, nb);
> > + set_inuse(m, newp, nb);
> > + set_inuse(m, remainder, rsize);
> > + extra = chunk2mem(remainder);
> > + }
> > + }
> > + else if (next == m->top && oldsize + m->topsize > nb) {
> > + /* Expand into top */
> > + size_t newsize = oldsize + m->topsize;
> > + size_t newtopsize = newsize - nb;
> > + mchunkptr newtop = chunk_plus_offset(oldp, nb);
> > + set_inuse(m, oldp, nb);
> > + newtop->head = newtopsize |PINUSE_BIT;
> > + m->top = newtop;
> > + m->topsize = newtopsize;
> > + newp = oldp;
> > + }
> > + }
> > + else {
> > + USAGE_ERROR_ACTION(m, oldmem);
> > + POSTACTION(m);
> > + return 0;
> > + }
> > +
> > + POSTACTION(m);
> > +
> > + if (newp != 0) {
> > + if (extra != 0) {
> > + internal_free(m, extra);
> > + }
> > + check_inuse_chunk(m, newp);
> > + return chunk2mem(newp);
> > + }
> > + else {
> > + void* newmem = internal_malloc(m, bytes);
> > + if (newmem != 0) {
> > + size_t oc = oldsize - overhead_for(oldp);
> > + MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
> > + internal_free(m, oldmem);
> > + }
> > + return newmem;
> > + }
> > + }
> > + return 0;
> > +}
> > +
> > +/* --------------------------- memalign support
> --------------------------
> > */
> > +
> > +static void* internal_memalign(mstate m, size_t alignment, size_t
> bytes) {
> > + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
> > + return internal_malloc(m, bytes);
> > + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk
> size
> > */
> > + alignment = MIN_CHUNK_SIZE;
> > + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of
> 2 */
> > + size_t a = MALLOC_ALIGNMENT << 1;
> > + while (a < alignment) a <<= 1;
> > + alignment = a;
> > + }
> > +
> > + if (bytes >= MAX_REQUEST - alignment) {
> > + if (m != 0) { /* Test isn't needed but avoids compiler warning */
> > + MALLOC_FAILURE_ACTION;
> > + }
> > + }
> > + else {
> > + size_t nb = request2size(bytes);
> > + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
> > + char* mem = (char*)internal_malloc(m, req);
> > + if (mem != 0) {
> > + void* leader = 0;
> > + void* trailer = 0;
> > + mchunkptr p = mem2chunk(mem);
> > +
> > + if (PREACTION(m)) return 0;
> > + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
> > + /*
> > + Find an aligned spot inside chunk. Since we need to give
> > + back leading space in a chunk of at least MIN_CHUNK_SIZE, if
> > + the first calculation places us at a spot with less than
> > + MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
> > + We've allocated enough total room so that this is always
> > + possible.
> > + */
> > + char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
> > + alignment -
> > + SIZE_T_ONE)) &
> > + -alignment));
> > + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
> > + br : br+alignment;
> > + mchunkptr newp = (mchunkptr)pos;
> > + size_t leadsize = pos - (char*)(p);
> > + size_t newsize = chunksize(p) - leadsize;
> > +
> > + /* Otherwise, give back leader, use the rest */
> > + set_inuse(m, newp, newsize);
> > + set_inuse(m, p, leadsize);
> > + leader = chunk2mem(p);
> > +
> > + p = newp;
> > + }
> > +
> > + assert(m->user_data, chunksize(p) >= nb);
> > + assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
> > + check_inuse_chunk(m, p);
> > + POSTACTION(m);
> > + if (leader != 0) {
> > + internal_free(m, leader);
> > + }
> > + if (trailer != 0) {
> > + internal_free(m, trailer);
> > + }
> > + return chunk2mem(p);
> > + }
> > + }
> > + return 0;
> > +}
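
To make the power-of-two rounding at the top of internal_memalign() concrete:
with MALLOC_ALIGNMENT = 8, a requested alignment of 24 fails the
(alignment & (alignment-1)) test, so a starts at 16 and doubles to 32, and the
call proceeds as if 32-byte alignment had been asked for. The over-sized
request of nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD then guarantees an
aligned spot with at least MIN_CHUNK_SIZE of leading slack inside the returned
chunk, which is what the comment in the misalignment branch relies on.
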
> > +
> > +/* ----------------------------- user mspaces
> ----------------------------
> > */
> > +
> > +static mstate init_user_mstate(char* tbase, size_t tsize, void
> *user_data) {
> > + size_t msize = pad_request(sizeof(struct malloc_state));
> > + mchunkptr mn;
> > + mchunkptr msp = align_as_chunk(tbase);
> > + mstate m = (mstate)(chunk2mem(msp));
> > + MEMCLEAR(m, msize);
> > + INITIAL_LOCK(&m->mutex);
> > + msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
> > + m->seg.base = m->least_addr = tbase;
> > + m->seg.size = m->footprint = m->max_footprint = tsize;
> > + m->magic = mparams.magic;
> > + m->mflags = mparams.default_mflags;
> > + m->user_data = user_data;
> > + init_bins(m);
> > + mn = next_chunk(mem2chunk(m));
> > + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -
> TOP_FOOT_SIZE);
> > + check_top_chunk(m, m->top);
> > + return m;
> > +}
> > +
> > +mspace create_mspace_with_base(void* base, size_t capacity, int locked,
> void
> > *user_data) {
> > + mstate m = 0;
> > + size_t msize = pad_request(sizeof(struct malloc_state));
> > + init_mparams(); /* Ensure pagesize etc initialized */
> > +
> > + if (capacity > msize + TOP_FOOT_SIZE &&
> > + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size))
> {
> > + m = init_user_mstate((char*)base, capacity, user_data);
> > + set_lock(m, locked);
> > + }
> > + return (mspace)m;
> > +}
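
A hypothetical usage sketch (the demo() wrapper and its arguments are invented
for illustration; only the mspace_* calls come from this file) of handing the
allocator a caller-owned region, for example a block of device memory:

    #include <stddef.h>
    #include "mspace.h"

    static void demo(void *region, size_t region_size)
    {
        /* No locking (0), no per-space user data (0). */
        mspace ms = create_mspace_with_base(region, region_size, 0, 0);
        if (ms != 0) {
            void *p = mspace_malloc(ms, 256);
            if (p != 0)
                mspace_free(ms, p);
        }
    }
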
> > +
> > +/*
> > + mspace versions of routines are near-clones of the global
> > + versions. This is not so nice but better than the alternatives.
> > +*/
> > +
> > +
> > +void* mspace_malloc(mspace msp, size_t bytes) {
> > + mstate ms = (mstate)msp;
> > + if (!ok_magic(ms)) {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + return 0;
> > + }
> > + if (!PREACTION(ms)) {
> > + void* mem;
> > + size_t nb;
> > + if (bytes <= MAX_SMALL_REQUEST) {
> > + bindex_t idx;
> > + binmap_t smallbits;
> > + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
> > + idx = small_index(nb);
> > + smallbits = ms->smallmap >> idx;
> > +
> > + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a
> smallbin. */
> > + mchunkptr b, p;
> > + idx += ~smallbits & 1; /* Uses next bin if idx empty */
> > + b = smallbin_at(ms, idx);
> > + p = b->fd;
> > + assert(ms->user_data, chunksize(p) == small_index2size(idx));
> > + unlink_first_small_chunk(ms, b, p, idx);
> > + set_inuse_and_pinuse(ms, p, small_index2size(idx));
> > + mem = chunk2mem(p);
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > +
> > + else if (nb > ms->dvsize) {
> > + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
> > + mchunkptr b, p, r;
> > + size_t rsize;
> > + bindex_t i;
> > + binmap_t leftbits = (smallbits << idx) &
> left_bits(idx2bit(idx));
> > + binmap_t leastbit = least_bit(leftbits);
> > + compute_bit2idx(leastbit, i);
> > + b = smallbin_at(ms, i);
> > + p = b->fd;
> > + assert(ms->user_data, chunksize(p) == small_index2size(i));
> > + unlink_first_small_chunk(ms, b, p, i);
> > + rsize = small_index2size(i) - nb;
> > + /* Fit here cannot be remainderless if 4byte sizes */
> > + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
> > + set_inuse_and_pinuse(ms, p, small_index2size(i));
> > + else {
> > + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > + r = chunk_plus_offset(p, nb);
> > + set_size_and_pinuse_of_free_chunk(r, rsize);
> > + replace_dv(ms, r, rsize);
> > + }
> > + mem = chunk2mem(p);
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > +
> > + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) !=
> 0) {
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > + }
> > + }
> > + else if (bytes >= MAX_REQUEST)
> > + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys
> alloc)
> > */
> > + else {
> > + nb = pad_request(bytes);
> > + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > + }
> > +
> > + if (nb <= ms->dvsize) {
> > + size_t rsize = ms->dvsize - nb;
> > + mchunkptr p = ms->dv;
> > + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
> > + mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
> > + ms->dvsize = rsize;
> > + set_size_and_pinuse_of_free_chunk(r, rsize);
> > + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > + }
> > + else { /* exhaust dv */
> > + size_t dvs = ms->dvsize;
> > + ms->dvsize = 0;
> > + ms->dv = 0;
> > + set_inuse_and_pinuse(ms, p, dvs);
> > + }
> > + mem = chunk2mem(p);
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > +
> > + else if (nb < ms->topsize) { /* Split top */
> > + size_t rsize = ms->topsize -= nb;
> > + mchunkptr p = ms->top;
> > + mchunkptr r = ms->top = chunk_plus_offset(p, nb);
> > + r->head = rsize | PINUSE_BIT;
> > + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > + mem = chunk2mem(p);
> > + check_top_chunk(ms, ms->top);
> > + check_malloced_chunk(ms, mem, nb);
> > + goto postaction;
> > + }
> > +
> > + mem = sys_alloc(ms, nb);
> > +
> > + postaction:
> > + POSTACTION(ms);
> > + return mem;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +void mspace_free(mspace msp, void* mem) {
> > + if (mem != 0) {
> > + mchunkptr p = mem2chunk(mem);
> > +#if FOOTERS
> > + mstate fm = get_mstate_for(p);
> > +#else /* FOOTERS */
> > + mstate fm = (mstate)msp;
> > +#endif /* FOOTERS */
> > + if (!ok_magic(fm)) {
> > + USAGE_ERROR_ACTION(fm, p);
> > + return;
> > + }
> > + if (!PREACTION(fm)) {
> > + check_inuse_chunk(fm, p);
> > + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
> > + size_t psize = chunksize(p);
> > + mchunkptr next = chunk_plus_offset(p, psize);
> > + if (!pinuse(p)) {
> > + size_t prevsize = p->prev_foot;
> > +
> > + mchunkptr prev = chunk_minus_offset(p, prevsize);
> > + psize += prevsize;
> > + p = prev;
> > + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
> > + if (p != fm->dv) {
> > + unlink_chunk(fm, p, prevsize);
> > + }
> > + else if ((next->head & INUSE_BITS) == INUSE_BITS) {
> > + fm->dvsize = psize;
> > + set_free_with_pinuse(p, psize, next);
> > + goto postaction;
> > + }
> > + }
> > + else
> > + goto erroraction;
> > + }
> > +
> > + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
> > + if (!cinuse(next)) { /* consolidate forward */
> > + if (next == fm->top) {
> > + size_t tsize = fm->topsize += psize;
> > + fm->top = p;
> > + p->head = tsize | PINUSE_BIT;
> > + if (p == fm->dv) {
> > + fm->dv = 0;
> > + fm->dvsize = 0;
> > + }
> > + goto postaction;
> > + }
> > + else if (next == fm->dv) {
> > + size_t dsize = fm->dvsize += psize;
> > + fm->dv = p;
> > + set_size_and_pinuse_of_free_chunk(p, dsize);
> > + goto postaction;
> > + }
> > + else {
> > + size_t nsize = chunksize(next);
> > + psize += nsize;
> > + unlink_chunk(fm, next, nsize);
> > + set_size_and_pinuse_of_free_chunk(p, psize);
> > + if (p == fm->dv) {
> > + fm->dvsize = psize;
> > + goto postaction;
> > + }
> > + }
> > + }
> > + else
> > + set_free_with_pinuse(p, psize, next);
> > + insert_chunk(fm, p, psize);
> > + check_free_chunk(fm, p);
> > + goto postaction;
> > + }
> > + }
> > + erroraction:
> > + USAGE_ERROR_ACTION(fm, p);
> > + postaction:
> > + POSTACTION(fm);
> > + }
> > + }
> > +}
> > +
> > +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
> > + void* mem;
> > + size_t req = 0;
> > + mstate ms = (mstate)msp;
> > + if (!ok_magic(ms)) {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + return 0;
> > + }
> > + if (n_elements != 0) {
> > + req = n_elements * elem_size;
> > + if (((n_elements | elem_size) & ~(size_t)0xffff) &&
> > + (req / n_elements != elem_size))
> > + req = MAX_SIZE_T; /* force downstream failure on overflow */
> > + }
> > + mem = internal_malloc(ms, req);
> > + if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
> > + MEMCLEAR(mem, req);
> > + return mem;
> > +}
> > +
> > +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
> > + if (oldmem == 0)
> > + return mspace_malloc(msp, bytes);
> > +#ifdef REALLOC_ZERO_BYTES_FREES
> > + if (bytes == 0) {
> > + mspace_free(msp, oldmem);
> > + return 0;
> > + }
> > +#endif /* REALLOC_ZERO_BYTES_FREES */
> > + else {
> > +#if FOOTERS
> > + mchunkptr p = mem2chunk(oldmem);
> > + mstate ms = get_mstate_for(p);
> > +#else /* FOOTERS */
> > + mstate ms = (mstate)msp;
> > +#endif /* FOOTERS */
> > + if (!ok_magic(ms)) {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + return 0;
> > + }
> > + return internal_realloc(ms, oldmem, bytes);
> > + }
> > +}
> > +
> > +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
> > + mstate ms = (mstate)msp;
> > + if (!ok_magic(ms)) {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + return 0;
> > + }
> > + return internal_memalign(ms, alignment, bytes);
> > +}
> > +
> > +void mspace_malloc_stats(mspace msp) {
> > + mstate ms = (mstate)msp;
> > + if (ok_magic(ms)) {
> > + internal_malloc_stats(ms);
> > + }
> > + else {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + }
> > +}
> > +
> > +size_t mspace_footprint(mspace msp) {
> > + size_t result;
> > + mstate ms = (mstate)msp;
> > + if (ok_magic(ms)) {
> > + result = ms->footprint;
> > + } else {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + }
> > + return result;
> > +}
> > +
> > +
> > +size_t mspace_max_footprint(mspace msp) {
> > + size_t result;
> > + mstate ms = (mstate)msp;
> > + if (ok_magic(ms)) {
> > + result = ms->max_footprint;
> > + } else {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + }
> > + return result;
> > +}
> > +
> > +
> > +#if !NO_MALLINFO
> > +struct mallinfo mspace_mallinfo(mspace msp) {
> > + mstate ms = (mstate)msp;
> > + if (!ok_magic(ms)) {
> > + USAGE_ERROR_ACTION(ms,ms);
> > + }
> > + return internal_mallinfo(ms);
> > +}
> > +#endif /* NO_MALLINFO */
> > +
> > +int mspace_mallopt(int param_number, int value) {
> > + return change_mparam(param_number, value);
> > +}
> > +
> > diff --git a/qxldod/qxldod.vcxproj b/qxldod/qxldod.vcxproj
> > index 749ba1b..f4d279b 100755
> > --- a/qxldod/qxldod.vcxproj
> > +++ b/qxldod/qxldod.vcxproj
> > @@ -279,7 +279,7 @@
> > <ItemGroup>
> > <ClCompile Include="BaseObject.cpp" />
> > <ClCompile Include="driver.cpp" />
> > - <ClCompile Include="mspace.c" />
> > + <ClCompile Include="mspace.cpp" />
> > <ClCompile Include="QxlDod.cpp" />
> > </ItemGroup>
> > <ItemGroup>
> > diff --git a/qxldod/qxldod.vcxproj.filters b/qxldod/qxldod.vcxproj.filters
> > index bb9daa9..b0a8103 100755
> > --- a/qxldod/qxldod.vcxproj.filters
> > +++ b/qxldod/qxldod.vcxproj.filters
> > @@ -42,7 +42,7 @@
> > <ClCompile Include="QxlDod.cpp">
> > <Filter>Source Files</Filter>
> > </ClCompile>
> > - <ClCompile Include="mspace.c">
> > + <ClCompile Include="mspace.cpp">
> > <Filter>Source Files</Filter>
> > </ClCompile>
> > </ItemGroup>
>
>
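
For reference, below is a minimal sketch of how the mspace interface touched by
this file is typically driven once the translation unit builds as C++. It is
only an illustration, not part of the patch: create_mspace_with_base() and its
trailing user_data argument are assumed from the dlmalloc-derived mspace.h in
this tree, and the pool size and function names here are made up.

    /* Hypothetical usage sketch, not from this patch. Assumes the
     * create_mspace_with_base(base, capacity, locked, user_data) entry
     * point declared by the dlmalloc-derived mspace.h in this tree. */
    #include <stddef.h>
    #include "mspace.h"

    static char pool[64 * 1024];             /* backing storage for the heap */

    void mspace_usage_example(void)
    {
        /* Carve a private heap out of 'pool'; no locking, no user data. */
        mspace ms = create_mspace_with_base(pool, sizeof(pool), 0, NULL);

        void *a = mspace_malloc(ms, 128);     /* served from a small bin     */
        void *z = mspace_calloc(ms, 16, 32);  /* zeroed; overflow-checked    */
        a = mspace_realloc(ms, a, 256);       /* may grow in place or move   */

        mspace_free(ms, z);
        mspace_free(ms, a);
    }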
--
Respectfully,
*Sameeh Jubran*
*Linkedin <https://il.linkedin.com/pub/sameeh-jubran/87/747/a8a>*
*Junior Software Engineer @ Daynix <http://www.daynix.com>.*