[Spice-devel] [PATCH qxl-wddm-dod 12/26] Code Analysis clean up

Sameeh Jubran sameeh at daynix.com
Mon Sep 5 07:07:35 UTC 2016


On Tue, Aug 30, 2016 at 6:17 PM, Frediano Ziglio <fziglio at redhat.com> wrote:

> >
> > Signed-off-by: Sameeh Jubran <sameeh at daynix.com>
> > ---
> >  qxldod/QxlDod.cpp             |  520 +++++----
> >  qxldod/mspace.c               | 2437
> >  ----------------------------------------
> >  qxldod/mspace.cpp             | 2439
> >  +++++++++++++++++++++++++++++++++++++++++
> >  qxldod/qxldod.vcxproj         |    2 +-
> >  qxldod/qxldod.vcxproj.filters |    2 +-
> >  5 files changed, 2729 insertions(+), 2671 deletions(-)
> >  delete mode 100755 qxldod/mspace.c
> >  create mode 100644 qxldod/mspace.cpp
> >
> > diff --git a/qxldod/QxlDod.cpp b/qxldod/QxlDod.cpp
> > index a1718a4..a328d2d 100755
> > --- a/qxldod/QxlDod.cpp
> > +++ b/qxldod/QxlDod.cpp
> > @@ -3,7 +3,7 @@
> >  #include "qxl_windows.h"
> >
> >  #pragma code_seg(push)
> > -#pragma code_seg()
> > +#pragma code_seg("PAGE")
> >
> >  #define WIN_QXL_INT_MASK ((QXL_INTERRUPT_DISPLAY) | \
> >                            (QXL_INTERRUPT_CURSOR) | \
> > @@ -55,15 +55,11 @@ typedef struct _QXL_ESCAPE {
> >      };
> >  }QXL_ESCAPE;
> >
> > -#pragma code_seg(pop)
> > -
> > -#pragma code_seg("PAGE")
> > -
> > -
> >  QxlDod::QxlDod(_In_ DEVICE_OBJECT* pPhysicalDeviceObject) :
> >  m_pPhysicalDevice(pPhysicalDeviceObject),
> >
> m_MonitorPowerState(PowerDeviceD0),
> >
> m_AdapterPowerState(PowerDeviceD0)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s\n", __FUNCTION__));
> >      *((UINT*)&m_Flags) = 0;
> >      RtlZeroMemory(&m_DxgkInterface, sizeof(m_DxgkInterface));
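For readers skimming the segment changes: the file-level default now goes into the
PAGE section and each pageable routine gains a PAGED_CODE() call, which is what the
driver Code Analysis rules expect for paged code. A minimal sketch of the pattern
(the function name here is illustrative, not taken from the patch):

    #pragma code_seg(push)
    #pragma code_seg("PAGE")            // everything below is pageable

    NTSTATUS ExamplePageableHelper(void)
    {
        PAGED_CODE();                   // asserts IRQL <= APC_LEVEL on checked builds
        return STATUS_SUCCESS;
    }

    #pragma code_seg(pop)               // restore the previous (non-paged) segment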
> > @@ -246,6 +242,7 @@ DbgDevicePowerString(
> >      __in DEVICE_POWER_STATE Type
> >      )
> >  {
> > +    PAGED_CODE();
> >      switch (Type)
> >      {
> >      case PowerDeviceUnspecified:
> > @@ -270,6 +267,7 @@ DbgPowerActionString(
> >      __in POWER_ACTION Type
> >      )
> >  {
> > +    PAGED_CODE();
> >      switch (Type)
> >      {
> >      case PowerActionNone:
> > @@ -1474,7 +1472,7 @@ NTSTATUS QxlDod::CommitVidPn(_In_ CONST
> > DXGKARG_COMMITVIDPN* CONST pCommitVidPn)
> >
> >  CommitVidPnExit:
> >
> > -    NTSTATUS TempStatus;
> > +    NTSTATUS TempStatus(STATUS_SUCCESS);
> >      UNREFERENCED_PARAMETER(TempStatus);
> >
> >      if ((pVidPnSourceModeSetInterface != NULL) &&
> > @@ -1664,7 +1662,7 @@ NTSTATUS QxlDod::UpdateActiveVidPnPresentPath(_In_
> > CONST DXGKARG_UPDATEACTIVEVID
> >  //
> >  // Non-Paged Code
> >  //
> > -#pragma code_seg(push)
> > +#pragma code_seg(push)  //Non-Paged Code
> >  #pragma code_seg()
> >
> >  VOID QxlDod::DpcRoutine(VOID)
> > @@ -1835,7 +1833,7 @@ NTSTATUS QxlDod::WriteHWInfoStr(_In_ HANDLE
> > DevInstRegKeyHandle, _In_ PCWSTR psz
> >      return Status;
> >  }
> >
> > -NTSTATUS QxlDod::RegisterHWInfo(ULONG Id)
> > +NTSTATUS QxlDod::RegisterHWInfo(_In_ ULONG Id)
> >  {
> >      PAGED_CODE();
> >
> > @@ -1914,11 +1912,10 @@ NTSTATUS QxlDod::RegisterHWInfo(ULONG Id)
> >      return Status;
> >  }
> >
> > -
> >  //
> >  // Non-Paged Code
> >  //
> > -#pragma code_seg(push)
> > +#pragma code_seg(push) //Non-Paged
> >  #pragma code_seg()
> >
> >  UINT BPPFromPixelFormat(D3DDDIFORMAT Format)
> > @@ -1947,122 +1944,42 @@ D3DDDI_VIDEO_PRESENT_SOURCE_ID
> > QxlDod::FindSourceForTarget(D3DDDI_VIDEO_PRESENT_
> >
> >      return DefaultToZero ? 0 : D3DDDI_ID_UNINITIALIZED;
> >  }
> > +// HW specific code
> >
> > -
> > -//
> > -// Frame buffer map/unmap
> > -//
> > -
> > -NTSTATUS
> > -MapFrameBuffer(
> > -    _In_                       PHYSICAL_ADDRESS    PhysicalAddress,
> > -    _In_                       ULONG               Length,
> > -    _Outptr_result_bytebuffer_(Length) VOID**
> VirtualAddress)
> > +VOID GetPitches(_In_ CONST BLT_INFO* pBltInfo, _Out_ LONG* pPixelPitch,
> > _Out_ LONG* pRowPitch)
> >  {
> > -    PAGED_CODE();
> > -
> > -    //
> > -    // Check for parameters
> > -    //
> > -    if ((PhysicalAddress.QuadPart == (ULONGLONG)0) ||
> > -        (Length == 0) ||
> > -        (VirtualAddress == NULL))
> > +    switch (pBltInfo->Rotation) {
> > +    case D3DKMDT_VPPR_IDENTITY:
> >      {
> > -        DbgPrint(TRACE_LEVEL_ERROR, ("One of PhysicalAddress.QuadPart
> > (0x%I64x), Length (%lu), VirtualAddress (%p) is NULL or 0\n",
> > -                        PhysicalAddress.QuadPart, Length,
> VirtualAddress));
> > -        return STATUS_INVALID_PARAMETER;
> > +        *pPixelPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > +        *pRowPitch = pBltInfo->Pitch;
> > +        return;
> >      }
> > -
> > -    *VirtualAddress = MmMapIoSpace(PhysicalAddress,
> > -                                   Length,
> > -                                   MmWriteCombined);
> > -    if (*VirtualAddress == NULL)
> > +    case D3DKMDT_VPPR_ROTATE90:
> >      {
> > -        // The underlying call to MmMapIoSpace failed. This may be
> because,
> > MmWriteCombined
> > -        // isn't supported, so try again with MmNonCached
> > -
> > -        *VirtualAddress = MmMapIoSpace(PhysicalAddress,
> > -                                       Length,
> > -                                       MmNonCached);
> > -        if (*VirtualAddress == NULL)
> > -        {
> > -            DbgPrint(TRACE_LEVEL_ERROR, ("MmMapIoSpace returned a NULL
> > buffer when trying to allocate %lu bytes", Length));
> > -            return STATUS_NO_MEMORY;
> > -        }
> > +        *pPixelPitch = -((LONG) pBltInfo->Pitch);
> > +        *pRowPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > +        return;
> >      }
> > -
> > -    return STATUS_SUCCESS;
> > -}
> > -
> > -NTSTATUS
> > -UnmapFrameBuffer(
> > -    _In_reads_bytes_(Length) VOID* VirtualAddress,
> > -    _In_                ULONG Length)
> > -{
> > -    PAGED_CODE();
> > -
> > -
> > -    //
> > -    // Check for parameters
> > -    //
> > -    if ((VirtualAddress == NULL) && (Length == 0))
> > +    case D3DKMDT_VPPR_ROTATE180:
> >      {
> > -        // Allow this function to be called when there's no work to do,
> and
> > treat as successful
> > -        return STATUS_SUCCESS;
> > +        *pPixelPitch = -((LONG) pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > +        *pRowPitch = -((LONG) pBltInfo->Pitch);
> > +        return;
> >      }
> > -    else if ((VirtualAddress == NULL) || (Length == 0))
> > +    case D3DKMDT_VPPR_ROTATE270:
> >      {
> > -        DbgPrint(TRACE_LEVEL_ERROR, ("Only one of Length (%lu),
> > VirtualAddress (%p) is NULL or 0",
> > -                        Length, VirtualAddress));
> > -        return STATUS_INVALID_PARAMETER;
> > +        *pPixelPitch = pBltInfo->Pitch;
> > +        *pRowPitch = -((LONG) pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > +        return;
> >      }
> > -
> > -    MmUnmapIoSpace(VirtualAddress,
> > -                   Length);
> > -
> > -    return STATUS_SUCCESS;
> > -}
> > -
> > -
> > -
> > -
> > -// HW specific code
> > -
> > -VOID GetPitches(_In_ CONST BLT_INFO* pBltInfo, _Out_ LONG* pPixelPitch,
> > _Out_ LONG* pRowPitch)
> > -{
> > -    switch (pBltInfo->Rotation)
> > +    default:
> >      {
> > -        case D3DKMDT_VPPR_IDENTITY:
> > -        {
> > -            *pPixelPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > -            *pRowPitch = pBltInfo->Pitch;
> > -            return;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE90:
> > -        {
> > -            *pPixelPitch = -((LONG)pBltInfo->Pitch);
> > -            *pRowPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > -            return;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE180:
> > -        {
> > -            *pPixelPitch = -((LONG)pBltInfo->BitsPerPel /
> BITS_PER_BYTE);
> > -            *pRowPitch = -((LONG)pBltInfo->Pitch);
> > -            return;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE270:
> > -        {
> > -            *pPixelPitch = pBltInfo->Pitch;
> > -            *pRowPitch = -((LONG)pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > -            return;
> > -        }
> > -        default:
> > -        {
> > -            QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",
> > pBltInfo->Rotation);
> > -            *pPixelPitch = 0;
> > -            *pRowPitch = 0;
> > -            return;
> > -        }
> > +        QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",
> > pBltInfo->Rotation);
> > +        *pPixelPitch = 0;
> > +        *pRowPitch = 0;
> > +        return;
> > +    }
> >      }
> >  }
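For reference, the flattened switch keeps the same arithmetic as before; with a
hypothetical 32bpp surface and a 4096-byte pitch (values made up purely for
illustration) the four cases come out as:

    // IDENTITY:  *pPixelPitch =     4   *pRowPitch =  4096
    // ROTATE90:  *pPixelPitch = -4096   *pRowPitch =     4
    // ROTATE180: *pPixelPitch =    -4   *pRowPitch = -4096
    // ROTATE270: *pPixelPitch =  4096   *pRowPitch =    -4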
> >
> > @@ -2072,61 +1989,60 @@ BYTE* GetRowStart(_In_ CONST BLT_INFO* pBltInfo,
> > CONST RECT* pRect)
> >      LONG OffLeft = pRect->left + pBltInfo->Offset.x;
> >      LONG OffTop = pRect->top + pBltInfo->Offset.y;
> >      LONG BytesPerPixel = (pBltInfo->BitsPerPel / BITS_PER_BYTE);
> > -    switch (pBltInfo->Rotation)
> > +    switch (pBltInfo->Rotation) {
> > +    case D3DKMDT_VPPR_IDENTITY:
> >      {
> > -        case D3DKMDT_VPPR_IDENTITY:
> > -        {
> > -            pRet = ((BYTE*)pBltInfo->pBits +
> > -                           OffTop * pBltInfo->Pitch +
> > -                           OffLeft * BytesPerPixel);
> > -            break;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE90:
> > -        {
> > -            pRet = ((BYTE*)pBltInfo->pBits +
> > -                           (pBltInfo->Height - 1 - OffLeft) *
> > pBltInfo->Pitch +
> > -                           OffTop * BytesPerPixel);
> > -            break;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE180:
> > -        {
> > -            pRet = ((BYTE*)pBltInfo->pBits +
> > -                           (pBltInfo->Height - 1 - OffTop) *
> pBltInfo->Pitch
> > +
> > -                           (pBltInfo->Width - 1 - OffLeft) *
> BytesPerPixel);
> > -            break;
> > -        }
> > -        case D3DKMDT_VPPR_ROTATE270:
> > -        {
> > -            pRet = ((BYTE*)pBltInfo->pBits +
> > -                           OffLeft * pBltInfo->Pitch +
> > -                           (pBltInfo->Width - 1 - OffTop) *
> BytesPerPixel);
> > -            break;
> > -        }
> > -        default:
> > -        {
> > -            QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",
> > pBltInfo->Rotation);
> > -            break;
> > -        }
> > +        pRet = ((BYTE*) pBltInfo->pBits +
> > +            OffTop * pBltInfo->Pitch +
> > +            OffLeft * BytesPerPixel);
> > +        break;
> > +    }
> > +    case D3DKMDT_VPPR_ROTATE90:
> > +    {
> > +        pRet = ((BYTE*) pBltInfo->pBits +
> > +            (pBltInfo->Height - 1 - OffLeft) * pBltInfo->Pitch +
> > +            OffTop * BytesPerPixel);
> > +        break;
> > +    }
> > +    case D3DKMDT_VPPR_ROTATE180:
> > +    {
> > +        pRet = ((BYTE*) pBltInfo->pBits +
> > +            (pBltInfo->Height - 1 - OffTop) * pBltInfo->Pitch +
> > +            (pBltInfo->Width - 1 - OffLeft) * BytesPerPixel);
> > +        break;
> > +    }
> > +    case D3DKMDT_VPPR_ROTATE270:
> > +    {
> > +        pRet = ((BYTE*) pBltInfo->pBits +
> > +            OffLeft * pBltInfo->Pitch +
> > +            (pBltInfo->Width - 1 - OffTop) * BytesPerPixel);
> > +        break;
> > +    }
> > +    default:
> > +    {
> > +        QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",
> > pBltInfo->Rotation);
> > +        break;
> > +    }
> >      }
> >
> >      return pRet;
> >  }
> >
> >  /****************************Internal*Routine******************************\
> > - * CopyBitsGeneric
> > - *
> > - *
> > - * Blt function which can handle a rotated dst/src, offset rects in
> dst/src
> > - * and bpp combinations of:
> > - *   dst | src
> > - *    32 | 32   // For identity rotation this is much faster in
> > CopyBits32_32
> > - *    32 | 24
> > - *    32 | 16
> > - *    24 | 32
> > - *    16 | 32
> > - *     8 | 32
> > - *    24 | 24   // untested
> > - *
> > +* CopyBitsGeneric
> > +*
> > +*
> > +* Blt function which can handle a rotated dst/src, offset rects in
> dst/src
> > +* and bpp combinations of:
> > +*   dst | src
> > +*    32 | 32   // For identity rotation this is much faster in
> CopyBits32_32
> > +*    32 | 24
> > +*    32 | 16
> > +*    24 | 32
> > +*    16 | 32
> > +*     8 | 32
> > +*    24 | 24   // untested
> > +*
> >  \**************************************************************************/
> >
> >  VOID CopyBitsGeneric(
> > @@ -2140,13 +2056,12 @@ VOID CopyBitsGeneric(
> >      LONG SrcPixelPitch = 0;
> >      LONG SrcRowPitch = 0;
> >
> > -    DbgPrint(TRACE_LEVEL_VERBOSE , ("---> %s NumRects = %d Dst = %p Src
> =
> > %p\n", __FUNCTION__, NumRects, pDst->pBits, pSrc->pBits));
> > +    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s NumRects = %d Dst = %p Src =
> > %p\n", __FUNCTION__, NumRects, pDst->pBits, pSrc->pBits));
> >
> >      GetPitches(pDst, &DstPixelPitch, &DstRowPitch);
> >      GetPitches(pSrc, &SrcPixelPitch, &SrcRowPitch);
> >
> > -    for (UINT iRect = 0; iRect < NumRects; iRect++)
> > -    {
> > +    for (UINT iRect = 0; iRect < NumRects; iRect++) {
> >          CONST RECT* pRect = &pRects[iRect];
> >
> >          NT_ASSERT(pRect->right >= pRect->left);
> > @@ -2158,57 +2073,47 @@ VOID CopyBitsGeneric(
> >          BYTE* pDstRow = GetRowStart(pDst, pRect);
> >          CONST BYTE* pSrcRow = GetRowStart(pSrc, pRect);
> >
> > -        for (UINT y=0; y < NumRows; y++)
> > -        {
> > +        for (UINT y = 0; y < NumRows; y++) {
> >              BYTE* pDstPixel = pDstRow;
> >              CONST BYTE* pSrcPixel = pSrcRow;
> >
> > -            for (UINT x=0; x < NumPixels; x++)
> > -            {
> > +            for (UINT x = 0; x < NumPixels; x++) {
> >                  if ((pDst->BitsPerPel == 24) ||
> > -                    (pSrc->BitsPerPel == 24))
> > -                {
> > +                    (pSrc->BitsPerPel == 24)) {
> >                      pDstPixel[0] = pSrcPixel[0];
> >                      pDstPixel[1] = pSrcPixel[1];
> >                      pDstPixel[2] = pSrcPixel[2];
> >                      // pPixel[3] is the alpha channel and is ignored for
> >                      whichever of Src/Dst is 32bpp
> >                  }
> > -                else if (pDst->BitsPerPel == 32)
> > -                {
> > -                    if (pSrc->BitsPerPel == 32)
> > -                    {
> > -                        UINT32* pDstPixelAs32 = (UINT32*)pDstPixel;
> > -                        UINT32* pSrcPixelAs32 = (UINT32*)pSrcPixel;
> > +                else if (pDst->BitsPerPel == 32) {
> > +                    if (pSrc->BitsPerPel == 32) {
> > +                        UINT32* pDstPixelAs32 = (UINT32*) pDstPixel;
> > +                        UINT32* pSrcPixelAs32 = (UINT32*) pSrcPixel;
> >                          *pDstPixelAs32 = *pSrcPixelAs32;
> >                      }
> > -                    else if (pSrc->BitsPerPel == 16)
> > -                    {
> > -                        UINT32* pDstPixelAs32 = (UINT32*)pDstPixel;
> > -                        UINT16* pSrcPixelAs16 = (UINT16*)pSrcPixel;
> > +                    else if (pSrc->BitsPerPel == 16) {
> > +                        UINT32* pDstPixelAs32 = (UINT32*) pDstPixel;
> > +                        UINT16* pSrcPixelAs16 = (UINT16*) pSrcPixel;
> >
> >                          *pDstPixelAs32 =
> >                          CONVERT_16BPP_TO_32BPP(*pSrcPixelAs16);
> >                      }
> > -                    else
> > -                    {
> > +                    else {
> >                          // Invalid pSrc->BitsPerPel on a
> pDst->BitsPerPel of
> >                          32
> >                          NT_ASSERT(FALSE);
> >                      }
> >                  }
> > -                else if (pDst->BitsPerPel == 16)
> > -                {
> > +                else if (pDst->BitsPerPel == 16) {
> >                      NT_ASSERT(pSrc->BitsPerPel == 32);
> >
> > -                    UINT16* pDstPixelAs16 = (UINT16*)pDstPixel;
> > +                    UINT16* pDstPixelAs16 = (UINT16*) pDstPixel;
> >                      *pDstPixelAs16 = CONVERT_32BPP_TO_16BPP(pSrcPixel);
> >                  }
> > -                else if (pDst->BitsPerPel == 8)
> > -                {
> > +                else if (pDst->BitsPerPel == 8) {
> >                      NT_ASSERT(pSrc->BitsPerPel == 32);
> >
> >                      *pDstPixel = CONVERT_32BPP_TO_8BPP(pSrcPixel);
> >                  }
> > -                else
> > -                {
> > +                else {
> >                      // Invalid pDst->BitsPerPel
> >                      NT_ASSERT(FALSE);
> >                  }
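The CONVERT_16BPP_TO_32BPP / CONVERT_32BPP_TO_16BPP macros used above are defined
elsewhere in the driver; for anyone reading the reflowed hunk without the header in
front of them, a typical RGB565 -> XRGB8888 expansion of that kind looks roughly
like this (illustrative sketch only, not the driver's actual macro):

    inline UINT32 Expand565To8888(UINT16 p)
    {
        UINT32 r = (p >> 11) & 0x1F;        // 5 bits of red
        UINT32 g = (p >> 5)  & 0x3F;        // 6 bits of green
        UINT32 b =  p        & 0x1F;        // 5 bits of blue
        // replicate the high bits into the low bits to fill each 8-bit channel
        return ((r << 3 | r >> 2) << 16) |
               ((g << 2 | g >> 4) << 8)  |
                (b << 3 | b >> 2);
    }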
> > @@ -2236,8 +2141,7 @@ VOID CopyBits32_32(
> >
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > -    for (UINT iRect = 0; iRect < NumRects; iRect++)
> > -    {
> > +    for (UINT iRect = 0; iRect < NumRects; iRect++) {
> >          CONST RECT* pRect = &pRects[iRect];
> >
> >          NT_ASSERT(pRect->right >= pRect->left);
> > @@ -2246,15 +2150,14 @@ VOID CopyBits32_32(
> >          UINT NumPixels = pRect->right - pRect->left;
> >          UINT NumRows = pRect->bottom - pRect->top;
> >          UINT BytesToCopy = NumPixels * 4;
> > -        BYTE* pStartDst = ((BYTE*)pDst->pBits +
> > -                          (pRect->top + pDst->Offset.y) * pDst->Pitch +
> > -                          (pRect->left + pDst->Offset.x) * 4);
> > -        CONST BYTE* pStartSrc = ((BYTE*)pSrc->pBits +
> > -                                (pRect->top + pSrc->Offset.y) *
> pSrc->Pitch
> > +
> > -                                (pRect->left + pSrc->Offset.x) * 4);
> > -
> > -        for (UINT i = 0; i < NumRows; ++i)
> > -        {
> > +        BYTE* pStartDst = ((BYTE*) pDst->pBits +
> > +            (pRect->top + pDst->Offset.y) * pDst->Pitch +
> > +            (pRect->left + pDst->Offset.x) * 4);
> > +        CONST BYTE* pStartSrc = ((BYTE*) pSrc->pBits +
> > +            (pRect->top + pSrc->Offset.y) * pSrc->Pitch +
> > +            (pRect->left + pSrc->Offset.x) * 4);
> > +
> > +        for (UINT i = 0; i < NumRows; ++i) {
> >              RtlCopyMemory(pStartDst, pStartSrc, BytesToCopy);
> >              pStartDst += pDst->Pitch;
> >              pStartSrc += pSrc->Pitch;
> > @@ -2264,7 +2167,7 @@ VOID CopyBits32_32(
> >  }
> >
> >
> > -VOID BltBits (
> > +VOID BltBits(
> >      BLT_INFO* pDst,
> >      CONST BLT_INFO* pSrc,
> >      UINT  NumRects,
> > @@ -2274,31 +2177,105 @@ VOID BltBits (
> >      // This usage is redundant in the sample driver since it is already
> >      being used for MmProbeAndLockPages. However, it is very important
> >      // to have this in place and to make sure developers don't miss it,
> it
> >      is in these two locations.
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > -    __try
> > -    {
> > +    __try {
> >          if (pDst->BitsPerPel == 32 &&
> >              pSrc->BitsPerPel == 32 &&
> >              pDst->Rotation == D3DKMDT_VPPR_IDENTITY &&
> > -            pSrc->Rotation == D3DKMDT_VPPR_IDENTITY)
> > -        {
> > +            pSrc->Rotation == D3DKMDT_VPPR_IDENTITY) {
> >              // This is by far the most common copy function being called
> >              CopyBits32_32(pDst, pSrc, NumRects, pRects);
> >          }
> > -        else
> > -        {
> > +        else {
> >              CopyBitsGeneric(pDst, pSrc, NumRects, pRects);
> >          }
> >      }
> > -    #pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER,
> "try/except
> > is only able to protect against user-mode errors and these are the only
> > errors we try to catch here");
> > -    __except(EXCEPTION_EXECUTE_HANDLER)
> > +#pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER,
> "try/except is
> > only able to protect against user-mode errors and these are the only
> errors
> > we try to catch here");
> > +    __except (EXCEPTION_EXECUTE_HANDLER)
> >      {
> >          DbgPrint(TRACE_LEVEL_ERROR, ("Either dst (0x%I64x) or src
> (0x%I64x)
> >          bits encountered exception during access.\n", pDst->pBits,
> >          pSrc->pBits));
> >      }
> >  }
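The retained comment above refers to the try/except guard also being used around
the MmProbeAndLockPages call elsewhere in the present path; the general shape of
that other guard is (illustrative only, not code from this driver):

    NTSTATUS LockUserBuffer(PMDL pMdl)      // hypothetical helper, for illustration
    {
        __try {
            MmProbeAndLockPages(pMdl, UserMode, IoReadAccess);
        }
        __except (EXCEPTION_EXECUTE_HANDLER) {
            return GetExceptionCode();      // user-mode buffer was invalid or went away
        }
        return STATUS_SUCCESS;
    }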
> > -#pragma code_seg(pop) // End Non-Paged Code
> > +
> > +#pragma code_seg(pop) // End Non-Paged Code
> > +#pragma code_seg("PAGE")
> > +//
> > +// Frame buffer map/unmap
> > +//
> > +
> > +NTSTATUS
> > +MapFrameBuffer(
> > +    _In_                       PHYSICAL_ADDRESS    PhysicalAddress,
> > +    _In_                       ULONG               Length,
> > +    _Outptr_result_bytebuffer_(Length) VOID**
> VirtualAddress)
> > +{
> > +    PAGED_CODE();
> > +
> > +    //
> > +    // Check for parameters
> > +    //
> > +    if ((PhysicalAddress.QuadPart == (ULONGLONG)0) ||
> > +        (Length == 0) ||
> > +        (VirtualAddress == NULL))
> > +    {
> > +        DbgPrint(TRACE_LEVEL_ERROR, ("One of PhysicalAddress.QuadPart
> > (0x%I64x), Length (%lu), VirtualAddress (%p) is NULL or 0\n",
> > +                        PhysicalAddress.QuadPart, Length,
> VirtualAddress));
> > +        return STATUS_INVALID_PARAMETER;
> > +    }
> > +
> > +    *VirtualAddress = MmMapIoSpaceEx(PhysicalAddress,
> > +                                   Length,
> > +                                   PAGE_WRITECOMBINE | PAGE_READWRITE);
> > +    if (*VirtualAddress == NULL)
> > +    {
> > +        // The underlying call to MmMapIoSpaceEx failed. This may be because
> > +        // write combining isn't supported, so try again with an uncached mapping
> > +
> > +        *VirtualAddress = MmMapIoSpaceEx(PhysicalAddress,
> > +                                       Length,
> > +                                     (ULONG) (PAGE_NOCACHE |
> > PAGE_READWRITE));
> > +        if (*VirtualAddress == NULL)
> > +        {
> > +            DbgPrint(TRACE_LEVEL_ERROR, ("MmMapIoSpace returned a NULL
> > buffer when trying to allocate %lu bytes", Length));
> > +            return STATUS_NO_MEMORY;
> > +        }
> > +    }
> > +
> > +    return STATUS_SUCCESS;
> > +}
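Side note on the new mapping call: MmMapIoSpaceEx takes PAGE_* protection flags
rather than a MEMORY_CACHING_TYPE, so the retry logic reads as "try write-combined,
fall back to uncached". A condensed sketch of the same flow (the surrounding error
handling is in the hunk above):

    PVOID va = MmMapIoSpaceEx(PhysicalAddress, Length,
                              PAGE_READWRITE | PAGE_WRITECOMBINE);
    if (va == NULL) {
        // write combining not available; settle for an uncached mapping
        va = MmMapIoSpaceEx(PhysicalAddress, Length,
                            PAGE_READWRITE | PAGE_NOCACHE);
    }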
> > +
> > +NTSTATUS
> > +UnmapFrameBuffer(
> > +    _In_reads_bytes_(Length) VOID* VirtualAddress,
> > +    _In_                ULONG Length)
> > +{
> > +    PAGED_CODE();
> > +
> > +
> > +    //
> > +    // Check for parameters
> > +    //
> > +    if ((VirtualAddress == NULL) && (Length == 0))
> > +    {
> > +        // Allow this function to be called when there's no work to do,
> and
> > treat as successful
> > +        return STATUS_SUCCESS;
> > +    }
> > +    else if ((VirtualAddress == NULL) || (Length == 0))
> > +    {
> > +        DbgPrint(TRACE_LEVEL_ERROR, ("Only one of Length (%lu),
> > VirtualAddress (%p) is NULL or 0",
> > +                        Length, VirtualAddress));
> > +        return STATUS_INVALID_PARAMETER;
> > +    }
> > +
> > +    MmUnmapIoSpace(VirtualAddress,
> > +                   Length);
> > +
> > +    return STATUS_SUCCESS;
> > +}
> > +
> >
> >  VgaDevice::VgaDevice(_In_ QxlDod* pQxlDod) : HwDeviceInterface(pQxlDod)
> >  {
> > +    PAGED_CODE();
> >      m_pQxlDod = pQxlDod;
> >      m_ModeInfo = NULL;
> >      m_ModeCount = 0;
> > @@ -2310,6 +2287,7 @@ VgaDevice::VgaDevice(_In_ QxlDod* pQxlDod) :
> > HwDeviceInterface(pQxlDod)
> >
> >  VgaDevice::~VgaDevice(void)
> >  {
> > +    PAGED_CODE();
> >      HWClose();
> >      delete [] reinterpret_cast<BYTE*>(m_ModeInfo);
> >      delete [] reinterpret_cast<BYTE*>(m_ModeNumbers);
> > @@ -2551,6 +2529,7 @@ NTSTATUS
> > VgaDevice::GetModeList(DXGK_DISPLAY_INFORMATION* pDispInfo)
> >
> >  NTSTATUS VgaDevice::QueryCurrentMode(PVIDEO_MODE RequestedMode)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> >      NTSTATUS Status = STATUS_SUCCESS;
> >      UNREFERENCED_PARAMETER(RequestedMode);
> > @@ -2560,6 +2539,8 @@ NTSTATUS VgaDevice::QueryCurrentMode(PVIDEO_MODE
> > RequestedMode)
> >
> >  NTSTATUS VgaDevice::SetCurrentMode(ULONG Mode)
> >  {
> > +    PAGED_CODE();
> > +
> >      NTSTATUS Status = STATUS_SUCCESS;
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s Mode = %x\n",
> __FUNCTION__,
> >      Mode));
> >      X86BIOS_REGISTERS regs = {0};
> > @@ -2576,6 +2557,8 @@ NTSTATUS VgaDevice::SetCurrentMode(ULONG Mode)
> >
> >  NTSTATUS VgaDevice::GetCurrentMode(ULONG* pMode)
> >  {
> > +    PAGED_CODE();
> > +
> >      NTSTATUS Status = STATUS_SUCCESS;
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s\n", __FUNCTION__));
> >      X86BIOS_REGISTERS regs = {0};
> > @@ -2592,6 +2575,8 @@ NTSTATUS VgaDevice::GetCurrentMode(ULONG* pMode)
> >
> >  NTSTATUS VgaDevice::HWInit(PCM_RESOURCE_LIST pResList,
> >  DXGK_DISPLAY_INFORMATION* pDispInfo)
> >  {
> > +    PAGED_CODE();
> > +
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      UNREFERENCED_PARAMETER(pResList);
> >      UNREFERENCED_PARAMETER(pDispInfo);
> > @@ -2601,13 +2586,17 @@ NTSTATUS VgaDevice::HWInit(PCM_RESOURCE_LIST
> > pResList, DXGK_DISPLAY_INFORMATION*
> >
> >  NTSTATUS VgaDevice::HWClose(void)
> >  {
> > +    PAGED_CODE();
> > +
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> >      return STATUS_SUCCESS;
> >  }
> >
> > -NTSTATUS VgaDevice::SetPowerState(_In_  DEVICE_POWER_STATE
> DevicePowerState,
> > DXGK_DISPLAY_INFORMATION* pDispInfo)
> > +NTSTATUS VgaDevice::SetPowerState(DEVICE_POWER_STATE DevicePowerState,
> > DXGK_DISPLAY_INFORMATION* pDispInfo)
> >  {
> > +    PAGED_CODE();
> > +
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s\n", __FUNCTION__));
> >
> >      X86BIOS_REGISTERS regs = {0};
> > @@ -2630,7 +2619,6 @@ NTSTATUS VgaDevice::SetPowerState(_In_
> > DEVICE_POWER_STATE DevicePowerState, DXG
> >      return STATUS_SUCCESS;
> >  }
> >
> > -
> >  NTSTATUS
> >  VgaDevice::ExecutePresentDisplayOnly(
> >      _In_ BYTE*             DstAddr,
> > @@ -2903,18 +2891,21 @@ VOID VgaDevice::HWResetDevice(VOID)
> >
> >  NTSTATUS  VgaDevice::SetPointerShape(_In_ CONST
> DXGKARG_SETPOINTERSHAPE*
> >  pSetPointerShape)
> >  {
> > +    PAGED_CODE();
> >      UNREFERENCED_PARAMETER(pSetPointerShape);
> >      return STATUS_NOT_SUPPORTED;
> >  }
> >
> >  NTSTATUS VgaDevice::SetPointerPosition(_In_ CONST
> >  DXGKARG_SETPOINTERPOSITION* pSetPointerPosition)
> >  {
> > +    PAGED_CODE();
> >      UNREFERENCED_PARAMETER(pSetPointerPosition);
> >      return STATUS_SUCCESS;
> >  }
> >
> >  NTSTATUS VgaDevice::Escape(_In_ CONST DXGKARG_ESCAPE* pEscap)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> >      return STATUS_NOT_IMPLEMENTED;
> > @@ -2922,6 +2913,7 @@ NTSTATUS VgaDevice::Escape(_In_ CONST
> DXGKARG_ESCAPE*
> > pEscap)
> >
> >  QxlDevice::QxlDevice(_In_ QxlDod* pQxlDod) : HwDeviceInterface(pQxlDod)
> >  {
> > +    PAGED_CODE();
> >      m_pQxlDod = pQxlDod;
> >      m_ModeInfo = NULL;
> >      m_ModeCount = 0;
> > @@ -2935,6 +2927,7 @@ QxlDevice::QxlDevice(_In_ QxlDod* pQxlDod) :
> > HwDeviceInterface(pQxlDod)
> >
> >  QxlDevice::~QxlDevice(void)
> >  {
> > +    PAGED_CODE();
> >      HWClose();
> >      delete [] reinterpret_cast<BYTE*>(m_ModeInfo);
> >      delete [] reinterpret_cast<BYTE*>(m_ModeNumbers);
> > @@ -3108,6 +3101,7 @@ NTSTATUS
> > QxlDevice::GetModeList(DXGK_DISPLAY_INFORMATION* pDispInfo)
> >
> >  NTSTATUS QxlDevice::QueryCurrentMode(PVIDEO_MODE RequestedMode)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> >      NTSTATUS Status = STATUS_SUCCESS;
> >      UNREFERENCED_PARAMETER(RequestedMode);
> > @@ -3116,6 +3110,7 @@ NTSTATUS QxlDevice::QueryCurrentMode(PVIDEO_MODE
> > RequestedMode)
> >
> >  NTSTATUS QxlDevice::SetCurrentMode(ULONG Mode)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s - %d: Mode = %d\n",
> >      __FUNCTION__, m_Id, Mode));
> >      for (ULONG idx = 0; idx < GetModeCount(); idx++)
> >      {
> > @@ -3135,6 +3130,7 @@ NTSTATUS QxlDevice::SetCurrentMode(ULONG Mode)
> >
> >  NTSTATUS QxlDevice::GetCurrentMode(ULONG* pMode)
> >  {
> > +    PAGED_CODE();
> >      NTSTATUS Status = STATUS_SUCCESS;
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s\n", __FUNCTION__));
> >      UNREFERENCED_PARAMETER(pMode);
> > @@ -3142,8 +3138,9 @@ NTSTATUS QxlDevice::GetCurrentMode(ULONG* pMode)
> >      return Status;
> >  }
> >
> > -NTSTATUS QxlDevice::SetPowerState(_In_ DEVICE_POWER_STATE
> DevicePowerState,
> > DXGK_DISPLAY_INFORMATION* pDispInfo)
> > +NTSTATUS QxlDevice::SetPowerState(DEVICE_POWER_STATE DevicePowerState,
> > DXGK_DISPLAY_INFORMATION* pDispInfo)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      switch (DevicePowerState)
> >      {
> > @@ -3159,6 +3156,7 @@ NTSTATUS QxlDevice::SetPowerState(_In_
> > DEVICE_POWER_STATE DevicePowerState, DXGK
> >
> >  NTSTATUS QxlDevice::HWInit(PCM_RESOURCE_LIST pResList,
> >  DXGK_DISPLAY_INFORMATION* pDispInfo)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      PDXGKRNL_INTERFACE pDxgkInterface = m_pQxlDod->GetDxgkInterface();
> >      UINT pci_range = QXL_RAM_RANGE_INDEX;
> > @@ -3306,6 +3304,7 @@ NTSTATUS QxlDevice::HWInit(PCM_RESOURCE_LIST
> pResList,
> > DXGK_DISPLAY_INFORMATION*
> >
> >  NTSTATUS QxlDevice::QxlInit(DXGK_DISPLAY_INFORMATION* pDispInfo)
> >  {
> > +    PAGED_CODE();
> >      NTSTATUS Status = STATUS_SUCCESS;
> >
> >      if (!InitMemSlots()) {
> > @@ -3333,11 +3332,13 @@ NTSTATUS QxlDevice::QxlInit(DXGK_
> DISPLAY_INFORMATION*
> > pDispInfo)
> >
> >  void QxlDevice::QxlClose()
> >  {
> > +    PAGED_CODE();
> >      DestroyMemSlots();
> >  }
> >
> >  void QxlDevice::UnmapMemory(void)
> >  {
> > +    PAGED_CODE();
> >      PDXGKRNL_INTERFACE pDxgkInterface = m_pQxlDod->GetDxgkInterface();
> >      if (m_IoMapped && m_IoBase)
> >      {
> > @@ -3365,6 +3366,7 @@ void QxlDevice::UnmapMemory(void)
> >
> >  BOOL QxlDevice::InitMemSlots(void)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      m_NumMemSlots = m_RomHdr->slots_end;
> >      m_SlotGenBits = m_RomHdr->slot_gen_bits;
> > @@ -3384,6 +3386,7 @@ BOOL QxlDevice::InitMemSlots(void)
> >
> >  void QxlDevice::DestroyMemSlots(void)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      delete [] reinterpret_cast<BYTE*>(m_MemSlots);
> >      m_MemSlots = NULL;
> > @@ -3392,6 +3395,7 @@ void QxlDevice::DestroyMemSlots(void)
> >
> >  void QxlDevice::CreatePrimarySurface(PVIDEO_MODE_INFORMATION pModeInfo)
> >  {
> > +    PAGED_CODE();
> >      QXLSurfaceCreate *primary_surface_create;
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s - %d: (%d x %d)\n",
> >      __FUNCTION__, m_Id,
> >          pModeInfo->VisScreenWidth, pModeInfo->VisScreenHeight));
> > @@ -3414,6 +3418,7 @@ void
> > QxlDevice::CreatePrimarySurface(PVIDEO_MODE_INFORMATION pModeInfo)
> >
> >  void QxlDevice::DestroyPrimarySurface(void)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >  //    AsyncIo(QXL_IO_DESTROY_PRIMARY_ASYNC, 0);
> >      SyncIo(QXL_IO_DESTROY_PRIMARY, 0);
> > @@ -3422,6 +3427,7 @@ void QxlDevice::DestroyPrimarySurface(void)
> >
> >  _inline QXLPHYSICAL QxlDevice::PA(PVOID virt, UINT8 slot_id)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));
> >      MemSlot *pSlot = &m_MemSlots[slot_id];;
> >      return pSlot->high_bits | ((UINT64)virt - pSlot->start_virt_addr);
> > @@ -3429,6 +3435,7 @@ _inline QXLPHYSICAL QxlDevice::PA(PVOID virt, UINT8
> > slot_id)
> >
> >  _inline UINT64 QxlDevice::VA(QXLPHYSICAL paddr, UINT8 slot_id)
> >  {
> > +    PAGED_CODE();
> >      UINT64 virt;
> >      MemSlot *pSlot = &m_MemSlots[slot_id];;
> >
> > @@ -3441,6 +3448,7 @@ _inline UINT64 QxlDevice::VA(QXLPHYSICAL paddr,
> UINT8
> > slot_id)
> >
> >  void QxlDevice::SetupHWSlot(UINT8 Idx, MemSlot *pSlot)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      m_RamHdr->mem_slot.mem_start = pSlot->start_phys_addr;
> >      m_RamHdr->mem_slot.mem_end = pSlot->end_phys_addr;
> > @@ -3450,6 +3458,7 @@ void QxlDevice::SetupHWSlot(UINT8 Idx, MemSlot
> *pSlot)
> >
> >  BOOL QxlDevice::CreateEvents()
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      KeInitializeEvent(&m_DisplayEvent,
> >                        SynchronizationEvent,
> > @@ -3471,6 +3480,7 @@ BOOL QxlDevice::CreateEvents()
> >
> >  BOOL QxlDevice::CreateRings()
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      m_CommandRing = &(m_RamHdr->cmd_ring);
> >      m_CursorRing = &(m_RamHdr->cursor_ring);
> > @@ -3481,6 +3491,7 @@ BOOL QxlDevice::CreateRings()
> >
> >  void QxlDevice::AsyncIo(UCHAR  Port, UCHAR Value)
> >  {
> > +    PAGED_CODE();
> >      LARGE_INTEGER timeout;
> >      BOOLEAN locked = FALSE;
> >      locked = WaitForObject(&m_IoLock, NULL);
> > @@ -3492,6 +3503,7 @@ void QxlDevice::AsyncIo(UCHAR  Port, UCHAR Value)
> >
> >  void QxlDevice::SyncIo(UCHAR  Port, UCHAR Value)
> >  {
> > +    PAGED_CODE();
> >      BOOLEAN locked = FALSE;
> >      locked = WaitForObject(&m_IoLock, NULL);
> >      WRITE_PORT_UCHAR(m_IoBase + Port, Value);
> > @@ -3500,6 +3512,7 @@ void QxlDevice::SyncIo(UCHAR  Port, UCHAR Value)
> >
> >  UINT8 QxlDevice::SetupMemSlot(UINT8 Idx, UINT64 pastart, UINT64 paend,
> >  UINT64 vastart, UINT64 vaend)
> >  {
> > +    PAGED_CODE();
> >      UINT64 high_bits;
> >      MemSlot *pSlot;
> >      UINT8 slot_index;
> > @@ -3525,6 +3538,7 @@ UINT8 QxlDevice::SetupMemSlot(UINT8 Idx, UINT64
> > pastart, UINT64 paend, UINT64 va
> >
> >  BOOL QxlDevice::CreateMemSlots(void)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s 3\n", __FUNCTION__));
> >      UINT64 len = m_RomHdr->surface0_area_size + m_RomHdr->num_pages *
> >      PAGE_SIZE;
> >      m_MainMemSlot = SetupMemSlot(0,
> > @@ -3544,6 +3558,7 @@ BOOL QxlDevice::CreateMemSlots(void)
> >
> >  void QxlDevice::InitDeviceMemoryResources(void)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s num_pages = %d\n",
> __FUNCTION__,
> >      m_RomHdr->num_pages));
> >      InitMspace(MSPACE_TYPE_DEVRAM, (m_RamStart +
> >      m_RomHdr->surface0_area_size), (size_t)(m_RomHdr->num_pages *
> >      PAGE_SIZE));
> >      InitMspace(MSPACE_TYPE_VRAM, m_VRamStart, m_VRamSize);
> > @@ -3552,6 +3567,7 @@ void QxlDevice::InitDeviceMemoryResources(void)
> >
> >  void QxlDevice::InitMonitorConfig(void)
> >  {
> > +    PAGED_CODE();
> >      size_t config_size = sizeof(QXLMonitorsConfig) + sizeof(QXLHead);
> >      m_monitor_config = (QXLMonitorsConfig*) AllocMem(MSPACE_TYPE_DEVRAM,
> >      config_size, TRUE);
> >      RtlZeroMemory(m_monitor_config, config_size);
> > @@ -3560,9 +3576,9 @@ void QxlDevice::InitMonitorConfig(void)
> >      *m_monitor_config_pa = PA(m_monitor_config, m_MainMemSlot);
> >  }
> >
> > -
> >  void QxlDevice::InitMspace(UINT32 mspace_type, UINT8 *start, size_t
> >  capacity)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s type = %d, start = %p,
> capacity
> >      = %d\n", __FUNCTION__, mspace_type, start, capacity));
> >      m_MSInfo[mspace_type]._mspace = create_mspace_with_base(start,
> capacity,
> >      0, this);
> >      m_MSInfo[mspace_type].mspace_start = start;
> > @@ -3754,6 +3770,7 @@ QxlDevice::ExecutePresentDisplayOnly(
> >
> >  void QxlDevice::WaitForReleaseRing(void)
> >  {
> > +    PAGED_CODE();
> >      int wait;
> >
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("--->%s\n", __FUNCTION__));
> > @@ -3786,6 +3803,7 @@ void QxlDevice::WaitForReleaseRing(void)
> >
> >  void QxlDevice::FlushReleaseRing()
> >  {
> > +    PAGED_CODE();
> >      UINT64 output;
> >      int notify;
> >      int num_to_release = 50;
> > @@ -3817,6 +3835,7 @@ void QxlDevice::FlushReleaseRing()
> >
> >  void QxlDevice::EmptyReleaseRing()
> >  {
> > +    PAGED_CODE();
> >      BOOLEAN locked = FALSE;
> >      locked = WaitForObject(&m_MemLock, NULL);
> >      while (m_FreeOutputs || !SPICE_RING_IS_EMPTY(m_ReleaseRing)) {
> > @@ -3827,6 +3846,7 @@ void QxlDevice::EmptyReleaseRing()
> >
> >  UINT64 QxlDevice::ReleaseOutput(UINT64 output_id)
> >  {
> > +    PAGED_CODE();
> >      QXLOutput *output = (QXLOutput *)output_id;
> >      Resource **now;
> >      Resource **end;
> > @@ -3846,6 +3866,7 @@ UINT64 QxlDevice::ReleaseOutput(UINT64 output_id)
> >
> >  void *QxlDevice::AllocMem(UINT32 mspace_type, size_t size, BOOL force)
> >  {
> > +    PAGED_CODE();
> >      PVOID ptr;
> >      BOOLEAN locked = FALSE;
> >
> > @@ -3894,6 +3915,7 @@ void *QxlDevice::AllocMem(UINT32 mspace_type,
> size_t
> > size, BOOL force)
> >
> >  void QxlDevice::FreeMem(UINT32 mspace_type, void *ptr)
> >  {
> > +    PAGED_CODE();
> >      ASSERT(m_MSInfo[mspace_type]._mspace);
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > @@ -3914,6 +3936,7 @@ void QxlDevice::FreeMem(UINT32 mspace_type, void
> *ptr)
> >
> >  QXLDrawable *QxlDevice::GetDrawable()
> >  {
> > +    PAGED_CODE();
> >      QXLOutput *output;
> >
> >      output = (QXLOutput *)AllocMem(MSPACE_TYPE_DEVRAM,
> sizeof(QXLOutput) +
> >      sizeof(QXLDrawable), TRUE);
> > @@ -3926,6 +3949,7 @@ QXLDrawable *QxlDevice::GetDrawable()
> >
> >  QXLCursorCmd *QxlDevice::CursorCmd()
> >  {
> > +    PAGED_CODE();
> >      QXLCursorCmd *cursor_cmd;
> >      QXLOutput *output;
> >
> > @@ -3941,6 +3965,7 @@ QXLCursorCmd *QxlDevice::CursorCmd()
> >
> >  BOOL QxlDevice::SetClip(const RECT *clip, QXLDrawable *drawable)
> >  {
> > +    PAGED_CODE();
> >      Resource *rects_res;
> >
> >      if (clip == NULL) {
> > @@ -3970,12 +3995,14 @@ BOOL QxlDevice::SetClip(const RECT *clip,
> QXLDrawable
> > *drawable)
> >
> >  void QxlDevice::AddRes(QXLOutput *output, Resource *res)
> >  {
> > +    PAGED_CODE();
> >      res->refs++;
> >      output->resources[output->num_res++] = res;
> >  }
> >
> >  void QxlDevice::DrawableAddRes(QXLDrawable *drawable, Resource *res)
> >  {
> > +    PAGED_CODE();
> >      QXLOutput *output;
> >
> >      output = (QXLOutput *)((UINT8 *)drawable - sizeof(QXLOutput));
> > @@ -3984,6 +4011,7 @@ void QxlDevice::DrawableAddRes(QXLDrawable
> *drawable,
> > Resource *res)
> >
> >  void QxlDevice::CursorCmdAddRes(QXLCursorCmd *cmd, Resource *res)
> >  {
> > +    PAGED_CODE();
> >      QXLOutput *output;
> >
> >      output = (QXLOutput *)((UINT8 *)cmd - sizeof(QXLOutput));
> > @@ -3992,6 +4020,7 @@ void QxlDevice::CursorCmdAddRes(QXLCursorCmd *cmd,
> > Resource *res)
> >
> >  void QxlDevice::FreeClipRectsEx(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));
> >      QxlDevice* pqxl = (QxlDevice*)res->ptr;
> >      pqxl->FreeClipRects(res);
> > @@ -3999,6 +4028,7 @@ void QxlDevice::FreeClipRectsEx(Resource *res)
> >
> >  void QxlDevice::FreeClipRects(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      QXLPHYSICAL chunk_phys;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > @@ -4014,6 +4044,7 @@ void QxlDevice::FreeClipRects(Resource *res)
> >
> >  void QxlDevice::FreeBitmapImageEx(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));
> >      QxlDevice* pqxl = (QxlDevice*)res->ptr;
> >      pqxl->FreeBitmapImage(res);
> > @@ -4021,6 +4052,7 @@ void QxlDevice::FreeBitmapImageEx(Resource *res)
> >
> >  void QxlDevice::FreeBitmapImage(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      InternalImage *internal;
> >      QXLPHYSICAL chunk_phys;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > @@ -4040,6 +4072,7 @@ void QxlDevice::FreeBitmapImage(Resource *res)
> >
> >  void QxlDevice::FreeCursorEx(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));
> >      QxlDevice* pqxl = (QxlDevice*)res->ptr;
> >      pqxl->FreeCursor(res);
> > @@ -4047,6 +4080,7 @@ void QxlDevice::FreeCursorEx(Resource *res)
> >
> >  void QxlDevice::FreeCursor(Resource *res)
> >  {
> > +    PAGED_CODE();
> >      QXLPHYSICAL chunk_phys;
> >
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > @@ -4063,6 +4097,7 @@ void QxlDevice::FreeCursor(Resource *res)
> >
> >  QXLDrawable *QxlDevice::Drawable(UINT8 type, CONST RECT *area, CONST
> RECT
> >  *clip, UINT32 surface_id)
> >  {
> > +    PAGED_CODE();
> >      QXLDrawable *drawable;
> >
> >      ASSERT(area);
> > @@ -4088,7 +4123,9 @@ QXLDrawable *QxlDevice::Drawable(UINT8 type, CONST
> RECT
> > *area, CONST RECT *clip,
> >      return drawable;
> >  }
> >
> > -void QxlDevice::PushDrawable(QXLDrawable *drawable) {
> > +void QxlDevice::PushDrawable(QXLDrawable *drawable)
> > +{
> > +    PAGED_CODE();
> >      QXLCommand *cmd;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > @@ -4105,6 +4142,7 @@ void QxlDevice::PushDrawable(QXLDrawable
> *drawable) {
> >
> >  void QxlDevice::PushCursorCmd(QXLCursorCmd *cursor_cmd)
> >  {
> > +    PAGED_CODE();
> >      QXLCommand *cmd;
> >
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > @@ -4126,6 +4164,7 @@ VOID QxlDevice::SetImageId(InternalImage
> *internal,
> >      LONG height,
> >      UINT8 format, UINT32 key)
> >  {
> > +    PAGED_CODE();
> >      UINT32 image_info = IMAGE_HASH_INIT_VAL(width, height, format);
> >
> >      if (cache_me) {
> > @@ -4145,6 +4184,7 @@ VOID QxlDevice::BltBits (
> >      UINT  NumRects,
> >      _In_reads_(NumRects) CONST RECT *pRects)
> >  {
> > +    PAGED_CODE();
> >      QXLDrawable *drawable;
> >      Resource *image_res;
> >      InternalImage *internal;
> > @@ -4160,6 +4200,7 @@ VOID QxlDevice::BltBits (
> >
> >      if (!(drawable = Drawable(QXL_DRAW_COPY, pRects, NULL, 0))) {
> >          DbgPrint(TRACE_LEVEL_ERROR, ("Cannot get Drawable.\n"));
> > +        return;
> >      }
> >
> >      CONST RECT* pRect = &pRects[0];
> > @@ -4241,6 +4282,7 @@ VOID QxlDevice::PutBytesAlign(QXLDataChunk
> **chunk_ptr,
> > UINT8 **now_ptr,
> >                              UINT8 **end_ptr, UINT8 *src, int size,
> >                              size_t alloc_size, uint32_t alignment)
> >  {
> > +    PAGED_CODE();
> >      QXLDataChunk *chunk = *chunk_ptr;
> >      UINT8 *now = *now_ptr;
> >      UINT8 *end = *end_ptr;
> > @@ -4305,6 +4347,7 @@ VOID QxlDevice::BlackOutScreen(CURRENT_BDD_MODE*
> > pCurrentBddMod)
> >
> >  NTSTATUS QxlDevice::HWClose(void)
> >  {
> > +    PAGED_CODE();
> >      QxlClose();
> >      UnmapMemory();
> >      return STATUS_SUCCESS;
> > @@ -4312,6 +4355,7 @@ NTSTATUS QxlDevice::HWClose(void)
> >
> >  NTSTATUS  QxlDevice::SetPointerShape(_In_ CONST
> DXGKARG_SETPOINTERSHAPE*
> >  pSetPointerShape)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s flag = %x\n", __FUNCTION__,
> >      pSetPointerShape->Flags.Value));
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("<--> %s flag = %d pitch = %d,
> pixels
> >      = %p, id = %d, w = %d, h = %d, x = %d, y = %d\n", __FUNCTION__,
> >                                   pSetPointerShape->Flags.Value,
> > @@ -4394,6 +4438,7 @@ NTSTATUS  QxlDevice::SetPointerShape(_In_ CONST
> > DXGKARG_SETPOINTERSHAPE* pSetPoi
> >
> >  NTSTATUS QxlDevice::SetPointerPosition(_In_ CONST
> >  DXGKARG_SETPOINTERPOSITION* pSetPointerPosition)
> >  {
> > +    PAGED_CODE();
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      DbgPrint(TRACE_LEVEL_INFORMATION, ("<--> %s flag = %d id = %d, x =
> %d, y
> >      = %d\n", __FUNCTION__,
> >                                   pSetPointerPosition->Flags.Value,
> > @@ -4417,6 +4462,7 @@ NTSTATUS QxlDevice::SetPointerPosition(_In_ CONST
> > DXGKARG_SETPOINTERPOSITION* pS
> >
> >  NTSTATUS QxlDevice::UpdateChildStatus(BOOLEAN connect)
> >  {
> > +    PAGED_CODE();
> >      NTSTATUS           Status(STATUS_SUCCESS);
> >      DXGK_CHILD_STATUS  ChildStatus;
> >      PDXGKRNL_INTERFACE pDXGKInterface(m_pQxlDod->GetDxgkInterface());
> > @@ -4430,6 +4476,7 @@ NTSTATUS QxlDevice::UpdateChildStatus(BOOLEAN
> connect)
> >
> >  NTSTATUS QxlDevice::SetCustomDisplay(QXLEscapeSetCustomDisplay*
> >  custom_display)
> >  {
> > +    PAGED_CODE();
> >      NTSTATUS status;
> >      UINT xres = custom_display->xres;
> >      UINT yres = custom_display->yres;
> > @@ -4454,6 +4501,7 @@ NTSTATUS
> > QxlDevice::SetCustomDisplay(QXLEscapeSetCustomDisplay* custom_display)
> >
> >  void QxlDevice::SetMonitorConfig(QXLHead * monitor_config)
> >  {
> > +    PAGED_CODE();
> >      m_monitor_config->count = 1;
> >      m_monitor_config->max_allowed = 1;
> >
> > @@ -4469,6 +4517,7 @@ void QxlDevice::SetMonitorConfig(QXLHead *
> > monitor_config)
> >
> >  NTSTATUS QxlDevice::Escape(_In_ CONST DXGKARG_ESCAPE* pEscape)
> >  {
> > +    PAGED_CODE();
> >      size_t          data_size(sizeof(int));
> >      QXL_ESCAPE*     pQXLEscape((QXL_ESCAPE*)
> pEscape->pPrivateDriverData);
> >      NTSTATUS        status(STATUS_SUCCESS);
> > @@ -4510,6 +4559,7 @@ NTSTATUS QxlDevice::Escape(_In_ CONST
> DXGKARG_ESCAPE*
> > pEscape)
> >
> >  VOID QxlDevice::WaitForCmdRing()
> >  {
> > +    PAGED_CODE();
> >      int wait;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > @@ -4526,6 +4576,7 @@ VOID QxlDevice::WaitForCmdRing()
> >
> >  VOID QxlDevice::PushCmd()
> >  {
> > +    PAGED_CODE();
> >      int notify;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      SPICE_RING_PUSH(m_CommandRing, notify);
> > @@ -4537,6 +4588,7 @@ VOID QxlDevice::PushCmd()
> >
> >  VOID QxlDevice::WaitForCursorRing(VOID)
> >  {
> > +    PAGED_CODE();
> >      int wait;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >
> > @@ -4559,6 +4611,7 @@ VOID QxlDevice::WaitForCursorRing(VOID)
> >
> >  VOID QxlDevice::PushCursor(VOID)
> >  {
> > +    PAGED_CODE();
> >      int notify;
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> >      SPICE_RING_PUSH(m_CursorRing, notify);
> > @@ -4665,7 +4718,7 @@ BOOLEAN HwDeviceInterface::InterruptRoutine(_In_
> > PDXGKRNL_INTERFACE pDxgkInterfa
> >      }
> >  }
> >
> > -VOID HwDeviceInterface::DpcRoutine(PDXGKRNL_INTERFACE pDxgkInterface)
> > +VOID HwDeviceInterface::DpcRoutine(_In_ PDXGKRNL_INTERFACE
> pDxgkInterface)
> >  {
> >      QxlDevice * qxl_device;
> >      VgaDevice * vga_device;
> > @@ -4707,23 +4760,12 @@ VOID HwDeviceInterface::ResetDevice(void)
> >      }
> >  }
> >
> > -#pragma code_seg(pop) //end non-paged code
> >
> > -VOID QxlDevice::UpdateArea(CONST RECT* area, UINT32 surface_id)
> > -{
> > -    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > -    CopyRect(&m_RamHdr->update_area, area);
> > -    m_RamHdr->update_surface = surface_id;
> > -//    AsyncIo(QXL_IO_UPDATE_AREA_ASYNC, 0);
> > -    SyncIo(QXL_IO_UPDATE_AREA, 0);
> > -    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> > -}
> > -
> > -BOOLEAN QxlDevice:: DpcCallbackEx(PVOID ptr)
> > +BOOLEAN QxlDevice::DpcCallbackEx(PVOID ptr)
> >  {
> >      DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));
> >      PDPC_CB_CONTEXT ctx = (PDPC_CB_CONTEXT) ptr;
> > -    QxlDevice* pqxl = (QxlDevice*)ctx->ptr;
> > +    QxlDevice* pqxl = (QxlDevice*) ctx->ptr;
> >      pqxl->DpcCallback(ctx);
> >      return TRUE;
> >  }
> > @@ -4734,9 +4776,22 @@ VOID QxlDevice::DpcCallback(PDPC_CB_CONTEXT ctx)
> >      m_Pending = 0;
> >
> >  }
> > +#pragma code_seg(pop) //end non-paged code
> > +
> > +VOID QxlDevice::UpdateArea(CONST RECT* area, UINT32 surface_id)
> > +{
> > +    PAGED_CODE();
> > +    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
> > +    CopyRect(&m_RamHdr->update_area, area);
> > +    m_RamHdr->update_surface = surface_id;
> > +//    AsyncIo(QXL_IO_UPDATE_AREA_ASYNC, 0);
> > +    SyncIo(QXL_IO_UPDATE_AREA, 0);
> > +    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
> > +}
> >
> >  UINT SpiceFromPixelFormat(D3DDDIFORMAT Format)
> >  {
> > +    PAGED_CODE();
> >      switch (Format)
> >      {
> >          case D3DDDIFMT_UNKNOWN:
> > @@ -4748,3 +4803,4 @@ UINT SpiceFromPixelFormat(D3DDDIFORMAT Format)
> >          default: QXL_LOG_ASSERTION1("Unknown D3DDDIFORMAT 0x%I64x",
> Format);
> >          return 0;
> >      }
> >  }
> > +#pragma code_seg(pop)
> > \ No newline at end of file
> > diff --git a/qxldod/mspace.c b/qxldod/mspace.c
> > deleted file mode 100755
> > index d0ba123..0000000
> > --- a/qxldod/mspace.c
> > +++ /dev/null
> > @@ -1,2437 +0,0 @@
> > -// based on dlmalloc from Doug Lea
> > -
> > -
> > -// quote from the Doug Lea original file
> > -    /*
> > -      This is a version (aka dlmalloc) of malloc/free/realloc written by
> > -      Doug Lea and released to the public domain, as explained at
> > -      http://creativecommons.org/licenses/publicdomain.  Send
> questions,
> > -      comments, complaints, performance data, etc to dl at cs.oswego.edu
> > -
> > -    * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
> > -
> > -       Note: There may be an updated version of this malloc obtainable
> at
> > -               ftp://gee.cs.oswego.edu/pub/misc/malloc.c
> > -             Check before installing!
> > -    */
> > -
> > -
> > -#include <ntddk.h>
> > -
> > -#include "mspace.h"
> > -
> > -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
> > -
> > -#define MALLOC_ALIGNMENT ((size_t)8U)
> > -#define USE_LOCKS 0
> > -#define malloc_getpagesize ((size_t)4096U)
> > -#define DEFAULT_GRANULARITY malloc_getpagesize
> > -#define MAX_SIZE_T (~(size_t)0)
> > -#define MALLOC_FAILURE_ACTION
> > -#define MALLINFO_FIELD_TYPE size_t
> > -#define FOOTERS 0
> > -#define INSECURE 0
> > -#define PROCEED_ON_ERROR 0
> > -#define DEBUG 0
> > -#define ABORT_ON_ASSERT_FAILURE 1
> > -#define ABORT(user_data) abort_func(user_data)
> > -#define USE_BUILTIN_FFS 0
> > -#define USE_DEV_RANDOM 0
> > -#define PRINT(params) print_func params
> > -
> > -
> > -#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)
> > -#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)
> > -
> > -
> > -#define M_GRANULARITY        (-1)
> > -
> > -void default_abort_func(void *user_data)
> > -{
> > -    for (;;);
> > -}
> > -
> > -void default_print_func(void *user_data, char *format, ...)
> > -{
> > -}
> > -
> > -static mspace_abort_t abort_func = default_abort_func;
> > -static mspace_print_t print_func = default_print_func;
> > -
> > -void mspace_set_abort_func(mspace_abort_t f)
> > -{
> > -    abort_func = f;
> > -}
> > -
> > -void mspace_set_print_func(mspace_print_t f)
> > -{
> > -    print_func = f;
> > -}
> > -
> > -/* ------------------------ Mallinfo declarations ------------------------ */
> > -
> > -#if !NO_MALLINFO
> > -/*
> > -  This version of malloc supports the standard SVID/XPG mallinfo
> > -  routine that returns a struct containing usage properties and
> > -  statistics. It should work on any system that has a
> > -  /usr/include/malloc.h defining struct mallinfo.  The main
> > -  declaration needed is the mallinfo struct that is returned (by-copy)
> > -  by mallinfo().  The malloinfo struct contains a bunch of fields that
> > -  are not even meaningful in this version of malloc.  These fields are
> > -  are instead filled by mallinfo() with other numbers that might be of
> > -  interest.
> > -
> > -  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
> > -  /usr/include/malloc.h file that includes a declaration of struct
> > -  mallinfo.  If so, it is included; else a compliant version is
> > -  declared below.  These must be precisely the same for mallinfo() to
> > -  work.  The original SVID version of this struct, defined on most
> > -  systems with mallinfo, declares all fields as ints. But some others
> > -  define as unsigned long. If your system defines the fields using a
> > -  type of different width than listed here, you MUST #include your
> > -  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
> > -*/
> > -
> > -/* #define HAVE_USR_INCLUDE_MALLOC_H */
> > -
> > -
> > -struct mallinfo {
> > -  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from
> system
> > */
> > -  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
> > -  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
> > -  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
> > -  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
> > -  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
> > -  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
> > -  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
> > -  MALLINFO_FIELD_TYPE fordblks; /* total free space */
> > -  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
> > -};
> > -
> > -#endif /* NO_MALLINFO */
> > -
> > -
> > -
> > -#ifdef DEBUG
> > -#if ABORT_ON_ASSERT_FAILURE
> > -#define assert(user_data, x) if(!(x)) ABORT(user_data)
> > -#else /* ABORT_ON_ASSERT_FAILURE */
> > -#include <assert.h>
> > -#endif /* ABORT_ON_ASSERT_FAILURE */
> > -#else  /* DEBUG */
> > -#define assert(user_data, x)
> > -#endif /* DEBUG */
> > -
> > -/* ------------------- size_t and alignment properties -------------------- */
> > -
> > -/* The byte and bit size of a size_t */
> > -#define SIZE_T_SIZE         (sizeof(size_t))
> > -#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
> > -
> > -/* Some constants coerced to size_t */
> > -/* Annoying but necessary to avoid errors on some plaftorms */
> > -#define SIZE_T_ZERO         ((size_t)0)
> > -#define SIZE_T_ONE          ((size_t)1)
> > -#define SIZE_T_TWO          ((size_t)2)
> > -#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
> > -#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
> > -#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
> > -#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
> > -
> > -/* The bit mask value corresponding to MALLOC_ALIGNMENT */
> > -#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
> > -
> > -/* True if address a has acceptable alignment */
> > -#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
> > -
> > -/* the number of bytes to offset an address to align it */
> > -#define align_offset(A)\
> > - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
> > -  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
> > -
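A quick worked example of align_offset with the MALLOC_ALIGNMENT of 8 defined
above (addresses made up for illustration):

    // A = 0x1003:  A & CHUNK_ALIGN_MASK = 3,  (8 - 3) & 7 = 5
    //              so 0x1003 + align_offset(0x1003) = 0x1008, the next 8-byte boundary
    // A = 0x1008:  already aligned, so align_offset(A) = 0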
> > -/* --------------------------- Lock preliminaries ------------------------ */
> > -
> > -#if USE_LOCKS
> > -
> > -/*
> > -  When locks are defined, there are up to two global locks:
> > -
> > -  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
> > -    MORECORE.  In many cases sys_alloc requires two calls, that should
> > -    not be interleaved with calls by other threads.  This does not
> > -    protect against direct calls to MORECORE by other threads not
> > -    using this lock, so there is still code to cope the best we can on
> > -    interference.
> > -
> > -  * magic_init_mutex ensures that mparams.magic and other
> > -    unique mparams values are initialized only once.
> > -*/
> > -
> > -
> > -#define USE_LOCK_BIT               (2U)
> > -#else  /* USE_LOCKS */
> > -#define USE_LOCK_BIT               (0U)
> > -#define INITIAL_LOCK(l)
> > -#endif /* USE_LOCKS */
> > -
> > -#if USE_LOCKS
> > -#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
> > -#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
> > -#else  /* USE_LOCKS */
> > -#define ACQUIRE_MAGIC_INIT_LOCK()
> > -#define RELEASE_MAGIC_INIT_LOCK()
> > -#endif /* USE_LOCKS */
> > -
> > -
> > -
> > -/* -----------------------  Chunk representations ------------------------ */
> > -
> > -/*
> > -  (The following includes lightly edited explanations by Colin Plumb.)
> > -
> > -  The malloc_chunk declaration below is misleading (but accurate and
> > -  necessary).  It declares a "view" into memory allowing access to
> > -  necessary fields at known offsets from a given base.
> > -
> > -  Chunks of memory are maintained using a `boundary tag' method as
> > -  originally described by Knuth.  (See the paper by Paul Wilson
> > -  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
> > -  techniques.)  Sizes of free chunks are stored both in the front of
> > -  each chunk and at the end.  This makes consolidating fragmented
> > -  chunks into bigger chunks fast.  The head fields also hold bits
> > -  representing whether chunks are free or in use.
> > -
> > -  Here are some pictures to make it clearer.  They are "exploded" to
> > -  show that the state of a chunk can be thought of as extending from
> > -  the high 31 bits of the head field of its header through the
> > -  prev_foot and PINUSE_BIT bit of the following chunk header.
> > -
> > -  A chunk that's in use looks like:
> > -
> > -   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -           | Size of previous chunk (if P = 1)                             |
> > -           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > -         | Size of this chunk                                         1| +-+
> > -   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         |                                                               |
> > -         +-                                                             -+
> > -         |                                                               |
> > -         +-                                                             -+
> > -         |                                                               :
> > -         +-      size - sizeof(size_t) available payload bytes          -+
> > -         :                                                               |
> > - chunk-> +-                                                             -+
> > -         |                                                               |
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
> > -       | Size of next chunk (may or may not be in use)               | +-+
> > - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > -    And if it's free, it looks like this:
> > -
> > -   chunk-> +-                                                             -+
> > -           | User payload (must be in use, or we would have merged!)      |
> > -           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > -         | Size of this chunk                                         0| +-+
> > -   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         | Next pointer                                                  |
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         | Prev pointer                                                  |
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         |                                                               :
> > -         +-      size - sizeof(struct chunk) unused bytes               -+
> > -         :                                                               |
> > - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -         | Size of this chunk                                            |
> > -         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
> > -       | Size of next chunk (must be in use, or we would have merged)| +-+
> > - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -       |                                                               :
> > -       +- User payload                                                -+
> > -       :                                                               |
> > -       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -                                                                     |0|
> > -                                                                     +-+
> > -  Note that since we always merge adjacent free chunks, the chunks
> > -  adjacent to a free chunk must be in use.
> > -
> > -  Given a pointer to a chunk (which can be derived trivially from the
> > -  payload pointer) we can, in O(1) time, find out whether the adjacent
> > -  chunks are free, and if so, unlink them from the lists that they
> > -  are on and merge them with the current chunk.
> > -
> > -  Chunks always begin on even word boundaries, so the mem portion
> > -  (which is returned to the user) is also on an even word boundary, and
> > -  thus at least double-word aligned.
> > -
> > -  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
> > -  chunk size (which is always a multiple of two words), is an in-use
> > -  bit for the *previous* chunk.  If that bit is *clear*, then the
> > -  word before the current chunk size contains the previous chunk
> > -  size, and can be used to find the front of the previous chunk.
> > -  The very first chunk allocated always has this bit set, preventing
> > -  access to non-existent (or non-owned) memory. If pinuse is set for
> > -  any given chunk, then you CANNOT determine the size of the
> > -  previous chunk, and might even get a memory addressing fault when
> > -  trying to do so.
> > -
> > -  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
> > -  the chunk size redundantly records whether the current chunk is
> > -  inuse. This redundancy enables usage checks within free and realloc,
> > -  and reduces indirection when freeing and consolidating chunks.
> > -
> > -  Each freshly allocated chunk must have both cinuse and pinuse set.
> > -  That is, each allocated chunk borders either a previously allocated
> > -  and still in-use chunk, or the base of its memory arena. This is
> > -  ensured by making all allocations from the `lowest' part of any
> > -  found chunk.  Further, no free chunk physically borders another one,
> > -  so each free chunk is known to be preceded and followed by either
> > -  inuse chunks or the ends of memory.
> > -
> > -  Note that the `foot' of the current chunk is actually represented
> > -  as the prev_foot of the NEXT chunk. This makes it easier to
> > -  deal with alignments etc but can be very confusing when trying
> > -  to extend or adapt this code.
> > -
> > -  The exceptions to all this are
> > -
> > -     1. The special chunk `top' is the top-most available chunk (i.e.,
> > -        the one bordering the end of available memory). It is treated
> > -        specially.  Top is never included in any bin, is used only if
> > -        no other chunk is available, and is released back to the
> > -        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
> > -        the top chunk is treated as larger (and thus less well
> > -        fitting) than any other available chunk.  The top chunk
> > -        doesn't update its trailing size field since there is no next
> > -        contiguous chunk that would have to index off it. However,
> > -        space is still allocated for it (TOP_FOOT_SIZE) to enable
> > -        separation or merging when space is extended.
> > -
> > -     2. Chunks allocated via mmap, which have the lowest-order bit
> > -        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
> > -        PINUSE_BIT in their head fields.  Because they are allocated
> > -        one-by-one, each must carry its own prev_foot field, which is
> > -        also used to hold the offset this chunk has within its mmapped
> > -        region, which is needed to preserve alignment. Each mmapped
> > -        chunk is trailed by the first two fields of a fake next-chunk
> > -        for sake of usage checks.
> > -
> > -*/
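Not part of the patch, but to make the boundary-tag encoding described above
concrete, here is a minimal stand-alone C sketch of how a head word packs the
chunk size together with the two in-use bits (the names are local to the
sketch, not the macros from this file):

    #include <stdio.h>
    #include <stddef.h>

    #define PBIT ((size_t)1)               /* previous chunk in use */
    #define CBIT ((size_t)2)               /* this chunk in use     */

    int main(void)
    {
        /* a 64-byte in-use chunk whose predecessor is also in use */
        size_t head = (size_t)64 | PBIT | CBIT;

        printf("size   = %zu\n", head & ~(PBIT | CBIT));  /* -> 64 */
        printf("pinuse = %d\n", (head & PBIT) != 0);      /* -> 1  */
        printf("cinuse = %d\n", (head & CBIT) != 0);      /* -> 1  */
        return 0;
    }

Clearing the P bit in the next chunk's head is what later lets free() use
prev_foot to find the front of this chunk and consolidate backwards.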
> > -
> > -struct malloc_chunk {
> > -  size_t               prev_foot;  /* Size of previous chunk (if free).  */
> > -  size_t               head;       /* Size and inuse bits. */
> > -  struct malloc_chunk* fd;         /* double links -- used only if free. */
> > -  struct malloc_chunk* bk;
> > -};
> > -
> > -typedef struct malloc_chunk  mchunk;
> > -typedef struct malloc_chunk* mchunkptr;
> > -typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
> > -typedef unsigned int bindex_t;         /* Described below */
> > -typedef unsigned int binmap_t;         /* Described below */
> > -typedef unsigned int flag_t;           /* The type of various bit flag sets */
> > -
> > -
> > -/* ------------------- Chunks sizes and alignments ----------------------- */
> > -
> > -#define MCHUNK_SIZE         (sizeof(mchunk))
> > -
> > -#if FOOTERS
> > -#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
> > -#else /* FOOTERS */
> > -#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
> > -#endif /* FOOTERS */
> > -
> > -/* The smallest size we can malloc is an aligned minimal chunk */
> > -#define MIN_CHUNK_SIZE\
> > -  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > -
> > -/* conversion from malloc headers to user pointers, and back */
> > -#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
> > -#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
> > -/* chunk associated with aligned address A */
> > -#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
> > -
> > -/* Bounds on request (not chunk) sizes. */
> > -#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
> > -#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
> > -
> > -/* pad request bytes into a usable size */
> > -#define pad_request(req) \
> > -   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > -
> > -/* pad request, checking for minimum (but not maximum) */
> > -#define request2size(req) \
> > -  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
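As a side note (not code from this file), the padding arithmetic above works
out as follows if you assume an 8-byte size_t, 8-byte alignment and no
FOOTERS, i.e. CHUNK_OVERHEAD = 8 and MIN_CHUNK_SIZE = 32:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        const size_t overhead = 8, align_mask = 7, min_chunk = 32;
        size_t reqs[] = { 1, 25, 100 };
        for (int i = 0; i < 3; ++i) {
            size_t padded = (reqs[i] + overhead + align_mask) & ~align_mask;
            if (padded < min_chunk)            /* request2size's MIN_REQUEST check */
                padded = min_chunk;
            printf("request %3zu -> chunk %3zu\n", reqs[i], padded);
        }
        return 0;                              /* prints 32, 40 and 112 */
    }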
> > -
> > -/* ------------------ Operations on head and foot fields ----------------- */
> > -
> > -/*
> > -  The head field of a chunk is or'ed with PINUSE_BIT when the previous
> > -  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
> > -  use. If the chunk was obtained with mmap, the prev_foot field has
> > -  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
> > -  mmapped region to the base of the chunk.
> > -*/
> > -
> > -#define PINUSE_BIT          (SIZE_T_ONE)
> > -#define CINUSE_BIT          (SIZE_T_TWO)
> > -#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
> > -
> > -/* Head value for fenceposts */
> > -#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
> > -
> > -/* extraction of fields from head words */
> > -#define cinuse(p)           ((p)->head & CINUSE_BIT)
> > -#define pinuse(p)           ((p)->head & PINUSE_BIT)
> > -#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
> > -
> > -#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
> > -#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
> > -
> > -/* Treat space at ptr +/- offset as a chunk */
> > -#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
> > -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
> > -
> > -/* Ptr to next or previous physical malloc_chunk. */
> > -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
> > -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
> > -
> > -/* extract next chunk's pinuse bit */
> > -#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
> > -
> > -/* Get/set size at footer */
> > -#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
> > -#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
> > -
> > -/* Set size, pinuse bit, and foot */
> > -#define set_size_and_pinuse_of_free_chunk(p, s)\
> > -  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
> > -
> > -/* Set size, pinuse bit, foot, and clear next pinuse */
> > -#define set_free_with_pinuse(p, s, n)\
> > -  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
> > -
> > -/* Get the internal overhead associated with chunk p */
> > -#define overhead_for(p) CHUNK_OVERHEAD
> > -
> > -/* Return true if malloced space is not necessarily cleared */
> > -#define calloc_must_clear(p) (1)
> > -
> > -
> > -/* ---------------------- Overlaid data structures ----------------------- */
> > -
> > -/*
> > -  When chunks are not in use, they are treated as nodes of either
> > -  lists or trees.
> > -
> > -  "Small"  chunks are stored in circular doubly-linked lists, and look
> > -  like this:
> > -
> > -    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Size of previous chunk                            |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -    `head:' |             Size of chunk, in bytes                         |P|
> > -      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Forward pointer to next chunk in list             |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Back pointer to previous chunk in list            |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Unused space (may be 0 bytes long)                .
> > -            .                                                               .
> > -            .                                                               |
> > -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -    `foot:' |             Size of chunk, in bytes                           |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > -  Larger chunks are kept in a form of bitwise digital trees (aka
> > -  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
> > -  free chunks greater than 256 bytes, their size doesn't impose any
> > -  constraints on user chunk sizes.  Each node looks like:
> > -
> > -    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Size of previous chunk                            |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -    `head:' |             Size of chunk, in bytes                         |P|
> > -      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Forward pointer to next chunk of same size        |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Back pointer to previous chunk of same size       |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Pointer to left child (child[0])                  |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Pointer to right child (child[1])                 |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Pointer to parent                                 |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             bin index of this chunk                           |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -            |             Unused space                                      .
> > -            .                                                               |
> > -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -    `foot:' |             Size of chunk, in bytes                           |
> > -            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > -
> > -  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
> > -  of the same size are arranged in a circularly-linked list, with only
> > -  the oldest chunk (the next to be used, in our FIFO ordering)
> > -  actually in the tree.  (Tree members are distinguished by a non-null
> > -  parent pointer.)  If a chunk with the same size as an existing node
> > -  is inserted, it is linked off the existing node using pointers that
> > -  work in the same way as fd/bk pointers of small chunks.
> > -
> > -  Each tree contains a power of 2 sized range of chunk sizes (the
> > -  smallest is 0x100 <= x < 0x180), which is divided in half at each
> > -  tree level, with the chunks in the smaller half of the range (0x100
> > -  <= x < 0x140 for the top node) in the left subtree and the larger
> > -  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
> > -  done by inspecting individual bits.
> > -
> > -  Using these rules, each node's left subtree contains all smaller
> > -  sizes than its right subtree.  However, the node at the root of each
> > -  subtree has no particular ordering relationship to either.  (The
> > -  dividing line between the subtree sizes is based on trie relation.)
> > -  If we remove the last chunk of a given size from the interior of the
> > -  tree, we need to replace it with a leaf node.  The tree ordering
> > -  rules permit a node to be replaced by any leaf below it.
> > -
> > -  The smallest chunk in a tree (a common operation in a best-fit
> > -  allocator) can be found by walking a path to the leftmost leaf in
> > -  the tree.  Unlike a usual binary tree, where we follow left child
> > -  pointers until we reach a null, here we follow the right child
> > -  pointer any time the left one is null, until we reach a leaf with
> > -  both child pointers null. The smallest chunk in the tree will be
> > -  somewhere along that path.
> > -
> > -  The worst case number of steps to add, find, or remove a node is
> > -  bounded by the number of bits differentiating chunks within
> > -  bins. Under current bin calculations, this ranges from 6 up to 21
> > -  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
> > -  is of course much better.
> > -*/
> > -
> > -struct malloc_tree_chunk {
> > -  /* The first four fields must be compatible with malloc_chunk */
> > -  size_t                    prev_foot;
> > -  size_t                    head;
> > -  struct malloc_tree_chunk* fd;
> > -  struct malloc_tree_chunk* bk;
> > -
> > -  struct malloc_tree_chunk* child[2];
> > -  struct malloc_tree_chunk* parent;
> > -  bindex_t                  index;
> > -};
> > -
> > -typedef struct malloc_tree_chunk  tchunk;
> > -typedef struct malloc_tree_chunk* tchunkptr;
> > -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
> > -
> > -/* A little helper macro for trees */
> > -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
> > -
> > -/* ----------------------------- Segments -------------------------------- */
> > -
> > -/*
> > -  Each malloc space may include non-contiguous segments, held in a
> > -  list headed by an embedded malloc_segment record representing the
> > -  top-most space. Segments also include flags holding properties of
> > -  the space. Large chunks that are directly allocated by mmap are not
> > -  included in this list. They are instead independently created and
> > -  destroyed without otherwise keeping track of them.
> > -
> > -  Segment management mainly comes into play for spaces allocated by
> > -  MMAP.  Any call to MMAP might or might not return memory that is
> > -  adjacent to an existing segment.  MORECORE normally contiguously
> > -  extends the current space, so this space is almost always adjacent,
> > -  which is simpler and faster to deal with. (This is why MORECORE is
> > -  used preferentially to MMAP when both are available -- see
> > -  sys_alloc.)  When allocating using MMAP, we don't use any of the
> > -  hinting mechanisms (inconsistently) supported in various
> > -  implementations of unix mmap, or distinguish reserving from
> > -  committing memory. Instead, we just ask for space, and exploit
> > -  contiguity when we get it.  It is probably possible to do
> > -  better than this on some systems, but no general scheme seems
> > -  to be significantly better.
> > -
> > -  Management entails a simpler variant of the consolidation scheme
> > -  used for chunks to reduce fragmentation -- new adjacent memory is
> > -  normally prepended or appended to an existing segment. However,
> > -  there are limitations compared to chunk consolidation that mostly
> > -  reflect the fact that segment processing is relatively infrequent
> > -  (occurring only when getting memory from system) and that we
> > -  don't expect to have huge numbers of segments:
> > -
> > -  * Segments are not indexed, so traversal requires linear scans.  (It
> > -    would be possible to index these, but is not worth the extra
> > -    overhead and complexity for most programs on most platforms.)
> > -  * New segments are only appended to old ones when holding top-most
> > -    memory; if they cannot be prepended to others, they are held in
> > -    different segments.
> > -
> > -  Except for the top-most segment of an mstate, each segment record
> > -  is kept at the tail of its segment. Segments are added by pushing
> > -  segment records onto the list headed by &mstate.seg for the
> > -  containing mstate.
> > -
> > -  Segment flags control allocation/merge/deallocation policies:
> > -  * If EXTERN_BIT set, then we did not allocate this segment,
> > -    and so should not try to deallocate or merge with others.
> > -    (This currently holds only for the initial segment passed
> > -    into create_mspace_with_base.)
> > -  * If IS_MMAPPED_BIT set, the segment may be merged with
> > -    other surrounding mmapped segments and trimmed/de-allocated
> > -    using munmap.
> > -  * If neither bit is set, then the segment was obtained using
> > -    MORECORE so can be merged with surrounding MORECORE'd segments
> > -    and deallocated/trimmed using MORECORE with negative arguments.
> > -*/
> > -
> > -struct malloc_segment {
> > -  char*        base;             /* base address */
> > -  size_t       size;             /* allocated size */
> > -  struct malloc_segment* next;   /* ptr to next segment */
> > -};
> > -
> > -typedef struct malloc_segment  msegment;
> > -typedef struct malloc_segment* msegmentptr;
> > -
> > -/* ---------------------------- malloc_state ----------------------------- */
> > -
> > -/*
> > -   A malloc_state holds all of the bookkeeping for a space.
> > -   The main fields are:
> > -
> > -  Top
> > -    The topmost chunk of the currently active segment. Its size is
> > -    cached in topsize.  The actual size of topmost space is
> > -    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
> > -    fenceposts and segment records if necessary when getting more
> > -    space from the system.  The size at which to autotrim top is
> > -    cached from mparams in trim_check, except that it is disabled if
> > -    an autotrim fails.
> > -
> > -  Designated victim (dv)
> > -    This is the preferred chunk for servicing small requests that
> > -    don't have exact fits.  It is normally the chunk split off most
> > -    recently to service another small request.  Its size is cached in
> > -    dvsize. The link fields of this chunk are not maintained since it
> > -    is not kept in a bin.
> > -
> > -  SmallBins
> > -    An array of bin headers for free chunks.  These bins hold chunks
> > -    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
> > -    chunks of all the same size, spaced 8 bytes apart.  To simplify
> > -    use in double-linked lists, each bin header acts as a malloc_chunk
> > -    pointing to the real first node, if it exists (else pointing to
> > -    itself).  This avoids special-casing for headers.  But to avoid
> > -    waste, we allocate only the fd/bk pointers of bins, and then use
> > -    repositioning tricks to treat these as the fields of a chunk.
> > -
> > -  TreeBins
> > -    Treebins are pointers to the roots of trees holding a range of
> > -    sizes. There are 2 equally spaced treebins for each power of two
> > -    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
> > -    larger.
> > -
> > -  Bin maps
> > -    There is one bit map for small bins ("smallmap") and one for
> > -    treebins ("treemap).  Each bin sets its bit when non-empty, and
> > -    clears the bit when empty.  Bit operations are then used to avoid
> > -    bin-by-bin searching -- nearly all "search" is done without ever
> > -    looking at bins that won't be selected.  The bit maps
> > -    conservatively use 32 bits per map word, even on a 64-bit system.
> > -    For a good description of some of the bit-based techniques used
> > -    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
> > -    supplement at http://hackersdelight.org/). Many of these are
> > -    intended to reduce the branchiness of paths through malloc etc, as
> > -    well as to reduce the number of memory locations read or written.
> > -
> > -  Segments
> > -    A list of segments headed by an embedded malloc_segment record
> > -    representing the initial space.
> > -
> > -  Address check support
> > -    The least_addr field is the least address ever obtained from
> > -    MORECORE or MMAP. Attempted frees and reallocs of any address less
> > -    than this are trapped (unless INSECURE is defined).
> > -
> > -  Magic tag
> > -    A cross-check field that should always hold the same value as mparams.magic.
> > -
> > -  Flags
> > -    Bits recording whether to use MMAP, locks, or contiguous MORECORE
> > -
> > -  Statistics
> > -    Each space keeps track of current and maximum system memory
> > -    obtained via MORECORE or MMAP.
> > -
> > -  Locking
> > -    If USE_LOCKS is defined, the "mutex" lock is acquired and released
> > -    around every public call using this mspace.
> > -*/
> > -
> > -/* Bin types, widths and sizes */
> > -#define NSMALLBINS        (32U)
> > -#define NTREEBINS         (32U)
> > -#define SMALLBIN_SHIFT    (3U)
> > -#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
> > -#define TREEBIN_SHIFT     (8U)
> > -#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
> > -#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
> > -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
> > -
> > -struct malloc_state {
> > -  binmap_t   smallmap;
> > -  binmap_t   treemap;
> > -  size_t     dvsize;
> > -  size_t     topsize;
> > -  char*      least_addr;
> > -  mchunkptr  dv;
> > -  mchunkptr  top;
> > -  size_t     magic;
> > -  mchunkptr  smallbins[(NSMALLBINS+1)*2];
> > -  tbinptr    treebins[NTREEBINS];
> > -  size_t     footprint;
> > -  size_t     max_footprint;
> > -  flag_t     mflags;
> > -  void      *user_data;
> > -#if USE_LOCKS
> > -  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
> > -#endif /* USE_LOCKS */
> > -  msegment   seg;
> > -};
> > -
> > -typedef struct malloc_state*    mstate;
> > -
> > -/* ------------- Global malloc_state and malloc_params ------------------- */
> > -
> > -/*
> > -  malloc_params holds global properties, including those that can be
> > -  dynamically set using mallopt. There is a single instance, mparams,
> > -  initialized in init_mparams.
> > -*/
> > -
> > -struct malloc_params {
> > -  size_t magic;
> > -  size_t page_size;
> > -  size_t granularity;
> > -  flag_t default_mflags;
> > -};
> > -
> > -static struct malloc_params mparams;
> > -
> > -/* The global malloc_state used for all non-"mspace" calls */
> > -//static struct malloc_state _gm_;
> > -//#define gm                 (&_gm_)
> > -//#define is_global(M)       ((M) == &_gm_)
> > -#define is_initialized(M)  ((M)->top != 0)
> > -
> > -/* -------------------------- system alloc setup ------------------------- */
> > -
> > -/* Operations on mflags */
> > -
> > -#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
> > -#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
> > -#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
> > -
> > -#define set_lock(M,L)\
> > - ((M)->mflags = (L)?\
> > -  ((M)->mflags | USE_LOCK_BIT) :\
> > -  ((M)->mflags & ~USE_LOCK_BIT))
> > -
> > -/* page-align a size */
> > -#define page_align(S)\
> > - (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
> > -
> > -/* granularity-align a size */
> > -#define granularity_align(S)\
> > -  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
> > -
> > -#define is_page_aligned(S)\
> > -   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
> > -#define is_granularity_aligned(S)\
> > -   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
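One behaviour of page_align()/granularity_align() as written that is easy to
miss: they add the full page size (not page_size - 1) before masking, so a
value that is already page-aligned is still rounded up by one extra page. A
quick stand-alone check (not patch code), assuming a 4096-byte page:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        const size_t page = 4096;
        printf("%zu\n", ((size_t)1    + page) & ~(page - 1));  /* 4096 */
        printf("%zu\n", ((size_t)4096 + page) & ~(page - 1));  /* 8192 */
        return 0;
    }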
> > -
> > -/*  True if segment S holds address A */
> > -#define segment_holds(S, A)\
> > -  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
> > -
> > -/* Return segment holding given address */
> > -static msegmentptr segment_holding(mstate m, char* addr) {
> > -  msegmentptr sp = &m->seg;
> > -  for (;;) {
> > -    if (addr >= sp->base && addr < sp->base + sp->size)
> > -      return sp;
> > -    if ((sp = sp->next) == 0)
> > -      return 0;
> > -  }
> > -}
> > -
> > -/* Return true if segment contains a segment link */
> > -static int has_segment_link(mstate m, msegmentptr ss) {
> > -  msegmentptr sp = &m->seg;
> > -  for (;;) {
> > -    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
> > -      return 1;
> > -    if ((sp = sp->next) == 0)
> > -      return 0;
> > -  }
> > -}
> > -
> > -
> > -
> > -/*
> > -  TOP_FOOT_SIZE is padding at the end of a segment, including space
> > -  that may be needed to place segment records and fenceposts when new
> > -  noncontiguous segments are added.
> > -*/
> > -#define TOP_FOOT_SIZE\
> > -  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
> > -
> > -
> > -/* -------------------------------  Hooks -------------------------------- */
> > -
> > -/*
> > -  PREACTION should be defined to return 0 on success, and nonzero on
> > -  failure. If you are not using locking, you can redefine these to do
> > -  anything you like.
> > -*/
> > -
> > -#if USE_LOCKS
> > -
> > -/* Ensure locks are initialized */
> > -#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
> > -
> > -#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
> > -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
> > -#else /* USE_LOCKS */
> > -
> > -#ifndef PREACTION
> > -#define PREACTION(M) (0)
> > -#endif  /* PREACTION */
> > -
> > -#ifndef POSTACTION
> > -#define POSTACTION(M)
> > -#endif  /* POSTACTION */
> > -
> > -#endif /* USE_LOCKS */
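For what it's worth, the way these hooks are consumed can be seen further down
in this same file (internal_mallinfo, internal_malloc_stats): every entry
point brackets its work with PREACTION/POSTACTION. A stand-alone sketch of
that calling pattern, with stub lock functions in place of the real macros:

    #include <stdio.h>

    static int  preaction_stub(void)  { puts("lock");   return 0; }  /* 0 = success */
    static void postaction_stub(void) { puts("unlock"); }

    int main(void)
    {
        if (!preaction_stub()) {
            puts("... work on the malloc_state ...");
            postaction_stub();
        }
        return 0;
    }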
> > -
> > -/*
> > -  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
> > -  USAGE_ERROR_ACTION is triggered on detected bad frees and
> > -  reallocs. The argument p is an address that might have triggered the
> > -  fault. It is ignored by the two predefined actions, but might be
> > -  useful in custom actions that try to help diagnose errors.
> > -*/
> > -
> > -#if PROCEED_ON_ERROR
> > -
> > -/* A count of the number of corruption errors causing resets */
> > -int malloc_corruption_error_count;
> > -
> > -/* default corruption action */
> > -static void reset_on_error(mstate m);
> > -
> > -#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
> > -#define USAGE_ERROR_ACTION(m, p)
> > -
> > -#else /* PROCEED_ON_ERROR */
> > -
> > -#ifndef CORRUPTION_ERROR_ACTION
> > -#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
> > -#endif /* CORRUPTION_ERROR_ACTION */
> > -
> > -#ifndef USAGE_ERROR_ACTION
> > -#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
> > -#endif /* USAGE_ERROR_ACTION */
> > -
> > -#endif /* PROCEED_ON_ERROR */
> > -
> > -/* -------------------------- Debugging setup ---------------------------- */
> > -
> > -#if ! DEBUG
> > -
> > -#define check_free_chunk(M,P)
> > -#define check_inuse_chunk(M,P)
> > -#define check_malloced_chunk(M,P,N)
> > -#define check_malloc_state(M)
> > -#define check_top_chunk(M,P)
> > -
> > -#else /* DEBUG */
> > -#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
> > -#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
> > -#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
> > -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
> > -#define check_malloc_state(M)       do_check_malloc_state(M)
> > -
> > -static void   do_check_any_chunk(mstate m, mchunkptr p);
> > -static void   do_check_top_chunk(mstate m, mchunkptr p);
> > -static void   do_check_inuse_chunk(mstate m, mchunkptr p);
> > -static void   do_check_free_chunk(mstate m, mchunkptr p);
> > -static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
> > -static void   do_check_tree(mstate m, tchunkptr t);
> > -static void   do_check_treebin(mstate m, bindex_t i);
> > -static void   do_check_smallbin(mstate m, bindex_t i);
> > -static void   do_check_malloc_state(mstate m);
> > -static int    bin_find(mstate m, mchunkptr x);
> > -static size_t traverse_and_check(mstate m);
> > -#endif /* DEBUG */
> > -
> > -/* ---------------------------- Indexing Bins ---------------------------- */
> > -
> > -#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
> > -#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
> > -#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
> > -#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
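The small-bin indexing above is just a divide-by-8 (with SMALLBIN_SHIFT being
3): a 40-byte chunk lands in bin 5, and bin 5 holds exactly the 40-byte
chunks. A stand-alone check of that arithmetic (not patch code):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        const unsigned shift = 3;                /* SMALLBIN_SHIFT           */
        size_t sz = 40;
        unsigned idx = (unsigned)(sz >> shift);  /* small_index(40)     -> 5  */
        size_t back = (size_t)idx << shift;      /* small_index2size(5) -> 40 */
        printf("index %u, size %zu\n", idx, back);
        return 0;
    }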
> > -
> > -/* addressing by index. See above about smallbin repositioning */
> > -#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
> > -#define treebin_at(M,i)     (&((M)->treebins[i]))
> > -
> > -/* assign tree index for size S to variable I */
> > -#if defined(__GNUC__) && defined(i386)
> > -#define compute_tree_index(S, I)\
> > -{\
> > -  size_t X = S >> TREEBIN_SHIFT;\
> > -  if (X == 0)\
> > -    I = 0;\
> > -  else if (X > 0xFFFF)\
> > -    I = NTREEBINS-1;\
> > -  else {\
> > -    unsigned int K;\
> > -    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
> > -    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
> > -  }\
> > -}
> > -#else /* GNUC */
> > -#define compute_tree_index(S, I)\
> > -{\
> > -  size_t X = S >> TREEBIN_SHIFT;\
> > -  if (X == 0)\
> > -    I = 0;\
> > -  else if (X > 0xFFFF)\
> > -    I = NTREEBINS-1;\
> > -  else {\
> > -    unsigned int Y = (unsigned int)X;\
> > -    unsigned int N = ((Y - 0x100) >> 16) & 8;\
> > -    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
> > -    N += K;\
> > -    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
> > -    K = 14 - N + ((Y <<= K) >> 15);\
> > -    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
> > -  }\
> > -}
> > -#endif /* GNUC */
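A quick stand-alone run of the portable fallback above (transcribed here for
illustration, not patch code) shows the two-bins-per-power-of-two layout the
earlier comment describes: 256-byte chunks land in treebin 0, 384-byte chunks
in treebin 1, and so on.

    #include <stdio.h>
    #include <stddef.h>

    /* Same computation as the non-GNUC compute_tree_index above,
       with TREEBIN_SHIFT = 8 and NTREEBINS = 32. */
    static unsigned tree_index(size_t S)
    {
        size_t X = S >> 8;
        if (X == 0)
            return 0;
        if (X > 0xFFFF)
            return 31;
        {
            unsigned int Y = (unsigned int)X;
            unsigned int N = ((Y - 0x100) >> 16) & 8;
            unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
            N += K;
            N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
            K = 14 - N + ((Y <<= K) >> 15);
            return (K << 1) + ((S >> (K + 7)) & 1);
        }
    }

    int main(void)
    {
        printf("%u %u %u %u\n", tree_index(256), tree_index(384),
                                tree_index(512), tree_index(768));  /* 0 1 2 3 */
        return 0;
    }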
> > -
> > -/* Bit representing maximum resolved size in a treebin at i */
> > -#define bit_for_tree_index(i) \
> > -   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
> > -
> > -/* Shift placing maximum resolved bit in a treebin at i as sign bit */
> > -#define leftshift_for_tree_index(i) \
> > -   ((i == NTREEBINS-1)? 0 : \
> > -    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
> > -
> > -/* The size of the smallest chunk held in bin with index i */
> > -#define minsize_for_tree_index(i) \
> > -   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
> > -   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
> > -
> > -/* ------------------------ Operations on bin maps ----------------------- */
> > -
> > -/* bit corresponding to given index */
> > -#define idx2bit(i)              ((binmap_t)(1) << (i))
> > -
> > -/* Mark/Clear bits with given index */
> > -#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
> > -#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
> > -#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
> > -
> > -#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
> > -#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
> > -#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
> > -
> > -/* index corresponding to given bit */
> > -
> > -#if defined(__GNUC__) && defined(i386)
> > -#define compute_bit2idx(X, I)\
> > -{\
> > -  unsigned int J;\
> > -  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
> > -  I = (bindex_t)J;\
> > -}
> > -
> > -#else /* GNUC */
> > -#if  USE_BUILTIN_FFS
> > -#define compute_bit2idx(X, I) I = ffs(X)-1
> > -
> > -#else /* USE_BUILTIN_FFS */
> > -#define compute_bit2idx(X, I)\
> > -{\
> > -  unsigned int Y = X - 1;\
> > -  unsigned int K = Y >> (16-4) & 16;\
> > -  unsigned int N = K;        Y >>= K;\
> > -  N += K = Y >> (8-3) &  8;  Y >>= K;\
> > -  N += K = Y >> (4-2) &  4;  Y >>= K;\
> > -  N += K = Y >> (2-1) &  2;  Y >>= K;\
> > -  N += K = Y >> (1-0) &  1;  Y >>= K;\
> > -  I = (bindex_t)(N + Y);\
> > -}
> > -#endif /* USE_BUILTIN_FFS */
> > -#endif /* GNUC */
> > -
> > -/* isolate the least set bit of a bitmap */
> > -#define least_bit(x)         ((x) & -(x))
> > -
> > -/* mask with all bits to left of least bit of x on */
> > -#define left_bits(x)         ((x<<1) | -(x<<1))
> > -
> > -/* mask with all bits to left of or equal to least bit of x on */
> > -#define same_or_left_bits(x) ((x) | -(x))
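These three one-liners are what let the allocator skip empty bins without a
loop: isolate the lowest set bit of a bin map, or mask away everything below a
given bin. A small stand-alone illustration (not patch code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int map  = 0x48;           /* bins 3 and 6 non-empty          */
        unsigned int bit3 = 1u << 3;        /* idx2bit(3)                      */
        unsigned int left = (bit3 << 1) | (0u - (bit3 << 1));   /* left_bits   */

        printf("%#x\n", map & (0u - map));  /* least_bit(map)  -> 0x8 (bin 3)  */
        printf("%#x\n", map & left);        /* next bin past 3 -> 0x40 (bin 6) */
        return 0;
    }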
> > -
> > -
> > -/* ----------------------- Runtime Check Support ------------------------- */
> > -
> > -/*
> > -  For security, the main invariant is that malloc/free/etc never
> > -  writes to a static address other than malloc_state, unless static
> > -  malloc_state itself has been corrupted, which cannot occur via
> > -  malloc (because of these checks). In essence this means that we
> > -  believe all pointers, sizes, maps etc held in malloc_state, but
> > -  check all of those linked or offsetted from other embedded data
> > -  structures.  These checks are interspersed with main code in a way
> > -  that tends to minimize their run-time cost.
> > -
> > -  When FOOTERS is defined, in addition to range checking, we also
> > -  verify footer fields of inuse chunks, which can be used to guarantee
> > -  that the mstate controlling malloc/free is intact.  This is a
> > -  streamlined version of the approach described by William Robertson
> > -  et al in "Run-time Detection of Heap-based Overflows" LISA'03
> > -  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
> > -  of an inuse chunk holds the xor of its mstate and a random seed,
> > -  that is checked upon calls to free() and realloc().  This is
> > -  (probabilistically) unguessable from outside the program, but can be
> > -  computed by any code successfully malloc'ing any chunk, so does not
> > -  itself provide protection against code that has already broken
> > -  security through some other means.  Unlike Robertson et al, we
> > -  always dynamically check addresses of all offset chunks (previous,
> > -  next, etc). This turns out to be cheaper than relying on hashes.
> > -*/
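Restating the FOOTERS trick in a couple of lines (a toy sketch, not the code
in this file): the footer of an in-use chunk stores mstate ^ mparams.magic, so
mark_inuse_foot/get_mstate_for let free() recover the owning mstate and reject
pointers whose footer does not decode to a plausible state.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t mstate_addr = 0x100040;   /* pretend address of the mspace */
        uintptr_t magic       = 0x58585858; /* pretend mparams.magic         */

        uintptr_t footer = mstate_addr ^ magic;  /* what mark_inuse_foot stores  */
        uintptr_t owner  = footer ^ magic;       /* what get_mstate_for recovers */

        assert(owner == mstate_addr);
        printf("recovered mstate = %#lx\n", (unsigned long)owner);
        return 0;
    }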
> > -
> > -#if !INSECURE
> > -/* Check if address a is at least as high as any from MORECORE or MMAP */
> > -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
> > -/* Check if address of next chunk n is higher than base chunk p */
> > -#define ok_next(p, n)    ((char*)(p) < (char*)(n))
> > -/* Check if p has its cinuse bit on */
> > -#define ok_cinuse(p)     cinuse(p)
> > -/* Check if p has its pinuse bit on */
> > -#define ok_pinuse(p)     pinuse(p)
> > -
> > -#else /* !INSECURE */
> > -#define ok_address(M, a) (1)
> > -#define ok_next(b, n)    (1)
> > -#define ok_cinuse(p)     (1)
> > -#define ok_pinuse(p)     (1)
> > -#endif /* !INSECURE */
> > -
> > -#if (FOOTERS && !INSECURE)
> > -/* Check if (alleged) mstate m has expected magic field */
> > -#define ok_magic(M)      ((M)->magic == mparams.magic)
> > -#else  /* (FOOTERS && !INSECURE) */
> > -#define ok_magic(M)      (1)
> > -#endif /* (FOOTERS && !INSECURE) */
> > -
> > -
> > -/* In gcc, use __builtin_expect to minimize impact of checks */
> > -#if !INSECURE
> > -#if defined(__GNUC__) && __GNUC__ >= 3
> > -#define RTCHECK(e)  __builtin_expect(e, 1)
> > -#else /* GNUC */
> > -#define RTCHECK(e)  (e)
> > -#endif /* GNUC */
> > -#else /* !INSECURE */
> > -#define RTCHECK(e)  (1)
> > -#endif /* !INSECURE */
> > -
> > -/* macros to set up inuse chunks with or without footers */
> > -
> > -#if !FOOTERS
> > -
> > -#define mark_inuse_foot(M,p,s)
> > -
> > -/* Set cinuse bit and pinuse bit of next chunk */
> > -#define set_inuse(M,p,s)\
> > -  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > -  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > -
> > -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
> > -#define set_inuse_and_pinuse(M,p,s)\
> > -  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > -  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > -
> > -/* Set size, cinuse and pinuse bit of this chunk */
> > -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > -  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
> > -
> > -#else /* FOOTERS */
> > -
> > -/* Set foot of inuse chunk to be xor of mstate and seed */
> > -#define mark_inuse_foot(M,p,s)\
> > -  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
> > -
> > -#define get_mstate_for(p)\
> > -  ((mstate)(((mchunkptr)((char*)(p) +\
> > -    (chunksize(p))))->prev_foot ^ mparams.magic))
> > -
> > -#define set_inuse(M,p,s)\
> > -  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > -  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
> > -  mark_inuse_foot(M,p,s))
> > -
> > -#define set_inuse_and_pinuse(M,p,s)\
> > -  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > -  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
> > - mark_inuse_foot(M,p,s))
> > -
> > -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > -  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > -  mark_inuse_foot(M, p, s))
> > -
> > -#endif /* !FOOTERS */
> > -
> > -/* ---------------------------- setting mparams -------------------------- */
> > -
> > -/* Initialize mparams */
> > -static int init_mparams(void) {
> > -  if (mparams.page_size == 0) {
> > -    size_t s;
> > -
> > -    mparams.default_mflags = USE_LOCK_BIT;
> > -
> > -#if (FOOTERS && !INSECURE)
> > -    {
> > -#if USE_DEV_RANDOM
> > -      int fd;
> > -      unsigned char buf[sizeof(size_t)];
> > -      /* Try to use /dev/urandom, else fall back on using time */
> > -      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
> > -          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
> > -        s = *((size_t *) buf);
> > -        close(fd);
> > -      }
> > -      else
> > -#endif /* USE_DEV_RANDOM */
> > -        s = (size_t)(time(0) ^ (size_t)0x55555555U);
> > -
> > -      s |= (size_t)8U;    /* ensure nonzero */
> > -      s &= ~(size_t)7U;   /* improve chances of fault for bad values */
> > -
> > -    }
> > -#else /* (FOOTERS && !INSECURE) */
> > -    s = (size_t)0x58585858U;
> > -#endif /* (FOOTERS && !INSECURE) */
> > -    ACQUIRE_MAGIC_INIT_LOCK();
> > -    if (mparams.magic == 0) {
> > -      mparams.magic = s;
> > -      /* Set up lock for main malloc area */
> > -      //INITIAL_LOCK(&gm->mutex);
> > -      //gm->mflags = mparams.default_mflags;
> > -    }
> > -    RELEASE_MAGIC_INIT_LOCK();
> > -
> > -
> > -    mparams.page_size = malloc_getpagesize;
> > -    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
> > -                           DEFAULT_GRANULARITY : mparams.page_size);
> > -
> > -    /* Sanity-check configuration:
> > -       size_t must be unsigned and as wide as pointer type.
> > -       ints must be at least 4 bytes.
> > -       alignment must be at least 8.
> > -       Alignment, min chunk size, and page size must all be powers of 2.
> > -    */
> > -    if ((sizeof(size_t) != sizeof(char*)) ||
> > -        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
> > -        (sizeof(int) < 4)  ||
> > -        (MALLOC_ALIGNMENT < (size_t)8U) ||
> > -        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
> > -        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
> > -        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
> > -        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
> > -      ABORT(NULL);
> > -  }
> > -  return 0;
> > -}
> > -
> > -/* support for mallopt */
> > -static int change_mparam(int param_number, int value) {
> > -  size_t val = (size_t)value;
> > -  init_mparams();
> > -  switch(param_number) {
> > -  case M_GRANULARITY:
> > -    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
> > -      mparams.granularity = val;
> > -      return 1;
> > -    }
> > -    else
> > -      return 0;
> > -  default:
> > -    return 0;
> > -  }
> > -}
> > -
> > -#if DEBUG
> > -/* ------------------------- Debugging Support --------------------------- */
> > -
> > -/* Check properties of any chunk, whether free, inuse, mmapped etc  */
> > -static void do_check_any_chunk(mstate m, mchunkptr p) {
> > -  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
> > -  assert(m->user_data, ok_address(m, p));
> > -}
> > -
> > -/* Check properties of top chunk */
> > -static void do_check_top_chunk(mstate m, mchunkptr p) {
> > -  msegmentptr sp = segment_holding(m, (char*)p);
> > -  size_t  sz = chunksize(p);
> > -  assert(m->user_data, sp != 0);
> > -  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
> > -  assert(m->user_data, ok_address(m, p));
> > -  assert(m->user_data, sz == m->topsize);
> > -  assert(m->user_data, sz > 0);
> > -  assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
> > -  assert(m->user_data, pinuse(p));
> > -  assert(m->user_data, !next_pinuse(p));
> > -}
> > -
> > -/* Check properties of inuse chunks */
> > -static void do_check_inuse_chunk(mstate m, mchunkptr p) {
> > -  do_check_any_chunk(m, p);
> > -  assert(m->user_data, cinuse(p));
> > -  assert(m->user_data, next_pinuse(p));
> > -  /* If not pinuse, previous chunk has OK offset */
> > -  assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
> > -}
> > -
> > -/* Check properties of free chunks */
> > -static void do_check_free_chunk(mstate m, mchunkptr p) {
> > -  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > -  mchunkptr next = chunk_plus_offset(p, sz);
> > -  do_check_any_chunk(m, p);
> > -  assert(m->user_data, !cinuse(p));
> > -  assert(m->user_data, !next_pinuse(p));
> > -  if (p != m->dv && p != m->top) {
> > -    if (sz >= MIN_CHUNK_SIZE) {
> > -      assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > -      assert(m->user_data, is_aligned(chunk2mem(p)));
> > -      assert(m->user_data, next->prev_foot == sz);
> > -      assert(m->user_data, pinuse(p));
> > -      assert(m->user_data, next == m->top || cinuse(next));
> > -      assert(m->user_data, p->fd->bk == p);
> > -      assert(m->user_data, p->bk->fd == p);
> > -    }
> > -    else  /* markers are always of size SIZE_T_SIZE */
> > -      assert(m->user_data, sz == SIZE_T_SIZE);
> > -  }
> > -}
> > -
> > -/* Check properties of malloced chunks at the point they are malloced */
> > -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
> > -  if (mem != 0) {
> > -    mchunkptr p = mem2chunk(mem);
> > -    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > -    do_check_inuse_chunk(m, p);
> > -    assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > -    assert(m->user_data, sz >= MIN_CHUNK_SIZE);
> > -    assert(m->user_data, sz >= s);
> > -    /* size is less than MIN_CHUNK_SIZE more than request */
> > -    assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
> > -  }
> > -}
> > -
> > -/* Check a tree and its subtrees.  */
> > -static void do_check_tree(mstate m, tchunkptr t) {
> > -  tchunkptr head = 0;
> > -  tchunkptr u = t;
> > -  bindex_t tindex = t->index;
> > -  size_t tsize = chunksize(t);
> > -  bindex_t idx;
> > -  compute_tree_index(tsize, idx);
> > -  assert(m->user_data, tindex == idx);
> > -  assert(m->user_data, tsize >= MIN_LARGE_SIZE);
> > -  assert(m->user_data, tsize >= minsize_for_tree_index(idx));
> > -  assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
> > -
> > -  do { /* traverse through chain of same-sized nodes */
> > -    do_check_any_chunk(m, ((mchunkptr)u));
> > -    assert(m->user_data, u->index == tindex);
> > -    assert(m->user_data, chunksize(u) == tsize);
> > -    assert(m->user_data, !cinuse(u));
> > -    assert(m->user_data, !next_pinuse(u));
> > -    assert(m->user_data, u->fd->bk == u);
> > -    assert(m->user_data, u->bk->fd == u);
> > -    if (u->parent == 0) {
> > -      assert(m->user_data, u->child[0] == 0);
> > -      assert(m->user_data, u->child[1] == 0);
> > -    }
> > -    else {
> > -      assert(m->user_data, head == 0); /* only one node on chain has parent */
> > -      head = u;
> > -      assert(m->user_data, u->parent != u);
> > -      assert(m->user_data, u->parent->child[0] == u ||
> > -             u->parent->child[1] == u ||
> > -             *((tbinptr*)(u->parent)) == u);
> > -      if (u->child[0] != 0) {
> > -        assert(m->user_data, u->child[0]->parent == u);
> > -        assert(m->user_data, u->child[0] != u);
> > -        do_check_tree(m, u->child[0]);
> > -      }
> > -      if (u->child[1] != 0) {
> > -        assert(m->user_data, u->child[1]->parent == u);
> > -        assert(m->user_data, u->child[1] != u);
> > -        do_check_tree(m, u->child[1]);
> > -      }
> > -      if (u->child[0] != 0 && u->child[1] != 0) {
> > -        assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1]));
> > -      }
> > -    }
> > -    u = u->fd;
> > -  } while (u != t);
> > -  assert(m->user_data, head != 0);
> > -}
> > -
> > -/*  Check all the chunks in a treebin.  */
> > -static void do_check_treebin(mstate m, bindex_t i) {
> > -  tbinptr* tb = treebin_at(m, i);
> > -  tchunkptr t = *tb;
> > -  int empty = (m->treemap & (1U << i)) == 0;
> > -  if (t == 0)
> > -    assert(m->user_data, empty);
> > -  if (!empty)
> > -    do_check_tree(m, t);
> > -}
> > -
> > -/*  Check all the chunks in a smallbin.  */
> > -static void do_check_smallbin(mstate m, bindex_t i) {
> > -  sbinptr b = smallbin_at(m, i);
> > -  mchunkptr p = b->bk;
> > -  unsigned int empty = (m->smallmap & (1U << i)) == 0;
> > -  if (p == b)
> > -    assert(m->user_data, empty);
> > -  if (!empty) {
> > -    for (; p != b; p = p->bk) {
> > -      size_t size = chunksize(p);
> > -      mchunkptr q;
> > -      /* each chunk claims to be free */
> > -      do_check_free_chunk(m, p);
> > -      /* chunk belongs in bin */
> > -      assert(m->user_data, small_index(size) == i);
> > -      assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));
> > -      /* chunk is followed by an inuse chunk */
> > -      q = next_chunk(p);
> > -      if (q->head != FENCEPOST_HEAD)
> > -        do_check_inuse_chunk(m, q);
> > -    }
> > -  }
> > -}
> > -
> > -/* Find x in a bin. Used in other check functions. */
> > -static int bin_find(mstate m, mchunkptr x) {
> > -  size_t size = chunksize(x);
> > -  if (is_small(size)) {
> > -    bindex_t sidx = small_index(size);
> > -    sbinptr b = smallbin_at(m, sidx);
> > -    if (smallmap_is_marked(m, sidx)) {
> > -      mchunkptr p = b;
> > -      do {
> > -        if (p == x)
> > -          return 1;
> > -      } while ((p = p->fd) != b);
> > -    }
> > -  }
> > -  else {
> > -    bindex_t tidx;
> > -    compute_tree_index(size, tidx);
> > -    if (treemap_is_marked(m, tidx)) {
> > -      tchunkptr t = *treebin_at(m, tidx);
> > -      size_t sizebits = size << leftshift_for_tree_index(tidx);
> > -      while (t != 0 && chunksize(t) != size) {
> > -        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > -        sizebits <<= 1;
> > -      }
> > -      if (t != 0) {
> > -        tchunkptr u = t;
> > -        do {
> > -          if (u == (tchunkptr)x)
> > -            return 1;
> > -        } while ((u = u->fd) != t);
> > -      }
> > -    }
> > -  }
> > -  return 0;
> > -}
> > -
> > -/* Traverse each chunk and check it; return total */
> > -static size_t traverse_and_check(mstate m) {
> > -  size_t sum = 0;
> > -  if (is_initialized(m)) {
> > -    msegmentptr s = &m->seg;
> > -    sum += m->topsize + TOP_FOOT_SIZE;
> > -    while (s != 0) {
> > -      mchunkptr q = align_as_chunk(s->base);
> > -      mchunkptr lastq = 0;
> > -      assert(m->user_data, pinuse(q));
> > -      while (segment_holds(s, q) &&
> > -             q != m->top && q->head != FENCEPOST_HEAD) {
> > -        sum += chunksize(q);
> > -        if (cinuse(q)) {
> > -          assert(m->user_data, !bin_find(m, q));
> > -          do_check_inuse_chunk(m, q);
> > -        }
> > -        else {
> > -          assert(m->user_data, q == m->dv || bin_find(m, q));
> > -          assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
> > -          do_check_free_chunk(m, q);
> > -        }
> > -        lastq = q;
> > -        q = next_chunk(q);
> > -      }
> > -      s = s->next;
> > -    }
> > -  }
> > -  return sum;
> > -}
> > -
> > -/* Check all properties of malloc_state. */
> > -static void do_check_malloc_state(mstate m) {
> > -  bindex_t i;
> > -  size_t total;
> > -  /* check bins */
> > -  for (i = 0; i < NSMALLBINS; ++i)
> > -    do_check_smallbin(m, i);
> > -  for (i = 0; i < NTREEBINS; ++i)
> > -    do_check_treebin(m, i);
> > -
> > -  if (m->dvsize != 0) { /* check dv chunk */
> > -    do_check_any_chunk(m, m->dv);
> > -    assert(m->user_data, m->dvsize == chunksize(m->dv));
> > -    assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
> > -    assert(m->user_data, bin_find(m, m->dv) == 0);
> > -  }
> > -
> > -  if (m->top != 0) {   /* check top chunk */
> > -    do_check_top_chunk(m, m->top);
> > -    assert(m->user_data, m->topsize == chunksize(m->top));
> > -    assert(m->user_data, m->topsize > 0);
> > -    assert(m->user_data, bin_find(m, m->top) == 0);
> > -  }
> > -
> > -  total = traverse_and_check(m);
> > -  assert(m->user_data, total <= m->footprint);
> > -  assert(m->user_data, m->footprint <= m->max_footprint);
> > -}
> > -#endif /* DEBUG */
> > -
> > -/* ----------------------------- statistics ------------------------------ */
> > -
> > -#if !NO_MALLINFO
> > -static struct mallinfo internal_mallinfo(mstate m) {
> > -  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
> > -  if (!PREACTION(m)) {
> > -    check_malloc_state(m);
> > -    if (is_initialized(m)) {
> > -      size_t nfree = SIZE_T_ONE; /* top always free */
> > -      size_t mfree = m->topsize + TOP_FOOT_SIZE;
> > -      size_t sum = mfree;
> > -      msegmentptr s = &m->seg;
> > -      while (s != 0) {
> > -        mchunkptr q = align_as_chunk(s->base);
> > -        while (segment_holds(s, q) &&
> > -               q != m->top && q->head != FENCEPOST_HEAD) {
> > -          size_t sz = chunksize(q);
> > -          sum += sz;
> > -          if (!cinuse(q)) {
> > -            mfree += sz;
> > -            ++nfree;
> > -          }
> > -          q = next_chunk(q);
> > -        }
> > -        s = s->next;
> > -      }
> > -
> > -      nm.arena    = sum;
> > -      nm.ordblks  = nfree;
> > -      nm.hblkhd   = m->footprint - sum;
> > -      nm.usmblks  = m->max_footprint;
> > -      nm.uordblks = m->footprint - mfree;
> > -      nm.fordblks = mfree;
> > -      nm.keepcost = m->topsize;
> > -    }
> > -
> > -    POSTACTION(m);
> > -  }
> > -  return nm;
> > -}
> > -#endif /* !NO_MALLINFO */
> > -
> > -static void internal_malloc_stats(mstate m) {
> > -  if (!PREACTION(m)) {
> > -    size_t maxfp = 0;
> > -    size_t fp = 0;
> > -    size_t used = 0;
> > -    check_malloc_state(m);
> > -    if (is_initialized(m)) {
> > -      msegmentptr s = &m->seg;
> > -      maxfp = m->max_footprint;
> > -      fp = m->footprint;
> > -      used = fp - (m->topsize + TOP_FOOT_SIZE);
> > -
> > -      while (s != 0) {
> > -        mchunkptr q = align_as_chunk(s->base);
> > -        while (segment_holds(s, q) &&
> > -               q != m->top && q->head != FENCEPOST_HEAD) {
> > -          if (!cinuse(q))
> > -            used -= chunksize(q);
> > -          q = next_chunk(q);
> > -        }
> > -        s = s->next;
> > -      }
> > -    }
> > -
> > -    PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned
> > long)(maxfp)));
> > -    PRINT((m->user_data, "system bytes     = %10lu\n", (unsigned
> > long)(fp)));
> > -    PRINT((m->user_data, "in use bytes     = %10lu\n", (unsigned
> > long)(used)));
> > -
> > -    POSTACTION(m);
> > -  }
> > -}
> > -
> > -/* ----------------------- Operations on smallbins ----------------------- */
> > -
> > -/*
> > -  Various forms of linking and unlinking are defined as macros.  Even
> > -  the ones for trees, which are very long but have very short typical
> > -  paths.  This is ugly but reduces reliance on inlining support of
> > -  compilers.
> > -*/
> > -
> > -/* Link a free chunk into a smallbin  */
> > -#define insert_small_chunk(M, P, S) {\
> > -  bindex_t I  = small_index(S);\
> > -  mchunkptr B = smallbin_at(M, I);\
> > -  mchunkptr F = B;\
> > -  assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
> > -  if (!smallmap_is_marked(M, I))\
> > -    mark_smallmap(M, I);\
> > -  else if (RTCHECK(ok_address(M, B->fd)))\
> > -    F = B->fd;\
> > -  else {\
> > -    CORRUPTION_ERROR_ACTION(M);\
> > -  }\
> > -  B->fd = P;\
> > -  F->bk = P;\
> > -  P->fd = F;\
> > -  P->bk = B;\
> > -}
> > -
> > -/* Unlink a chunk from a smallbin  */
> > -#define unlink_small_chunk(M, P, S) {\
> > -  mchunkptr F = P->fd;\
> > -  mchunkptr B = P->bk;\
> > -  bindex_t I = small_index(S);\
> > -  assert((M)->user_data, P != B);\
> > -  assert((M)->user_data, P != F);\
> > -  assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > -  if (F == B)\
> > -    clear_smallmap(M, I);\
> > -  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
> > -                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
> > -    F->bk = B;\
> > -    B->fd = F;\
> > -  }\
> > -  else {\
> > -    CORRUPTION_ERROR_ACTION(M);\
> > -  }\
> > -}
> > -
> > -/* Unlink the first chunk from a smallbin */
> > -#define unlink_first_small_chunk(M, B, P, I) {\
> > -  mchunkptr F = P->fd;\
> > -  assert((M)->user_data, P != B);\
> > -  assert((M)->user_data, P != F);\
> > -  assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > -  if (B == F)\
> > -    clear_smallmap(M, I);\
> > -  else if (RTCHECK(ok_address(M, F))) {\
> > -    B->fd = F;\
> > -    F->bk = B;\
> > -  }\
> > -  else {\
> > -    CORRUPTION_ERROR_ACTION(M);\
> > -  }\
> > -}
> > -
> > -/* Replace dv node, binning the old one */
> > -/* Used only when dvsize known to be small */
> > -#define replace_dv(M, P, S) {\
> > -  size_t DVS = M->dvsize;\
> > -  if (DVS != 0) {\
> > -    mchunkptr DV = M->dv;\
> > -    assert((M)->user_data, is_small(DVS));\
> > -    insert_small_chunk(M, DV, DVS);\
> > -  }\
> > -  M->dvsize = S;\
> > -  M->dv = P;\
> > -}
> > -
> > -
> > -/* ------------------------- Operations on trees ------------------------- */
> > -
> > -/* Insert chunk into tree */
> > -#define insert_large_chunk(M, X, S) {\
> > -  tbinptr* H;\
> > -  bindex_t I;\
> > -  compute_tree_index(S, I);\
> > -  H = treebin_at(M, I);\
> > -  X->index = I;\
> > -  X->child[0] = X->child[1] = 0;\
> > -  if (!treemap_is_marked(M, I)) {\
> > -    mark_treemap(M, I);\
> > -    *H = X;\
> > -    X->parent = (tchunkptr)H;\
> > -    X->fd = X->bk = X;\
> > -  }\
> > -  else {\
> > -    tchunkptr T = *H;\
> > -    size_t K = S << leftshift_for_tree_index(I);\
> > -    for (;;) {\
> > -      if (chunksize(T) != S) {\
> > -        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
> > -        K <<= 1;\
> > -        if (*C != 0)\
> > -          T = *C;\
> > -        else if (RTCHECK(ok_address(M, C))) {\
> > -          *C = X;\
> > -          X->parent = T;\
> > -          X->fd = X->bk = X;\
> > -          break;\
> > -        }\
> > -        else {\
> > -          CORRUPTION_ERROR_ACTION(M);\
> > -          break;\
> > -        }\
> > -      }\
> > -      else {\
> > -        tchunkptr F = T->fd;\
> > -        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
> > -          T->fd = F->bk = X;\
> > -          X->fd = F;\
> > -          X->bk = T;\
> > -          X->parent = 0;\
> > -          break;\
> > -        }\
> > -        else {\
> > -          CORRUPTION_ERROR_ACTION(M);\
> > -          break;\
> > -        }\
> > -      }\
> > -    }\
> > -  }\
> > -}
> > -
> > -/*
> > -  Unlink steps:
> > -
> > -  1. If x is a chained node, unlink it from its same-sized fd/bk links
> > -     and choose its bk node as its replacement.
> > -  2. If x was the last node of its size, but not a leaf node, it must
> > -     be replaced with a leaf node (not merely one with an open left or
> > -     right), to make sure that lefts and rights of descendents
> > -     correspond properly to bit masks.  We use the rightmost descendent
> > -     of x.  We could use any other leaf, but this is easy to locate and
> > -     tends to counteract removal of leftmosts elsewhere, and so keeps
> > -     paths shorter than minimally guaranteed.  This doesn't loop much
> > -     because on average a node in a tree is near the bottom.
> > -  3. If x is the base of a chain (i.e., has parent links) relink
> > -     x's parent and children to x's replacement (or null if none).
> > -*/
> > -
> > -#define unlink_large_chunk(M, X) {\
> > -  tchunkptr XP = X->parent;\
> > -  tchunkptr R;\
> > -  if (X->bk != X) {\
> > -    tchunkptr F = X->fd;\
> > -    R = X->bk;\
> > -    if (RTCHECK(ok_address(M, F))) {\
> > -      F->bk = R;\
> > -      R->fd = F;\
> > -    }\
> > -    else {\
> > -      CORRUPTION_ERROR_ACTION(M);\
> > -    }\
> > -  }\
> > -  else {\
> > -    tchunkptr* RP;\
> > -    if (((R = *(RP = &(X->child[1]))) != 0) ||\
> > -        ((R = *(RP = &(X->child[0]))) != 0)) {\
> > -      tchunkptr* CP;\
> > -      while ((*(CP = &(R->child[1])) != 0) ||\
> > -             (*(CP = &(R->child[0])) != 0)) {\
> > -        R = *(RP = CP);\
> > -      }\
> > -      if (RTCHECK(ok_address(M, RP)))\
> > -        *RP = 0;\
> > -      else {\
> > -        CORRUPTION_ERROR_ACTION(M);\
> > -      }\
> > -    }\
> > -  }\
> > -  if (XP != 0) {\
> > -    tbinptr* H = treebin_at(M, X->index);\
> > -    if (X == *H) {\
> > -      if ((*H = R) == 0) \
> > -        clear_treemap(M, X->index);\
> > -    }\
> > -    else if (RTCHECK(ok_address(M, XP))) {\
> > -      if (XP->child[0] == X) \
> > -        XP->child[0] = R;\
> > -      else \
> > -        XP->child[1] = R;\
> > -    }\
> > -    else\
> > -      CORRUPTION_ERROR_ACTION(M);\
> > -    if (R != 0) {\
> > -      if (RTCHECK(ok_address(M, R))) {\
> > -        tchunkptr C0, C1;\
> > -        R->parent = XP;\
> > -        if ((C0 = X->child[0]) != 0) {\
> > -          if (RTCHECK(ok_address(M, C0))) {\
> > -            R->child[0] = C0;\
> > -            C0->parent = R;\
> > -          }\
> > -          else\
> > -            CORRUPTION_ERROR_ACTION(M);\
> > -        }\
> > -        if ((C1 = X->child[1]) != 0) {\
> > -          if (RTCHECK(ok_address(M, C1))) {\
> > -            R->child[1] = C1;\
> > -            C1->parent = R;\
> > -          }\
> > -          else\
> > -            CORRUPTION_ERROR_ACTION(M);\
> > -        }\
> > -      }\
> > -      else\
> > -        CORRUPTION_ERROR_ACTION(M);\
> > -    }\
> > -  }\
> > -}
> > -
> > -/* Relays to large vs small bin operations */
> > -
> > -#define insert_chunk(M, P, S)\
> > -  if (is_small(S)) insert_small_chunk(M, P, S)\
> > -  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
> > -
> > -#define unlink_chunk(M, P, S)\
> > -  if (is_small(S)) unlink_small_chunk(M, P, S)\
> > -  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
> > -
> > -
> > -/* Relays to internal calls to malloc/free from realloc, memalign etc */
> > -
> > -#define internal_malloc(m, b) mspace_malloc(m, b)
> > -#define internal_free(m, mem) mspace_free(m,mem);
> > -
> > -
> > -/* -------------------------- mspace management -------------------------- */
> > -
> > -/* Initialize top chunk and its size */
> > -static void init_top(mstate m, mchunkptr p, size_t psize) {
> > -  /* Ensure alignment */
> > -  size_t offset = align_offset(chunk2mem(p));
> > -  p = (mchunkptr)((char*)p + offset);
> > -  psize -= offset;
> > -
> > -  m->top = p;
> > -  m->topsize = psize;
> > -  p->head = psize | PINUSE_BIT;
> > -  /* set size of fake trailing chunk holding overhead space only once */
> > -  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
> > -}
> > -
> > -/* Initialize bins for a new mstate that is otherwise zeroed out */
> > -static void init_bins(mstate m) {
> > -  /* Establish circular links for smallbins */
> > -  bindex_t i;
> > -  for (i = 0; i < NSMALLBINS; ++i) {
> > -    sbinptr bin = smallbin_at(m,i);
> > -    bin->fd = bin->bk = bin;
> > -  }
> > -}
> > -
> > -#if PROCEED_ON_ERROR
> > -
> > -/* default corruption action */
> > -static void reset_on_error(mstate m) {
> > -  int i;
> > -  ++malloc_corruption_error_count;
> > -  /* Reinitialize fields to forget about all memory */
> > -  m->smallbins = m->treebins = 0;
> > -  m->dvsize = m->topsize = 0;
> > -  m->seg.base = 0;
> > -  m->seg.size = 0;
> > -  m->seg.next = 0;
> > -  m->top = m->dv = 0;
> > -  for (i = 0; i < NTREEBINS; ++i)
> > -    *treebin_at(m, i) = 0;
> > -  init_bins(m);
> > -}
> > -#endif /* PROCEED_ON_ERROR */
> > -
> > -/* Allocate chunk and prepend remainder with chunk in successor base. */
> > -static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
> > -                           size_t nb) {
> > -  mchunkptr p = align_as_chunk(newbase);
> > -  mchunkptr oldfirst = align_as_chunk(oldbase);
> > -  size_t psize = (char*)oldfirst - (char*)p;
> > -  mchunkptr q = chunk_plus_offset(p, nb);
> > -  size_t qsize = psize - nb;
> > -  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
> > -
> > -  assert(m->user_data, (char*)oldfirst > (char*)q);
> > -  assert(m->user_data, pinuse(oldfirst));
> > -  assert(m->user_data, qsize >= MIN_CHUNK_SIZE);
> > -
> > -  /* consolidate remainder with first chunk of old base */
> > -  if (oldfirst == m->top) {
> > -    size_t tsize = m->topsize += qsize;
> > -    m->top = q;
> > -    q->head = tsize | PINUSE_BIT;
> > -    check_top_chunk(m, q);
> > -  }
> > -  else if (oldfirst == m->dv) {
> > -    size_t dsize = m->dvsize += qsize;
> > -    m->dv = q;
> > -    set_size_and_pinuse_of_free_chunk(q, dsize);
> > -  }
> > -  else {
> > -    if (!cinuse(oldfirst)) {
> > -      size_t nsize = chunksize(oldfirst);
> > -      unlink_chunk(m, oldfirst, nsize);
> > -      oldfirst = chunk_plus_offset(oldfirst, nsize);
> > -      qsize += nsize;
> > -    }
> > -    set_free_with_pinuse(q, qsize, oldfirst);
> > -    insert_chunk(m, q, qsize);
> > -    check_free_chunk(m, q);
> > -  }
> > -
> > -  check_malloced_chunk(m, chunk2mem(p), nb);
> > -  return chunk2mem(p);
> > -}
> > -
> > -/* -------------------------- System allocation -------------------------- */
> > -
> > -/* Get memory from system using MORECORE or MMAP */
> > -static void* sys_alloc(mstate m, size_t nb) {
> > -  MALLOC_FAILURE_ACTION;
> > -  return 0;
> > -}
> > -
> > -/* ---------------------------- malloc support --------------------------- */
> > -
> > -/* allocate a large request from the best fitting chunk in a treebin */
> > -static void* tmalloc_large(mstate m, size_t nb) {
> > -  tchunkptr v = 0;
> > -  size_t rsize = -nb; /* Unsigned negation */
> > -  tchunkptr t;
> > -  bindex_t idx;
> > -  compute_tree_index(nb, idx);
> > -
> > -  if ((t = *treebin_at(m, idx)) != 0) {
> > -    /* Traverse tree for this bin looking for node with size == nb */
> > -    size_t sizebits = nb << leftshift_for_tree_index(idx);
> > -    tchunkptr rst = 0;  /* The deepest untaken right subtree */
> > -    for (;;) {
> > -      tchunkptr rt;
> > -      size_t trem = chunksize(t) - nb;
> > -      if (trem < rsize) {
> > -        v = t;
> > -        if ((rsize = trem) == 0)
> > -          break;
> > -      }
> > -      rt = t->child[1];
> > -      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > -      if (rt != 0 && rt != t)
> > -        rst = rt;
> > -      if (t == 0) {
> > -        t = rst; /* set t to least subtree holding sizes > nb */
> > -        break;
> > -      }
> > -      sizebits <<= 1;
> > -    }
> > -  }
> > -
> > -  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
> > -    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
> > -    if (leftbits != 0) {
> > -      bindex_t i;
> > -      binmap_t leastbit = least_bit(leftbits);
> > -      compute_bit2idx(leastbit, i);
> > -      t = *treebin_at(m, i);
> > -    }
> > -  }
> > -
> > -  while (t != 0) { /* find smallest of tree or subtree */
> > -    size_t trem = chunksize(t) - nb;
> > -    if (trem < rsize) {
> > -      rsize = trem;
> > -      v = t;
> > -    }
> > -    t = leftmost_child(t);
> > -  }
> > -
> > -  /*  If dv is a better fit, return 0 so malloc will use it */
> > -  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
> > -    if (RTCHECK(ok_address(m, v))) { /* split */
> > -      mchunkptr r = chunk_plus_offset(v, nb);
> > -      assert(m->user_data, chunksize(v) == rsize + nb);
> > -      if (RTCHECK(ok_next(v, r))) {
> > -        unlink_large_chunk(m, v);
> > -        if (rsize < MIN_CHUNK_SIZE)
> > -          set_inuse_and_pinuse(m, v, (rsize + nb));
> > -        else {
> > -          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > -          set_size_and_pinuse_of_free_chunk(r, rsize);
> > -          insert_chunk(m, r, rsize);
> > -        }
> > -        return chunk2mem(v);
> > -      }
> > -    }
> > -    CORRUPTION_ERROR_ACTION(m);
> > -  }
> > -  return 0;
> > -}
> > -
> > -/* allocate a small request from the best fitting chunk in a treebin */
> > -static void* tmalloc_small(mstate m, size_t nb) {
> > -  tchunkptr t, v;
> > -  size_t rsize;
> > -  bindex_t i;
> > -  binmap_t leastbit = least_bit(m->treemap);
> > -  compute_bit2idx(leastbit, i);
> > -
> > -  v = t = *treebin_at(m, i);
> > -  rsize = chunksize(t) - nb;
> > -
> > -  while ((t = leftmost_child(t)) != 0) {
> > -    size_t trem = chunksize(t) - nb;
> > -    if (trem < rsize) {
> > -      rsize = trem;
> > -      v = t;
> > -    }
> > -  }
> > -
> > -  if (RTCHECK(ok_address(m, v))) {
> > -    mchunkptr r = chunk_plus_offset(v, nb);
> > -    assert(m->user_data, chunksize(v) == rsize + nb);
> > -    if (RTCHECK(ok_next(v, r))) {
> > -      unlink_large_chunk(m, v);
> > -      if (rsize < MIN_CHUNK_SIZE)
> > -        set_inuse_and_pinuse(m, v, (rsize + nb));
> > -      else {
> > -        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > -        set_size_and_pinuse_of_free_chunk(r, rsize);
> > -        replace_dv(m, r, rsize);
> > -      }
> > -      return chunk2mem(v);
> > -    }
> > -  }
> > -
> > -  CORRUPTION_ERROR_ACTION(m);
> > -  return 0;
> > -}
> > -
> > -/* --------------------------- realloc support --------------------------- */
> > -
> > -static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
> > -  if (bytes >= MAX_REQUEST) {
> > -    MALLOC_FAILURE_ACTION;
> > -    return 0;
> > -  }
> > -  if (!PREACTION(m)) {
> > -    mchunkptr oldp = mem2chunk(oldmem);
> > -    size_t oldsize = chunksize(oldp);
> > -    mchunkptr next = chunk_plus_offset(oldp, oldsize);
> > -    mchunkptr newp = 0;
> > -    void* extra = 0;
> > -
> > -    /* Try to either shrink or extend into top. Else malloc-copy-free */
> > -
> > -    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
> > -                ok_next(oldp, next) && ok_pinuse(next))) {
> > -      size_t nb = request2size(bytes);
> > -      if (oldsize >= nb) { /* already big enough */
> > -        size_t rsize = oldsize - nb;
> > -        newp = oldp;
> > -        if (rsize >= MIN_CHUNK_SIZE) {
> > -          mchunkptr remainder = chunk_plus_offset(newp, nb);
> > -          set_inuse(m, newp, nb);
> > -          set_inuse(m, remainder, rsize);
> > -          extra = chunk2mem(remainder);
> > -        }
> > -      }
> > -      else if (next == m->top && oldsize + m->topsize > nb) {
> > -        /* Expand into top */
> > -        size_t newsize = oldsize + m->topsize;
> > -        size_t newtopsize = newsize - nb;
> > -        mchunkptr newtop = chunk_plus_offset(oldp, nb);
> > -        set_inuse(m, oldp, nb);
> > -        newtop->head = newtopsize |PINUSE_BIT;
> > -        m->top = newtop;
> > -        m->topsize = newtopsize;
> > -        newp = oldp;
> > -      }
> > -    }
> > -    else {
> > -      USAGE_ERROR_ACTION(m, oldmem);
> > -      POSTACTION(m);
> > -      return 0;
> > -    }
> > -
> > -    POSTACTION(m);
> > -
> > -    if (newp != 0) {
> > -      if (extra != 0) {
> > -        internal_free(m, extra);
> > -      }
> > -      check_inuse_chunk(m, newp);
> > -      return chunk2mem(newp);
> > -    }
> > -    else {
> > -      void* newmem = internal_malloc(m, bytes);
> > -      if (newmem != 0) {
> > -        size_t oc = oldsize - overhead_for(oldp);
> > -        MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
> > -        internal_free(m, oldmem);
> > -      }
> > -      return newmem;
> > -    }
> > -  }
> > -  return 0;
> > -}
> > -
> > -/* --------------------------- memalign support -------------------------- */
> > -
> > -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
> > -  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
> > -    return internal_malloc(m, bytes);
> > -  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
> > -    alignment = MIN_CHUNK_SIZE;
> > -  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
> > -    size_t a = MALLOC_ALIGNMENT << 1;
> > -    while (a < alignment) a <<= 1;
> > -    alignment = a;
> > -  }
> > -
> > -  if (bytes >= MAX_REQUEST - alignment) {
> > -    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
> > -      MALLOC_FAILURE_ACTION;
> > -    }
> > -  }
> > -  else {
> > -    size_t nb = request2size(bytes);
> > -    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
> > -    char* mem = (char*)internal_malloc(m, req);
> > -    if (mem != 0) {
> > -      void* leader = 0;
> > -      void* trailer = 0;
> > -      mchunkptr p = mem2chunk(mem);
> > -
> > -      if (PREACTION(m)) return 0;
> > -      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
> > -        /*
> > -          Find an aligned spot inside chunk.  Since we need to give
> > -          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
> > -          the first calculation places us at a spot with less than
> > -          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
> > -          We've allocated enough total room so that this is always
> > -          possible.
> > -        */
> > -        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
> > -                                                       alignment -
> > -                                                       SIZE_T_ONE)) &
> > -                                             -alignment));
> > -        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
> > -          br : br+alignment;
> > -        mchunkptr newp = (mchunkptr)pos;
> > -        size_t leadsize = pos - (char*)(p);
> > -        size_t newsize = chunksize(p) - leadsize;
> > -
> > -        /* Otherwise, give back leader, use the rest */
> > -        set_inuse(m, newp, newsize);
> > -        set_inuse(m, p, leadsize);
> > -        leader = chunk2mem(p);
> > -
> > -        p = newp;
> > -      }
> > -
> > -      assert(m->user_data, chunksize(p) >= nb);
> > -      assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
> > -      check_inuse_chunk(m, p);
> > -      POSTACTION(m);
> > -      if (leader != 0) {
> > -        internal_free(m, leader);
> > -      }
> > -      if (trailer != 0) {
> > -        internal_free(m, trailer);
> > -      }
> > -      return chunk2mem(p);
> > -    }
> > -  }
> > -  return 0;
> > -}
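
Aside, not part of the patch: the aligned-spot search above relies on the
usual round-up-to-a-power-of-two trick ((addr + align - 1) & -align). Tiny
standalone check, names made up:

    /* Standalone sketch: round an address up to a power-of-two alignment. */
    #include <stdio.h>
    #include <stddef.h>

    static size_t round_up(size_t addr, size_t align)   /* align: power of 2 */
    {
        return (addr + align - 1) & ~(align - 1);        /* same as "& -align" */
    }

    int main(void)
    {
        printf("%zu\n", round_up(1000, 64));   /* 1024 */
        printf("%zu\n", round_up(1024, 64));   /* 1024: already aligned */
        return 0;
    }
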
> > -
> > -/* ----------------------------- user mspaces ---------------------------- */
> > -
> > -static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {
> > -  size_t msize = pad_request(sizeof(struct malloc_state));
> > -  mchunkptr mn;
> > -  mchunkptr msp = align_as_chunk(tbase);
> > -  mstate m = (mstate)(chunk2mem(msp));
> > -  MEMCLEAR(m, msize);
> > -  INITIAL_LOCK(&m->mutex);
> > -  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
> > -  m->seg.base = m->least_addr = tbase;
> > -  m->seg.size = m->footprint = m->max_footprint = tsize;
> > -  m->magic = mparams.magic;
> > -  m->mflags = mparams.default_mflags;
> > -  m->user_data = user_data;
> > -  init_bins(m);
> > -  mn = next_chunk(mem2chunk(m));
> > -  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
> > -  check_top_chunk(m, m->top);
> > -  return m;
> > -}
> > -
> > -mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) {
> > -  mstate m = 0;
> > -  size_t msize = pad_request(sizeof(struct malloc_state));
> > -  init_mparams(); /* Ensure pagesize etc initialized */
> > -
> > -  if (capacity > msize + TOP_FOOT_SIZE &&
> > -      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
> > -    m = init_user_mstate((char*)base, capacity, user_data);
> > -    set_lock(m, locked);
> > -  }
> > -  return (mspace)m;
> > -}
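
Aside, not part of the patch: for anyone reviewing this without the rest of
the series, this is roughly how the mspace API is meant to be driven. A
user-mode sketch only (the driver naturally hands in its own memory range as
the base, not a malloc'd buffer); it assumes mspace.h declares these entry
points and that this file is built into the test program:

    /* User-mode sketch: carve an mspace out of a plain buffer and allocate
     * from it. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "mspace.h"

    int main(void)
    {
        size_t pool_size = 1024 * 1024;            /* 1 MiB backing buffer */
        void  *pool = malloc(pool_size);
        mspace ms;
        void  *p;

        if (pool == NULL)
            return 1;

        ms = create_mspace_with_base(pool, pool_size, 0 /* no locking */, NULL);
        if (ms == NULL) {
            free(pool);
            return 1;
        }

        p = mspace_malloc(ms, 256);                /* 256 bytes out of the pool */
        printf("allocated %p inside pool %p\n", p, pool);
        mspace_free(ms, p);

        free(pool);                    /* dropping the buffer drops the mspace */
        return 0;
    }

As far as I can tell from init_user_mstate(), the user_data argument ends up
in m->user_data and is handed back to the abort/print callbacks.
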
> > -
> > -/*
> > -  mspace versions of routines are near-clones of the global
> > -  versions. This is not so nice but better than the alternatives.
> > -*/
> > -
> > -
> > -void* mspace_malloc(mspace msp, size_t bytes) {
> > -  mstate ms = (mstate)msp;
> > -  if (!ok_magic(ms)) {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -    return 0;
> > -  }
> > -  if (!PREACTION(ms)) {
> > -    void* mem;
> > -    size_t nb;
> > -    if (bytes <= MAX_SMALL_REQUEST) {
> > -      bindex_t idx;
> > -      binmap_t smallbits;
> > -      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
> > -      idx = small_index(nb);
> > -      smallbits = ms->smallmap >> idx;
> > -
> > -      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
> > -        mchunkptr b, p;
> > -        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
> > -        b = smallbin_at(ms, idx);
> > -        p = b->fd;
> > -        assert(ms->user_data, chunksize(p) == small_index2size(idx));
> > -        unlink_first_small_chunk(ms, b, p, idx);
> > -        set_inuse_and_pinuse(ms, p, small_index2size(idx));
> > -        mem = chunk2mem(p);
> > -        check_malloced_chunk(ms, mem, nb);
> > -        goto postaction;
> > -      }
> > -
> > -      else if (nb > ms->dvsize) {
> > -        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
> > -          mchunkptr b, p, r;
> > -          size_t rsize;
> > -          bindex_t i;
> > -          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
> > -          binmap_t leastbit = least_bit(leftbits);
> > -          compute_bit2idx(leastbit, i);
> > -          b = smallbin_at(ms, i);
> > -          p = b->fd;
> > -          assert(ms->user_data, chunksize(p) == small_index2size(i));
> > -          unlink_first_small_chunk(ms, b, p, i);
> > -          rsize = small_index2size(i) - nb;
> > -          /* Fit here cannot be remainderless if 4byte sizes */
> > -          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
> > -            set_inuse_and_pinuse(ms, p, small_index2size(i));
> > -          else {
> > -            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > -            r = chunk_plus_offset(p, nb);
> > -            set_size_and_pinuse_of_free_chunk(r, rsize);
> > -            replace_dv(ms, r, rsize);
> > -          }
> > -          mem = chunk2mem(p);
> > -          check_malloced_chunk(ms, mem, nb);
> > -          goto postaction;
> > -        }
> > -
> > -        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
> > -          check_malloced_chunk(ms, mem, nb);
> > -          goto postaction;
> > -        }
> > -      }
> > -    }
> > -    else if (bytes >= MAX_REQUEST)
> > -      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
> > -    else {
> > -      nb = pad_request(bytes);
> > -      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
> > -        check_malloced_chunk(ms, mem, nb);
> > -        goto postaction;
> > -      }
> > -    }
> > -
> > -    if (nb <= ms->dvsize) {
> > -      size_t rsize = ms->dvsize - nb;
> > -      mchunkptr p = ms->dv;
> > -      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
> > -        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
> > -        ms->dvsize = rsize;
> > -        set_size_and_pinuse_of_free_chunk(r, rsize);
> > -        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > -      }
> > -      else { /* exhaust dv */
> > -        size_t dvs = ms->dvsize;
> > -        ms->dvsize = 0;
> > -        ms->dv = 0;
> > -        set_inuse_and_pinuse(ms, p, dvs);
> > -      }
> > -      mem = chunk2mem(p);
> > -      check_malloced_chunk(ms, mem, nb);
> > -      goto postaction;
> > -    }
> > -
> > -    else if (nb < ms->topsize) { /* Split top */
> > -      size_t rsize = ms->topsize -= nb;
> > -      mchunkptr p = ms->top;
> > -      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
> > -      r->head = rsize | PINUSE_BIT;
> > -      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > -      mem = chunk2mem(p);
> > -      check_top_chunk(ms, ms->top);
> > -      check_malloced_chunk(ms, mem, nb);
> > -      goto postaction;
> > -    }
> > -
> > -    mem = sys_alloc(ms, nb);
> > -
> > -  postaction:
> > -    POSTACTION(ms);
> > -    return mem;
> > -  }
> > -
> > -  return 0;
> > -}
> > -
> > -void mspace_free(mspace msp, void* mem) {
> > -  if (mem != 0) {
> > -    mchunkptr p  = mem2chunk(mem);
> > -#if FOOTERS
> > -    mstate fm = get_mstate_for(p);
> > -#else /* FOOTERS */
> > -    mstate fm = (mstate)msp;
> > -#endif /* FOOTERS */
> > -    if (!ok_magic(fm)) {
> > -      USAGE_ERROR_ACTION(fm, p);
> > -      return;
> > -    }
> > -    if (!PREACTION(fm)) {
> > -      check_inuse_chunk(fm, p);
> > -      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
> > -        size_t psize = chunksize(p);
> > -        mchunkptr next = chunk_plus_offset(p, psize);
> > -        if (!pinuse(p)) {
> > -          size_t prevsize = p->prev_foot;
> > -
> > -          mchunkptr prev = chunk_minus_offset(p, prevsize);
> > -          psize += prevsize;
> > -          p = prev;
> > -          if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
> > -            if (p != fm->dv) {
> > -              unlink_chunk(fm, p, prevsize);
> > -            }
> > -            else if ((next->head & INUSE_BITS) == INUSE_BITS) {
> > -              fm->dvsize = psize;
> > -              set_free_with_pinuse(p, psize, next);
> > -              goto postaction;
> > -            }
> > -          }
> > -          else
> > -            goto erroraction;
> > -        }
> > -
> > -        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
> > -          if (!cinuse(next)) {  /* consolidate forward */
> > -            if (next == fm->top) {
> > -              size_t tsize = fm->topsize += psize;
> > -              fm->top = p;
> > -              p->head = tsize | PINUSE_BIT;
> > -              if (p == fm->dv) {
> > -                fm->dv = 0;
> > -                fm->dvsize = 0;
> > -              }
> > -              goto postaction;
> > -            }
> > -            else if (next == fm->dv) {
> > -              size_t dsize = fm->dvsize += psize;
> > -              fm->dv = p;
> > -              set_size_and_pinuse_of_free_chunk(p, dsize);
> > -              goto postaction;
> > -            }
> > -            else {
> > -              size_t nsize = chunksize(next);
> > -              psize += nsize;
> > -              unlink_chunk(fm, next, nsize);
> > -              set_size_and_pinuse_of_free_chunk(p, psize);
> > -              if (p == fm->dv) {
> > -                fm->dvsize = psize;
> > -                goto postaction;
> > -              }
> > -            }
> > -          }
> > -          else
> > -            set_free_with_pinuse(p, psize, next);
> > -          insert_chunk(fm, p, psize);
> > -          check_free_chunk(fm, p);
> > -          goto postaction;
> > -        }
> > -      }
> > -    erroraction:
> > -      USAGE_ERROR_ACTION(fm, p);
> > -    postaction:
> > -      POSTACTION(fm);
> > -    }
> > -  }
> > -}
> > -
> > -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
> > -  void* mem;
> > -  size_t req = 0;
> > -  mstate ms = (mstate)msp;
> > -  if (!ok_magic(ms)) {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -    return 0;
> > -  }
> > -  if (n_elements != 0) {
> > -    req = n_elements * elem_size;
> > -    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
> > -        (req / n_elements != elem_size))
> > -      req = MAX_SIZE_T; /* force downstream failure on overflow */
> > -  }
> > -  mem = internal_malloc(ms, req);
> > -  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
> > -    MEMCLEAR(mem, req);
> > -  return mem;
> > -}
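
Aside, not part of the patch: the `req = n_elements * elem_size` guard above
is the classic calloc overflow check -- skip the division when both operands
fit in 16 bits (cannot overflow), otherwise verify the product by dividing
back. Standalone sketch with made-up names:

    /* Standalone sketch of the same overflow guard. */
    #include <stdio.h>
    #include <stddef.h>

    #define MY_MAX_SIZE_T (~(size_t)0)

    static size_t checked_mul(size_t n, size_t sz)
    {
        size_t req = n * sz;
        /* Both operands below 2^16 cannot overflow; otherwise divide back. */
        if (((n | sz) & ~(size_t)0xffff) && n != 0 && req / n != sz)
            req = MY_MAX_SIZE_T;     /* force the allocation to fail downstream */
        return req;
    }

    int main(void)
    {
        printf("%zu\n", checked_mul(1000, 8));                               /* 8000 */
        printf("%d\n",  checked_mul(MY_MAX_SIZE_T / 2, 4) == MY_MAX_SIZE_T); /* 1 */
        return 0;
    }
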
> > -
> > -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
> > -  if (oldmem == 0)
> > -    return mspace_malloc(msp, bytes);
> > -#ifdef REALLOC_ZERO_BYTES_FREES
> > -  if (bytes == 0) {
> > -    mspace_free(msp, oldmem);
> > -    return 0;
> > -  }
> > -#endif /* REALLOC_ZERO_BYTES_FREES */
> > -  else {
> > -#if FOOTERS
> > -    mchunkptr p  = mem2chunk(oldmem);
> > -    mstate ms = get_mstate_for(p);
> > -#else /* FOOTERS */
> > -    mstate ms = (mstate)msp;
> > -#endif /* FOOTERS */
> > -    if (!ok_magic(ms)) {
> > -      USAGE_ERROR_ACTION(ms,ms);
> > -      return 0;
> > -    }
> > -    return internal_realloc(ms, oldmem, bytes);
> > -  }
> > -}
> > -
> > -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
> > -  mstate ms = (mstate)msp;
> > -  if (!ok_magic(ms)) {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -    return 0;
> > -  }
> > -  return internal_memalign(ms, alignment, bytes);
> > -}
> > -
> > -void mspace_malloc_stats(mspace msp) {
> > -  mstate ms = (mstate)msp;
> > -  if (ok_magic(ms)) {
> > -    internal_malloc_stats(ms);
> > -  }
> > -  else {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -  }
> > -}
> > -
> > -size_t mspace_footprint(mspace msp) {
> > -  size_t result;
> > -  mstate ms = (mstate)msp;
> > -  if (ok_magic(ms)) {
> > -    result = ms->footprint;
> > -  } else {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -  }
> > -  return result;
> > -}
> > -
> > -
> > -size_t mspace_max_footprint(mspace msp) {
> > -  size_t result;
> > -  mstate ms = (mstate)msp;
> > -  if (ok_magic(ms)) {
> > -    result = ms->max_footprint;
> > -  } else {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -  }
> > -  return result;
> > -}
> > -
> > -
> > -#if !NO_MALLINFO
> > -struct mallinfo mspace_mallinfo(mspace msp) {
> > -  mstate ms = (mstate)msp;
> > -  if (!ok_magic(ms)) {
> > -    USAGE_ERROR_ACTION(ms,ms);
> > -  }
> > -  return internal_mallinfo(ms);
> > -}
> > -#endif /* NO_MALLINFO */
> > -
> > -int mspace_mallopt(int param_number, int value) {
> > -  return change_mparam(param_number, value);
> > -}
> > -
> > diff --git a/qxldod/mspace.cpp b/qxldod/mspace.cpp
> > new file mode 100644
> > index 0000000..28c9f96
> > --- /dev/null
> > +++ b/qxldod/mspace.cpp
> > @@ -0,0 +1,2439 @@
> > +// based on dlmalloc from Doug Lea
> > +
> > +
> > +// quote from the Doug Lea original file
> > +    /*
> > +      This is a version (aka dlmalloc) of malloc/free/realloc written by
> > +      Doug Lea and released to the public domain, as explained at
> > +      http://creativecommons.org/licenses/publicdomain.  Send questions,
> > +      comments, complaints, performance data, etc to dl at cs.oswego.edu
> > +
> > +    * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
> > +
> > +       Note: There may be an updated version of this malloc obtainable at
> > +               ftp://gee.cs.oswego.edu/pub/misc/malloc.c
> > +             Check before installing!
> > +    */
> > +
> > +
> > +#include <ntddk.h>
> > +
> > +#include "mspace.h"
> > +
> > +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
> > +
> > +#define MALLOC_ALIGNMENT ((size_t)8U)
> > +#define USE_LOCKS 0
> > +#define malloc_getpagesize ((size_t)4096U)
> > +#define DEFAULT_GRANULARITY malloc_getpagesize
> > +#define MAX_SIZE_T (~(size_t)0)
> > +#define MALLOC_FAILURE_ACTION
> > +#define MALLINFO_FIELD_TYPE size_t
> > +#define FOOTERS 0
> > +#define INSECURE 0
> > +#define PROCEED_ON_ERROR 0
> > +#define DEBUG 0
> > +#define ABORT_ON_ASSERT_FAILURE 1
> > +#define ABORT(user_data) abort_func(user_data)
> > +#define USE_BUILTIN_FFS 0
> > +#define USE_DEV_RANDOM 0
> > +#define PRINT(params) print_func params
> > +
> > +
> > +#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)
> > +#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)
> > +
> > +
> > +#define M_GRANULARITY        (-1)
> > +
> > +void default_abort_func(void *user_data)
> > +{
> > +    for (;;);
> > +}
> > +
> > +void default_print_func(void *user_data, char *format, ...)
> > +{
> > +}
> > +
> > +static mspace_abort_t abort_func = default_abort_func;
> > +static mspace_print_t print_func = default_print_func;
> > +
> > +void mspace_set_abort_func(mspace_abort_t f)
> > +{
> > +    abort_func = f;
> > +}
> > +
> > +void mspace_set_print_func(mspace_print_t f)
> > +{
> > +    print_func = f;
> > +}
> > +
> > +/* ------------------------ Mallinfo declarations ------------------------ */
> > +
> > +#if !NO_MALLINFO
> > +/*
> > +  This version of malloc supports the standard SVID/XPG mallinfo
> > +  routine that returns a struct containing usage properties and
> > +  statistics. It should work on any system that has a
> > +  /usr/include/malloc.h defining struct mallinfo.  The main
> > +  declaration needed is the mallinfo struct that is returned (by-copy)
> > +  by mallinfo().  The mallinfo struct contains a bunch of fields that
> > +  are not even meaningful in this version of malloc.  These fields are
> > +  instead filled by mallinfo() with other numbers that might be of
> > +  interest.
> > +
> > +  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
> > +  /usr/include/malloc.h file that includes a declaration of struct
> > +  mallinfo.  If so, it is included; else a compliant version is
> > +  declared below.  These must be precisely the same for mallinfo() to
> > +  work.  The original SVID version of this struct, defined on most
> > +  systems with mallinfo, declares all fields as ints. But some others
> > +  define as unsigned long. If your system defines the fields using a
> > +  type of different width than listed here, you MUST #include your
> > +  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
> > +*/
> > +
> > +/* #define HAVE_USR_INCLUDE_MALLOC_H */
> > +
> > +
> > +struct mallinfo {
> > +  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
> > +  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
> > +  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
> > +  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
> > +  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
> > +  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
> > +  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
> > +  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
> > +  MALLINFO_FIELD_TYPE fordblks; /* total free space */
> > +  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
> > +};
> > +
> > +#endif /* NO_MALLINFO */
> > +
> > +
> > +
> > +#ifdef DEBUG
> > +#if ABORT_ON_ASSERT_FAILURE
> > +#define assert(user_data, x) if(!(x)) ABORT(user_data)
> > +#else /* ABORT_ON_ASSERT_FAILURE */
> > +#include <assert.h>
> > +#endif /* ABORT_ON_ASSERT_FAILURE */
> > +#else  /* DEBUG */
> > +#define assert(user_data, x)
> > +#endif /* DEBUG */
> > +
> > +/* ------------------- size_t and alignment properties -------------------- */
> > +
> > +/* The byte and bit size of a size_t */
> > +#define SIZE_T_SIZE         (sizeof(size_t))
> > +#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
> > +
> > +/* Some constants coerced to size_t */
> > +/* Annoying but necessary to avoid errors on some platforms */
> > +#define SIZE_T_ZERO         ((size_t)0)
> > +#define SIZE_T_ONE          ((size_t)1)
> > +#define SIZE_T_TWO          ((size_t)2)
> > +#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
> > +#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
> > +#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
> > +#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
> > +
> > +/* The bit mask value corresponding to MALLOC_ALIGNMENT */
> > +#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
> > +
> > +/* True if address a has acceptable alignment */
> > +#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
> > +
> > +/* the number of bytes to offset an address to align it */
> > +#define align_offset(A)\
> > + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
> > +  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
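
Aside, not part of the patch: align_offset() gives the number of bytes to add
to reach the next MALLOC_ALIGNMENT boundary (0 if already aligned). Quick
standalone check with the 8-byte alignment used in this file:

    /* Standalone sketch of the align_offset arithmetic. */
    #include <stdio.h>
    #include <stddef.h>

    #define ALIGNMENT  ((size_t)8U)
    #define ALIGN_MASK (ALIGNMENT - 1)

    static size_t align_offset_of(size_t addr)
    {
        return ((addr & ALIGN_MASK) == 0) ? 0
             : ((ALIGNMENT - (addr & ALIGN_MASK)) & ALIGN_MASK);
    }

    int main(void)
    {
        size_t addr;
        for (addr = 16; addr < 24; ++addr)
            printf("%zu -> +%zu\n", addr, align_offset_of(addr)); /* 0,7,6,...,1 */
        return 0;
    }
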
> > +
> > +/* --------------------------- Lock preliminaries ------------------------ */
> > +
> > +#if USE_LOCKS
> > +
> > +/*
> > +  When locks are defined, there are up to two global locks:
> > +
> > +  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
> > +    MORECORE.  In many cases sys_alloc requires two calls, that should
> > +    not be interleaved with calls by other threads.  This does not
> > +    protect against direct calls to MORECORE by other threads not
> > +    using this lock, so there is still code to cope the best we can on
> > +    interference.
> > +
> > +  * magic_init_mutex ensures that mparams.magic and other
> > +    unique mparams values are initialized only once.
> > +*/
> > +
> > +
> > +#define USE_LOCK_BIT               (2U)
> > +#else  /* USE_LOCKS */
> > +#define USE_LOCK_BIT               (0U)
> > +#define INITIAL_LOCK(l)
> > +#endif /* USE_LOCKS */
> > +
> > +#if USE_LOCKS
> > +#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
> > +#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
> > +#else  /* USE_LOCKS */
> > +#define ACQUIRE_MAGIC_INIT_LOCK()
> > +#define RELEASE_MAGIC_INIT_LOCK()
> > +#endif /* USE_LOCKS */
> > +
> > +
> > +
> > +/* -----------------------  Chunk representations ------------------------ */
> > +
> > +/*
> > +  (The following includes lightly edited explanations by Colin Plumb.)
> > +
> > +  The malloc_chunk declaration below is misleading (but accurate and
> > +  necessary).  It declares a "view" into memory allowing access to
> > +  necessary fields at known offsets from a given base.
> > +
> > +  Chunks of memory are maintained using a `boundary tag' method as
> > +  originally described by Knuth.  (See the paper by Paul Wilson
> > +  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
> > +  techniques.)  Sizes of free chunks are stored both in the front of
> > +  each chunk and at the end.  This makes consolidating fragmented
> > +  chunks into bigger chunks fast.  The head fields also hold bits
> > +  representing whether chunks are free or in use.
> > +
> > +  Here are some pictures to make it clearer.  They are "exploded" to
> > +  show that the state of a chunk can be thought of as extending from
> > +  the high 31 bits of the head field of its header through the
> > +  prev_foot and PINUSE_BIT bit of the following chunk header.
> > +
> > +  A chunk that's in use looks like:
> > +
> > +   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +           | Size of previous chunk (if P = 1)                             |
> > +           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > +         | Size of this chunk                                         1| +-+
> > +   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         |                                                               |
> > +         +-                                                             -+
> > +         |                                                               |
> > +         +-                                                             -+
> > +         |                                                               :
> > +         +-      size - sizeof(size_t) available payload bytes          -+
> > +         :                                                               |
> > + chunk-> +-                                                             -+
> > +         |                                                               |
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
> > +       | Size of next chunk (may or may not be in use)               | +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +
> > +    And if it's free, it looks like this:
> > +
> > +   chunk-> +-                                                             -+
> > +           | User payload (must be in use, or we would have merged!)      |
> > +           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
> > +         | Size of this chunk                                         0| +-+
> > +   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         | Next pointer                                                  |
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         | Prev pointer                                                  |
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         |                                                               :
> > +         +-      size - sizeof(struct chunk) unused bytes               -+
> > +         :                                                               |
> > + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +         | Size of this chunk                                            |
> > +         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
> > +       | Size of next chunk (must be in use, or we would have merged)| +-+
> > + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +       |                                                               :
> > +       +- User payload                                                -+
> > +       :                                                               |
> > +       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +                                                                     |0|
> > +                                                                     +-+
> > +  Note that since we always merge adjacent free chunks, the chunks
> > +  adjacent to a free chunk must be in use.
> > +
> > +  Given a pointer to a chunk (which can be derived trivially from the
> > +  payload pointer) we can, in O(1) time, find out whether the adjacent
> > +  chunks are free, and if so, unlink them from the lists that they
> > +  are on and merge them with the current chunk.
> > +
> > +  Chunks always begin on even word boundaries, so the mem portion
> > +  (which is returned to the user) is also on an even word boundary, and
> > +  thus at least double-word aligned.
> > +
> > +  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
> > +  chunk size (which is always a multiple of two words), is an in-use
> > +  bit for the *previous* chunk.  If that bit is *clear*, then the
> > +  word before the current chunk size contains the previous chunk
> > +  size, and can be used to find the front of the previous chunk.
> > +  The very first chunk allocated always has this bit set, preventing
> > +  access to non-existent (or non-owned) memory. If pinuse is set for
> > +  any given chunk, then you CANNOT determine the size of the
> > +  previous chunk, and might even get a memory addressing fault when
> > +  trying to do so.
> > +
> > +  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
> > +  the chunk size redundantly records whether the current chunk is
> > +  inuse. This redundancy enables usage checks within free and realloc,
> > +  and reduces indirection when freeing and consolidating chunks.
> > +
> > +  Each freshly allocated chunk must have both cinuse and pinuse set.
> > +  That is, each allocated chunk borders either a previously allocated
> > +  and still in-use chunk, or the base of its memory arena. This is
> > +  ensured by making all allocations from the `lowest' part of any
> > +  found chunk.  Further, no free chunk physically borders another one,
> > +  so each free chunk is known to be preceded and followed by either
> > +  inuse chunks or the ends of memory.
> > +
> > +  Note that the `foot' of the current chunk is actually represented
> > +  as the prev_foot of the NEXT chunk. This makes it easier to
> > +  deal with alignments etc but can be very confusing when trying
> > +  to extend or adapt this code.
> > +
> > +  The exceptions to all this are
> > +
> > +     1. The special chunk `top' is the top-most available chunk (i.e.,
> > +        the one bordering the end of available memory). It is treated
> > +        specially.  Top is never included in any bin, is used only if
> > +        no other chunk is available, and is released back to the
> > +        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
> > +        the top chunk is treated as larger (and thus less well
> > +        fitting) than any other available chunk.  The top chunk
> > +        doesn't update its trailing size field since there is no next
> > +        contiguous chunk that would have to index off it. However,
> > +        space is still allocated for it (TOP_FOOT_SIZE) to enable
> > +        separation or merging when space is extended.
> > +
> > +     2. Chunks allocated via mmap, which have the lowest-order bit
> > +        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
> > +        PINUSE_BIT in their head fields.  Because they are allocated
> > +        one-by-one, each must carry its own prev_foot field, which is
> > +        also used to hold the offset this chunk has within its mmapped
> > +        region, which is needed to preserve alignment. Each mmapped
> > +        chunk is trailed by the first two fields of a fake next-chunk
> > +        for sake of usage checks.
> > +
> > +*/
> > +
> > +struct malloc_chunk {
> > +  size_t               prev_foot;  /* Size of previous chunk (if free).  */
> > +  size_t               head;       /* Size and inuse bits. */
> > +  struct malloc_chunk* fd;         /* double links -- used only if free. */
> > +  struct malloc_chunk* bk;
> > +};
> > +
> > +typedef struct malloc_chunk  mchunk;
> > +typedef struct malloc_chunk* mchunkptr;
> > +typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
> > +typedef unsigned int bindex_t;         /* Described below */
> > +typedef unsigned int binmap_t;         /* Described below */
> > +typedef unsigned int flag_t;           /* The type of various bit flag sets */
> > +
> > +
> > +/* ------------------- Chunks sizes and alignments ----------------------- */
> > +
> > +#define MCHUNK_SIZE         (sizeof(mchunk))
> > +
> > +#if FOOTERS
> > +#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
> > +#else /* FOOTERS */
> > +#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
> > +#endif /* FOOTERS */
> > +
> > +/* The smallest size we can malloc is an aligned minimal chunk */
> > +#define MIN_CHUNK_SIZE\
> > +  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > +
> > +/* conversion from malloc headers to user pointers, and back */
> > +#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
> > +#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
> > +/* chunk associated with aligned address A */
> > +#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
> > +
> > +/* Bounds on request (not chunk) sizes. */
> > +#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
> > +#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
> > +
> > +/* pad request bytes into a usable size */
> > +#define pad_request(req) \
> > +   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
> > +
> > +/* pad request, checking for minimum (but not maximum) */
> > +#define request2size(req) \
> > +  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
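
Aside, not part of the patch: a worked example of the two macros above,
assuming a 32-bit size_t so that CHUNK_OVERHEAD is 4 and MIN_CHUNK_SIZE is 16:

    /* Standalone sketch of pad_request()/request2size() under those
     * assumptions. */
    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN_MASK     ((size_t)7U)      /* MALLOC_ALIGNMENT - 1 */
    #define CHUNK_OVERHEAD ((size_t)4U)
    #define MIN_CHUNK_SIZE ((size_t)16U)
    #define MIN_REQUEST    (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - 1)

    static size_t pad_req(size_t req)
    {
        return (req + CHUNK_OVERHEAD + ALIGN_MASK) & ~ALIGN_MASK;
    }

    static size_t req2size(size_t req)
    {
        return (req < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_req(req);
    }

    int main(void)
    {
        /* 1 -> 16, 13 -> 24, 24 -> 32: every request becomes an aligned
         * chunk size of at least MIN_CHUNK_SIZE. */
        printf("%zu %zu %zu\n", req2size(1), req2size(13), req2size(24));
        return 0;
    }
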
> > +
> > +/* ------------------ Operations on head and foot fields ----------------- */
> > +
> > +/*
> > +  The head field of a chunk is or'ed with PINUSE_BIT when previous
> > +  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
> > +  use. If the chunk was obtained with mmap, the prev_foot field has
> > +  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
> > +  mmapped region to the base of the chunk.
> > +*/
> > +
> > +#define PINUSE_BIT          (SIZE_T_ONE)
> > +#define CINUSE_BIT          (SIZE_T_TWO)
> > +#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
> > +
> > +/* Head value for fenceposts */
> > +#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
> > +
> > +/* extraction of fields from head words */
> > +#define cinuse(p)           ((p)->head & CINUSE_BIT)
> > +#define pinuse(p)           ((p)->head & PINUSE_BIT)
> > +#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
> > +
> > +#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
> > +#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
> > +
> > +/* Treat space at ptr +/- offset as a chunk */
> > +#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
> > +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
> > +
> > +/* Ptr to next or previous physical malloc_chunk. */
> > +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
> > +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
> > +
> > +/* extract next chunk's pinuse bit */
> > +#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
> > +
> > +/* Get/set size at footer */
> > +#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
> > +#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
> > +
> > +/* Set size, pinuse bit, and foot */
> > +#define set_size_and_pinuse_of_free_chunk(p, s)\
> > +  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
> > +
> > +/* Set size, pinuse bit, foot, and clear next pinuse */
> > +#define set_free_with_pinuse(p, s, n)\
> > +  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
> > +
> > +/* Get the internal overhead associated with chunk p */
> > +#define overhead_for(p) CHUNK_OVERHEAD
> > +
> > +/* Return true if malloced space is not necessarily cleared */
> > +#define calloc_must_clear(p) (1)
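
Aside, not part of the patch: the head word packs the chunk size together with
the two low status bits, which works because sizes are always multiples of
MALLOC_ALIGNMENT. Standalone sketch of the packing and extraction:

    /* Standalone sketch of the head-word packing. */
    #include <stdio.h>
    #include <stddef.h>

    #define PINUSE_BIT ((size_t)1U)
    #define CINUSE_BIT ((size_t)2U)
    #define INUSE_BITS (PINUSE_BIT | CINUSE_BIT)

    int main(void)
    {
        /* 40-byte chunk, this chunk and the previous one both in use. */
        size_t head = (size_t)40 | PINUSE_BIT | CINUSE_BIT;

        printf("size   %zu\n", head & ~INUSE_BITS);        /* 40 */
        printf("cinuse %d\n", (head & CINUSE_BIT) != 0);   /* 1  */
        printf("pinuse %d\n", (head & PINUSE_BIT) != 0);   /* 1  */

        head &= ~CINUSE_BIT;                               /* mark this chunk free */
        printf("cinuse %d\n", (head & CINUSE_BIT) != 0);   /* 0  */
        return 0;
    }
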
> > +
> > +
> > +/* ---------------------- Overlaid data structures ----------------------- */
> > +
> > +/*
> > +  When chunks are not in use, they are treated as nodes of either
> > +  lists or trees.
> > +
> > +  "Small"  chunks are stored in circular doubly-linked lists, and look
> > +  like this:
> > +
> > +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Size of previous chunk                            |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +    `head:' |             Size of chunk, in bytes                         |P|
> > +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Forward pointer to next chunk in list             |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Back pointer to previous chunk in list            |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Unused space (may be 0 bytes long)                .
> > +            .                                                               .
> > +            .                                                               |
> > +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +    `foot:' |             Size of chunk, in bytes                           |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +
> > +  Larger chunks are kept in a form of bitwise digital trees (aka
> > +  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
> > +  free chunks greater than 256 bytes, their size doesn't impose any
> > +  constraints on user chunk sizes.  Each node looks like:
> > +
> > +    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Size of previous chunk                            |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +    `head:' |             Size of chunk, in bytes                         |P|
> > +      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Forward pointer to next chunk of same size        |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Back pointer to previous chunk of same size       |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Pointer to left child (child[0])                  |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Pointer to right child (child[1])                 |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Pointer to parent                                 |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             bin index of this chunk                           |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +            |             Unused space                                      .
> > +            .                                                               |
> > +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +    `foot:' |             Size of chunk, in bytes                           |
> > +            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
> > +
> > +  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
> > +  of the same size are arranged in a circularly-linked list, with only
> > +  the oldest chunk (the next to be used, in our FIFO ordering)
> > +  actually in the tree.  (Tree members are distinguished by a non-null
> > +  parent pointer.)  If a chunk with the same size as an existing node
> > +  is inserted, it is linked off the existing node using pointers that
> > +  work in the same way as fd/bk pointers of small chunks.
> > +
> > +  Each tree contains a power of 2 sized range of chunk sizes (the
> > +  smallest is 0x100 <= x < 0x180), which is divided in half at each
> > +  tree level, with the chunks in the smaller half of the range (0x100
> > +  <= x < 0x140 for the top node) in the left subtree and the larger
> > +  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
> > +  done by inspecting individual bits.
> > +
> > +  Using these rules, each node's left subtree contains all smaller
> > +  sizes than its right subtree.  However, the node at the root of each
> > +  subtree has no particular ordering relationship to either.  (The
> > +  dividing line between the subtree sizes is based on trie relation.)
> > +  If we remove the last chunk of a given size from the interior of the
> > +  tree, we need to replace it with a leaf node.  The tree ordering
> > +  rules permit a node to be replaced by any leaf below it.
> > +
> > +  The smallest chunk in a tree (a common operation in a best-fit
> > +  allocator) can be found by walking a path to the leftmost leaf in
> > +  the tree.  Unlike a usual binary tree, where we follow left child
> > +  pointers until we reach a null, here we follow the right child
> > +  pointer any time the left one is null, until we reach a leaf with
> > +  both child pointers null. The smallest chunk in the tree will be
> > +  somewhere along that path.
> > +
> > +  The worst case number of steps to add, find, or remove a node is
> > +  bounded by the number of bits differentiating chunks within
> > +  bins. Under current bin calculations, this ranges from 6 up to 21
> > +  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
> > +  is of course much better.
> > +*/
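
Aside, not part of the patch: the "inspecting individual bits" descent
described above is what insert_large_chunk()/tmalloc_large() do with the
pre-shifted size. A standalone toy walk for the 0x100 <= x < 0x180 bin; the
25-bit shift is hand-picked here (32-bit unsigned assumed) so the first
dividing bit (0x40) lands at the MSB, while the real code derives the shift
from the bin index via leftshift_for_tree_index():

    /* Standalone toy walk of the size-bit-driven tree descent. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int size = 0x130;       /* somewhere in the 0x100..0x17f bin */
        unsigned int k = size << 25;     /* dividing bit of the range at the MSB */
        int level;

        for (level = 0; level < 4; ++level) {
            printf("level %d -> child[%u]\n", level, (k >> 31) & 1u);
            k <<= 1;                     /* next level looks at the next bit */
        }
        return 0;                        /* prints child[0], [1], [1], [0] */
    }
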
> > +
> > +struct malloc_tree_chunk {
> > +  /* The first four fields must be compatible with malloc_chunk */
> > +  size_t                    prev_foot;
> > +  size_t                    head;
> > +  struct malloc_tree_chunk* fd;
> > +  struct malloc_tree_chunk* bk;
> > +
> > +  struct malloc_tree_chunk* child[2];
> > +  struct malloc_tree_chunk* parent;
> > +  bindex_t                  index;
> > +};
> > +
> > +typedef struct malloc_tree_chunk  tchunk;
> > +typedef struct malloc_tree_chunk* tchunkptr;
> > +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees
> */
> > +
> > +/* A little helper macro for trees */
> > +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
> > +
> > +/* ----------------------------- Segments -------------------------------- */
> > +
> > +/*
> > +  Each malloc space may include non-contiguous segments, held in a
> > +  list headed by an embedded malloc_segment record representing the
> > +  top-most space. Segments also include flags holding properties of
> > +  the space. Large chunks that are directly allocated by mmap are not
> > +  included in this list. They are instead independently created and
> > +  destroyed without otherwise keeping track of them.
> > +
> > +  Segment management mainly comes into play for spaces allocated by
> > +  MMAP.  Any call to MMAP might or might not return memory that is
> > +  adjacent to an existing segment.  MORECORE normally contiguously
> > +  extends the current space, so this space is almost always adjacent,
> > +  which is simpler and faster to deal with. (This is why MORECORE is
> > +  used preferentially to MMAP when both are available -- see
> > +  sys_alloc.)  When allocating using MMAP, we don't use any of the
> > +  hinting mechanisms (inconsistently) supported in various
> > +  implementations of unix mmap, or distinguish reserving from
> > +  committing memory. Instead, we just ask for space, and exploit
> > +  contiguity when we get it.  It is probably possible to do
> > +  better than this on some systems, but no general scheme seems
> > +  to be significantly better.
> > +
> > +  Management entails a simpler variant of the consolidation scheme
> > +  used for chunks to reduce fragmentation -- new adjacent memory is
> > +  normally prepended or appended to an existing segment. However,
> > +  there are limitations compared to chunk consolidation that mostly
> > +  reflect the fact that segment processing is relatively infrequent
> > +  (occurring only when getting memory from system) and that we
> > +  don't expect to have huge numbers of segments:
> > +
> > +  * Segments are not indexed, so traversal requires linear scans.  (It
> > +    would be possible to index these, but is not worth the extra
> > +    overhead and complexity for most programs on most platforms.)
> > +  * New segments are only appended to old ones when holding top-most
> > +    memory; if they cannot be prepended to others, they are held in
> > +    different segments.
> > +
> > +  Except for the top-most segment of an mstate, each segment record
> > +  is kept at the tail of its segment. Segments are added by pushing
> > +  segment records onto the list headed by &mstate.seg for the
> > +  containing mstate.
> > +
> > +  Segment flags control allocation/merge/deallocation policies:
> > +  * If EXTERN_BIT set, then we did not allocate this segment,
> > +    and so should not try to deallocate or merge with others.
> > +    (This currently holds only for the initial segment passed
> > +    into create_mspace_with_base.)
> > +  * If IS_MMAPPED_BIT set, the segment may be merged with
> > +    other surrounding mmapped segments and trimmed/de-allocated
> > +    using munmap.
> > +  * If neither bit is set, then the segment was obtained using
> > +    MORECORE so can be merged with surrounding MORECORE'd segments
> > +    and deallocated/trimmed using MORECORE with negative arguments.
> > +*/
> > +
> > +struct malloc_segment {
> > +  char*        base;             /* base address */
> > +  size_t       size;             /* allocated size */
> > +  struct malloc_segment* next;   /* ptr to next segment */
> > +};
> > +
> > +typedef struct malloc_segment  msegment;
> > +typedef struct malloc_segment* msegmentptr;
> > +
> > +/* ---------------------------- malloc_state
> -----------------------------
> > */
> > +
> > +/*
> > +   A malloc_state holds all of the bookkeeping for a space.
> > +   The main fields are:
> > +
> > +  Top
> > +    The topmost chunk of the currently active segment. Its size is
> > +    cached in topsize.  The actual size of topmost space is
> > +    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
> > +    fenceposts and segment records if necessary when getting more
> > +    space from the system.  The size at which to autotrim top is
> > +    cached from mparams in trim_check, except that it is disabled if
> > +    an autotrim fails.
> > +
> > +  Designated victim (dv)
> > +    This is the preferred chunk for servicing small requests that
> > +    don't have exact fits.  It is normally the chunk split off most
> > +    recently to service another small request.  Its size is cached in
> > +    dvsize. The link fields of this chunk are not maintained since it
> > +    is not kept in a bin.
> > +
> > +  SmallBins
> > +    An array of bin headers for free chunks.  These bins hold chunks
> > +    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
> > +    chunks of all the same size, spaced 8 bytes apart.  To simplify
> > +    use in double-linked lists, each bin header acts as a malloc_chunk
> > +    pointing to the real first node, if it exists (else pointing to
> > +    itself).  This avoids special-casing for headers.  But to avoid
> > +    waste, we allocate only the fd/bk pointers of bins, and then use
> > +    repositioning tricks to treat these as the fields of a chunk.
> > +
> > +  TreeBins
> > +    Treebins are pointers to the roots of trees holding a range of
> > +    sizes. There are 2 equally spaced treebins for each power of two
> > +    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
> > +    larger.
> > +
> > +  Bin maps
> > +    There is one bit map for small bins ("smallmap") and one for
> > +    treebins ("treemap").  Each bin sets its bit when non-empty, and
> > +    clears the bit when empty.  Bit operations are then used to avoid
> > +    bin-by-bin searching -- nearly all "search" is done without ever
> > +    looking at bins that won't be selected.  The bit maps
> > +    conservatively use 32 bits per map word, even on a 64-bit system.
> > +    For a good description of some of the bit-based techniques used
> > +    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
> > +    supplement at http://hackersdelight.org/). Many of these are
> > +    intended to reduce the branchiness of paths through malloc etc, as
> > +    well as to reduce the number of memory locations read or written.
> > +
> > +  Segments
> > +    A list of segments headed by an embedded malloc_segment record
> > +    representing the initial space.
> > +
> > +  Address check support
> > +    The least_addr field is the least address ever obtained from
> > +    MORECORE or MMAP. Attempted frees and reallocs of any address less
> > +    than this are trapped (unless INSECURE is defined).
> > +
> > +  Magic tag
> > +    A cross-check field that should always hold the same value as
> mparams.magic.
> > +
> > +  Flags
> > +    Bits recording whether to use MMAP, locks, or contiguous MORECORE
> > +
> > +  Statistics
> > +    Each space keeps track of current and maximum system memory
> > +    obtained via MORECORE or MMAP.
> > +
> > +  Locking
> > +    If USE_LOCKS is defined, the "mutex" lock is acquired and released
> > +    around every public call using this mspace.
> > +*/
> > +
> > +/* Bin types, widths and sizes */
> > +#define NSMALLBINS        (32U)
> > +#define NTREEBINS         (32U)
> > +#define SMALLBIN_SHIFT    (3U)
> > +#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
> > +#define TREEBIN_SHIFT     (8U)
> > +#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
> > +#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
> > +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK -
> > CHUNK_OVERHEAD)
> > +
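
To make the bin layout above concrete: with SMALLBIN_SHIFT of 3 and
TREEBIN_SHIFT of 8, smallbins are spaced 8 bytes apart and cover sizes below
MIN_LARGE_SIZE (256 bytes); anything larger is routed to the treebins. A
standalone check of the derived constants (not part of the patch; it only
assumes SIZE_T_ONE is (size_t)1, which is how dlmalloc defines it earlier in
this file):

    #include <assert.h>
    #include <stddef.h>

    #define SIZE_T_ONE        ((size_t)1)
    #define SMALLBIN_SHIFT    (3U)
    #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
    #define TREEBIN_SHIFT     (8U)
    #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
    #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)

    int main(void) {
        assert(SMALLBIN_WIDTH == 8);    /* smallbins spaced 8 bytes apart  */
        assert(MIN_LARGE_SIZE == 256);  /* first size handled by a treebin */
        assert(MAX_SMALL_SIZE == 255);  /* largest size kept in a smallbin */
        return 0;
    }
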
> > +struct malloc_state {
> > +  binmap_t   smallmap;
> > +  binmap_t   treemap;
> > +  size_t     dvsize;
> > +  size_t     topsize;
> > +  char*      least_addr;
> > +  mchunkptr  dv;
> > +  mchunkptr  top;
> > +  size_t     magic;
> > +  mchunkptr  smallbins[(NSMALLBINS+1)*2];
> > +  tbinptr    treebins[NTREEBINS];
> > +  size_t     footprint;
> > +  size_t     max_footprint;
> > +  flag_t     mflags;
> > +  void      *user_data;
> > +#if USE_LOCKS
> > +  MLOCK_T    mutex;     /* locate lock among fields that rarely change
> */
> > +#endif /* USE_LOCKS */
> > +  msegment   seg;
> > +};
> > +
> > +typedef struct malloc_state*    mstate;
> > +
> > +/* ------------- Global malloc_state and malloc_params
> -------------------
> > */
> > +
> > +/*
> > +  malloc_params holds global properties, including those that can be
> > +  dynamically set using mallopt. There is a single instance, mparams,
> > +  initialized in init_mparams.
> > +*/
> > +
> > +struct malloc_params {
> > +  size_t magic;
> > +  size_t page_size;
> > +  size_t granularity;
> > +  flag_t default_mflags;
> > +};
> > +
> > +static struct malloc_params mparams;
> > +
> > +/* The global malloc_state used for all non-"mspace" calls */
> > +//static struct malloc_state _gm_;
> > +//#define gm                 (&_gm_)
> > +//#define is_global(M)       ((M) == &_gm_)
> > +#define is_initialized(M)  ((M)->top != 0)
> > +
> > +/* -------------------------- system alloc setup
> -------------------------
> > */
> > +
> > +/* Operations on mflags */
> > +
> > +#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
> > +#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
> > +#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
> > +
> > +#define set_lock(M,L)\
> > + ((M)->mflags = (L)?\
> > +  ((M)->mflags | USE_LOCK_BIT) :\
> > +  ((M)->mflags & ~USE_LOCK_BIT))
> > +
> > +/* page-align a size */
> > +#define page_align(S)\
> > + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
> > +
> > +/* granularity-align a size */
> > +#define granularity_align(S)\
> > +  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
> > +
> > +#define is_page_aligned(S)\
> > +   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
> > +#define is_granularity_aligned(S)\
> > +   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
> > +
> > +/*  True if segment S holds address A */
> > +#define segment_holds(S, A)\
> > +  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
> > +
> > +/* Return segment holding given address */
> > +static msegmentptr segment_holding(mstate m, char* addr) {
> > +  msegmentptr sp = &m->seg;
> > +  for (;;) {
> > +    if (addr >= sp->base && addr < sp->base + sp->size)
> > +      return sp;
> > +    if ((sp = sp->next) == 0)
> > +      return 0;
> > +  }
> > +}
> > +
> > +/* Return true if segment contains a segment link */
> > +static int has_segment_link(mstate m, msegmentptr ss) {
> > +  msegmentptr sp = &m->seg;
> > +  for (;;) {
> > +    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
> > +      return 1;
> > +    if ((sp = sp->next) == 0)
> > +      return 0;
> > +  }
> > +}
> > +
> > +
> > +
> > +/*
> > +  TOP_FOOT_SIZE is padding at the end of a segment, including space
> > +  that may be needed to place segment records and fenceposts when new
> > +  noncontiguous segments are added.
> > +*/
> > +#define TOP_FOOT_SIZE\
> > +  (align_offset(chunk2mem(0))+pad_request(sizeof(struct
> > malloc_segment))+MIN_CHUNK_SIZE)
> > +
> > +
> > +/* -------------------------------  Hooks
> --------------------------------
> > */
> > +
> > +/*
> > +  PREACTION should be defined to return 0 on success, and nonzero on
> > +  failure. If you are not using locking, you can redefine these to do
> > +  anything you like.
> > +*/
> > +
> > +#if USE_LOCKS
> > +
> > +/* Ensure locks are initialized */
> > +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
> > +
> > +#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))?
> > ACQUIRE_LOCK(&(M)->mutex) : 0)
> > +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
> > +#else /* USE_LOCKS */
> > +
> > +#ifndef PREACTION
> > +#define PREACTION(M) (0)
> > +#endif  /* PREACTION */
> > +
> > +#ifndef POSTACTION
> > +#define POSTACTION(M)
> > +#endif  /* POSTACTION */
> > +
> > +#endif /* USE_LOCKS */
> > +
> > +/*
> > +  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
> > +  USAGE_ERROR_ACTION is triggered on detected bad frees and
> > +  reallocs. The argument p is an address that might have triggered the
> > +  fault. It is ignored by the two predefined actions, but might be
> > +  useful in custom actions that try to help diagnose errors.
> > +*/
> > +
> > +#if PROCEED_ON_ERROR
> > +
> > +/* A count of the number of corruption errors causing resets */
> > +int malloc_corruption_error_count;
> > +
> > +/* default corruption action */
> > +static void reset_on_error(mstate m);
> > +
> > +#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
> > +#define USAGE_ERROR_ACTION(m, p)
> > +
> > +#else /* PROCEED_ON_ERROR */
> > +
> > +#ifndef CORRUPTION_ERROR_ACTION
> > +#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
> > +#endif /* CORRUPTION_ERROR_ACTION */
> > +
> > +#ifndef USAGE_ERROR_ACTION
> > +#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
> > +#endif /* USAGE_ERROR_ACTION */
> > +
> > +#endif /* PROCEED_ON_ERROR */
> > +
> > +/* -------------------------- Debugging setup
> ----------------------------
> > */
> > +
> > +#if ! DEBUG
> > +
> > +#define check_free_chunk(M,P)
> > +#define check_inuse_chunk(M,P)
> > +#define check_malloced_chunk(M,P,N)
> > +#define check_malloc_state(M)
> > +#define check_top_chunk(M,P)
> > +
> > +#else /* DEBUG */
> > +#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
> > +#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
> > +#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
> > +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
> > +#define check_malloc_state(M)       do_check_malloc_state(M)
> > +
> > +static void   do_check_any_chunk(mstate m, mchunkptr p);
> > +static void   do_check_top_chunk(mstate m, mchunkptr p);
> > +static void   do_check_inuse_chunk(mstate m, mchunkptr p);
> > +static void   do_check_free_chunk(mstate m, mchunkptr p);
> > +static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
> > +static void   do_check_tree(mstate m, tchunkptr t);
> > +static void   do_check_treebin(mstate m, bindex_t i);
> > +static void   do_check_smallbin(mstate m, bindex_t i);
> > +static void   do_check_malloc_state(mstate m);
> > +static int    bin_find(mstate m, mchunkptr x);
> > +static size_t traverse_and_check(mstate m);
> > +#endif /* DEBUG */
> > +
> > +/* ---------------------------- Indexing Bins
> ----------------------------
> > */
> > +
> > +#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
> > +#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
> > +#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
> > +#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
> > +
> > +/* addressing by index. See above about smallbin repositioning */
> > +#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->
> smallbins[(i)<<1])))
> > +#define treebin_at(M,i)     (&((M)->treebins[i]))
> > +
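
A worked example of the smallbin indexing (standalone sketch, not part of the
patch): a padded request of 40 bytes drops its low SMALLBIN_SHIFT bits to land
in bin 5, and small_index2size() maps the bin number back to the chunk size it
holds.

    #include <stdio.h>
    #include <stddef.h>

    #define NSMALLBINS        (32U)
    #define SMALLBIN_SHIFT    (3U)
    #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
    #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
    #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)

    int main(void) {
        size_t s = 40;                                 /* padded request      */
        printf("%d\n", is_small(s));                   /* 1: 40>>3 == 5 < 32  */
        printf("%u\n", (unsigned)small_index(s));      /* bin 5               */
        printf("%u\n", (unsigned)small_index2size(5)); /* holds 40-byte chunks */
        return 0;
    }
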
> > +/* assign tree index for size S to variable I */
> > +#if defined(__GNUC__) && defined(i386)
> > +#define compute_tree_index(S, I)\
> > +{\
> > +  size_t X = S >> TREEBIN_SHIFT;\
> > +  if (X == 0)\
> > +    I = 0;\
> > +  else if (X > 0xFFFF)\
> > +    I = NTREEBINS-1;\
> > +  else {\
> > +    unsigned int K;\
> > +    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
> > +    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
> > +  }\
> > +}
> > +#else /* GNUC */
> > +#define compute_tree_index(S, I)\
> > +{\
> > +  size_t X = S >> TREEBIN_SHIFT;\
> > +  if (X == 0)\
> > +    I = 0;\
> > +  else if (X > 0xFFFF)\
> > +    I = NTREEBINS-1;\
> > +  else {\
> > +    unsigned int Y = (unsigned int)X;\
> > +    unsigned int N = ((Y - 0x100) >> 16) & 8;\
> > +    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
> > +    N += K;\
> > +    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
> > +    K = 14 - N + ((Y <<= K) >> 15);\
> > +    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
> > +  }\
> > +}
> > +#endif /* GNUC */
> > +
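
To see what compute_tree_index produces, here is the generic (non-i386) branch
pulled out into a standalone function (illustration only, not part of the
patch). Sizes in [2^(TREEBIN_SHIFT+k), 2^(TREEBIN_SHIFT+k+1)) map to treebin
2k or 2k+1 depending on whether they fall in the lower or upper half of that
range, with the last bin catching everything larger; the i386 branch above
finds the same top bit with bsrl.

    #include <stdio.h>
    #include <stddef.h>

    #define TREEBIN_SHIFT (8U)
    #define NTREEBINS     (32U)
    typedef unsigned int bindex_t;

    static bindex_t tree_index_for(size_t S) {
        bindex_t I;
        size_t X = S >> TREEBIN_SHIFT;
        if (X == 0)
            I = 0;
        else if (X > 0xFFFF)
            I = NTREEBINS - 1;
        else {
            unsigned int Y = (unsigned int)X;
            unsigned int N = ((Y - 0x100) >> 16) & 8;
            unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
            N += K;
            N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
            K = 14 - N + ((Y <<= K) >> 15);
            I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
        }
        return I;
    }

    int main(void) {
        printf("%u\n", tree_index_for(384));   /* [256,512), upper half   -> 1 */
        printf("%u\n", tree_index_for(1536));  /* [1024,2048), upper half -> 5 */
        return 0;
    }
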
> > +/* Bit representing maximum resolved size in a treebin at i */
> > +#define bit_for_tree_index(i) \
> > +   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT
> - 2)
> > +
> > +/* Shift placing maximum resolved bit in a treebin at i as sign bit */
> > +#define leftshift_for_tree_index(i) \
> > +   ((i == NTREEBINS-1)? 0 : \
> > +    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
> > +
> > +/* The size of the smallest chunk held in bin with index i */
> > +#define minsize_for_tree_index(i) \
> > +   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
> > +   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
> > +
> > +/* ------------------------ Operations on bin maps
> -----------------------
> > */
> > +
> > +/* bit corresponding to given index */
> > +#define idx2bit(i)              ((binmap_t)(1) << (i))
> > +
> > +/* Mark/Clear bits with given index */
> > +#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
> > +#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
> > +#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
> > +
> > +#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
> > +#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
> > +#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
> > +
> > +/* index corresponding to given bit */
> > +
> > +#if defined(__GNUC__) && defined(i386)
> > +#define compute_bit2idx(X, I)\
> > +{\
> > +  unsigned int J;\
> > +  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
> > +  I = (bindex_t)J;\
> > +}
> > +
> > +#else /* GNUC */
> > +#if  USE_BUILTIN_FFS
> > +#define compute_bit2idx(X, I) I = ffs(X)-1
> > +
> > +#else /* USE_BUILTIN_FFS */
> > +#define compute_bit2idx(X, I)\
> > +{\
> > +  unsigned int Y = X - 1;\
> > +  unsigned int K = Y >> (16-4) & 16;\
> > +  unsigned int N = K;        Y >>= K;\
> > +  N += K = Y >> (8-3) &  8;  Y >>= K;\
> > +  N += K = Y >> (4-2) &  4;  Y >>= K;\
> > +  N += K = Y >> (2-1) &  2;  Y >>= K;\
> > +  N += K = Y >> (1-0) &  1;  Y >>= K;\
> > +  I = (bindex_t)(N + Y);\
> > +}
> > +#endif /* USE_BUILTIN_FFS */
> > +#endif /* GNUC */
> > +
> > +/* isolate the least set bit of a bitmap */
> > +#define least_bit(x)         ((x) & -(x))
> > +
> > +/* mask with all bits to left of least bit of x on */
> > +#define left_bits(x)         ((x<<1) | -(x<<1))
> > +
> > +/* mask with all bits to left of or equal to least bit of x on */
> > +#define same_or_left_bits(x) ((x) | -(x))
> > +
> > +
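
These bin maps are what makes the "no bin-by-bin search" claim above work in
practice. A standalone sketch (not part of the patch) of how the allocator
finds the nearest non-empty smallbin strictly above the one a request maps to,
using left_bits, least_bit and the portable compute_bit2idx quoted above:

    #include <stdio.h>
    typedef unsigned int binmap_t;
    typedef unsigned int bindex_t;

    #define idx2bit(i)    ((binmap_t)(1) << (i))
    #define least_bit(x)  ((x) & -(x))
    #define left_bits(x)  ((x<<1) | -(x<<1))

    #define compute_bit2idx(X, I)\
    {\
      unsigned int Y = X - 1;\
      unsigned int K = Y >> (16-4) & 16;\
      unsigned int N = K;        Y >>= K;\
      N += K = Y >> (8-3) &  8;  Y >>= K;\
      N += K = Y >> (4-2) &  4;  Y >>= K;\
      N += K = Y >> (2-1) &  2;  Y >>= K;\
      N += K = Y >> (1-0) &  1;  Y >>= K;\
      I = (bindex_t)(N + Y);\
    }

    int main(void) {
        binmap_t smallmap = idx2bit(3) | idx2bit(9); /* bins 3 and 9 non-empty */
        bindex_t want = 5;                           /* request maps to bin 5  */
        binmap_t candidates = left_bits(idx2bit(want)) & smallmap;
        if (candidates != 0) {
            bindex_t i;
            binmap_t b = least_bit(candidates);
            compute_bit2idx(b, i);
            printf("use bin %u\n", i);               /* prints: use bin 9      */
        }
        return 0;
    }

Bin 3 is below the request's bin, so it is masked out without ever being
looked at; only bin 9 survives the bit operations.
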
> > +/* ----------------------- Runtime Check Support
> -------------------------
> > */
> > +
> > +/*
> > +  For security, the main invariant is that malloc/free/etc never
> > +  writes to a static address other than malloc_state, unless static
> > +  malloc_state itself has been corrupted, which cannot occur via
> > +  malloc (because of these checks). In essence this means that we
> > +  believe all pointers, sizes, maps etc held in malloc_state, but
> > +  check all of those linked or offsetted from other embedded data
> > +  structures.  These checks are interspersed with main code in a way
> > +  that tends to minimize their run-time cost.
> > +
> > +  When FOOTERS is defined, in addition to range checking, we also
> > +  verify footer fields of inuse chunks, which can be used to guarantee
> > +  that the mstate controlling malloc/free is intact.  This is a
> > +  streamlined version of the approach described by William Robertson
> > +  et al in "Run-time Detection of Heap-based Overflows" LISA'03
> > +  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
> > +  of an inuse chunk holds the xor of its mstate and a random seed,
> > +  that is checked upon calls to free() and realloc().  This is
> > +  (probabilistically) unguessable from outside the program, but can be
> > +  computed by any code successfully malloc'ing any chunk, so does not
> > +  itself provide protection against code that has already broken
> > +  security through some other means.  Unlike Robertson et al, we
> > +  always dynamically check addresses of all offset chunks (previous,
> > +  next, etc). This turns out to be cheaper than relying on hashes.
> > +*/
> > +
> > +#if !INSECURE
> > +/* Check if address a is at least as high as any from MORECORE or MMAP
> */
> > +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
> > +/* Check if address of next chunk n is higher than base chunk p */
> > +#define ok_next(p, n)    ((char*)(p) < (char*)(n))
> > +/* Check if p has its cinuse bit on */
> > +#define ok_cinuse(p)     cinuse(p)
> > +/* Check if p has its pinuse bit on */
> > +#define ok_pinuse(p)     pinuse(p)
> > +
> > +#else /* !INSECURE */
> > +#define ok_address(M, a) (1)
> > +#define ok_next(b, n)    (1)
> > +#define ok_cinuse(p)     (1)
> > +#define ok_pinuse(p)     (1)
> > +#endif /* !INSECURE */
> > +
> > +#if (FOOTERS && !INSECURE)
> > +/* Check if (alleged) mstate m has expected magic field */
> > +#define ok_magic(M)      ((M)->magic == mparams.magic)
> > +#else  /* (FOOTERS && !INSECURE) */
> > +#define ok_magic(M)      (1)
> > +#endif /* (FOOTERS && !INSECURE) */
> > +
> > +
> > +/* In gcc, use __builtin_expect to minimize impact of checks */
> > +#if !INSECURE
> > +#if defined(__GNUC__) && __GNUC__ >= 3
> > +#define RTCHECK(e)  __builtin_expect(e, 1)
> > +#else /* GNUC */
> > +#define RTCHECK(e)  (e)
> > +#endif /* GNUC */
> > +#else /* !INSECURE */
> > +#define RTCHECK(e)  (1)
> > +#endif /* !INSECURE */
> > +
> > +/* macros to set up inuse chunks with or without footers */
> > +
> > +#if !FOOTERS
> > +
> > +#define mark_inuse_foot(M,p,s)
> > +
> > +/* Set cinuse bit and pinuse bit of next chunk */
> > +#define set_inuse(M,p,s)\
> > +  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > +  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > +
> > +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
> > +#define set_inuse_and_pinuse(M,p,s)\
> > +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > +  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
> > +
> > +/* Set size, cinuse and pinuse bit of this chunk */
> > +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
> > +
> > +#else /* FOOTERS */
> > +
> > +/* Set foot of inuse chunk to be xor of mstate and seed */
> > +#define mark_inuse_foot(M,p,s)\
> > +  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^
> > mparams.magic))
> > +
> > +#define get_mstate_for(p)\
> > +  ((mstate)(((mchunkptr)((char*)(p) +\
> > +    (chunksize(p))))->prev_foot ^ mparams.magic))
> > +
> > +#define set_inuse(M,p,s)\
> > +  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
> > +  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
> > +  mark_inuse_foot(M,p,s))
> > +
> > +#define set_inuse_and_pinuse(M,p,s)\
> > +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > +  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
> > + mark_inuse_foot(M,p,s))
> > +
> > +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
> > +  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
> > +  mark_inuse_foot(M, p, s))
> > +
> > +#endif /* !FOOTERS */
> > +
> > +/* ---------------------------- setting mparams
> --------------------------
> > */
> > +
> > +/* Initialize mparams */
> > +static int init_mparams(void) {
> > +  if (mparams.page_size == 0) {
> > +    size_t s;
> > +
> > +    mparams.default_mflags = USE_LOCK_BIT;
> > +
> > +#if (FOOTERS && !INSECURE)
> > +    {
> > +#if USE_DEV_RANDOM
> > +      int fd;
> > +      unsigned char buf[sizeof(size_t)];
> > +      /* Try to use /dev/urandom, else fall back on using time */
> > +      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
> > +          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
> > +        s = *((size_t *) buf);
> > +        close(fd);
> > +      }
> > +      else
> > +#endif /* USE_DEV_RANDOM */
> > +        s = (size_t)(time(0) ^ (size_t)0x55555555U);
> > +
> > +      s |= (size_t)8U;    /* ensure nonzero */
> > +      s &= ~(size_t)7U;   /* improve chances of fault for bad values */
> > +
> > +    }
> > +#else /* (FOOTERS && !INSECURE) */
> > +    s = (size_t)0x58585858U;
> > +#endif /* (FOOTERS && !INSECURE) */
> > +    ACQUIRE_MAGIC_INIT_LOCK();
> > +    if (mparams.magic == 0) {
> > +      mparams.magic = s;
> > +      /* Set up lock for main malloc area */
> > +      //INITIAL_LOCK(&gm->mutex);
> > +      //gm->mflags = mparams.default_mflags;
> > +    }
> > +    RELEASE_MAGIC_INIT_LOCK();
> > +
> > +
> > +    mparams.page_size = malloc_getpagesize;
> > +    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
> > +                           DEFAULT_GRANULARITY : mparams.page_size);
> > +
> > +    /* Sanity-check configuration:
> > +       size_t must be unsigned and as wide as pointer type.
> > +       ints must be at least 4 bytes.
> > +       alignment must be at least 8.
> > +       Alignment, min chunk size, and page size must all be powers of 2.
> > +    */
> > +    if ((sizeof(size_t) != sizeof(char*)) ||
> > +        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
> > +        (sizeof(int) < 4)  ||
> > +        (MALLOC_ALIGNMENT < (size_t)8U) ||
> > +        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0)
> ||
> > +        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0)
> ||
> > +        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) !=
> 0) ||
> > +        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   !=
> 0))
> > +      ABORT(NULL);
> > +  }
> > +  return 0;
> > +}
> > +
> > +/* support for mallopt */
> > +static int change_mparam(int param_number, int value) {
> > +  size_t val = (size_t)value;
> > +  init_mparams();
> > +  switch(param_number) {
> > +  case M_GRANULARITY:
> > +    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
> > +      mparams.granularity = val;
> > +      return 1;
> > +    }
> > +    else
> > +      return 0;
> > +  default:
> > +    return 0;
> > +  }
> > +}
> > +
> > +#if DEBUG
> > +/* ------------------------- Debugging Support
> ---------------------------
> > */
> > +
> > +/* Check properties of any chunk, whether free, inuse, mmapped etc  */
> > +static void do_check_any_chunk(mstate m, mchunkptr p) {
> > +  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==
> > FENCEPOST_HEAD));
> > +  assert(m->user_data, ok_address(m, p));
> > +}
> > +
> > +/* Check properties of top chunk */
> > +static void do_check_top_chunk(mstate m, mchunkptr p) {
> > +  msegmentptr sp = segment_holding(m, (char*)p);
> > +  size_t  sz = chunksize(p);
> > +  assert(m->user_data, sp != 0);
> > +  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==
> > FENCEPOST_HEAD));
> > +  assert(m->user_data, ok_address(m, p));
> > +  assert(m->user_data, sz == m->topsize);
> > +  assert(m->user_data, sz > 0);
> > +  assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) -
> > TOP_FOOT_SIZE);
> > +  assert(m->user_data, pinuse(p));
> > +  assert(m->user_data, !next_pinuse(p));
> > +}
> > +
> > +/* Check properties of inuse chunks */
> > +static void do_check_inuse_chunk(mstate m, mchunkptr p) {
> > +  do_check_any_chunk(m, p);
> > +  assert(m->user_data, cinuse(p));
> > +  assert(m->user_data, next_pinuse(p));
> > +  /* If not pinuse, previous chunk has OK offset */
> > +  assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
> > +}
> > +
> > +/* Check properties of free chunks */
> > +static void do_check_free_chunk(mstate m, mchunkptr p) {
> > +  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > +  mchunkptr next = chunk_plus_offset(p, sz);
> > +  do_check_any_chunk(m, p);
> > +  assert(m->user_data, !cinuse(p));
> > +  assert(m->user_data, !next_pinuse(p));
> > +  if (p != m->dv && p != m->top) {
> > +    if (sz >= MIN_CHUNK_SIZE) {
> > +      assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > +      assert(m->user_data, is_aligned(chunk2mem(p)));
> > +      assert(m->user_data, next->prev_foot == sz);
> > +      assert(m->user_data, pinuse(p));
> > +      assert(m->user_data, next == m->top || cinuse(next));
> > +      assert(m->user_data, p->fd->bk == p);
> > +      assert(m->user_data, p->bk->fd == p);
> > +    }
> > +    else  /* markers are always of size SIZE_T_SIZE */
> > +      assert(m->user_data, sz == SIZE_T_SIZE);
> > +  }
> > +}
> > +
> > +/* Check properties of malloced chunks at the point they are malloced */
> > +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
> > +  if (mem != 0) {
> > +    mchunkptr p = mem2chunk(mem);
> > +    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
> > +    do_check_inuse_chunk(m, p);
> > +    assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
> > +    assert(m->user_data, sz >= MIN_CHUNK_SIZE);
> > +    assert(m->user_data, sz >= s);
> > +    /* size is less than MIN_CHUNK_SIZE more than request */
> > +    assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
> > +  }
> > +}
> > +
> > +/* Check a tree and its subtrees.  */
> > +static void do_check_tree(mstate m, tchunkptr t) {
> > +  tchunkptr head = 0;
> > +  tchunkptr u = t;
> > +  bindex_t tindex = t->index;
> > +  size_t tsize = chunksize(t);
> > +  bindex_t idx;
> > +  compute_tree_index(tsize, idx);
> > +  assert(m->user_data, tindex == idx);
> > +  assert(m->user_data, tsize >= MIN_LARGE_SIZE);
> > +  assert(m->user_data, tsize >= minsize_for_tree_index(idx));
> > +  assert(m->user_data, (idx == NTREEBINS-1) || (tsize <
> > minsize_for_tree_index((idx+1))));
> > +
> > +  do { /* traverse through chain of same-sized nodes */
> > +    do_check_any_chunk(m, ((mchunkptr)u));
> > +    assert(m->user_data, u->index == tindex);
> > +    assert(m->user_data, chunksize(u) == tsize);
> > +    assert(m->user_data, !cinuse(u));
> > +    assert(m->user_data, !next_pinuse(u));
> > +    assert(m->user_data, u->fd->bk == u);
> > +    assert(m->user_data, u->bk->fd == u);
> > +    if (u->parent == 0) {
> > +      assert(m->user_data, u->child[0] == 0);
> > +      assert(m->user_data, u->child[1] == 0);
> > +    }
> > +    else {
> > +      assert(m->user_data, head == 0); /* only one node on chain has
> parent
> > */
> > +      head = u;
> > +      assert(m->user_data, u->parent != u);
> > +      assert(m->user_data, u->parent->child[0] == u ||
> > +             u->parent->child[1] == u ||
> > +             *((tbinptr*)(u->parent)) == u);
> > +      if (u->child[0] != 0) {
> > +        assert(m->user_data, u->child[0]->parent == u);
> > +        assert(m->user_data, u->child[0] != u);
> > +        do_check_tree(m, u->child[0]);
> > +      }
> > +      if (u->child[1] != 0) {
> > +        assert(m->user_data, u->child[1]->parent == u);
> > +        assert(m->user_data, u->child[1] != u);
> > +        do_check_tree(m, u->child[1]);
> > +      }
> > +      if (u->child[0] != 0 && u->child[1] != 0) {
> > +        assert(m->user_data, chunksize(u->child[0]) <
> > chunksize(u->child[1]));
> > +      }
> > +    }
> > +    u = u->fd;
> > +  } while (u != t);
> > +  assert(m->user_data, head != 0);
> > +}
> > +
> > +/*  Check all the chunks in a treebin.  */
> > +static void do_check_treebin(mstate m, bindex_t i) {
> > +  tbinptr* tb = treebin_at(m, i);
> > +  tchunkptr t = *tb;
> > +  int empty = (m->treemap & (1U << i)) == 0;
> > +  if (t == 0)
> > +    assert(m->user_data, empty);
> > +  if (!empty)
> > +    do_check_tree(m, t);
> > +}
> > +
> > +/*  Check all the chunks in a smallbin.  */
> > +static void do_check_smallbin(mstate m, bindex_t i) {
> > +  sbinptr b = smallbin_at(m, i);
> > +  mchunkptr p = b->bk;
> > +  unsigned int empty = (m->smallmap & (1U << i)) == 0;
> > +  if (p == b)
> > +    assert(m->user_data, empty);
> > +  if (!empty) {
> > +    for (; p != b; p = p->bk) {
> > +      size_t size = chunksize(p);
> > +      mchunkptr q;
> > +      /* each chunk claims to be free */
> > +      do_check_free_chunk(m, p);
> > +      /* chunk belongs in bin */
> > +      assert(m->user_data, small_index(size) == i);
> > +      assert(m->user_data, p->bk == b || chunksize(p->bk) ==
> chunksize(p));
> > +      /* chunk is followed by an inuse chunk */
> > +      q = next_chunk(p);
> > +      if (q->head != FENCEPOST_HEAD)
> > +        do_check_inuse_chunk(m, q);
> > +    }
> > +  }
> > +}
> > +
> > +/* Find x in a bin. Used in other check functions. */
> > +static int bin_find(mstate m, mchunkptr x) {
> > +  size_t size = chunksize(x);
> > +  if (is_small(size)) {
> > +    bindex_t sidx = small_index(size);
> > +    sbinptr b = smallbin_at(m, sidx);
> > +    if (smallmap_is_marked(m, sidx)) {
> > +      mchunkptr p = b;
> > +      do {
> > +        if (p == x)
> > +          return 1;
> > +      } while ((p = p->fd) != b);
> > +    }
> > +  }
> > +  else {
> > +    bindex_t tidx;
> > +    compute_tree_index(size, tidx);
> > +    if (treemap_is_marked(m, tidx)) {
> > +      tchunkptr t = *treebin_at(m, tidx);
> > +      size_t sizebits = size << leftshift_for_tree_index(tidx);
> > +      while (t != 0 && chunksize(t) != size) {
> > +        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > +        sizebits <<= 1;
> > +      }
> > +      if (t != 0) {
> > +        tchunkptr u = t;
> > +        do {
> > +          if (u == (tchunkptr)x)
> > +            return 1;
> > +        } while ((u = u->fd) != t);
> > +      }
> > +    }
> > +  }
> > +  return 0;
> > +}
> > +
> > +/* Traverse each chunk and check it; return total */
> > +static size_t traverse_and_check(mstate m) {
> > +  size_t sum = 0;
> > +  if (is_initialized(m)) {
> > +    msegmentptr s = &m->seg;
> > +    sum += m->topsize + TOP_FOOT_SIZE;
> > +    while (s != 0) {
> > +      mchunkptr q = align_as_chunk(s->base);
> > +      mchunkptr lastq = 0;
> > +      assert(m->user_data, pinuse(q));
> > +      while (segment_holds(s, q) &&
> > +             q != m->top && q->head != FENCEPOST_HEAD) {
> > +        sum += chunksize(q);
> > +        if (cinuse(q)) {
> > +          assert(m->user_data, !bin_find(m, q));
> > +          do_check_inuse_chunk(m, q);
> > +        }
> > +        else {
> > +          assert(m->user_data, q == m->dv || bin_find(m, q));
> > +          assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2
> > consecutive free */
> > +          do_check_free_chunk(m, q);
> > +        }
> > +        lastq = q;
> > +        q = next_chunk(q);
> > +      }
> > +      s = s->next;
> > +    }
> > +  }
> > +  return sum;
> > +}
> > +
> > +/* Check all properties of malloc_state. */
> > +static void do_check_malloc_state(mstate m) {
> > +  bindex_t i;
> > +  size_t total;
> > +  /* check bins */
> > +  for (i = 0; i < NSMALLBINS; ++i)
> > +    do_check_smallbin(m, i);
> > +  for (i = 0; i < NTREEBINS; ++i)
> > +    do_check_treebin(m, i);
> > +
> > +  if (m->dvsize != 0) { /* check dv chunk */
> > +    do_check_any_chunk(m, m->dv);
> > +    assert(m->user_data, m->dvsize == chunksize(m->dv));
> > +    assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
> > +    assert(m->user_data, bin_find(m, m->dv) == 0);
> > +  }
> > +
> > +  if (m->top != 0) {   /* check top chunk */
> > +    do_check_top_chunk(m, m->top);
> > +    assert(m->user_data, m->topsize == chunksize(m->top));
> > +    assert(m->user_data, m->topsize > 0);
> > +    assert(m->user_data, bin_find(m, m->top) == 0);
> > +  }
> > +
> > +  total = traverse_and_check(m);
> > +  assert(m->user_data, total <= m->footprint);
> > +  assert(m->user_data, m->footprint <= m->max_footprint);
> > +}
> > +#endif /* DEBUG */
> > +
> > +/* ----------------------------- statistics
> ------------------------------
> > */
> > +
> > +#if !NO_MALLINFO
> > +static struct mallinfo internal_mallinfo(mstate m) {
> > +  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
> > +  if (!PREACTION(m)) {
> > +    check_malloc_state(m);
> > +    if (is_initialized(m)) {
> > +      size_t nfree = SIZE_T_ONE; /* top always free */
> > +      size_t mfree = m->topsize + TOP_FOOT_SIZE;
> > +      size_t sum = mfree;
> > +      msegmentptr s = &m->seg;
> > +      while (s != 0) {
> > +        mchunkptr q = align_as_chunk(s->base);
> > +        while (segment_holds(s, q) &&
> > +               q != m->top && q->head != FENCEPOST_HEAD) {
> > +          size_t sz = chunksize(q);
> > +          sum += sz;
> > +          if (!cinuse(q)) {
> > +            mfree += sz;
> > +            ++nfree;
> > +          }
> > +          q = next_chunk(q);
> > +        }
> > +        s = s->next;
> > +      }
> > +
> > +      nm.arena    = sum;
> > +      nm.ordblks  = nfree;
> > +      nm.hblkhd   = m->footprint - sum;
> > +      nm.usmblks  = m->max_footprint;
> > +      nm.uordblks = m->footprint - mfree;
> > +      nm.fordblks = mfree;
> > +      nm.keepcost = m->topsize;
> > +    }
> > +
> > +    POSTACTION(m);
> > +  }
> > +  return nm;
> > +}
> > +#endif /* !NO_MALLINFO */
> > +
> > +static void internal_malloc_stats(mstate m) {
> > +  if (!PREACTION(m)) {
> > +    size_t maxfp = 0;
> > +    size_t fp = 0;
> > +    size_t used = 0;
> > +    check_malloc_state(m);
> > +    if (is_initialized(m)) {
> > +      msegmentptr s = &m->seg;
> > +      maxfp = m->max_footprint;
> > +      fp = m->footprint;
> > +      used = fp - (m->topsize + TOP_FOOT_SIZE);
> > +
> > +      while (s != 0) {
> > +        mchunkptr q = align_as_chunk(s->base);
> > +        while (segment_holds(s, q) &&
> > +               q != m->top && q->head != FENCEPOST_HEAD) {
> > +          if (!cinuse(q))
> > +            used -= chunksize(q);
> > +          q = next_chunk(q);
> > +        }
> > +        s = s->next;
> > +      }
> > +    }
> > +
> > +    PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned
> > long)(maxfp)));
> > +    PRINT((m->user_data, "system bytes     = %10lu\n", (unsigned
> > long)(fp)));
> > +    PRINT((m->user_data, "in use bytes     = %10lu\n", (unsigned
> > long)(used)));
> > +
> > +    POSTACTION(m);
> > +  }
> > +}
> > +
> > +/* ----------------------- Operations on smallbins
> -----------------------
> > */
> > +
> > +/*
> > +  Various forms of linking and unlinking are defined as macros.  Even
> > +  the ones for trees, which are very long but have very short typical
> > +  paths.  This is ugly but reduces reliance on inlining support of
> > +  compilers.
> > +*/
> > +
> > +/* Link a free chunk into a smallbin  */
> > +#define insert_small_chunk(M, P, S) {\
> > +  bindex_t I  = small_index(S);\
> > +  mchunkptr B = smallbin_at(M, I);\
> > +  mchunkptr F = B;\
> > +  assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
> > +  if (!smallmap_is_marked(M, I))\
> > +    mark_smallmap(M, I);\
> > +  else if (RTCHECK(ok_address(M, B->fd)))\
> > +    F = B->fd;\
> > +  else {\
> > +    CORRUPTION_ERROR_ACTION(M);\
> > +  }\
> > +  B->fd = P;\
> > +  F->bk = P;\
> > +  P->fd = F;\
> > +  P->bk = B;\
> > +}
> > +
> > +/* Unlink a chunk from a smallbin  */
> > +#define unlink_small_chunk(M, P, S) {\
> > +  mchunkptr F = P->fd;\
> > +  mchunkptr B = P->bk;\
> > +  bindex_t I = small_index(S);\
> > +  assert((M)->user_data, P != B);\
> > +  assert((M)->user_data, P != F);\
> > +  assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > +  if (F == B)\
> > +    clear_smallmap(M, I);\
> > +  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
> > +                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
> > +    F->bk = B;\
> > +    B->fd = F;\
> > +  }\
> > +  else {\
> > +    CORRUPTION_ERROR_ACTION(M);\
> > +  }\
> > +}
> > +
> > +/* Unlink the first chunk from a smallbin */
> > +#define unlink_first_small_chunk(M, B, P, I) {\
> > +  mchunkptr F = P->fd;\
> > +  assert((M)->user_data, P != B);\
> > +  assert((M)->user_data, P != F);\
> > +  assert((M)->user_data, chunksize(P) == small_index2size(I));\
> > +  if (B == F)\
> > +    clear_smallmap(M, I);\
> > +  else if (RTCHECK(ok_address(M, F))) {\
> > +    B->fd = F;\
> > +    F->bk = B;\
> > +  }\
> > +  else {\
> > +    CORRUPTION_ERROR_ACTION(M);\
> > +  }\
> > +}
> > +
> > +/* Replace dv node, binning the old one */
> > +/* Used only when dvsize known to be small */
> > +#define replace_dv(M, P, S) {\
> > +  size_t DVS = M->dvsize;\
> > +  if (DVS != 0) {\
> > +    mchunkptr DV = M->dv;\
> > +    assert((M)->user_data, is_small(DVS));\
> > +    insert_small_chunk(M, DV, DVS);\
> > +  }\
> > +  M->dvsize = S;\
> > +  M->dv = P;\
> > +}
> > +
> > +
> > +/* ------------------------- Operations on trees
> -------------------------
> > */
> > +
> > +/* Insert chunk into tree */
> > +#define insert_large_chunk(M, X, S) {\
> > +  tbinptr* H;\
> > +  bindex_t I;\
> > +  compute_tree_index(S, I);\
> > +  H = treebin_at(M, I);\
> > +  X->index = I;\
> > +  X->child[0] = X->child[1] = 0;\
> > +  if (!treemap_is_marked(M, I)) {\
> > +    mark_treemap(M, I);\
> > +    *H = X;\
> > +    X->parent = (tchunkptr)H;\
> > +    X->fd = X->bk = X;\
> > +  }\
> > +  else {\
> > +    tchunkptr T = *H;\
> > +    size_t K = S << leftshift_for_tree_index(I);\
> > +    for (;;) {\
> > +      if (chunksize(T) != S) {\
> > +        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) &
> 1]);\
> > +        K <<= 1;\
> > +        if (*C != 0)\
> > +          T = *C;\
> > +        else if (RTCHECK(ok_address(M, C))) {\
> > +          *C = X;\
> > +          X->parent = T;\
> > +          X->fd = X->bk = X;\
> > +          break;\
> > +        }\
> > +        else {\
> > +          CORRUPTION_ERROR_ACTION(M);\
> > +          break;\
> > +        }\
> > +      }\
> > +      else {\
> > +        tchunkptr F = T->fd;\
> > +        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
> > +          T->fd = F->bk = X;\
> > +          X->fd = F;\
> > +          X->bk = T;\
> > +          X->parent = 0;\
> > +          break;\
> > +        }\
> > +        else {\
> > +          CORRUPTION_ERROR_ACTION(M);\
> > +          break;\
> > +        }\
> > +      }\
> > +    }\
> > +  }\
> > +}
> > +
> > +/*
> > +  Unlink steps:
> > +
> > +  1. If x is a chained node, unlink it from its same-sized fd/bk links
> > +     and choose its bk node as its replacement.
> > +  2. If x was the last node of its size, but not a leaf node, it must
> > +     be replaced with a leaf node (not merely one with an open left or
> > +     right), to make sure that lefts and rights of descendants
> > +     correspond properly to bit masks.  We use the rightmost descendant
> > +     of x.  We could use any other leaf, but this is easy to locate and
> > +     tends to counteract removal of leftmosts elsewhere, and so keeps
> > +     paths shorter than minimally guaranteed.  This doesn't loop much
> > +     because on average a node in a tree is near the bottom.
> > +  3. If x is the base of a chain (i.e., has parent links) relink
> > +     x's parent and children to x's replacement (or null if none).
> > +*/
> > +
> > +#define unlink_large_chunk(M, X) {\
> > +  tchunkptr XP = X->parent;\
> > +  tchunkptr R;\
> > +  if (X->bk != X) {\
> > +    tchunkptr F = X->fd;\
> > +    R = X->bk;\
> > +    if (RTCHECK(ok_address(M, F))) {\
> > +      F->bk = R;\
> > +      R->fd = F;\
> > +    }\
> > +    else {\
> > +      CORRUPTION_ERROR_ACTION(M);\
> > +    }\
> > +  }\
> > +  else {\
> > +    tchunkptr* RP;\
> > +    if (((R = *(RP = &(X->child[1]))) != 0) ||\
> > +        ((R = *(RP = &(X->child[0]))) != 0)) {\
> > +      tchunkptr* CP;\
> > +      while ((*(CP = &(R->child[1])) != 0) ||\
> > +             (*(CP = &(R->child[0])) != 0)) {\
> > +        R = *(RP = CP);\
> > +      }\
> > +      if (RTCHECK(ok_address(M, RP)))\
> > +        *RP = 0;\
> > +      else {\
> > +        CORRUPTION_ERROR_ACTION(M);\
> > +      }\
> > +    }\
> > +  }\
> > +  if (XP != 0) {\
> > +    tbinptr* H = treebin_at(M, X->index);\
> > +    if (X == *H) {\
> > +      if ((*H = R) == 0) \
> > +        clear_treemap(M, X->index);\
> > +    }\
> > +    else if (RTCHECK(ok_address(M, XP))) {\
> > +      if (XP->child[0] == X) \
> > +        XP->child[0] = R;\
> > +      else \
> > +        XP->child[1] = R;\
> > +    }\
> > +    else\
> > +      CORRUPTION_ERROR_ACTION(M);\
> > +    if (R != 0) {\
> > +      if (RTCHECK(ok_address(M, R))) {\
> > +        tchunkptr C0, C1;\
> > +        R->parent = XP;\
> > +        if ((C0 = X->child[0]) != 0) {\
> > +          if (RTCHECK(ok_address(M, C0))) {\
> > +            R->child[0] = C0;\
> > +            C0->parent = R;\
> > +          }\
> > +          else\
> > +            CORRUPTION_ERROR_ACTION(M);\
> > +        }\
> > +        if ((C1 = X->child[1]) != 0) {\
> > +          if (RTCHECK(ok_address(M, C1))) {\
> > +            R->child[1] = C1;\
> > +            C1->parent = R;\
> > +          }\
> > +          else\
> > +            CORRUPTION_ERROR_ACTION(M);\
> > +        }\
> > +      }\
> > +      else\
> > +        CORRUPTION_ERROR_ACTION(M);\
> > +    }\
> > +  }\
> > +}
> > +
> > +/* Relays to large vs small bin operations */
> > +
> > +#define insert_chunk(M, P, S)\
> > +  if (is_small(S)) insert_small_chunk(M, P, S)\
> > +  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
> > +
> > +#define unlink_chunk(M, P, S)\
> > +  if (is_small(S)) unlink_small_chunk(M, P, S)\
> > +  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
> > +
> > +
> > +/* Relays to internal calls to malloc/free from realloc, memalign etc */
> > +
> > +#define internal_malloc(m, b) mspace_malloc(m, b)
> > +#define internal_free(m, mem) mspace_free(m,mem);
> > +
> > +
> > +/* -------------------------- mspace management
> --------------------------
> > */
> > +
> > +/* Initialize top chunk and its size */
> > +static void init_top(mstate m, mchunkptr p, size_t psize) {
> > +  /* Ensure alignment */
> > +  size_t offset = align_offset(chunk2mem(p));
> > +  p = (mchunkptr)((char*)p + offset);
> > +  psize -= offset;
> > +
> > +  m->top = p;
> > +  m->topsize = psize;
> > +  p->head = psize | PINUSE_BIT;
> > +  /* set size of fake trailing chunk holding overhead space only once */
> > +  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
> > +}
> > +
> > +/* Initialize bins for a new mstate that is otherwise zeroed out */
> > +static void init_bins(mstate m) {
> > +  /* Establish circular links for smallbins */
> > +  bindex_t i;
> > +  for (i = 0; i < NSMALLBINS; ++i) {
> > +    sbinptr bin = smallbin_at(m,i);
> > +    bin->fd = bin->bk = bin;
> > +  }
> > +}
> > +
> > +#if PROCEED_ON_ERROR
> > +
> > +/* default corruption action */
> > +static void reset_on_error(mstate m) {
> > +  int i;
> > +  ++malloc_corruption_error_count;
> > +  /* Reinitialize fields to forget about all memory */
> > +  m->smallbins = m->treebins = 0;
> > +  m->dvsize = m->topsize = 0;
> > +  m->seg.base = 0;
> > +  m->seg.size = 0;
> > +  m->seg.next = 0;
> > +  m->top = m->dv = 0;
> > +  for (i = 0; i < NTREEBINS; ++i)
> > +    *treebin_at(m, i) = 0;
> > +  init_bins(m);
> > +}
> > +#endif /* PROCEED_ON_ERROR */
> > +
> > +/* Allocate chunk and prepend remainder with chunk in successor base. */
> > +static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
> > +                           size_t nb) {
> > +  mchunkptr p = align_as_chunk(newbase);
> > +  mchunkptr oldfirst = align_as_chunk(oldbase);
> > +  size_t psize = (char*)oldfirst - (char*)p;
> > +  mchunkptr q = chunk_plus_offset(p, nb);
> > +  size_t qsize = psize - nb;
> > +  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
> > +
> > +  assert(m->user_data, (char*)oldfirst > (char*)q);
> > +  assert(m->user_data, pinuse(oldfirst));
> > +  assert(m->user_data, qsize >= MIN_CHUNK_SIZE);
> > +
> > +  /* consolidate remainder with first chunk of old base */
> > +  if (oldfirst == m->top) {
> > +    size_t tsize = m->topsize += qsize;
> > +    m->top = q;
> > +    q->head = tsize | PINUSE_BIT;
> > +    check_top_chunk(m, q);
> > +  }
> > +  else if (oldfirst == m->dv) {
> > +    size_t dsize = m->dvsize += qsize;
> > +    m->dv = q;
> > +    set_size_and_pinuse_of_free_chunk(q, dsize);
> > +  }
> > +  else {
> > +    if (!cinuse(oldfirst)) {
> > +      size_t nsize = chunksize(oldfirst);
> > +      unlink_chunk(m, oldfirst, nsize);
> > +      oldfirst = chunk_plus_offset(oldfirst, nsize);
> > +      qsize += nsize;
> > +    }
> > +    set_free_with_pinuse(q, qsize, oldfirst);
> > +#pragma warning(suppress: 28182) /* code analysis noise*/
> > +    insert_chunk(m, q, qsize);
> > +    check_free_chunk(m, q);
> > +  }
> > +
> > +  check_malloced_chunk(m, chunk2mem(p), nb);
> > +  return chunk2mem(p);
> > +}
> > +
> > +/* -------------------------- System allocation
> --------------------------
> > */
> > +
> > +/* Get memory from system using MORECORE or MMAP */
> > +static void* sys_alloc(mstate m, size_t nb) {
> > +  MALLOC_FAILURE_ACTION;
> > +  return 0;
> > +}
> > +
> > +/* ---------------------------- malloc support
> ---------------------------
> > */
> > +
> > +/* allocate a large request from the best fitting chunk in a treebin */
> > +static void* tmalloc_large(mstate m, size_t nb) {
> > +  tchunkptr v = 0;
> > +  size_t rsize = -nb; /* Unsigned negation */
> > +  tchunkptr t;
> > +  bindex_t idx;
> > +  compute_tree_index(nb, idx);
> > +
> > +  if ((t = *treebin_at(m, idx)) != 0) {
> > +    /* Traverse tree for this bin looking for node with size == nb */
> > +    size_t sizebits = nb << leftshift_for_tree_index(idx);
> > +    tchunkptr rst = 0;  /* The deepest untaken right subtree */
> > +    for (;;) {
> > +      tchunkptr rt;
> > +      size_t trem = chunksize(t) - nb;
> > +      if (trem < rsize) {
> > +        v = t;
> > +        if ((rsize = trem) == 0)
> > +          break;
> > +      }
> > +      rt = t->child[1];
> > +      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
> > +      if (rt != 0 && rt != t)
> > +        rst = rt;
> > +      if (t == 0) {
> > +        t = rst; /* set t to least subtree holding sizes > nb */
> > +        break;
> > +      }
> > +      sizebits <<= 1;
> > +    }
> > +  }
> > +
> > +  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
> > +    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
> > +    if (leftbits != 0) {
> > +      bindex_t i;
> > +      binmap_t leastbit = least_bit(leftbits);
> > +      compute_bit2idx(leastbit, i);
> > +      t = *treebin_at(m, i);
> > +    }
> > +  }
> > +
> > +  while (t != 0) { /* find smallest of tree or subtree */
> > +    size_t trem = chunksize(t) - nb;
> > +    if (trem < rsize) {
> > +      rsize = trem;
> > +      v = t;
> > +    }
> > +    t = leftmost_child(t);
> > +  }
> > +
> > +  /*  If dv is a better fit, return 0 so malloc will use it */
> > +  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
> > +    if (RTCHECK(ok_address(m, v))) { /* split */
> > +      mchunkptr r = chunk_plus_offset(v, nb);
> > +      assert(m->user_data, chunksize(v) == rsize + nb);
> > +      if (RTCHECK(ok_next(v, r))) {
> > +        unlink_large_chunk(m, v);
> > +        if (rsize < MIN_CHUNK_SIZE)
> > +          set_inuse_and_pinuse(m, v, (rsize + nb));
> > +        else {
> > +          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > +          set_size_and_pinuse_of_free_chunk(r, rsize);
> > +#pragma warning(suppress: 28182) /* code analysis noise*/
> > +          insert_chunk(m, r, rsize);
> > +        }
> > +        return chunk2mem(v);
> > +      }
> > +    }
> > +    CORRUPTION_ERROR_ACTION(m);
> > +  }
> > +  return 0;
> > +}
> > +
> > +/* allocate a small request from the best fitting chunk in a treebin */
> > +static void* tmalloc_small(mstate m, size_t nb) {
> > +  tchunkptr t, v;
> > +  size_t rsize;
> > +  bindex_t i;
> > +  binmap_t leastbit = least_bit(m->treemap);
> > +  compute_bit2idx(leastbit, i);
> > +
> > +  v = t = *treebin_at(m, i);
> > +  rsize = chunksize(t) - nb;
> > +
> > +  while ((t = leftmost_child(t)) != 0) {
> > +    size_t trem = chunksize(t) - nb;
> > +    if (trem < rsize) {
> > +      rsize = trem;
> > +      v = t;
> > +    }
> > +  }
> > +
> > +  if (RTCHECK(ok_address(m, v))) {
> > +    mchunkptr r = chunk_plus_offset(v, nb);
> > +    assert(m->user_data, chunksize(v) == rsize + nb);
> > +    if (RTCHECK(ok_next(v, r))) {
> > +      unlink_large_chunk(m, v);
> > +      if (rsize < MIN_CHUNK_SIZE)
> > +        set_inuse_and_pinuse(m, v, (rsize + nb));
> > +      else {
> > +        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
> > +        set_size_and_pinuse_of_free_chunk(r, rsize);
> > +        replace_dv(m, r, rsize);
> > +      }
> > +      return chunk2mem(v);
> > +    }
> > +  }
> > +
> > +  CORRUPTION_ERROR_ACTION(m);
> > +  return 0;
> > +}
> > +
> > +/* --------------------------- realloc support
> ---------------------------
> > */
> > +
> > +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
> > +  if (bytes >= MAX_REQUEST) {
> > +    MALLOC_FAILURE_ACTION;
> > +    return 0;
> > +  }
> > +  if (!PREACTION(m)) {
> > +    mchunkptr oldp = mem2chunk(oldmem);
> > +    size_t oldsize = chunksize(oldp);
> > +    mchunkptr next = chunk_plus_offset(oldp, oldsize);
> > +    mchunkptr newp = 0;
> > +    void* extra = 0;
> > +
> > +    /* Try to either shrink or extend into top. Else malloc-copy-free */
> > +
> > +    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
> > +                ok_next(oldp, next) && ok_pinuse(next))) {
> > +      size_t nb = request2size(bytes);
> > +      if (oldsize >= nb) { /* already big enough */
> > +        size_t rsize = oldsize - nb;
> > +        newp = oldp;
> > +        if (rsize >= MIN_CHUNK_SIZE) {
> > +          mchunkptr remainder = chunk_plus_offset(newp, nb);
> > +          set_inuse(m, newp, nb);
> > +          set_inuse(m, remainder, rsize);
> > +          extra = chunk2mem(remainder);
> > +        }
> > +      }
> > +      else if (next == m->top && oldsize + m->topsize > nb) {
> > +        /* Expand into top */
> > +        size_t newsize = oldsize + m->topsize;
> > +        size_t newtopsize = newsize - nb;
> > +        mchunkptr newtop = chunk_plus_offset(oldp, nb);
> > +        set_inuse(m, oldp, nb);
> > +        newtop->head = newtopsize |PINUSE_BIT;
> > +        m->top = newtop;
> > +        m->topsize = newtopsize;
> > +        newp = oldp;
> > +      }
> > +    }
> > +    else {
> > +      USAGE_ERROR_ACTION(m, oldmem);
> > +      POSTACTION(m);
> > +      return 0;
> > +    }
> > +
> > +    POSTACTION(m);
> > +
> > +    if (newp != 0) {
> > +      if (extra != 0) {
> > +        internal_free(m, extra);
> > +      }
> > +      check_inuse_chunk(m, newp);
> > +      return chunk2mem(newp);
> > +    }
> > +    else {
> > +      void* newmem = internal_malloc(m, bytes);
> > +      if (newmem != 0) {
> > +        size_t oc = oldsize - overhead_for(oldp);
> > +        MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
> > +        internal_free(m, oldmem);
> > +      }
> > +      return newmem;
> > +    }
> > +  }
> > +  return 0;
> > +}
> > +
> > +/* --------------------------- memalign support
> --------------------------
> > */
> > +
> > +static void* internal_memalign(mstate m, size_t alignment, size_t
> bytes) {
> > +  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
> > +    return internal_malloc(m, bytes);
> > +  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk
> size
> > */
> > +    alignment = MIN_CHUNK_SIZE;
> > +  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of
> 2 */
> > +    size_t a = MALLOC_ALIGNMENT << 1;
> > +    while (a < alignment) a <<= 1;
> > +    alignment = a;
> > +  }
> > +
> > +  if (bytes >= MAX_REQUEST - alignment) {
> > +    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
> > +      MALLOC_FAILURE_ACTION;
> > +    }
> > +  }
> > +  else {
> > +    size_t nb = request2size(bytes);
> > +    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
> > +    char* mem = (char*)internal_malloc(m, req);
> > +    if (mem != 0) {
> > +      void* leader = 0;
> > +      void* trailer = 0;
> > +      mchunkptr p = mem2chunk(mem);
> > +
> > +      if (PREACTION(m)) return 0;
> > +      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
> > +        /*
> > +          Find an aligned spot inside chunk.  Since we need to give
> > +          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
> > +          the first calculation places us at a spot with less than
> > +          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
> > +          We've allocated enough total room so that this is always
> > +          possible.
> > +        */
> > +        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
> > +                                                       alignment -
> > +                                                       SIZE_T_ONE)) &
> > +                                             -alignment));
> > +        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
> > +          br : br+alignment;
> > +        mchunkptr newp = (mchunkptr)pos;
> > +        size_t leadsize = pos - (char*)(p);
> > +        size_t newsize = chunksize(p) - leadsize;
> > +
> > +        /* Otherwise, give back leader, use the rest */
> > +        set_inuse(m, newp, newsize);
> > +        set_inuse(m, p, leadsize);
> > +        leader = chunk2mem(p);
> > +
> > +        p = newp;
> > +      }
> > +
> > +      assert(m->user_data, chunksize(p) >= nb);
> > +      assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
> > +      check_inuse_chunk(m, p);
> > +      POSTACTION(m);
> > +      if (leader != 0) {
> > +        internal_free(m, leader);
> > +      }
> > +      if (trailer != 0) {
> > +        internal_free(m, trailer);
> > +      }
> > +      return chunk2mem(p);
> > +    }
> > +  }
> > +  return 0;
> > +}
> > +
> > +/* ----------------------------- user mspaces
> ----------------------------
> > */
> > +
> > +static mstate init_user_mstate(char* tbase, size_t tsize, void
> *user_data) {
> > +  size_t msize = pad_request(sizeof(struct malloc_state));
> > +  mchunkptr mn;
> > +  mchunkptr msp = align_as_chunk(tbase);
> > +  mstate m = (mstate)(chunk2mem(msp));
> > +  MEMCLEAR(m, msize);
> > +  INITIAL_LOCK(&m->mutex);
> > +  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
> > +  m->seg.base = m->least_addr = tbase;
> > +  m->seg.size = m->footprint = m->max_footprint = tsize;
> > +  m->magic = mparams.magic;
> > +  m->mflags = mparams.default_mflags;
> > +  m->user_data = user_data;
> > +  init_bins(m);
> > +  mn = next_chunk(mem2chunk(m));
> > +  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -
> TOP_FOOT_SIZE);
> > +  check_top_chunk(m, m->top);
> > +  return m;
> > +}
> > +
> > +mspace create_mspace_with_base(void* base, size_t capacity, int locked,
> void
> > *user_data) {
> > +  mstate m = 0;
> > +  size_t msize = pad_request(sizeof(struct malloc_state));
> > +  init_mparams(); /* Ensure pagesize etc initialized */
> > +
> > +  if (capacity > msize + TOP_FOOT_SIZE &&
> > +      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size))
> {
> > +    m = init_user_mstate((char*)base, capacity, user_data);
> > +    set_lock(m, locked);
> > +  }
> > +  return (mspace)m;
> > +}
> > +
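
For context, a minimal usage sketch of this entry point (not part of the
patch; it assumes the accompanying mspace.h declares create_mspace_with_base,
mspace_malloc and mspace_free with the signatures used in this file): the
caller hands in a raw buffer and the allocator manages it in place. Since
sys_alloc above is stubbed to fail, the space never grows beyond the buffer it
was given.

    #include <stddef.h>
    #include "mspace.h"

    static char pool[64 * 1024];

    void example(void)
    {
        /* locked = 0: no locking; user_data is handed back to the
           ABORT/PRINT/assert hooks seen throughout this file. */
        mspace ms = create_mspace_with_base(pool, sizeof(pool), 0, NULL);
        if (ms != NULL) {
            void *p = mspace_malloc(ms, 512);
            if (p != NULL)
                mspace_free(ms, p);
        }
    }
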
> > +/*
> > +  mspace versions of routines are near-clones of the global
> > +  versions. This is not so nice but better than the alternatives.
> > +*/
> > +
> > +
> > +void* mspace_malloc(mspace msp, size_t bytes) {
> > +  mstate ms = (mstate)msp;
> > +  if (!ok_magic(ms)) {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +    return 0;
> > +  }
> > +  if (!PREACTION(ms)) {
> > +    void* mem;
> > +    size_t nb;
> > +    if (bytes <= MAX_SMALL_REQUEST) {
> > +      bindex_t idx;
> > +      binmap_t smallbits;
> > +      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
> > +      idx = small_index(nb);
> > +      smallbits = ms->smallmap >> idx;
> > +
> > +      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
> > +        mchunkptr b, p;
> > +        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
> > +        b = smallbin_at(ms, idx);
> > +        p = b->fd;
> > +        assert(ms->user_data, chunksize(p) == small_index2size(idx));
> > +        unlink_first_small_chunk(ms, b, p, idx);
> > +        set_inuse_and_pinuse(ms, p, small_index2size(idx));
> > +        mem = chunk2mem(p);
> > +        check_malloced_chunk(ms, mem, nb);
> > +        goto postaction;
> > +      }
> > +
> > +      else if (nb > ms->dvsize) {
> > +        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
> > +          mchunkptr b, p, r;
> > +          size_t rsize;
> > +          bindex_t i;
> > +          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
> > +          binmap_t leastbit = least_bit(leftbits);
> > +          compute_bit2idx(leastbit, i);
> > +          b = smallbin_at(ms, i);
> > +          p = b->fd;
> > +          assert(ms->user_data, chunksize(p) == small_index2size(i));
> > +          unlink_first_small_chunk(ms, b, p, i);
> > +          rsize = small_index2size(i) - nb;
> > +          /* Fit here cannot be remainderless if 4byte sizes */
> > +          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
> > +            set_inuse_and_pinuse(ms, p, small_index2size(i));
> > +          else {
> > +            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > +            r = chunk_plus_offset(p, nb);
> > +            set_size_and_pinuse_of_free_chunk(r, rsize);
> > +            replace_dv(ms, r, rsize);
> > +          }
> > +          mem = chunk2mem(p);
> > +          check_malloced_chunk(ms, mem, nb);
> > +          goto postaction;
> > +        }
> > +
> > +        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
> > +          check_malloced_chunk(ms, mem, nb);
> > +          goto postaction;
> > +        }
> > +      }
> > +    }
> > +    else if (bytes >= MAX_REQUEST)
> > +      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
> > +    else {
> > +      nb = pad_request(bytes);
> > +      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
> > +        check_malloced_chunk(ms, mem, nb);
> > +        goto postaction;
> > +      }
> > +    }
> > +
> > +    if (nb <= ms->dvsize) {
> > +      size_t rsize = ms->dvsize - nb;
> > +      mchunkptr p = ms->dv;
> > +      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
> > +        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
> > +        ms->dvsize = rsize;
> > +        set_size_and_pinuse_of_free_chunk(r, rsize);
> > +        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > +      }
> > +      else { /* exhaust dv */
> > +        size_t dvs = ms->dvsize;
> > +        ms->dvsize = 0;
> > +        ms->dv = 0;
> > +        set_inuse_and_pinuse(ms, p, dvs);
> > +      }
> > +      mem = chunk2mem(p);
> > +      check_malloced_chunk(ms, mem, nb);
> > +      goto postaction;
> > +    }
> > +
> > +    else if (nb < ms->topsize) { /* Split top */
> > +      size_t rsize = ms->topsize -= nb;
> > +      mchunkptr p = ms->top;
> > +      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
> > +      r->head = rsize | PINUSE_BIT;
> > +      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
> > +      mem = chunk2mem(p);
> > +      check_top_chunk(ms, ms->top);
> > +      check_malloced_chunk(ms, mem, nb);
> > +      goto postaction;
> > +    }
> > +
> > +    mem = sys_alloc(ms, nb);
> > +
> > +  postaction:
> > +    POSTACTION(ms);
> > +    return mem;
> > +  }
> > +
> > +  return 0;
> > +}
> > +
> > +void mspace_free(mspace msp, void* mem) {
> > +  if (mem != 0) {
> > +    mchunkptr p  = mem2chunk(mem);
> > +#if FOOTERS
> > +    mstate fm = get_mstate_for(p);
> > +#else /* FOOTERS */
> > +    mstate fm = (mstate)msp;
> > +#endif /* FOOTERS */
> > +    if (!ok_magic(fm)) {
> > +      USAGE_ERROR_ACTION(fm, p);
> > +      return;
> > +    }
> > +    if (!PREACTION(fm)) {
> > +      check_inuse_chunk(fm, p);
> > +      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
> > +        size_t psize = chunksize(p);
> > +        mchunkptr next = chunk_plus_offset(p, psize);
> > +        if (!pinuse(p)) {
> > +          size_t prevsize = p->prev_foot;
> > +
> > +          mchunkptr prev = chunk_minus_offset(p, prevsize);
> > +          psize += prevsize;
> > +          p = prev;
> > +          if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
> > +            if (p != fm->dv) {
> > +              unlink_chunk(fm, p, prevsize);
> > +            }
> > +            else if ((next->head & INUSE_BITS) == INUSE_BITS) {
> > +              fm->dvsize = psize;
> > +              set_free_with_pinuse(p, psize, next);
> > +              goto postaction;
> > +            }
> > +          }
> > +          else
> > +            goto erroraction;
> > +        }
> > +
> > +        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
> > +          if (!cinuse(next)) {  /* consolidate forward */
> > +            if (next == fm->top) {
> > +              size_t tsize = fm->topsize += psize;
> > +              fm->top = p;
> > +              p->head = tsize | PINUSE_BIT;
> > +              if (p == fm->dv) {
> > +                fm->dv = 0;
> > +                fm->dvsize = 0;
> > +              }
> > +              goto postaction;
> > +            }
> > +            else if (next == fm->dv) {
> > +              size_t dsize = fm->dvsize += psize;
> > +              fm->dv = p;
> > +              set_size_and_pinuse_of_free_chunk(p, dsize);
> > +              goto postaction;
> > +            }
> > +            else {
> > +              size_t nsize = chunksize(next);
> > +              psize += nsize;
> > +              unlink_chunk(fm, next, nsize);
> > +              set_size_and_pinuse_of_free_chunk(p, psize);
> > +              if (p == fm->dv) {
> > +                fm->dvsize = psize;
> > +                goto postaction;
> > +              }
> > +            }
> > +          }
> > +          else
> > +            set_free_with_pinuse(p, psize, next);
> > +          insert_chunk(fm, p, psize);
> > +          check_free_chunk(fm, p);
> > +          goto postaction;
> > +        }
> > +      }
> > +    erroraction:
> > +      USAGE_ERROR_ACTION(fm, p);
> > +    postaction:
> > +      POSTACTION(fm);
> > +    }
> > +  }
> > +}
> > +
> > +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
> > +  void* mem;
> > +  size_t req = 0;
> > +  mstate ms = (mstate)msp;
> > +  if (!ok_magic(ms)) {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +    return 0;
> > +  }
> > +  if (n_elements != 0) {
> > +    req = n_elements * elem_size;
> > +    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
> > +        (req / n_elements != elem_size))
> > +      req = MAX_SIZE_T; /* force downstream failure on overflow */
> > +  }
> > +  mem = internal_malloc(ms, req);
> > +  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
> > +    MEMCLEAR(mem, req);
> > +  return mem;
> > +}
> > +
> > +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
> > +  if (oldmem == 0)
> > +    return mspace_malloc(msp, bytes);
> > +#ifdef REALLOC_ZERO_BYTES_FREES
> > +  if (bytes == 0) {
> > +    mspace_free(msp, oldmem);
> > +    return 0;
> > +  }
> > +#endif /* REALLOC_ZERO_BYTES_FREES */
> > +  else {
> > +#if FOOTERS
> > +    mchunkptr p  = mem2chunk(oldmem);
> > +    mstate ms = get_mstate_for(p);
> > +#else /* FOOTERS */
> > +    mstate ms = (mstate)msp;
> > +#endif /* FOOTERS */
> > +    if (!ok_magic(ms)) {
> > +      USAGE_ERROR_ACTION(ms,ms);
> > +      return 0;
> > +    }
> > +    return internal_realloc(ms, oldmem, bytes);
> > +  }
> > +}
> > +
> > +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
> > +  mstate ms = (mstate)msp;
> > +  if (!ok_magic(ms)) {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +    return 0;
> > +  }
> > +  return internal_memalign(ms, alignment, bytes);
> > +}
> > +
> > +void mspace_malloc_stats(mspace msp) {
> > +  mstate ms = (mstate)msp;
> > +  if (ok_magic(ms)) {
> > +    internal_malloc_stats(ms);
> > +  }
> > +  else {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +  }
> > +}
> > +
> > +size_t mspace_footprint(mspace msp) {
> > +  size_t result;
> > +  mstate ms = (mstate)msp;
> > +  if (ok_magic(ms)) {
> > +    result = ms->footprint;
> > +  } else {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +  }
> > +  return result;
> > +}
> > +
> > +
> > +size_t mspace_max_footprint(mspace msp) {
> > +  size_t result;
> > +  mstate ms = (mstate)msp;
> > +  if (ok_magic(ms)) {
> > +    result = ms->max_footprint;
> > +  } else {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +  }
> > +  return result;
> > +}
> > +
> > +
> > +#if !NO_MALLINFO
> > +struct mallinfo mspace_mallinfo(mspace msp) {
> > +  mstate ms = (mstate)msp;
> > +  if (!ok_magic(ms)) {
> > +    USAGE_ERROR_ACTION(ms,ms);
> > +  }
> > +  return internal_mallinfo(ms);
> > +}
> > +#endif /* NO_MALLINFO */
> > +
> > +int mspace_mallopt(int param_number, int value) {
> > +  return change_mparam(param_number, value);
> > +}
> > +
> > diff --git a/qxldod/qxldod.vcxproj b/qxldod/qxldod.vcxproj
> > index 15b116c..be4bb42 100755
> > --- a/qxldod/qxldod.vcxproj
> > +++ b/qxldod/qxldod.vcxproj
> > @@ -279,7 +279,7 @@
> >    <ItemGroup>
> >      <ClCompile Include="BaseObject.cpp" />
> >      <ClCompile Include="driver.cpp" />
> > -    <ClCompile Include="mspace.c" />
> > +    <ClCompile Include="mspace.cpp" />
> >      <ClCompile Include="QxlDod.cpp" />
> >    </ItemGroup>
> >    <ItemGroup>
> > diff --git a/qxldod/qxldod.vcxproj.filters b/qxldod/qxldod.vcxproj.filters
> > index 1ba05af..6e241a2 100755
> > --- a/qxldod/qxldod.vcxproj.filters
> > +++ b/qxldod/qxldod.vcxproj.filters
> > @@ -42,7 +42,7 @@
> >      <ClCompile Include="QxlDod.cpp">
> >        <Filter>Source Files</Filter>
> >      </ClCompile>
> > -    <ClCompile Include="mspace.c">
> > +    <ClCompile Include="mspace.cpp">
> >        <Filter>Source Files</Filter>
> >      </ClCompile>
> >    </ItemGroup>
>
> This patch cannot be accepted in this form.
> - it contains many space/indentation changes;
> - code movements;
> - page/non-page movement;
> - mspace.c file rename.
>
> All these changes should be split. Also, it's better to use -M to reduce
> the size of the diff for the file rename.

What's -M? Can you please explain?
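
I'm guessing it refers to git's rename detection, i.e. something like:

    git format-patch -M -1 HEAD

so that the mspace.c -> mspace.cpp rename is recorded as a rename instead of
a full delete + add in the patch. Is that what you mean?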

> Frediano
>



-- 
Respectfully,
Sameeh Jubran
Linkedin <https://il.linkedin.com/pub/sameeh-jubran/87/747/a8a>
Junior Software Engineer @ Daynix <http://www.daynix.com>.