<div dir="ltr"><br><div class="gmail_extra"><br><div class="gmail_quote">On Tue, Aug 30, 2016 at 6:17 PM, Frediano Ziglio <span dir="ltr"><<a href="mailto:fziglio@redhat.com" target="_blank">fziglio@redhat.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">><br>
> Signed-off-by: Sameeh Jubran <sameeh@daynix.com>
> ---<br>
> qxldod/QxlDod.cpp             |  520 +++++----
> qxldod/mspace.c               | 2437 ----------------------------------------
> qxldod/mspace.cpp             | 2439 +++++++++++++++++++++++++++++++++++++++++
> qxldod/qxldod.vcxproj         |    2 +-
> qxldod/qxldod.vcxproj.filters |    2 +-
> 5 files changed, 2729 insertions(+), 2671 deletions(-)
> delete mode 100755 qxldod/mspace.c
> create mode 100644 qxldod/mspace.cpp
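
A note for context on the rename above: once mspace.c is built as mspace.cpp its entry points get C++ linkage, so any remaining C callers would need the usual guard in the header. A minimal sketch of that pattern, assuming it lives in mspace.h (hypothetical; the signature is inferred from the call sites in this patch, stock dlmalloc has no user_data argument):

    /* mspace.h -- hypothetical linkage guard, illustration only */
    #ifdef __cplusplus
    extern "C" {
    #endif

    typedef void* mspace;
    mspace create_mspace_with_base(void* base, size_t capacity,
                                   int locked, void* user_data);

    #ifdef __cplusplus
    } /* extern "C" */
    #endif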
><br>
> diff --git a/qxldod/QxlDod.cpp b/qxldod/QxlDod.cpp<br>
> index a1718a4..a328d2d 100755<br>
> --- a/qxldod/QxlDod.cpp<br>
> +++ b/qxldod/QxlDod.cpp<br>
> @@ -3,7 +3,7 @@<br>
> #include "qxl_windows.h"<br>
><br>
> #pragma code_seg(push)<br>
> -#pragma code_seg()<br>
> +#pragma code_seg("PAGE")<br>
><br>
> #define WIN_QXL_INT_MASK ((QXL_INTERRUPT_DISPLAY) | \<br>
> (QXL_INTERRUPT_CURSOR) | \<br>
> @@ -55,15 +55,11 @@ typedef struct _QXL_ESCAPE {<br>
> };<br>
> }QXL_ESCAPE;<br>
><br>
> -#pragma code_seg(pop)<br>
> -<br>
> -#pragma code_seg("PAGE")<br>
> -<br>
> -<br>
> QxlDod::QxlDod(_In_ DEVICE_OBJECT* pPhysicalDeviceObject) :
> m_pPhysicalDevice(pPhysicalDeviceObject),
> m_MonitorPowerState(PowerDeviceD0),
> m_AdapterPowerState(PowerDeviceD0)
> {
> + PAGED_CODE();
> DbgPrint(TRACE_LEVEL_INFORMATION, ("---> %s\n", __FUNCTION__));
> *((UINT*)&m_Flags) = 0;
> RtlZeroMemory(&m_DxgkInterface, sizeof(m_DxgkInterface));
> @@ -246,6 +242,7 @@ DbgDevicePowerString(<br>
> __in DEVICE_POWER_STATE Type<br>
> )<br>
> {<br>
> + PAGED_CODE();<br>
> switch (Type)<br>
> {<br>
> case PowerDeviceUnspecified:<br>
> @@ -270,6 +267,7 @@ DbgPowerActionString(<br>
> __in POWER_ACTION Type<br>
> )<br>
> {<br>
> + PAGED_CODE();<br>
> switch (Type)<br>
> {<br>
> case PowerActionNone:<br>
> @@ -1474,7 +1472,7 @@ NTSTATUS QxlDod::CommitVidPn(_In_ CONST DXGKARG_COMMITVIDPN* CONST pCommitVidPn)
>
> CommitVidPnExit:
>
> - NTSTATUS TempStatus;
> + NTSTATUS TempStatus(STATUS_SUCCESS);
> UNREFERENCED_PARAMETER(TempStatus);
>
> if ((pVidPnSourceModeSetInterface != NULL) &&
> @@ -1664,7 +1662,7 @@ NTSTATUS QxlDod::UpdateActiveVidPnPresentPath(_In_ CONST DXGKARG_UPDATEACTIVEVID
> //<br>
> // Non-Paged Code<br>
> //<br>
> -#pragma code_seg(push)<br>
> +#pragma code_seg(push) //Non-Paged Code<br>
> #pragma code_seg()<br>
><br>
> VOID QxlDod::DpcRoutine(VOID)<br>
> @@ -1835,7 +1833,7 @@ NTSTATUS QxlDod::WriteHWInfoStr(_In_ HANDLE DevInstRegKeyHandle, _In_ PCWSTR psz
> return Status;<br>
> }<br>
><br>
> -NTSTATUS QxlDod::RegisterHWInfo(ULONG Id)<br>
> +NTSTATUS QxlDod::RegisterHWInfo(_In_ ULONG Id)<br>
> {<br>
> PAGED_CODE();<br>
><br>
> @@ -1914,11 +1912,10 @@ NTSTATUS QxlDod::RegisterHWInfo(ULONG Id)<br>
> return Status;<br>
> }<br>
><br>
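
Side note on the _In_ additions like the one above: SAL annotations cost nothing at runtime and let the Static Driver Verifier check callers. The convention, using signatures that appear elsewhere in this patch:

    NTSTATUS QxlDod::RegisterHWInfo(_In_ ULONG Id);   // Id: read-only input
    VOID GetPitches(_In_ CONST BLT_INFO* pBltInfo,    // input structure
                    _Out_ LONG* pPixelPitch,          // written before return
                    _Out_ LONG* pRowPitch);           // written before return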
> -<br>
> //<br>
> // Non-Paged Code<br>
> //<br>
> -#pragma code_seg(push)<br>
> +#pragma code_seg(push) //Non-Paged<br>
> #pragma code_seg()<br>
><br>
> UINT BPPFromPixelFormat(D3DDDIFORMAT Format)
> @@ -1947,122 +1944,42 @@ D3DDDI_VIDEO_PRESENT_SOURCE_ID QxlDod::FindSourceForTarget(D3DDDI_VIDEO_PRESENT_
><br>
> return DefaultToZero ? 0 : D3DDDI_ID_UNINITIALIZED;<br>
> }<br>
> +// HW specific code<br>
><br>
> -<br>
> -//<br>
> -// Frame buffer map/unmap<br>
> -//<br>
> -<br>
> -NTSTATUS
> -MapFrameBuffer(
> - _In_ PHYSICAL_ADDRESS PhysicalAddress,
> - _In_ ULONG Length,
> - _Outptr_result_bytebuffer_(Length) VOID** VirtualAddress)
> +VOID GetPitches(_In_ CONST BLT_INFO* pBltInfo, _Out_ LONG* pPixelPitch, _Out_ LONG* pRowPitch)
> {
> - PAGED_CODE();<br>
> -<br>
> - //<br>
> - // Check for parameters<br>
> - //<br>
> - if ((PhysicalAddress.QuadPart == (ULONGLONG)0) ||<br>
> - (Length == 0) ||<br>
> - (VirtualAddress == NULL))<br>
> + switch (pBltInfo->Rotation) {<br>
> + case D3DKMDT_VPPR_IDENTITY:<br>
> {<br>
> - DbgPrint(TRACE_LEVEL_ERROR, ("One of PhysicalAddress.QuadPart<br>
> (0x%I64x), Length (%lu), VirtualAddress (%p) is NULL or 0\n",<br>
> - PhysicalAddress.QuadPart, Length, VirtualAddress));<br>
> - return STATUS_INVALID_PARAMETER;<br>
> + *pPixelPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> + *pRowPitch = pBltInfo->Pitch;<br>
> + return;<br>
> }<br>
> -<br>
> - *VirtualAddress = MmMapIoSpace(PhysicalAddress,<br>
> - Length,<br>
> - MmWriteCombined);<br>
> - if (*VirtualAddress == NULL)<br>
> + case D3DKMDT_VPPR_ROTATE90:<br>
> {<br>
> - // The underlying call to MmMapIoSpace failed. This may be because, MmWriteCombined
> - // isn't supported, so try again with MmNonCached<br>
> -<br>
> - *VirtualAddress = MmMapIoSpace(PhysicalAddress,<br>
> - Length,<br>
> - MmNonCached);<br>
> - if (*VirtualAddress == NULL)<br>
> - {<br>
> - DbgPrint(TRACE_LEVEL_ERROR, ("MmMapIoSpace returned a NULL<br>
> buffer when trying to allocate %lu bytes", Length));<br>
> - return STATUS_NO_MEMORY;<br>
> - }<br>
> + *pPixelPitch = -((LONG) pBltInfo->Pitch);<br>
> + *pRowPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> + return;<br>
> }<br>
> -<br>
> - return STATUS_SUCCESS;<br>
> -}<br>
> -<br>
> -NTSTATUS<br>
> -UnmapFrameBuffer(<br>
> - _In_reads_bytes_(Length) VOID* VirtualAddress,<br>
> - _In_ ULONG Length)<br>
> -{<br>
> - PAGED_CODE();<br>
> -<br>
> -<br>
> - //<br>
> - // Check for parameters<br>
> - //<br>
> - if ((VirtualAddress == NULL) && (Length == 0))<br>
> + case D3DKMDT_VPPR_ROTATE180:<br>
> {<br>
> - // Allow this function to be called when there's no work to do, and treat as successful
> - return STATUS_SUCCESS;<br>
> + *pPixelPitch = -((LONG) pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> + *pRowPitch = -((LONG) pBltInfo->Pitch);<br>
> + return;<br>
> }<br>
> - else if ((VirtualAddress == NULL) || (Length == 0))<br>
> + case D3DKMDT_VPPR_ROTATE270:<br>
> {<br>
> - DbgPrint(TRACE_LEVEL_ERROR, ("Only one of Length (%lu),<br>
> VirtualAddress (%p) is NULL or 0",<br>
> - Length, VirtualAddress));<br>
> - return STATUS_INVALID_PARAMETER;<br>
> + *pPixelPitch = pBltInfo->Pitch;<br>
> + *pRowPitch = -((LONG) pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> + return;<br>
> }<br>
> -<br>
> - MmUnmapIoSpace(VirtualAddress,<br>
> - Length);<br>
> -<br>
> - return STATUS_SUCCESS;<br>
> -}<br>
> -<br>
> -<br>
> -<br>
> -<br>
> -// HW specific code<br>
> -<br>
> -VOID GetPitches(_In_ CONST BLT_INFO* pBltInfo, _Out_ LONG* pPixelPitch, _Out_ LONG* pRowPitch)
> -{<br>
> - switch (pBltInfo->Rotation)<br>
> + default:<br>
> {<br>
> - case D3DKMDT_VPPR_IDENTITY:<br>
> - {<br>
> - *pPixelPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> - *pRowPitch = pBltInfo->Pitch;<br>
> - return;<br>
> - }<br>
> - case D3DKMDT_VPPR_ROTATE90:<br>
> - {<br>
> - *pPixelPitch = -((LONG)pBltInfo->Pitch);<br>
> - *pRowPitch = (pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> - return;<br>
> - }<br>
> - case D3DKMDT_VPPR_ROTATE180:<br>
> - {<br>
> - *pPixelPitch = -((LONG)pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> - *pRowPitch = -((LONG)pBltInfo->Pitch);<br>
> - return;<br>
> - }<br>
> - case D3DKMDT_VPPR_ROTATE270:<br>
> - {<br>
> - *pPixelPitch = pBltInfo->Pitch;<br>
> - *pRowPitch = -((LONG)pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> - return;<br>
> - }<br>
> - default:<br>
> - {<br>
> - QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified", pBltInfo->Rotation);
> - *pPixelPitch = 0;
> - *pRowPitch = 0;
> - return;
> - }
> + QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified", pBltInfo->Rotation);
> + *pPixelPitch = 0;<br>
> + *pRowPitch = 0;<br>
> + return;<br>
> + }<br>
> }<br>
> }<br>
><br>
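
To make the pitch math in the hunk above concrete, here is what GetPitches produces for a hypothetical 32bpp surface with a 4096-byte pitch (illustrative values only, not from the patch):

    BLT_INFO info = {};                      // hypothetical example values
    info.BitsPerPel = 32;
    info.Pitch = 4096;                       // bytes per scan line
    info.Rotation = D3DKMDT_VPPR_ROTATE90;

    LONG pixelPitch, rowPitch;
    GetPitches(&info, &pixelPitch, &rowPitch);
    // ROTATE90: pixelPitch == -4096 (one scan line up per source pixel),
    //           rowPitch   ==     4 (one 32bpp pixel right per source row)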
> @@ -2072,61 +1989,60 @@ BYTE* GetRowStart(_In_ CONST BLT_INFO* pBltInfo, CONST RECT* pRect)
> LONG OffLeft = pRect->left + pBltInfo->Offset.x;<br>
> LONG OffTop = pRect->top + pBltInfo->Offset.y;<br>
> LONG BytesPerPixel = (pBltInfo->BitsPerPel / BITS_PER_BYTE);<br>
> - switch (pBltInfo->Rotation)<br>
> + switch (pBltInfo->Rotation) {<br>
> + case D3DKMDT_VPPR_IDENTITY:<br>
> {<br>
> - case D3DKMDT_VPPR_IDENTITY:<br>
> - {<br>
> - pRet = ((BYTE*)pBltInfo->pBits +<br>
> - OffTop * pBltInfo->Pitch +<br>
> - OffLeft * BytesPerPixel);<br>
> - break;<br>
> - }<br>
> - case D3DKMDT_VPPR_ROTATE90:<br>
> - {<br>
> - pRet = ((BYTE*)pBltInfo->pBits +<br>
> - (pBltInfo->Height - 1 - OffLeft) * pBltInfo->Pitch +
> - OffTop * BytesPerPixel);
> - break;
> - }
> - case D3DKMDT_VPPR_ROTATE180:
> - {
> - pRet = ((BYTE*)pBltInfo->pBits +
> - (pBltInfo->Height - 1 - OffTop) * pBltInfo->Pitch +
> - (pBltInfo->Width - 1 - OffLeft) * BytesPerPixel);
> - break;<br>
> - }<br>
> - case D3DKMDT_VPPR_ROTATE270:<br>
> - {<br>
> - pRet = ((BYTE*)pBltInfo->pBits +<br>
> - OffLeft * pBltInfo->Pitch +<br>
> - (pBltInfo->Width - 1 - OffTop) * BytesPerPixel);<br>
> - break;<br>
> - }<br>
> - default:<br>
> - {<br>
> - QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",<br>
> pBltInfo->Rotation);<br>
> - break;<br>
> - }<br>
> + pRet = ((BYTE*) pBltInfo->pBits +<br>
> + OffTop * pBltInfo->Pitch +<br>
> + OffLeft * BytesPerPixel);<br>
> + break;<br>
> + }<br>
> + case D3DKMDT_VPPR_ROTATE90:<br>
> + {<br>
> + pRet = ((BYTE*) pBltInfo->pBits +<br>
> + (pBltInfo->Height - 1 - OffLeft) * pBltInfo->Pitch +<br>
> + OffTop * BytesPerPixel);<br>
> + break;<br>
> + }<br>
> + case D3DKMDT_VPPR_ROTATE180:<br>
> + {<br>
> + pRet = ((BYTE*) pBltInfo->pBits +<br>
> + (pBltInfo->Height - 1 - OffTop) * pBltInfo->Pitch +<br>
> + (pBltInfo->Width - 1 - OffLeft) * BytesPerPixel);<br>
> + break;<br>
> + }<br>
> + case D3DKMDT_VPPR_ROTATE270:<br>
> + {<br>
> + pRet = ((BYTE*) pBltInfo->pBits +<br>
> + OffLeft * pBltInfo->Pitch +<br>
> + (pBltInfo->Width - 1 - OffTop) * BytesPerPixel);<br>
> + break;<br>
> + }<br>
> + default:<br>
> + {<br>
> + QXL_LOG_ASSERTION1("Invalid rotation (0x%I64x) specified",<br>
> pBltInfo->Rotation);<br>
> + break;<br>
> + }<br>
> }<br>
><br>
> return pRet;<br>
> }<br>
><br>
> /****************************Internal*Routine******************************\
> - * CopyBitsGeneric<br>
> - *<br>
> - *<br>
> - * Blt function which can handle a rotated dst/src, offset rects in dst/src<br>
> - * and bpp combinations of:<br>
> - * dst | src<br>
> - * 32 | 32 // For identity rotation this is much faster in<br>
> CopyBits32_32<br>
> - * 32 | 24<br>
> - * 32 | 16<br>
> - * 24 | 32<br>
> - * 16 | 32<br>
> - * 8 | 32<br>
> - * 24 | 24 // untested<br>
> - *<br>
> +* CopyBitsGeneric<br>
> +*<br>
> +*<br>
> +* Blt function which can handle a rotated dst/src, offset rects in dst/src<br>
> +* and bpp combinations of:<br>
> +* dst | src<br>
> +* 32 | 32 // For identity rotation this is much faster in CopyBits32_32<br>
> +* 32 | 24<br>
> +* 32 | 16<br>
> +* 24 | 32<br>
> +* 16 | 32<br>
> +* 8 | 32<br>
> +* 24 | 24 // untested<br>
> +*<br>
> \**************************************************************************/
><br>
> VOID CopyBitsGeneric(<br>
> @@ -2140,13 +2056,12 @@ VOID CopyBitsGeneric(<br>
> LONG SrcPixelPitch = 0;<br>
> LONG SrcRowPitch = 0;<br>
><br>
> - DbgPrint(TRACE_LEVEL_VERBOSE , ("---> %s NumRects = %d Dst = %p Src = %p\n", __FUNCTION__, NumRects, pDst->pBits, pSrc->pBits));
> + DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s NumRects = %d Dst = %p Src = %p\n", __FUNCTION__, NumRects, pDst->pBits, pSrc->pBits));
><br>
> GetPitches(pDst, &DstPixelPitch, &DstRowPitch);<br>
> GetPitches(pSrc, &SrcPixelPitch, &SrcRowPitch);<br>
><br>
> - for (UINT iRect = 0; iRect < NumRects; iRect++)<br>
> - {<br>
> + for (UINT iRect = 0; iRect < NumRects; iRect++) {<br>
> CONST RECT* pRect = &pRects[iRect];<br>
><br>
> NT_ASSERT(pRect->right >= pRect->left);<br>
> @@ -2158,57 +2073,47 @@ VOID CopyBitsGeneric(<br>
> BYTE* pDstRow = GetRowStart(pDst, pRect);<br>
> CONST BYTE* pSrcRow = GetRowStart(pSrc, pRect);<br>
><br>
> - for (UINT y=0; y < NumRows; y++)<br>
> - {<br>
> + for (UINT y = 0; y < NumRows; y++) {<br>
> BYTE* pDstPixel = pDstRow;<br>
> CONST BYTE* pSrcPixel = pSrcRow;<br>
><br>
> - for (UINT x=0; x < NumPixels; x++)<br>
> - {<br>
> + for (UINT x = 0; x < NumPixels; x++) {<br>
> if ((pDst->BitsPerPel == 24) ||<br>
> - (pSrc->BitsPerPel == 24))<br>
> - {<br>
> + (pSrc->BitsPerPel == 24)) {<br>
> pDstPixel[0] = pSrcPixel[0];<br>
> pDstPixel[1] = pSrcPixel[1];<br>
> pDstPixel[2] = pSrcPixel[2];<br>
> // pPixel[3] is the alpha channel and is ignored for whichever of Src/Dst is 32bpp
> }<br>
> - else if (pDst->BitsPerPel == 32)<br>
> - {<br>
> - if (pSrc->BitsPerPel == 32)<br>
> - {<br>
> - UINT32* pDstPixelAs32 = (UINT32*)pDstPixel;<br>
> - UINT32* pSrcPixelAs32 = (UINT32*)pSrcPixel;<br>
> + else if (pDst->BitsPerPel == 32) {<br>
> + if (pSrc->BitsPerPel == 32) {<br>
> + UINT32* pDstPixelAs32 = (UINT32*) pDstPixel;<br>
> + UINT32* pSrcPixelAs32 = (UINT32*) pSrcPixel;<br>
> *pDstPixelAs32 = *pSrcPixelAs32;<br>
> }<br>
> - else if (pSrc->BitsPerPel == 16)<br>
> - {<br>
> - UINT32* pDstPixelAs32 = (UINT32*)pDstPixel;<br>
> - UINT16* pSrcPixelAs16 = (UINT16*)pSrcPixel;<br>
> + else if (pSrc->BitsPerPel == 16) {<br>
> + UINT32* pDstPixelAs32 = (UINT32*) pDstPixel;<br>
> + UINT16* pSrcPixelAs16 = (UINT16*) pSrcPixel;<br>
><br>
> *pDstPixelAs32 = CONVERT_16BPP_TO_32BPP(*pSrcPixelAs16);
> }<br>
> - else<br>
> - {<br>
> + else {<br>
> // Invalid pSrc->BitsPerPel on a pDst->BitsPerPel of 32
> NT_ASSERT(FALSE);<br>
> }<br>
> }<br>
> - else if (pDst->BitsPerPel == 16)<br>
> - {<br>
> + else if (pDst->BitsPerPel == 16) {<br>
> NT_ASSERT(pSrc->BitsPerPel == 32);<br>
><br>
> - UINT16* pDstPixelAs16 = (UINT16*)pDstPixel;<br>
> + UINT16* pDstPixelAs16 = (UINT16*) pDstPixel;<br>
> *pDstPixelAs16 = CONVERT_32BPP_TO_16BPP(pSrcPixel);
> }<br>
> - else if (pDst->BitsPerPel == 8)<br>
> - {<br>
> + else if (pDst->BitsPerPel == 8) {<br>
> NT_ASSERT(pSrc->BitsPerPel == 32);<br>
><br>
> *pDstPixel = CONVERT_32BPP_TO_8BPP(pSrcPixel);
> }<br>
> - else<br>
> - {<br>
> + else {<br>
> // Invalid pDst->BitsPerPel<br>
> NT_ASSERT(FALSE);<br>
> }<br>
> @@ -2236,8 +2141,7 @@ VOID CopyBits32_32(<br>
><br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> - for (UINT iRect = 0; iRect < NumRects; iRect++)<br>
> - {<br>
> + for (UINT iRect = 0; iRect < NumRects; iRect++) {<br>
> CONST RECT* pRect = &pRects[iRect];<br>
><br>
> NT_ASSERT(pRect->right >= pRect->left);<br>
> @@ -2246,15 +2150,14 @@ VOID CopyBits32_32(<br>
> UINT NumPixels = pRect->right - pRect->left;<br>
> UINT NumRows = pRect->bottom - pRect->top;<br>
> UINT BytesToCopy = NumPixels * 4;<br>
> - BYTE* pStartDst = ((BYTE*)pDst->pBits +<br>
> - (pRect->top + pDst->Offset.y) * pDst->Pitch +<br>
> - (pRect->left + pDst->Offset.x) * 4);<br>
> - CONST BYTE* pStartSrc = ((BYTE*)pSrc->pBits +<br>
> - (pRect->top + pSrc->Offset.y) * pSrc->Pitch +
> - (pRect->left + pSrc->Offset.x) * 4);
> -<br>
> - for (UINT i = 0; i < NumRows; ++i)<br>
> - {<br>
> + BYTE* pStartDst = ((BYTE*) pDst->pBits +<br>
> + (pRect->top + pDst->Offset.y) * pDst->Pitch +<br>
> + (pRect->left + pDst->Offset.x) * 4);<br>
> + CONST BYTE* pStartSrc = ((BYTE*) pSrc->pBits +<br>
> + (pRect->top + pSrc->Offset.y) * pSrc->Pitch +<br>
> + (pRect->left + pSrc->Offset.x) * 4);<br>
> +<br>
> + for (UINT i = 0; i < NumRows; ++i) {<br>
> RtlCopyMemory(pStartDst, pStartSrc, BytesToCopy);<br>
> pStartDst += pDst->Pitch;<br>
> pStartSrc += pSrc->Pitch;<br>
> @@ -2264,7 +2167,7 @@ VOID CopyBits32_32(<br>
> }<br>
><br>
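
For readers of the hunk above: the per-row RtlCopyMemory is deliberate. Pitch is normally larger than width * 4, so the rect's rows are not contiguous in memory. A sketch of the addressing it implements (RowStart32 is a made-up helper name, not in the patch):

    BYTE* RowStart32(CONST BLT_INFO* p, CONST RECT* r, UINT i)
    {
        return (BYTE*) p->pBits
             + (r->top + p->Offset.y + i) * p->Pitch   // jump i scan lines down
             + (r->left + p->Offset.x) * 4;            // 32bpp: 4 bytes per pixel
    }
    // exactly NumPixels * 4 bytes starting there belong to the rect's row i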
><br>
> -VOID BltBits (<br>
> +VOID BltBits(<br>
> BLT_INFO* pDst,<br>
> CONST BLT_INFO* pSrc,<br>
> UINT NumRects,<br>
> @@ -2274,31 +2177,105 @@ VOID BltBits (<br>
> // This usage is redundant in the sample driver since it is already being used for MmProbeAndLockPages. However, it is very important
> // to have this in place and to make sure developers don't miss it, it is in these two locations.
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> - __try<br>
> - {<br>
> + __try {<br>
> if (pDst->BitsPerPel == 32 &&<br>
> pSrc->BitsPerPel == 32 &&<br>
> pDst->Rotation == D3DKMDT_VPPR_IDENTITY &&<br>
> - pSrc->Rotation == D3DKMDT_VPPR_IDENTITY)<br>
> - {<br>
> + pSrc->Rotation == D3DKMDT_VPPR_IDENTITY) {<br>
> // This is by far the most common copy function being called<br>
> CopyBits32_32(pDst, pSrc, NumRects, pRects);<br>
> }<br>
> - else<br>
> - {<br>
> + else {<br>
> CopyBitsGeneric(pDst, pSrc, NumRects, pRects);<br>
> }<br>
> }<br>
> - #pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER, "try/except is only able to protect against user-mode errors and these are the only errors we try to catch here");
> - __except(EXCEPTION_EXECUTE_HANDLER)
> +#pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER, "try/except is only able to protect against user-mode errors and these are the only errors we try to catch here");
> + __except (EXCEPTION_EXECUTE_HANDLER)
> {<br>
> DbgPrint(TRACE_LEVEL_ERROR, ("Either dst (0x%I64x) or src (0x%I64x)<br>
> bits encountered exception during access.\n", pDst->pBits,<br>
> pSrc->pBits));<br>
> }<br>
> }<br>
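
The try/except kept above is the standard guard for touching buffers that may be backed by user-mode mappings; boiled down, the shape is (a restatement of the hunk, not new behavior):

    __try {
        // any access to pDst->pBits / pSrc->pBits can fault if the
        // user-mode mapping goes away mid-copy
        CopyBits32_32(pDst, pSrc, NumRects, pRects);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        // swallow the fault: drop this present rather than crash the machine
        DbgPrint(TRACE_LEVEL_ERROR, ("blit faulted\n"));
    }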
> -#pragma code_seg(pop) // End Non-Paged Code<br>
> +<br>
> +#pragma code seg(pop) // End Non Paged Code<br>
> +#pragma code_seg("PAGE")<br>
> +//<br>
> +// Frame buffer map/unmap<br>
> +//<br>
> +<br>
> +NTSTATUS<br>
> +MapFrameBuffer(<br>
> + _In_ PHYSICAL_ADDRESS PhysicalAddress,<br>
> + _In_ ULONG Length,<br>
> + _Outptr_result_bytebuffer_(<wbr>Length) VOID** VirtualAddress)<br>
> +{<br>
> + PAGED_CODE();<br>
> +<br>
> + //<br>
> + // Check for parameters<br>
> + //<br>
> + if ((PhysicalAddress.QuadPart == (ULONGLONG)0) ||<br>
> + (Length == 0) ||<br>
> + (VirtualAddress == NULL))<br>
> + {<br>
> + DbgPrint(TRACE_LEVEL_ERROR, ("One of PhysicalAddress.QuadPart<br>
> (0x%I64x), Length (%lu), VirtualAddress (%p) is NULL or 0\n",<br>
> + PhysicalAddress.QuadPart, Length, VirtualAddress));<br>
> + return STATUS_INVALID_PARAMETER;<br>
> + }<br>
> +<br>
> + *VirtualAddress = MmMapIoSpaceEx(PhysicalAddress,
> + Length,
> + PAGE_WRITECOMBINE | PAGE_READWRITE);
> + if (*VirtualAddress == NULL)
> + {
> + // The underlying call to MmMapIoSpace failed. This may be because, MmWriteCombined
> + // isn't supported, so try again with MmNonCached
> +
> + *VirtualAddress = MmMapIoSpaceEx(PhysicalAddress,
> + Length,
> + (ULONG) (PAGE_NOCACHE | PAGE_READWRITE));
> + if (*VirtualAddress == NULL)
> + {
> + DbgPrint(TRACE_LEVEL_ERROR, ("MmMapIoSpace returned a NULL buffer when trying to allocate %lu bytes", Length));
> + return STATUS_NO_MEMORY;<br>
> + }<br>
> + }<br>
> +<br>
> + return STATUS_SUCCESS;<br>
> +}<br>
> +<br>
> +NTSTATUS<br>
> +UnmapFrameBuffer(<br>
> + _In_reads_bytes_(Length) VOID* VirtualAddress,<br>
> + _In_ ULONG Length)<br>
> +{<br>
> + PAGED_CODE();<br>
> +<br>
> +<br>
> + //<br>
> + // Check for parameters<br>
> + //<br>
> + if ((VirtualAddress == NULL) && (Length == 0))<br>
> + {<br>
> + // Allow this function to be called when there's no work to do, and treat as successful
> + return STATUS_SUCCESS;<br>
> + }<br>
> + else if ((VirtualAddress == NULL) || (Length == 0))<br>
> + {<br>
> + DbgPrint(TRACE_LEVEL_ERROR, ("Only one of Length (%lu),<br>
> VirtualAddress (%p) is NULL or 0",<br>
> + Length, VirtualAddress));<br>
> + return STATUS_INVALID_PARAMETER;<br>
> + }<br>
> +<br>
> + MmUnmapIoSpace(VirtualAddress,<br>
> + Length);<br>
> +<br>
> + return STATUS_SUCCESS;<br>
> +}<br>
> +<br>
><br>
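
On the switch from MmMapIoSpace(..., MmWriteCombined) to MmMapIoSpaceEx: if I remember right, MmMapIoSpaceEx only exists from Windows 10 on, so this ties the build to the Win10 WDK. The fallback logic itself reduces to:

    // Sketch of the strategy in the hunk above: prefer write-combined
    // for frame-buffer memory, fall back to non-cached if it is refused.
    PVOID va = MmMapIoSpaceEx(PhysicalAddress, Length,
                              PAGE_WRITECOMBINE | PAGE_READWRITE);
    if (va == NULL) {
        va = MmMapIoSpaceEx(PhysicalAddress, Length,
                            PAGE_NOCACHE | PAGE_READWRITE);
    }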
> VgaDevice::VgaDevice(_In_ QxlDod* pQxlDod) : HwDeviceInterface(pQxlDod)<br>
> {<br>
> + PAGED_CODE();<br>
> m_pQxlDod = pQxlDod;<br>
> m_ModeInfo = NULL;<br>
> m_ModeCount = 0;<br>
> @@ -2310,6 +2287,7 @@ VgaDevice::VgaDevice(_In_ QxlDod* pQxlDod) : HwDeviceInterface(pQxlDod)
><br>
> VgaDevice::~VgaDevice(void)<br>
> {<br>
> + PAGED_CODE();<br>
> HWClose();<br>
> delete [] reinterpret_cast<BYTE*>(m_ModeInfo);
> delete [] reinterpret_cast<BYTE*>(m_ModeNumbers);
> @@ -2551,6 +2529,7 @@ NTSTATUS VgaDevice::GetModeList(DXGK_DISPLAY_INFORMATION* pDispInfo)
><br>
> NTSTATUS VgaDevice::QueryCurrentMode(<wbr>PVIDEO_MODE RequestedMode)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
> UNREFERENCED_PARAMETER(<wbr>RequestedMode);<br>
> @@ -2560,6 +2539,8 @@ NTSTATUS VgaDevice::QueryCurrentMode(<wbr>PVIDEO_MODE<br>
> RequestedMode)<br>
><br>
> NTSTATUS VgaDevice::SetCurrentMode(<wbr>ULONG Mode)<br>
> {<br>
> + PAGED_CODE();<br>
> +<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s Mode = %x\n", __FUNCTION__,<br>
> Mode));<br>
> X86BIOS_REGISTERS regs = {0};<br>
> @@ -2576,6 +2557,8 @@ NTSTATUS VgaDevice::SetCurrentMode(<wbr>ULONG Mode)<br>
><br>
> NTSTATUS VgaDevice::GetCurrentMode(<wbr>ULONG* pMode)<br>
> {<br>
> + PAGED_CODE();<br>
> +<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s\n", __FUNCTION__));<br>
> X86BIOS_REGISTERS regs = {0};<br>
> @@ -2592,6 +2575,8 @@ NTSTATUS VgaDevice::GetCurrentMode(<wbr>ULONG* pMode)<br>
><br>
> NTSTATUS VgaDevice::HWInit(PCM_<wbr>RESOURCE_LIST pResList,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> +<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> UNREFERENCED_PARAMETER(<wbr>pResList);<br>
> UNREFERENCED_PARAMETER(<wbr>pDispInfo);<br>
> @@ -2601,13 +2586,17 @@ NTSTATUS VgaDevice::HWInit(PCM_<wbr>RESOURCE_LIST<br>
> pResList, DXGK_DISPLAY_INFORMATION*<br>
><br>
> NTSTATUS VgaDevice::HWClose(void)<br>
> {<br>
> + PAGED_CODE();<br>
> +<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> return STATUS_SUCCESS;<br>
> }<br>
><br>
> -NTSTATUS VgaDevice::SetPowerState(_In_ DEVICE_POWER_STATE DevicePowerState,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> +NTSTATUS VgaDevice::SetPowerState(<wbr>DEVICE_POWER_STATE DevicePowerState,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> +<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s\n", __FUNCTION__));<br>
><br>
> X86BIOS_REGISTERS regs = {0};<br>
> @@ -2630,7 +2619,6 @@ NTSTATUS VgaDevice::SetPowerState(_In_<br>
> DEVICE_POWER_STATE DevicePowerState, DXG<br>
> return STATUS_SUCCESS;<br>
> }<br>
><br>
> -<br>
> NTSTATUS<br>
> VgaDevice::<wbr>ExecutePresentDisplayOnly(<br>
> _In_ BYTE* DstAddr,<br>
> @@ -2903,18 +2891,21 @@ VOID VgaDevice::HWResetDevice(VOID)<br>
><br>
> NTSTATUS VgaDevice::SetPointerShape(_<wbr>In_ CONST DXGKARG_SETPOINTERSHAPE*<br>
> pSetPointerShape)<br>
> {<br>
> + PAGED_CODE();<br>
> UNREFERENCED_PARAMETER(<wbr>pSetPointerShape);<br>
> return STATUS_NOT_SUPPORTED;<br>
> }<br>
><br>
> NTSTATUS VgaDevice::SetPointerPosition(<wbr>_In_ CONST<br>
> DXGKARG_SETPOINTERPOSITION* pSetPointerPosition)<br>
> {<br>
> + PAGED_CODE();<br>
> UNREFERENCED_PARAMETER(<wbr>pSetPointerPosition);<br>
> return STATUS_SUCCESS;<br>
> }<br>
><br>
> NTSTATUS VgaDevice::Escape(_In_ CONST DXGKARG_ESCAPE* pEscap)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> return STATUS_NOT_IMPLEMENTED;<br>
> @@ -2922,6 +2913,7 @@ NTSTATUS VgaDevice::Escape(_In_ CONST DXGKARG_ESCAPE*<br>
> pEscap)<br>
><br>
> QxlDevice::QxlDevice(_In_ QxlDod* pQxlDod) : HwDeviceInterface(pQxlDod)<br>
> {<br>
> + PAGED_CODE();<br>
> m_pQxlDod = pQxlDod;<br>
> m_ModeInfo = NULL;<br>
> m_ModeCount = 0;<br>
> @@ -2935,6 +2927,7 @@ QxlDevice::QxlDevice(_In_ QxlDod* pQxlDod) :<br>
> HwDeviceInterface(pQxlDod)<br>
><br>
> QxlDevice::~QxlDevice(void)<br>
> {<br>
> + PAGED_CODE();<br>
> HWClose();<br>
> delete [] reinterpret_cast<BYTE*>(m_<wbr>ModeInfo);<br>
> delete [] reinterpret_cast<BYTE*>(m_<wbr>ModeNumbers);<br>
> @@ -3108,6 +3101,7 @@ NTSTATUS<br>
> QxlDevice::GetModeList(DXGK_<wbr>DISPLAY_INFORMATION* pDispInfo)<br>
><br>
> NTSTATUS QxlDevice::QueryCurrentMode(<wbr>PVIDEO_MODE RequestedMode)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
> UNREFERENCED_PARAMETER(<wbr>RequestedMode);<br>
> @@ -3116,6 +3110,7 @@ NTSTATUS QxlDevice::QueryCurrentMode(<wbr>PVIDEO_MODE<br>
> RequestedMode)<br>
><br>
> NTSTATUS QxlDevice::SetCurrentMode(<wbr>ULONG Mode)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s - %d: Mode = %d\n",<br>
> __FUNCTION__, m_Id, Mode));<br>
> for (ULONG idx = 0; idx < GetModeCount(); idx++)<br>
> {<br>
> @@ -3135,6 +3130,7 @@ NTSTATUS QxlDevice::SetCurrentMode(<wbr>ULONG Mode)<br>
><br>
> NTSTATUS QxlDevice::GetCurrentMode(<wbr>ULONG* pMode)<br>
> {<br>
> + PAGED_CODE();<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s\n", __FUNCTION__));<br>
> UNREFERENCED_PARAMETER(pMode);<br>
> @@ -3142,8 +3138,9 @@ NTSTATUS QxlDevice::GetCurrentMode(<wbr>ULONG* pMode)<br>
> return Status;<br>
> }<br>
><br>
> -NTSTATUS QxlDevice::SetPowerState(_In_ DEVICE_POWER_STATE DevicePowerState,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> +NTSTATUS QxlDevice::SetPowerState(<wbr>DEVICE_POWER_STATE DevicePowerState,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> switch (DevicePowerState)<br>
> {<br>
> @@ -3159,6 +3156,7 @@ NTSTATUS QxlDevice::SetPowerState(_In_<br>
> DEVICE_POWER_STATE DevicePowerState, DXGK<br>
><br>
> NTSTATUS QxlDevice::HWInit(PCM_<wbr>RESOURCE_LIST pResList,<br>
> DXGK_DISPLAY_INFORMATION* pDispInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> PDXGKRNL_INTERFACE pDxgkInterface = m_pQxlDod->GetDxgkInterface();<br>
> UINT pci_range = QXL_RAM_RANGE_INDEX;<br>
> @@ -3306,6 +3304,7 @@ NTSTATUS QxlDevice::HWInit(PCM_<wbr>RESOURCE_LIST pResList,<br>
> DXGK_DISPLAY_INFORMATION*<br>
><br>
> NTSTATUS QxlDevice::QxlInit(DXGK_<wbr>DISPLAY_INFORMATION* pDispInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> NTSTATUS Status = STATUS_SUCCESS;<br>
><br>
> if (!InitMemSlots()) {<br>
> @@ -3333,11 +3332,13 @@ NTSTATUS QxlDevice::QxlInit(DXGK_<wbr>DISPLAY_INFORMATION*<br>
> pDispInfo)<br>
><br>
> void QxlDevice::QxlClose()<br>
> {<br>
> + PAGED_CODE();<br>
> DestroyMemSlots();<br>
> }<br>
><br>
> void QxlDevice::UnmapMemory(void)<br>
> {<br>
> + PAGED_CODE();<br>
> PDXGKRNL_INTERFACE pDxgkInterface = m_pQxlDod->GetDxgkInterface();<br>
> if (m_IoMapped && m_IoBase)<br>
> {<br>
> @@ -3365,6 +3366,7 @@ void QxlDevice::UnmapMemory(void)<br>
><br>
> BOOL QxlDevice::InitMemSlots(void)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> m_NumMemSlots = m_RomHdr->slots_end;<br>
> m_SlotGenBits = m_RomHdr->slot_gen_bits;<br>
> @@ -3384,6 +3386,7 @@ BOOL QxlDevice::InitMemSlots(void)<br>
><br>
> void QxlDevice::DestroyMemSlots(<wbr>void)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> delete [] reinterpret_cast<BYTE*>(m_<wbr>MemSlots);<br>
> m_MemSlots = NULL;<br>
> @@ -3392,6 +3395,7 @@ void QxlDevice::DestroyMemSlots(<wbr>void)<br>
><br>
> void QxlDevice::<wbr>CreatePrimarySurface(PVIDEO_<wbr>MODE_INFORMATION pModeInfo)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLSurfaceCreate *primary_surface_create;<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("---> %s - %d: (%d x %d)\n",<br>
> __FUNCTION__, m_Id,<br>
> pModeInfo->VisScreenWidth, pModeInfo->VisScreenHeight));<br>
> @@ -3414,6 +3418,7 @@ void<br>
> QxlDevice::<wbr>CreatePrimarySurface(PVIDEO_<wbr>MODE_INFORMATION pModeInfo)<br>
><br>
> void QxlDevice::<wbr>DestroyPrimarySurface(void)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> // AsyncIo(QXL_IO_DESTROY_<wbr>PRIMARY_ASYNC, 0);<br>
> SyncIo(QXL_IO_DESTROY_PRIMARY, 0);<br>
> @@ -3422,6 +3427,7 @@ void QxlDevice::<wbr>DestroyPrimarySurface(void)<br>
><br>
> _inline QXLPHYSICAL QxlDevice::PA(PVOID virt, UINT8 slot_id)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));<br>
> MemSlot *pSlot = &m_MemSlots[slot_id];;<br>
> return pSlot->high_bits | ((UINT64)virt - pSlot->start_virt_addr);<br>
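
For anyone new to the memslot scheme: PA()/VA() translate between driver virtual addresses and the QXL "physical" addresses carried in commands. As I understand the QXL ABI, high_bits packs the slot id and generation above the offset bits, so the translation above is just:

    // Illustration of QxlDevice::PA, not new code:
    UINT64 offset = (UINT64) virt - pSlot->start_virt_addr;  // byte offset into the slot
    QXLPHYSICAL qxl_phys = pSlot->high_bits | offset;        // slot id/generation in the top bits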
> @@ -3429,6 +3435,7 @@ _inline QXLPHYSICAL QxlDevice::PA(PVOID virt, UINT8 slot_id)
><br>
> _inline UINT64 QxlDevice::VA(QXLPHYSICAL paddr, UINT8 slot_id)<br>
> {<br>
> + PAGED_CODE();<br>
> UINT64 virt;<br>
> MemSlot *pSlot = &m_MemSlots[slot_id];;<br>
><br>
> @@ -3441,6 +3448,7 @@ _inline UINT64 QxlDevice::VA(QXLPHYSICAL paddr, UINT8 slot_id)
><br>
> void QxlDevice::SetupHWSlot(UINT8 Idx, MemSlot *pSlot)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> m_RamHdr->mem_slot.mem_start = pSlot->start_phys_addr;<br>
> m_RamHdr->mem_slot.mem_end = pSlot->end_phys_addr;<br>
> @@ -3450,6 +3458,7 @@ void QxlDevice::SetupHWSlot(UINT8 Idx, MemSlot *pSlot)<br>
><br>
> BOOL QxlDevice::CreateEvents()<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> KeInitializeEvent(&m_<wbr>DisplayEvent,<br>
> SynchronizationEvent,<br>
> @@ -3471,6 +3480,7 @@ BOOL QxlDevice::CreateEvents()<br>
><br>
> BOOL QxlDevice::CreateRings()<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> m_CommandRing = &(m_RamHdr->cmd_ring);<br>
> m_CursorRing = &(m_RamHdr->cursor_ring);<br>
> @@ -3481,6 +3491,7 @@ BOOL QxlDevice::CreateRings()<br>
><br>
> void QxlDevice::AsyncIo(UCHAR Port, UCHAR Value)<br>
> {<br>
> + PAGED_CODE();<br>
> LARGE_INTEGER timeout;<br>
> BOOLEAN locked = FALSE;<br>
> locked = WaitForObject(&m_IoLock, NULL);<br>
> @@ -3492,6 +3503,7 @@ void QxlDevice::AsyncIo(UCHAR Port, UCHAR Value)<br>
><br>
> void QxlDevice::SyncIo(UCHAR Port, UCHAR Value)<br>
> {<br>
> + PAGED_CODE();<br>
> BOOLEAN locked = FALSE;<br>
> locked = WaitForObject(&m_IoLock, NULL);<br>
> WRITE_PORT_UCHAR(m_IoBase + Port, Value);<br>
> @@ -3500,6 +3512,7 @@ void QxlDevice::SyncIo(UCHAR Port, UCHAR Value)<br>
><br>
> UINT8 QxlDevice::SetupMemSlot(UINT8 Idx, UINT64 pastart, UINT64 paend,<br>
> UINT64 vastart, UINT64 vaend)<br>
> {<br>
> + PAGED_CODE();<br>
> UINT64 high_bits;<br>
> MemSlot *pSlot;<br>
> UINT8 slot_index;<br>
> @@ -3525,6 +3538,7 @@ UINT8 QxlDevice::SetupMemSlot(UINT8 Idx, UINT64<br>
> pastart, UINT64 paend, UINT64 va<br>
><br>
> BOOL QxlDevice::CreateMemSlots(<wbr>void)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s 3\n", __FUNCTION__));<br>
> UINT64 len = m_RomHdr->surface0_area_size + m_RomHdr->num_pages *<br>
> PAGE_SIZE;<br>
> m_MainMemSlot = SetupMemSlot(0,<br>
> @@ -3544,6 +3558,7 @@ BOOL QxlDevice::CreateMemSlots(<wbr>void)<br>
><br>
> void QxlDevice::<wbr>InitDeviceMemoryResources(<wbr>void)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s num_pages = %d\n", __FUNCTION__,<br>
> m_RomHdr->num_pages));<br>
> InitMspace(MSPACE_TYPE_DEVRAM, (m_RamStart +<br>
> m_RomHdr->surface0_area_size), (size_t)(m_RomHdr->num_pages *<br>
> PAGE_SIZE));<br>
> InitMspace(MSPACE_TYPE_VRAM, m_VRamStart, m_VRamSize);<br>
> @@ -3552,6 +3567,7 @@ void QxlDevice::<wbr>InitDeviceMemoryResources(<wbr>void)<br>
><br>
> void QxlDevice::InitMonitorConfig(<wbr>void)<br>
> {<br>
> + PAGED_CODE();<br>
> size_t config_size = sizeof(QXLMonitorsConfig) + sizeof(QXLHead);<br>
> m_monitor_config = (QXLMonitorsConfig*) AllocMem(MSPACE_TYPE_DEVRAM,<br>
> config_size, TRUE);<br>
> RtlZeroMemory(m_monitor_<wbr>config, config_size);<br>
> @@ -3560,9 +3576,9 @@ void QxlDevice::InitMonitorConfig(<wbr>void)<br>
> *m_monitor_config_pa = PA(m_monitor_config, m_MainMemSlot);<br>
> }<br>
><br>
> -<br>
> void QxlDevice::InitMspace(UINT32 mspace_type, UINT8 *start, size_t<br>
> capacity)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s type = %d, start = %p, capacity<br>
> = %d\n", __FUNCTION__, mspace_type, start, capacity));<br>
> m_MSInfo[mspace_type]._mspace = create_mspace_with_base(start, capacity,<br>
> 0, this);<br>
> m_MSInfo[mspace_type].mspace_<wbr>start = start;<br>
> @@ -3754,6 +3770,7 @@ QxlDevice::<wbr>ExecutePresentDisplayOnly(<br>
><br>
> void QxlDevice::WaitForReleaseRing(<wbr>void)<br>
> {<br>
> + PAGED_CODE();<br>
> int wait;<br>
><br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("--->%s\n", __FUNCTION__));<br>
> @@ -3786,6 +3803,7 @@ void QxlDevice::WaitForReleaseRing(<wbr>void)<br>
><br>
> void QxlDevice::FlushReleaseRing()<br>
> {<br>
> + PAGED_CODE();<br>
> UINT64 output;<br>
> int notify;<br>
> int num_to_release = 50;<br>
> @@ -3817,6 +3835,7 @@ void QxlDevice::FlushReleaseRing()<br>
><br>
> void QxlDevice::EmptyReleaseRing()<br>
> {<br>
> + PAGED_CODE();<br>
> BOOLEAN locked = FALSE;<br>
> locked = WaitForObject(&m_MemLock, NULL);<br>
> while (m_FreeOutputs || !SPICE_RING_IS_EMPTY(m_<wbr>ReleaseRing)) {<br>
> @@ -3827,6 +3846,7 @@ void QxlDevice::EmptyReleaseRing()<br>
><br>
> UINT64 QxlDevice::ReleaseOutput(<wbr>UINT64 output_id)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLOutput *output = (QXLOutput *)output_id;<br>
> Resource **now;<br>
> Resource **end;<br>
> @@ -3846,6 +3866,7 @@ UINT64 QxlDevice::ReleaseOutput(<wbr>UINT64 output_id)<br>
><br>
> void *QxlDevice::AllocMem(UINT32 mspace_type, size_t size, BOOL force)<br>
> {<br>
> + PAGED_CODE();<br>
> PVOID ptr;<br>
> BOOLEAN locked = FALSE;<br>
><br>
> @@ -3894,6 +3915,7 @@ void *QxlDevice::AllocMem(UINT32 mspace_type, size_t<br>
> size, BOOL force)<br>
><br>
> void QxlDevice::FreeMem(UINT32 mspace_type, void *ptr)<br>
> {<br>
> + PAGED_CODE();<br>
> ASSERT(m_MSInfo[mspace_type]._<wbr>mspace);<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> @@ -3914,6 +3936,7 @@ void QxlDevice::FreeMem(UINT32 mspace_type, void *ptr)<br>
><br>
> QXLDrawable *QxlDevice::GetDrawable()<br>
> {<br>
> + PAGED_CODE();<br>
> QXLOutput *output;<br>
><br>
> output = (QXLOutput *)AllocMem(MSPACE_TYPE_DEVRAM, sizeof(QXLOutput) +<br>
> sizeof(QXLDrawable), TRUE);<br>
> @@ -3926,6 +3949,7 @@ QXLDrawable *QxlDevice::GetDrawable()<br>
><br>
> QXLCursorCmd *QxlDevice::CursorCmd()<br>
> {<br>
> + PAGED_CODE();<br>
> QXLCursorCmd *cursor_cmd;<br>
> QXLOutput *output;<br>
><br>
> @@ -3941,6 +3965,7 @@ QXLCursorCmd *QxlDevice::CursorCmd()<br>
><br>
> BOOL QxlDevice::SetClip(const RECT *clip, QXLDrawable *drawable)<br>
> {<br>
> + PAGED_CODE();<br>
> Resource *rects_res;<br>
><br>
> if (clip == NULL) {<br>
> @@ -3970,12 +3995,14 @@ BOOL QxlDevice::SetClip(const RECT *clip, QXLDrawable<br>
> *drawable)<br>
><br>
> void QxlDevice::AddRes(QXLOutput *output, Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> res->refs++;<br>
> output->resources[output->num_<wbr>res++] = res;<br>
> }<br>
><br>
> void QxlDevice::DrawableAddRes(<wbr>QXLDrawable *drawable, Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLOutput *output;<br>
><br>
> output = (QXLOutput *)((UINT8 *)drawable - sizeof(QXLOutput));<br>
> @@ -3984,6 +4011,7 @@ void QxlDevice::DrawableAddRes(<wbr>QXLDrawable *drawable,<br>
> Resource *res)<br>
><br>
> void QxlDevice::CursorCmdAddRes(<wbr>QXLCursorCmd *cmd, Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLOutput *output;<br>
><br>
> output = (QXLOutput *)((UINT8 *)cmd - sizeof(QXLOutput));<br>
> @@ -3992,6 +4020,7 @@ void QxlDevice::CursorCmdAddRes(<wbr>QXLCursorCmd *cmd,<br>
> Resource *res)<br>
><br>
> void QxlDevice::FreeClipRectsEx(<wbr>Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));<br>
> QxlDevice* pqxl = (QxlDevice*)res->ptr;<br>
> pqxl->FreeClipRects(res);<br>
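
The *Ex variants here all follow the usual static-trampoline pattern: a plain-function entry point recovers the object pointer stashed in the resource and forwards to the member function. Schematically (this mirrors the quoted code, shown only to name the pattern):

    // Pattern behind FreeClipRectsEx / FreeBitmapImageEx / FreeCursorEx:
    void QxlDevice::FreeClipRectsEx(Resource *res)  // static, usable as a C-style callback
    {
        QxlDevice* pqxl = (QxlDevice*) res->ptr;    // 'this' was saved into res->ptr at alloc time
        pqxl->FreeClipRects(res);                   // dispatch to the real worker
    }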
> @@ -3999,6 +4028,7 @@ void QxlDevice::FreeClipRectsEx(<wbr>Resource *res)<br>
><br>
> void QxlDevice::FreeClipRects(<wbr>Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLPHYSICAL chunk_phys;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> @@ -4014,6 +4044,7 @@ void QxlDevice::FreeClipRects(<wbr>Resource *res)<br>
><br>
> void QxlDevice::FreeBitmapImageEx(<wbr>Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));<br>
> QxlDevice* pqxl = (QxlDevice*)res->ptr;<br>
> pqxl->FreeBitmapImage(res);<br>
> @@ -4021,6 +4052,7 @@ void QxlDevice::FreeBitmapImageEx(<wbr>Resource *res)<br>
><br>
> void QxlDevice::FreeBitmapImage(<wbr>Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> InternalImage *internal;<br>
> QXLPHYSICAL chunk_phys;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> @@ -4040,6 +4072,7 @@ void QxlDevice::FreeBitmapImage(<wbr>Resource *res)<br>
><br>
> void QxlDevice::FreeCursorEx(<wbr>Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));<br>
> QxlDevice* pqxl = (QxlDevice*)res->ptr;<br>
> pqxl->FreeCursor(res);<br>
> @@ -4047,6 +4080,7 @@ void QxlDevice::FreeCursorEx(<wbr>Resource *res)<br>
><br>
> void QxlDevice::FreeCursor(Resource *res)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLPHYSICAL chunk_phys;<br>
><br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> @@ -4063,6 +4097,7 @@ void QxlDevice::FreeCursor(Resource *res)<br>
><br>
> QXLDrawable *QxlDevice::Drawable(UINT8 type, CONST RECT *area, CONST RECT<br>
> *clip, UINT32 surface_id)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLDrawable *drawable;<br>
><br>
> ASSERT(area);<br>
> @@ -4088,7 +4123,9 @@ QXLDrawable *QxlDevice::Drawable(UINT8 type, CONST RECT<br>
> *area, CONST RECT *clip,<br>
> return drawable;<br>
> }<br>
><br>
> -void QxlDevice::PushDrawable(<wbr>QXLDrawable *drawable) {<br>
> +void QxlDevice::PushDrawable(<wbr>QXLDrawable *drawable)<br>
> +{<br>
> + PAGED_CODE();<br>
> QXLCommand *cmd;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> @@ -4105,6 +4142,7 @@ void QxlDevice::PushDrawable(<wbr>QXLDrawable *drawable) {<br>
><br>
> void QxlDevice::PushCursorCmd(<wbr>QXLCursorCmd *cursor_cmd)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLCommand *cmd;<br>
><br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> @@ -4126,6 +4164,7 @@ VOID QxlDevice::SetImageId(<wbr>InternalImage *internal,<br>
> LONG height,<br>
> UINT8 format, UINT32 key)<br>
> {<br>
> + PAGED_CODE();<br>
> UINT32 image_info = IMAGE_HASH_INIT_VAL(width, height, format);<br>
><br>
> if (cache_me) {<br>
> @@ -4145,6 +4184,7 @@ VOID QxlDevice::BltBits (<br>
> UINT NumRects,<br>
> _In_reads_(NumRects) CONST RECT *pRects)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLDrawable *drawable;<br>
> Resource *image_res;<br>
> InternalImage *internal;<br>
> @@ -4160,6 +4200,7 @@ VOID QxlDevice::BltBits (<br>
><br>
> if (!(drawable = Drawable(QXL_DRAW_COPY, pRects, NULL, 0))) {<br>
> DbgPrint(TRACE_LEVEL_ERROR, ("Cannot get Drawable.\n"));<br>
> + return;<br>
> }<br>
><br>
> CONST RECT* pRect = &pRects[0];<br>
> @@ -4241,6 +4282,7 @@ VOID QxlDevice::PutBytesAlign(<wbr>QXLDataChunk **chunk_ptr,<br>
> UINT8 **now_ptr,<br>
> UINT8 **end_ptr, UINT8 *src, int size,<br>
> size_t alloc_size, uint32_t alignment)<br>
> {<br>
> + PAGED_CODE();<br>
> QXLDataChunk *chunk = *chunk_ptr;<br>
> UINT8 *now = *now_ptr;<br>
> UINT8 *end = *end_ptr;<br>
> @@ -4305,6 +4347,7 @@ VOID QxlDevice::BlackOutScreen(<wbr>CURRENT_BDD_MODE*<br>
> pCurrentBddMod)<br>
><br>
> NTSTATUS QxlDevice::HWClose(void)<br>
> {<br>
> + PAGED_CODE();<br>
> QxlClose();<br>
> UnmapMemory();<br>
> return STATUS_SUCCESS;<br>
> @@ -4312,6 +4355,7 @@ NTSTATUS QxlDevice::HWClose(void)<br>
><br>
> NTSTATUS QxlDevice::SetPointerShape(_<wbr>In_ CONST DXGKARG_SETPOINTERSHAPE*<br>
> pSetPointerShape)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s flag = %x\n", __FUNCTION__,<br>
> pSetPointerShape->Flags.Value)<wbr>);<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("<--> %s flag = %d pitch = %d, pixels<br>
> = %p, id = %d, w = %d, h = %d, x = %d, y = %d\n", __FUNCTION__,<br>
> pSetPointerShape->Flags.Value,<br>
> @@ -4394,6 +4438,7 @@ NTSTATUS QxlDevice::SetPointerShape(_<wbr>In_ CONST<br>
> DXGKARG_SETPOINTERSHAPE* pSetPoi<br>
><br>
> NTSTATUS QxlDevice::SetPointerPosition(<wbr>_In_ CONST<br>
> DXGKARG_SETPOINTERPOSITION* pSetPointerPosition)<br>
> {<br>
> + PAGED_CODE();<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> DbgPrint(TRACE_LEVEL_<wbr>INFORMATION, ("<--> %s flag = %d id = %d, x = %d, y<br>
> = %d\n", __FUNCTION__,<br>
> pSetPointerPosition->Flags.<wbr>Value,<br>
> @@ -4417,6 +4462,7 @@ NTSTATUS QxlDevice::SetPointerPosition(<wbr>_In_ CONST<br>
> DXGKARG_SETPOINTERPOSITION* pS<br>
><br>
> NTSTATUS QxlDevice::UpdateChildStatus(<wbr>BOOLEAN connect)<br>
> {<br>
> + PAGED_CODE();<br>
> NTSTATUS Status(STATUS_SUCCESS);<br>
> DXGK_CHILD_STATUS ChildStatus;<br>
> PDXGKRNL_INTERFACE pDXGKInterface(m_pQxlDod-><wbr>GetDxgkInterface());<br>
> @@ -4430,6 +4476,7 @@ NTSTATUS QxlDevice::UpdateChildStatus(<wbr>BOOLEAN connect)<br>
><br>
> NTSTATUS QxlDevice::SetCustomDisplay(<wbr>QXLEscapeSetCustomDisplay*<br>
> custom_display)<br>
> {<br>
> + PAGED_CODE();<br>
> NTSTATUS status;<br>
> UINT xres = custom_display->xres;<br>
> UINT yres = custom_display->yres;<br>
> @@ -4454,6 +4501,7 @@ NTSTATUS<br>
> QxlDevice::SetCustomDisplay(<wbr>QXLEscapeSetCustomDisplay* custom_display)<br>
><br>
> void QxlDevice::SetMonitorConfig(<wbr>QXLHead * monitor_config)<br>
> {<br>
> + PAGED_CODE();<br>
> m_monitor_config->count = 1;<br>
> m_monitor_config->max_allowed = 1;<br>
><br>
> @@ -4469,6 +4517,7 @@ void QxlDevice::SetMonitorConfig(<wbr>QXLHead *<br>
> monitor_config)<br>
><br>
> NTSTATUS QxlDevice::Escape(_In_ CONST DXGKARG_ESCAPE* pEscape)<br>
> {<br>
> + PAGED_CODE();<br>
> size_t data_size(sizeof(int));<br>
> QXL_ESCAPE* pQXLEscape((QXL_ESCAPE*) pEscape->pPrivateDriverData);<br>
> NTSTATUS status(STATUS_SUCCESS);<br>
> @@ -4510,6 +4559,7 @@ NTSTATUS QxlDevice::Escape(_In_ CONST DXGKARG_ESCAPE*<br>
> pEscape)<br>
><br>
> VOID QxlDevice::WaitForCmdRing()<br>
> {<br>
> + PAGED_CODE();<br>
> int wait;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> @@ -4526,6 +4576,7 @@ VOID QxlDevice::WaitForCmdRing()<br>
><br>
> VOID QxlDevice::PushCmd()<br>
> {<br>
> + PAGED_CODE();<br>
> int notify;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> SPICE_RING_PUSH(m_CommandRing, notify);<br>
> @@ -4537,6 +4588,7 @@ VOID QxlDevice::PushCmd()<br>
><br>
> VOID QxlDevice::WaitForCursorRing(<wbr>VOID)<br>
> {<br>
> + PAGED_CODE();<br>
> int wait;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
><br>
> @@ -4559,6 +4611,7 @@ VOID QxlDevice::WaitForCursorRing(<wbr>VOID)<br>
><br>
> VOID QxlDevice::PushCursor(VOID)<br>
> {<br>
> + PAGED_CODE();<br>
> int notify;<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> SPICE_RING_PUSH(m_CursorRing, notify);<br>
> @@ -4665,7 +4718,7 @@ BOOLEAN HwDeviceInterface::<wbr>InterruptRoutine(_In_<br>
> PDXGKRNL_INTERFACE pDxgkInterfa<br>
> }<br>
> }<br>
><br>
> -VOID HwDeviceInterface::DpcRoutine(<wbr>PDXGKRNL_INTERFACE pDxgkInterface)<br>
> +VOID HwDeviceInterface::DpcRoutine(<wbr>_In_ PDXGKRNL_INTERFACE pDxgkInterface)<br>
> {<br>
> QxlDevice * qxl_device;<br>
> VgaDevice * vga_device;<br>
> @@ -4707,23 +4760,12 @@ VOID HwDeviceInterface::<wbr>ResetDevice(void)<br>
> }<br>
> }<br>
><br>
> -#pragma code_seg(pop) //end non-paged code<br>
><br>
> -VOID QxlDevice::UpdateArea(CONST RECT* area, UINT32 surface_id)<br>
> -{<br>
> - DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> - CopyRect(&m_RamHdr->update_<wbr>area, area);<br>
> - m_RamHdr->update_surface = surface_id;<br>
> -// AsyncIo(QXL_IO_UPDATE_AREA_<wbr>ASYNC, 0);<br>
> - SyncIo(QXL_IO_UPDATE_AREA, 0);<br>
> - DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> -}<br>
> -<br>
> -BOOLEAN QxlDevice:: DpcCallbackEx(PVOID ptr)<br>
> +BOOLEAN QxlDevice::DpcCallbackEx(PVOID ptr)<br>
> {<br>
> DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s\n", __FUNCTION__));<br>
> PDPC_CB_CONTEXT ctx = (PDPC_CB_CONTEXT) ptr;<br>
> - QxlDevice* pqxl = (QxlDevice*)ctx->ptr;<br>
> + QxlDevice* pqxl = (QxlDevice*) ctx->ptr;<br>
> pqxl->DpcCallback(ctx);<br>
> return TRUE;<br>
> }<br>
> @@ -4734,9 +4776,22 @@ VOID QxlDevice::DpcCallback(PDPC_<wbr>CB_CONTEXT ctx)<br>
> m_Pending = 0;<br>
><br>
> }<br>
> +#pragma code_seg(pop) //end non-paged code<br>
> +<br>
> +VOID QxlDevice::UpdateArea(CONST RECT* area, UINT32 surface_id)<br>
> +{<br>
> + PAGED_CODE();<br>
> + DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));<br>
> + CopyRect(&m_RamHdr->update_area, area);
> + m_RamHdr->update_surface = surface_id;
> +// AsyncIo(QXL_IO_UPDATE_AREA_ASYNC, 0);
> + SyncIo(QXL_IO_UPDATE_AREA, 0);<br>
> + DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));<br>
> +}<br>
><br>
> UINT SpiceFromPixelFormat(<wbr>D3DDDIFORMAT Format)<br>
> {<br>
> + PAGED_CODE();<br>
> switch (Format)<br>
> {<br>
> case D3DDDIFMT_UNKNOWN:<br>
> @@ -4748,3 +4803,4 @@ UINT SpiceFromPixelFormat(<wbr>D3DDDIFORMAT Format)<br>
> default: QXL_LOG_ASSERTION1("Unknown D3DDDIFORMAT 0x%I64x", Format);<br>
> return 0;<br>
> }<br>
> }<br>
> +#pragma code_seg(pop)<br>
> \ No newline at end of file<br>
> diff --git a/qxldod/mspace.c b/qxldod/mspace.c<br>
> deleted file mode 100755<br>
> index d0ba123..0000000<br>
> --- a/qxldod/mspace.c<br>
> +++ /dev/null<br>
> @@ -1,2437 +0,0 @@<br>
> -// based on dlmalloc from Doug Lea<br>
> -<br>
> -<br>
> -// quote from the Doug Lea original file<br>
> - /*<br>
> - This is a version (aka dlmalloc) of malloc/free/realloc written by<br>
> - Doug Lea and released to the public domain, as explained at<br>
> - <a href="http://creativecommons.org/licenses/publicdomain" rel="noreferrer" target="_blank">http://creativecommons.org/<wbr>licenses/publicdomain</a>. Send questions,<br>
> - comments, complaints, performance data, etc to <a href="mailto:dl@cs.oswego.edu">dl@cs.oswego.edu</a><br>
> -<br>
> - * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)<br>
> -<br>
> - Note: There may be an updated version of this malloc obtainable at<br>
> - <a href="ftp://gee.cs.oswego.edu/pub/misc/malloc.c" rel="noreferrer" target="_blank">ftp://gee.cs.oswego.edu/pub/<wbr>misc/malloc.c</a><br>
> - Check before installing!<br>
> - */<br>
> -<br>
> -<br>
> -#include <ntddk.h><br>
> -<br>
> -#include "mspace.h"<br>
> -<br>
> -#pragma warning( disable : 4146 ) /* no "unsigned" warnings */<br>
> -<br>
> -#define MALLOC_ALIGNMENT ((size_t)8U)<br>
> -#define USE_LOCKS 0<br>
> -#define malloc_getpagesize ((size_t)4096U)<br>
> -#define DEFAULT_GRANULARITY malloc_getpagesize<br>
> -#define MAX_SIZE_T (~(size_t)0)<br>
> -#define MALLOC_FAILURE_ACTION<br>
> -#define MALLINFO_FIELD_TYPE size_t<br>
> -#define FOOTERS 0<br>
> -#define INSECURE 0<br>
> -#define PROCEED_ON_ERROR 0<br>
> -#define DEBUG 0<br>
> -#define ABORT_ON_ASSERT_FAILURE 1<br>
> -#define ABORT(user_data) abort_func(user_data)<br>
> -#define USE_BUILTIN_FFS 0<br>
> -#define USE_DEV_RANDOM 0<br>
> -#define PRINT(params) print_func params<br>
> -<br>
> -<br>
> -#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)<br>
> -#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)<br>
> -<br>
> -<br>
> -#define M_GRANULARITY (-1)<br>
> -<br>
> -void default_abort_func(void *user_data)<br>
> -{<br>
> - for (;;);<br>
> -}<br>
> -<br>
> -void default_print_func(void *user_data, char *format, ...)<br>
> -{<br>
> -}<br>
> -<br>
> -static mspace_abort_t abort_func = default_abort_func;<br>
> -static mspace_print_t print_func = default_print_func;<br>
> -<br>
> -void mspace_set_abort_func(mspace_abort_t f)
> -{<br>
> - abort_func = f;<br>
> -}<br>
> -<br>
> -void mspace_set_print_func(mspace_print_t f)
> -{<br>
> - print_func = f;<br>
> -}<br>
> -<br>
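
Usage note on these hooks (the setter signatures are as declared just above; the callback body is a made-up example):

    static void my_mspace_abort(void *user_data)   /* hypothetical callback */
    {
        (void) user_data;
        KeBugCheckEx(0xE0000001, 0, 0, 0, 0);      /* fail loudly instead of
                                                      spinning like the default */
    }

    mspace_set_abort_func(my_mspace_abort);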
> -/* ------------------------ Mallinfo declarations ------------------------<br>
> */<br>
> -<br>
> -#if !NO_MALLINFO<br>
> -/*<br>
> - This version of malloc supports the standard SVID/XPG mallinfo<br>
> - routine that returns a struct containing usage properties and<br>
> - statistics. It should work on any system that has a<br>
> - /usr/include/malloc.h defining struct mallinfo. The main<br>
> - declaration needed is the mallinfo struct that is returned (by-copy)<br>
> - by mallinfo(). The malloinfo struct contains a bunch of fields that<br>
> - are not even meaningful in this version of malloc. These fields are<br>
> - are instead filled by mallinfo() with other numbers that might be of<br>
> - interest.<br>
> -<br>
> - HAVE_USR_INCLUDE_MALLOC_H should be set if you have a<br>
> - /usr/include/malloc.h file that includes a declaration of struct<br>
> - mallinfo. If so, it is included; else a compliant version is<br>
> - declared below. These must be precisely the same for mallinfo() to<br>
> - work. The original SVID version of this struct, defined on most<br>
> - systems with mallinfo, declares all fields as ints. But some others<br>
> - define as unsigned long. If your system defines the fields using a<br>
> - type of different width than listed here, you MUST #include your<br>
> - system version and #define HAVE_USR_INCLUDE_MALLOC_H.<br>
> -*/<br>
> -<br>
> -/* #define HAVE_USR_INCLUDE_MALLOC_H */<br>
> -<br>
> -<br>
> -struct mallinfo {<br>
> - MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system<br>
> */<br>
> - MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */<br>
> - MALLINFO_FIELD_TYPE smblks; /* always 0 */<br>
> - MALLINFO_FIELD_TYPE hblks; /* always 0 */<br>
> - MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */<br>
> - MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */<br>
> - MALLINFO_FIELD_TYPE fsmblks; /* always 0 */<br>
> - MALLINFO_FIELD_TYPE uordblks; /* total allocated space */<br>
> - MALLINFO_FIELD_TYPE fordblks; /* total free space */<br>
> - MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */<br>
> -};<br>
> -<br>
> -#endif /* NO_MALLINFO */<br>
> -<br>
> -<br>
> -<br>
> -#ifdef DEBUG<br>
> -#if ABORT_ON_ASSERT_FAILURE<br>
> -#define assert(user_data, x) if(!(x)) ABORT(user_data)<br>
> -#else /* ABORT_ON_ASSERT_FAILURE */<br>
> -#include <assert.h><br>
> -#endif /* ABORT_ON_ASSERT_FAILURE */<br>
> -#else /* DEBUG */<br>
> -#define assert(user_data, x)<br>
> -#endif /* DEBUG */<br>
> -<br>
> -/* ------------------- size_t and alignment properties --------------------<br>
> */<br>
> -<br>
> -/* The byte and bit size of a size_t */<br>
> -#define SIZE_T_SIZE (sizeof(size_t))<br>
> -#define SIZE_T_BITSIZE (sizeof(size_t) << 3)<br>
> -<br>
> -/* Some constants coerced to size_t */<br>
> -/* Annoying but necessary to avoid errors on some plaftorms */<br>
> -#define SIZE_T_ZERO ((size_t)0)<br>
> -#define SIZE_T_ONE ((size_t)1)<br>
> -#define SIZE_T_TWO ((size_t)2)<br>
> -#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)<br>
> -#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)<br>
> -#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
> -#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)<br>
> -<br>
> -/* The bit mask value corresponding to MALLOC_ALIGNMENT */<br>
> -#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)<br>
> -<br>
> -/* True if address a has acceptable alignment */<br>
> -#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)<br>
> -<br>
> -/* the number of bytes to offset an address to align it */<br>
> -#define align_offset(A)\<br>
> - ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\<br>
> - ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
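
A quick worked example of align_offset, since the double masking is easy to misread (MALLOC_ALIGNMENT is 8 here, so CHUNK_ALIGN_MASK is 7):

    size_t a   = 0x1005;               /* misaligned by 5 */
    size_t off = (8 - (a & 7)) & 7;    /* (8 - 5) & 7 == 3 */
    /* a + off == 0x1008, which is 8-byte aligned; for an already aligned
       address the trailing "& 7" turns the would-be 8 into 0 */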
> -<br>
> -/* --------------------------- Lock preliminaries ------------------------<br>
> */<br>
> -<br>
> -#if USE_LOCKS<br>
> -<br>
> -/*<br>
> - When locks are defined, there are up to two global locks:<br>
> -<br>
> - * If HAVE_MORECORE, morecore_mutex protects sequences of calls to<br>
> - MORECORE. In many cases sys_alloc requires two calls, that should<br>
> - not be interleaved with calls by other threads. This does not<br>
> - protect against direct calls to MORECORE by other threads not<br>
> - using this lock, so there is still code to cope the best we can on<br>
> - interference.<br>
> -<br>
> - * magic_init_mutex ensures that mparams.magic and other<br>
> - unique mparams values are initialized only once.<br>
> -*/<br>
> -<br>
> -<br>
> -#define USE_LOCK_BIT (2U)<br>
> -#else /* USE_LOCKS */<br>
> -#define USE_LOCK_BIT (0U)<br>
> -#define INITIAL_LOCK(l)<br>
> -#endif /* USE_LOCKS */<br>
> -<br>
> -#if USE_LOCKS<br>
> -#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
> -#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
> -#else /* USE_LOCKS */<br>
> -#define ACQUIRE_MAGIC_INIT_LOCK()<br>
> -#define RELEASE_MAGIC_INIT_LOCK()<br>
> -#endif /* USE_LOCKS */<br>
> -<br>
> -<br>
> -<br>
> -/* ----------------------- Chunk representations ------------------------<br>
> */<br>
> -<br>
> -/*<br>
> - (The following includes lightly edited explanations by Colin Plumb.)<br>
> -<br>
> - The malloc_chunk declaration below is misleading (but accurate and<br>
> - necessary). It declares a "view" into memory allowing access to<br>
> - necessary fields at known offsets from a given base.<br>
> -<br>
> - Chunks of memory are maintained using a `boundary tag' method as<br>
> - originally described by Knuth. (See the paper by Paul Wilson<br>
> - <a href="ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps" rel="noreferrer" target="_blank">ftp://ftp.cs.utexas.edu/pub/<wbr>garbage/allocsrv.ps</a> for a survey of such<br>
> - techniques.) Sizes of free chunks are stored both in the front of<br>
> - each chunk and at the end. This makes consolidating fragmented<br>
> - chunks into bigger chunks fast. The head fields also hold bits<br>
> - representing whether chunks are free or in use.<br>
> -<br>
> - Here are some pictures to make it clearer. They are "exploded" to<br>
> - show that the state of a chunk can be thought of as extending from<br>
> - the high 31 bits of the head field of its header through the<br>
> - prev_foot and PINUSE_BIT bit of the following chunk header.<br>
> -<br>
> - A chunk that's in use looks like:<br>
> -<br>
> - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | Size of previous chunk (if P = 1) |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+ |P|<br>
> - | Size of this chunk 1| +-+<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | |<br>
> - +- -+<br>
> - | |<br>
> - +- -+<br>
> - | :<br>
> - +- size - sizeof(size_t) available payload bytes -+<br>
> - : |<br>
> - chunk-> +- -+<br>
> - | |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+ |1|<br>
> - | Size of next chunk (may or may not be in use) | +-+<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> -<br>
> - And if it's free, it looks like this:<br>
> -<br>
> - chunk-> +- -+<br>
> - | User payload (must be in use, or we would have merged!) |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+ |P|<br>
> - | Size of this chunk 0| +-+<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | Next pointer |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | Prev pointer |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | :<br>
> - +- size - sizeof(struct chunk) unused bytes -+<br>
> - : |<br>
> - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | Size of this chunk |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+ |0|<br>
> - | Size of next chunk (must be in use, or we would have merged)| +-+<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-<wbr>+-+-+<br>
> - | :<br>
> - +- User payload -+<br>
> - : |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - |0|<br>
> - +-+<br>
> - Note that since we always merge adjacent free chunks, the chunks<br>
> - adjacent to a free chunk must be in use.<br>
> -<br>
> - Given a pointer to a chunk (which can be derived trivially from the<br>
> - payload pointer) we can, in O(1) time, find out whether the adjacent<br>
> - chunks are free, and if so, unlink them from the lists that they<br>
> - are on and merge them with the current chunk.<br>
> -<br>
> - Chunks always begin on even word boundaries, so the mem portion<br>
> - (which is returned to the user) is also on an even word boundary, and<br>
> - thus at least double-word aligned.<br>
> -<br>
> - The P (PINUSE_BIT) bit, stored in the unused low-order bit of the<br>
> - chunk size (which is always a multiple of two words), is an in-use<br>
> - bit for the *previous* chunk. If that bit is *clear*, then the<br>
> - word before the current chunk size contains the previous chunk<br>
> - size, and can be used to find the front of the previous chunk.<br>
> - The very first chunk allocated always has this bit set, preventing<br>
> - access to non-existent (or non-owned) memory. If pinuse is set for<br>
> - any given chunk, then you CANNOT determine the size of the<br>
> - previous chunk, and might even get a memory addressing fault when<br>
> - trying to do so.<br>
> -<br>
> - The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of<br>
> - the chunk size redundantly records whether the current chunk is<br>
> - inuse. This redundancy enables usage checks within free and realloc,<br>
> - and reduces indirection when freeing and consolidating chunks.<br>
> -<br>
> - Each freshly allocated chunk must have both cinuse and pinuse set.<br>
> - That is, each allocated chunk borders either a previously allocated<br>
> - and still in-use chunk, or the base of its memory arena. This is<br>
> - ensured by making all allocations from the `lowest' part of any<br>
> - found chunk. Further, no free chunk physically borders another one,<br>
> - so each free chunk is known to be preceded and followed by either<br>
> - inuse chunks or the ends of memory.<br>
> -<br>
> - Note that the `foot' of the current chunk is actually represented<br>
> - as the prev_foot of the NEXT chunk. This makes it easier to<br>
> - deal with alignments etc but can be very confusing when trying<br>
> - to extend or adapt this code.<br>
> -<br>
> - The exceptions to all this are<br>
> -<br>
> - 1. The special chunk `top' is the top-most available chunk (i.e.,<br>
> - the one bordering the end of available memory). It is treated<br>
> - specially. Top is never included in any bin, is used only if<br>
> - no other chunk is available, and is released back to the<br>
> - system if it is very large (see M_TRIM_THRESHOLD). In effect,<br>
> - the top chunk is treated as larger (and thus less well<br>
> - fitting) than any other available chunk. The top chunk<br>
> - doesn't update its trailing size field since there is no next<br>
> - contiguous chunk that would have to index off it. However,<br>
> - space is still allocated for it (TOP_FOOT_SIZE) to enable<br>
> - separation or merging when space is extended.<br>
> -<br>
> - 2. Chunks allocated via mmap, which have the lowest-order bit<br>
> - (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set<br>
> - PINUSE_BIT in their head fields. Because they are allocated<br>
> - one-by-one, each must carry its own prev_foot field, which is<br>
> - also used to hold the offset this chunk has within its mmapped<br>
> - region, which is needed to preserve alignment. Each mmapped<br>
> - chunk is trailed by the first two fields of a fake next-chunk<br>
> - for sake of usage checks.<br>
> -<br>
> -*/<br>
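<br>
Aside, for anyone reviewing the move who is less familiar with dlmalloc: the boundary-tag navigation described above boils down to two pointer hops. Below is a minimal sketch; the ex_* names are hypothetical stand-ins for the PINUSE_BIT/CINUSE_BIT, next_chunk and prev_chunk macros that appear later in this file, not code from the patch.<br>
<br>
#include <stddef.h><br>
<br>
#define EX_PINUSE_BIT ((size_t)1) /* previous chunk is in use */<br>
#define EX_CINUSE_BIT ((size_t)2) /* this chunk is in use */<br>
#define EX_INUSE_BITS (EX_PINUSE_BIT | EX_CINUSE_BIT)<br>
<br>
struct ex_chunk { size_t prev_foot; size_t head; };<br>
<br>
/* Next physical chunk: the size is the head word minus its flag bits. */<br>
static struct ex_chunk *ex_next(struct ex_chunk *p) {<br>
    return (struct ex_chunk *)((char *)p + (p->head & ~EX_INUSE_BITS));<br>
}<br>
<br>
/* The previous chunk is reachable only when it is free: only then does our<br>
   prev_foot (its `foot') hold its size. */<br>
static struct ex_chunk *ex_prev_if_free(struct ex_chunk *p) {<br>
    if (p->head & EX_PINUSE_BIT)<br>
        return NULL; /* pinuse set: the previous size is simply not recorded */<br>
    return (struct ex_chunk *)((char *)p - p->prev_foot);<br>
}<br>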
> -<br>
> -struct malloc_chunk {<br>
> - size_t prev_foot; /* Size of previous chunk (if free). */<br>
> - size_t head; /* Size and inuse bits. */<br>
> - struct malloc_chunk* fd; /* double links -- used only if free. */<br>
> - struct malloc_chunk* bk;<br>
> -};<br>
> -<br>
> -typedef struct malloc_chunk mchunk;<br>
> -typedef struct malloc_chunk* mchunkptr;<br>
> -typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */<br>
> -typedef unsigned int bindex_t; /* Described below */<br>
> -typedef unsigned int binmap_t; /* Described below */<br>
> -typedef unsigned int flag_t; /* The type of various bit flag sets<br>
> */<br>
> -<br>
> -<br>
> -/* ------------------- Chunks sizes and alignments -----------------------<br>
> */<br>
> -<br>
> -#define MCHUNK_SIZE (sizeof(mchunk))<br>
> -<br>
> -#if FOOTERS<br>
> -#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)<br>
> -#else /* FOOTERS */<br>
> -#define CHUNK_OVERHEAD (SIZE_T_SIZE)<br>
> -#endif /* FOOTERS */<br>
> -<br>
> -/* The smallest size we can malloc is an aligned minimal chunk */<br>
> -#define MIN_CHUNK_SIZE\<br>
> - ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)<br>
> -<br>
> -/* conversion from malloc headers to user pointers, and back */<br>
> -#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))<br>
> -#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))<br>
> -/* chunk associated with aligned address A */<br>
> -#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))<br>
> -<br>
> -/* Bounds on request (not chunk) sizes. */<br>
> -#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)<br>
> -#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)<br>
> -<br>
> -/* pad request bytes into a usable size */<br>
> -#define pad_request(req) \<br>
> - (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)<br>
> -<br>
> -/* pad request, checking for minimum (but not maximum) */<br>
> -#define request2size(req) \<br>
> - (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))<br>
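<br>
Aside: a worked reading of the two macros above, assuming a 32-bit build without FOOTERS (SIZE_T_SIZE == 4, CHUNK_OVERHEAD == 4, CHUNK_ALIGN_MASK == 7, MIN_CHUNK_SIZE == 16). pad_request(1) = (1 + 4 + 7) & ~7 = 8, which is below MIN_CHUNK_SIZE, so request2size(1) = 16; every request up to 12 bytes maps to a 16-byte chunk, while request2size(13) = (13 + 4 + 7) & ~7 = 24.<br>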
> -<br>
> -/* ------------------ Operations on head and foot fields -----------------<br>
> */<br>
> -<br>
> -/*<br>
> - The head field of a chunk is or'ed with PINUSE_BIT when the previous<br>
> - adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in<br>
> - use. If the chunk was obtained with mmap, the prev_foot field has<br>
> - IS_MMAPPED_BIT set and additionally holds the offset of the base of<br>
> - the chunk from the base of the mmapped region.<br>
> -*/<br>
> -<br>
> -#define PINUSE_BIT (SIZE_T_ONE)<br>
> -#define CINUSE_BIT (SIZE_T_TWO)<br>
> -#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)<br>
> -<br>
> -/* Head value for fenceposts */<br>
> -#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)<br>
> -<br>
> -/* extraction of fields from head words */<br>
> -#define cinuse(p) ((p)->head & CINUSE_BIT)<br>
> -#define pinuse(p) ((p)->head & PINUSE_BIT)<br>
> -#define chunksize(p) ((p)->head & ~(INUSE_BITS))<br>
> -<br>
> -#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)<br>
> -#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)<br>
> -<br>
> -/* Treat space at ptr +/- offset as a chunk */<br>
> -#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))<br>
> -#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))<br>
> -<br>
> -/* Ptr to next or previous physical malloc_chunk. */<br>
> -#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head &<br>
> ~INUSE_BITS)))<br>
> -#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))<br>
> -<br>
> -/* extract next chunk's pinuse bit */<br>
> -#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)<br>
> -<br>
> -/* Get/set size at footer */<br>
> -#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)<br>
> -#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))<br>
> -<br>
> -/* Set size, pinuse bit, and foot */<br>
> -#define set_size_and_pinuse_of_free_chunk(p, s)\<br>
> - ((p)->head = (s|PINUSE_BIT), set_foot(p, s))<br>
> -<br>
> -/* Set size, pinuse bit, foot, and clear next pinuse */<br>
> -#define set_free_with_pinuse(p, s, n)\<br>
> - (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))<br>
> -<br>
> -/* Get the internal overhead associated with chunk p */<br>
> -#define overhead_for(p) CHUNK_OVERHEAD<br>
> -<br>
> -/* Return true if malloced space is not necessarily cleared */<br>
> -#define calloc_must_clear(p) (1)<br>
> -<br>
> -<br>
> -/* ---------------------- Overlaid data structures -----------------------<br>
> */<br>
> -<br>
> -/*<br>
> - When chunks are not in use, they are treated as nodes of either<br>
> - lists or trees.<br>
> -<br>
> - "Small" chunks are stored in circular doubly-linked lists, and look<br>
> - like this:<br>
> -<br>
> - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Size of previous chunk |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - `head:' | Size of chunk, in bytes |P|<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Forward pointer to next chunk in list |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Back pointer to previous chunk in list |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Unused space (may be 0 bytes long) .<br>
> - . .<br>
> - . |<br>
> -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - `foot:' | Size of chunk, in bytes |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> -<br>
> - Larger chunks are kept in a form of bitwise digital trees (aka<br>
> - tries) keyed on chunksizes. Because malloc_tree_chunks are only for<br>
> - free chunks greater than 256 bytes, their size doesn't impose any<br>
> - constraints on user chunk sizes. Each node looks like:<br>
> -<br>
> - chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Size of previous chunk |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - `head:' | Size of chunk, in bytes |P|<br>
> - mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Forward pointer to next chunk of same size |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Back pointer to previous chunk of same size |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Pointer to left child (child[0]) |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Pointer to right child (child[1]) |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Pointer to parent |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | bin index of this chunk |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - | Unused space .<br>
> - . |<br>
> -nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> - `foot:' | Size of chunk, in bytes |<br>
> - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> -<br>
> - Each tree holding treenodes is a tree of unique chunk sizes. Chunks<br>
> - of the same size are arranged in a circularly-linked list, with only<br>
> - the oldest chunk (the next to be used, in our FIFO ordering)<br>
> - actually in the tree. (Tree members are distinguished by a non-null<br>
> - parent pointer.) If a chunk with the same size as an existing node<br>
> - is inserted, it is linked off the existing node using pointers that<br>
> - work in the same way as fd/bk pointers of small chunks.<br>
> -<br>
> - Each tree contains a power of 2 sized range of chunk sizes (the<br>
> - smallest is 0x100 <= x < 0x180), which is divided in half at each<br>
> - tree level, with the chunks in the smaller half of the range (0x100<br>
> - <= x < 0x140 for the top node) in the left subtree and the larger<br>
> - half (0x140 <= x < 0x180) in the right subtree. This is, of course,<br>
> - done by inspecting individual bits.<br>
> -<br>
> - Using these rules, each node's left subtree contains all smaller<br>
> - sizes than its right subtree. However, the node at the root of each<br>
> - subtree has no particular ordering relationship to either. (The<br>
> - dividing line between the subtree sizes is based on trie relation.)<br>
> - If we remove the last chunk of a given size from the interior of the<br>
> - tree, we need to replace it with a leaf node. The tree ordering<br>
> - rules permit a node to be replaced by any leaf below it.<br>
> -<br>
> - The smallest chunk in a tree (a common operation in a best-fit<br>
> - allocator) can be found by walking a path to the leftmost leaf in<br>
> - the tree. Unlike a usual binary tree, where we follow left child<br>
> - pointers until we reach a null, here we follow the right child<br>
> - pointer any time the left one is null, until we reach a leaf with<br>
> - both child pointers null. The smallest chunk in the tree will be<br>
> - somewhere along that path.<br>
> -<br>
> - The worst case number of steps to add, find, or remove a node is<br>
> - bounded by the number of bits differentiating chunks within<br>
> - bins. Under current bin calculations, this ranges from 6 up to 21<br>
> - (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case<br>
> - is of course much better.<br>
> -*/<br>
> -<br>
> -struct malloc_tree_chunk {<br>
> - /* The first four fields must be compatible with malloc_chunk */<br>
> - size_t prev_foot;<br>
> - size_t head;<br>
> - struct malloc_tree_chunk* fd;<br>
> - struct malloc_tree_chunk* bk;<br>
> -<br>
> - struct malloc_tree_chunk* child[2];<br>
> - struct malloc_tree_chunk* parent;<br>
> - bindex_t index;<br>
> -};<br>
> -<br>
> -typedef struct malloc_tree_chunk tchunk;<br>
> -typedef struct malloc_tree_chunk* tchunkptr;<br>
> -typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */<br>
> -<br>
> -/* A little helper macro for trees */<br>
> -#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] :<br>
> (t)->child[1])<br>
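<br>
Aside: the smallest-chunk walk described a few lines up -- follow child[0], fall back to child[1] whenever the left side is null, and keep the best size seen along the path. A hedged sketch (ex_smallest_in_tree is hypothetical, not part of the patch):<br>
<br>
/* Assumes t != 0; tchunkptr, chunksize and leftmost_child are as above. */<br>
static tchunkptr ex_smallest_in_tree(tchunkptr t) {<br>
    tchunkptr best = t;<br>
    while ((t = leftmost_child(t)) != 0) {<br>
        if (chunksize(t) < chunksize(best))<br>
            best = t; /* the smallest chunk lies somewhere on this path */<br>
    }<br>
    return best;<br>
}<br>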
> -<br>
> -/* ----------------------------- Segments --------------------------------<br>
> */<br>
> -<br>
> -/*<br>
> - Each malloc space may include non-contiguous segments, held in a<br>
> - list headed by an embedded malloc_segment record representing the<br>
> - top-most space. Segments also include flags holding properties of<br>
> - the space. Large chunks that are directly allocated by mmap are not<br>
> - included in this list. They are instead independently created and<br>
> - destroyed without otherwise keeping track of them.<br>
> -<br>
> - Segment management mainly comes into play for spaces allocated by<br>
> - MMAP. Any call to MMAP might or might not return memory that is<br>
> - adjacent to an existing segment. MORECORE normally contiguously<br>
> - extends the current space, so this space is almost always adjacent,<br>
> - which is simpler and faster to deal with. (This is why MORECORE is<br>
> - used preferentially to MMAP when both are available -- see<br>
> - sys_alloc.) When allocating using MMAP, we don't use any of the<br>
> - hinting mechanisms (inconsistently) supported in various<br>
> - implementations of unix mmap, or distinguish reserving from<br>
> - committing memory. Instead, we just ask for space, and exploit<br>
> - contiguity when we get it. It is probably possible to do<br>
> - better than this on some systems, but no general scheme seems<br>
> - to be significantly better.<br>
> -<br>
> - Management entails a simpler variant of the consolidation scheme<br>
> - used for chunks to reduce fragmentation -- new adjacent memory is<br>
> - normally prepended or appended to an existing segment. However,<br>
> - there are limitations compared to chunk consolidation that mostly<br>
> - reflect the fact that segment processing is relatively infrequent<br>
> - (occurring only when getting memory from system) and that we<br>
> - don't expect to have huge numbers of segments:<br>
> -<br>
> - * Segments are not indexed, so traversal requires linear scans. (It<br>
> - would be possible to index these, but is not worth the extra<br>
> - overhead and complexity for most programs on most platforms.)<br>
> - * New segments are only appended to old ones when holding top-most<br>
> - memory; if they cannot be prepended to others, they are held in<br>
> - different segments.<br>
> -<br>
> - Except for the top-most segment of an mstate, each segment record<br>
> - is kept at the tail of its segment. Segments are added by pushing<br>
> - segment records onto the list headed by &mstate.seg for the<br>
> - containing mstate.<br>
> -<br>
> - Segment flags control allocation/merge/deallocation policies:<br>
> - * If EXTERN_BIT set, then we did not allocate this segment,<br>
> - and so should not try to deallocate or merge with others.<br>
> - (This currently holds only for the initial segment passed<br>
> - into create_mspace_with_base.)<br>
> - * If IS_MMAPPED_BIT set, the segment may be merged with<br>
> - other surrounding mmapped segments and trimmed/de-allocated<br>
> - using munmap.<br>
> - * If neither bit is set, then the segment was obtained using<br>
> - MORECORE so can be merged with surrounding MORECORE'd segments<br>
> - and deallocated/trimmed using MORECORE with negative arguments.<br>
> -*/<br>
> -<br>
> -struct malloc_segment {<br>
> - char* base; /* base address */<br>
> - size_t size; /* allocated size */<br>
> - struct malloc_segment* next; /* ptr to next segment */<br>
> -};<br>
> -<br>
> -typedef struct malloc_segment msegment;<br>
> -typedef struct malloc_segment* msegmentptr;<br>
> -<br>
> -/* ---------------------------- malloc_state -----------------------------<br>
> */<br>
> -<br>
> -/*<br>
> - A malloc_state holds all of the bookkeeping for a space.<br>
> - The main fields are:<br>
> -<br>
> - Top<br>
> - The topmost chunk of the currently active segment. Its size is<br>
> - cached in topsize. The actual size of topmost space is<br>
> - topsize+TOP_FOOT_SIZE, which includes space reserved for adding<br>
> - fenceposts and segment records if necessary when getting more<br>
> - space from the system. The size at which to autotrim top is<br>
> - cached from mparams in trim_check, except that it is disabled if<br>
> - an autotrim fails.<br>
> -<br>
> - Designated victim (dv)<br>
> - This is the preferred chunk for servicing small requests that<br>
> - don't have exact fits. It is normally the chunk split off most<br>
> - recently to service another small request. Its size is cached in<br>
> - dvsize. The link fields of this chunk are not maintained since it<br>
> - is not kept in a bin.<br>
> -<br>
> - SmallBins<br>
> - An array of bin headers for free chunks. These bins hold chunks<br>
> - with sizes less than MIN_LARGE_SIZE bytes. Each bin contains<br>
> - chunks of all the same size, spaced 8 bytes apart. To simplify<br>
> - use in double-linked lists, each bin header acts as a malloc_chunk<br>
> - pointing to the real first node, if it exists (else pointing to<br>
> - itself). This avoids special-casing for headers. But to avoid<br>
> - waste, we allocate only the fd/bk pointers of bins, and then use<br>
> - repositioning tricks to treat these as the fields of a chunk.<br>
> -<br>
> - TreeBins<br>
> - Treebins are pointers to the roots of trees holding a range of<br>
> - sizes. There are 2 equally spaced treebins for each power of two<br>
> - from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything<br>
> - larger.<br>
> -<br>
> - Bin maps<br>
> - There is one bit map for small bins ("smallmap") and one for<br>
> - treebins ("treemap). Each bin sets its bit when non-empty, and<br>
> - clears the bit when empty. Bit operations are then used to avoid<br>
> - bin-by-bin searching -- nearly all "search" is done without ever<br>
> - looking at bins that won't be selected. The bit maps<br>
> - conservatively use 32 bits per map word, even on a 64-bit system.<br>
> - For a good description of some of the bit-based techniques used<br>
> - here, see Henry S. Warren Jr's book "Hacker's Delight" (and<br>
> - supplement at <a href="http://hackersdelight.org/" rel="noreferrer" target="_blank">http://hackersdelight.org/</a>). Many of these are<br>
> - intended to reduce the branchiness of paths through malloc etc, as<br>
> - well as to reduce the number of memory locations read or written.<br>
> -<br>
> - Segments<br>
> - A list of segments headed by an embedded malloc_segment record<br>
> - representing the initial space.<br>
> -<br>
> - Address check support<br>
> - The least_addr field is the least address ever obtained from<br>
> - MORECORE or MMAP. Attempted frees and reallocs of any address less<br>
> - than this are trapped (unless INSECURE is defined).<br>
> -<br>
> - Magic tag<br>
> - A cross-check field that should always hold the same value as mparams.magic.<br>
> -<br>
> - Flags<br>
> - Bits recording whether to use MMAP, locks, or contiguous MORECORE<br>
> -<br>
> - Statistics<br>
> - Each space keeps track of current and maximum system memory<br>
> - obtained via MORECORE or MMAP.<br>
> -<br>
> - Locking<br>
> - If USE_LOCKS is defined, the "mutex" lock is acquired and released<br>
> - around every public call using this mspace.<br>
> -*/<br>
> -<br>
> -/* Bin types, widths and sizes */<br>
> -#define NSMALLBINS (32U)<br>
> -#define NTREEBINS (32U)<br>
> -#define SMALLBIN_SHIFT (3U)<br>
> -#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)<br>
> -#define TREEBIN_SHIFT (8U)<br>
> -#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)<br>
> -#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)<br>
> -#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK -<br>
> CHUNK_OVERHEAD)<br>
> -<br>
> -struct malloc_state {<br>
> - binmap_t smallmap;<br>
> - binmap_t treemap;<br>
> - size_t dvsize;<br>
> - size_t topsize;<br>
> - char* least_addr;<br>
> - mchunkptr dv;<br>
> - mchunkptr top;<br>
> - size_t magic;<br>
> - mchunkptr smallbins[(NSMALLBINS+1)*2];<br>
> - tbinptr treebins[NTREEBINS];<br>
> - size_t footprint;<br>
> - size_t max_footprint;<br>
> - flag_t mflags;<br>
> - void *user_data;<br>
> -#if USE_LOCKS<br>
> - MLOCK_T mutex; /* locate lock among fields that rarely change */<br>
> -#endif /* USE_LOCKS */<br>
> - msegment seg;<br>
> -};<br>
> -<br>
> -typedef struct malloc_state* mstate;<br>
> -<br>
> -/* ------------- Global malloc_state and malloc_params -------------------<br>
> */<br>
> -<br>
> -/*<br>
> - malloc_params holds global properties, including those that can be<br>
> - dynamically set using mallopt. There is a single instance, mparams,<br>
> - initialized in init_mparams.<br>
> -*/<br>
> -<br>
> -struct malloc_params {<br>
> - size_t magic;<br>
> - size_t page_size;<br>
> - size_t granularity;<br>
> - flag_t default_mflags;<br>
> -};<br>
> -<br>
> -static struct malloc_params mparams;<br>
> -<br>
> -/* The global malloc_state used for all non-"mspace" calls */<br>
> -//static struct malloc_state _gm_;<br>
> -//#define gm (&_gm_)<br>
> -//#define is_global(M) ((M) == &_gm_)<br>
> -#define is_initialized(M) ((M)->top != 0)<br>
> -<br>
> -/* -------------------------- system alloc setup -------------------------<br>
> */<br>
> -<br>
> -/* Operations on mflags */<br>
> -<br>
> -#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)<br>
> -#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)<br>
> -#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)<br>
> -<br>
> -#define set_lock(M,L)\<br>
> - ((M)->mflags = (L)?\<br>
> - ((M)->mflags | USE_LOCK_BIT) :\<br>
> - ((M)->mflags & ~USE_LOCK_BIT))<br>
> -<br>
> -/* page-align a size */<br>
> -#define page_align(S)\<br>
> - (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))<br>
> -<br>
> -/* granularity-align a size */<br>
> -#define granularity_align(S)\<br>
> - (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))<br>
> -<br>
> -#define is_page_aligned(S)\<br>
> - (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)<br>
> -#define is_granularity_aligned(S)\<br>
> - (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)<br>
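<br>
Aside: reading page_align with a 4096-byte page, page_align(1) = (1 + 4096) & ~4095 = 4096, but page_align(4096) = 8192 -- the macro rounds strictly upward even for already-aligned sizes, since it adds the full page size rather than page_size - 1. Later upstream dlmalloc releases switched to the `+ page_size - 1' form; the copy quoted here predates that, and changing it would be out of scope for a pure .c-to-.cpp rename anyway.<br>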
> -<br>
> -/* True if segment S holds address A */<br>
> -#define segment_holds(S, A)\<br>
> - ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)<br>
> -<br>
> -/* Return segment holding given address */<br>
> -static msegmentptr segment_holding(mstate m, char* addr) {<br>
> - msegmentptr sp = &m->seg;<br>
> - for (;;) {<br>
> - if (addr >= sp->base && addr < sp->base + sp->size)<br>
> - return sp;<br>
> - if ((sp = sp->next) == 0)<br>
> - return 0;<br>
> - }<br>
> -}<br>
> -<br>
> -/* Return true if segment contains a segment link */<br>
> -static int has_segment_link(mstate m, msegmentptr ss) {<br>
> - msegmentptr sp = &m->seg;<br>
> - for (;;) {<br>
> - if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)<br>
> - return 1;<br>
> - if ((sp = sp->next) == 0)<br>
> - return 0;<br>
> - }<br>
> -}<br>
> -<br>
> -<br>
> -<br>
> -/*<br>
> - TOP_FOOT_SIZE is padding at the end of a segment, including space<br>
> - that may be needed to place segment records and fenceposts when new<br>
> - noncontiguous segments are added.<br>
> -*/<br>
> -#define TOP_FOOT_SIZE\<br>
> - (align_offset(chunk2mem(0))+pad_request(sizeof(struct<br>
> malloc_segment))+MIN_CHUNK_SIZE)<br>
> -<br>
> -<br>
> -/* ------------------------------- Hooks --------------------------------<br>
> */<br>
> -<br>
> -/*<br>
> - PREACTION should be defined to return 0 on success, and nonzero on<br>
> - failure. If you are not using locking, you can redefine these to do<br>
> - anything you like.<br>
> -*/<br>
> -<br>
> -#if USE_LOCKS<br>
> -<br>
> -/* Ensure locks are initialized */<br>
> -#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())<br>
> -<br>
> -#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))?<br>
> ACQUIRE_LOCK(&(M)->mutex) : 0)<br>
> -#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }<br>
> -#else /* USE_LOCKS */<br>
> -<br>
> -#ifndef PREACTION<br>
> -#define PREACTION(M) (0)<br>
> -#endif /* PREACTION */<br>
> -<br>
> -#ifndef POSTACTION<br>
> -#define POSTACTION(M)<br>
> -#endif /* POSTACTION */<br>
> -<br>
> -#endif /* USE_LOCKS */<br>
> -<br>
> -/*<br>
> - CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.<br>
> - USAGE_ERROR_ACTION is triggered on detected bad frees and<br>
> - reallocs. The argument p is an address that might have triggered the<br>
> - fault. It is ignored by the two predefined actions, but might be<br>
> - useful in custom actions that try to help diagnose errors.<br>
> -*/<br>
> -<br>
> -#if PROCEED_ON_ERROR<br>
> -<br>
> -/* A count of the number of corruption errors causing resets */<br>
> -int malloc_corruption_error_count;<br>
> -<br>
> -/* default corruption action */<br>
> -static void reset_on_error(mstate m);<br>
> -<br>
> -#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)<br>
> -#define USAGE_ERROR_ACTION(m, p)<br>
> -<br>
> -#else /* PROCEED_ON_ERROR */<br>
> -<br>
> -#ifndef CORRUPTION_ERROR_ACTION<br>
> -#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)<br>
> -#endif /* CORRUPTION_ERROR_ACTION */<br>
> -<br>
> -#ifndef USAGE_ERROR_ACTION<br>
> -#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)<br>
> -#endif /* USAGE_ERROR_ACTION */<br>
> -<br>
> -#endif /* PROCEED_ON_ERROR */<br>
> -<br>
> -/* -------------------------- Debugging setup ----------------------------<br>
> */<br>
> -<br>
> -#if ! DEBUG<br>
> -<br>
> -#define check_free_chunk(M,P)<br>
> -#define check_inuse_chunk(M,P)<br>
> -#define check_malloced_chunk(M,P,N)<br>
> -#define check_malloc_state(M)<br>
> -#define check_top_chunk(M,P)<br>
> -<br>
> -#else /* DEBUG */<br>
> -#define check_free_chunk(M,P) do_check_free_chunk(M,P)<br>
> -#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)<br>
> -#define check_top_chunk(M,P) do_check_top_chunk(M,P)<br>
> -#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)<br>
> -#define check_malloc_state(M) do_check_malloc_state(M)<br>
> -<br>
> -static void do_check_any_chunk(mstate m, mchunkptr p);<br>
> -static void do_check_top_chunk(mstate m, mchunkptr p);<br>
> -static void do_check_inuse_chunk(mstate m, mchunkptr p);<br>
> -static void do_check_free_chunk(mstate m, mchunkptr p);<br>
> -static void do_check_malloced_chunk(mstate m, void* mem, size_t s);<br>
> -static void do_check_tree(mstate m, tchunkptr t);<br>
> -static void do_check_treebin(mstate m, bindex_t i);<br>
> -static void do_check_smallbin(mstate m, bindex_t i);<br>
> -static void do_check_malloc_state(mstate m);<br>
> -static int bin_find(mstate m, mchunkptr x);<br>
> -static size_t traverse_and_check(mstate m);<br>
> -#endif /* DEBUG */<br>
> -<br>
> -/* ---------------------------- Indexing Bins ----------------------------<br>
> */<br>
> -<br>
> -#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)<br>
> -#define small_index(s) ((s) >> SMALLBIN_SHIFT)<br>
> -#define small_index2size(i) ((i) << SMALLBIN_SHIFT)<br>
> -#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))<br>
> -<br>
> -/* addressing by index. See above about smallbin repositioning */<br>
> -#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))<br>
> -#define treebin_at(M,i) (&((M)->treebins[i]))<br>
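<br>
Aside, spelling out the "repositioning trick" mentioned in the malloc_state comment: smallbins[] stores only two pointer-sized words per bin, yet smallbin_at(M, i) returns &smallbins[2*i] cast to a chunk pointer. Because fd sits two size_t words into malloc_chunk, the fake header's fd/bk overlay smallbins[2*i + 2] and smallbins[2*i + 3], while its never-read prev_foot/head overlay the previous bin's fd/bk -- hence the (NSMALLBINS+1)*2 array length. A tiny hedged check of the assumption:<br>
<br>
#include <assert.h><br>
#include <stddef.h><br>
<br>
/* Sketch: the fd field of the fake bin header lands two words in. */<br>
static void ex_check_overlay(void) {<br>
    assert(offsetof(struct malloc_chunk, fd) == 2 * sizeof(size_t));<br>
}<br>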
> -<br>
> -/* assign tree index for size S to variable I */<br>
> -#if defined(__GNUC__) && defined(i386)<br>
> -#define compute_tree_index(S, I)\<br>
> -{\<br>
> - size_t X = S >> TREEBIN_SHIFT;\<br>
> - if (X == 0)\<br>
> - I = 0;\<br>
> - else if (X > 0xFFFF)\<br>
> - I = NTREEBINS-1;\<br>
> - else {\<br>
> - unsigned int K;\<br>
> - __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\<br>
> - I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\<br>
> - }\<br>
> -}<br>
> -#else /* GNUC */<br>
> -#define compute_tree_index(S, I)\<br>
> -{\<br>
> - size_t X = S >> TREEBIN_SHIFT;\<br>
> - if (X == 0)\<br>
> - I = 0;\<br>
> - else if (X > 0xFFFF)\<br>
> - I = NTREEBINS-1;\<br>
> - else {\<br>
> - unsigned int Y = (unsigned int)X;\<br>
> - unsigned int N = ((Y - 0x100) >> 16) & 8;\<br>
> - unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\<br>
> - N += K;\<br>
> - N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\<br>
> - K = 14 - N + ((Y <<= K) >> 15);\<br>
> - I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\<br>
> - }\<br>
> -}<br>
> -#endif /* GNUC */<br>
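<br>
Aside, a worked example of the portable branch, assuming 32-bit size_t: for S = 0x300, X = S >> TREEBIN_SHIFT = 3; the shift cascade normalizes Y and resolves K = 1 (the index of X's highest set bit), so I = (1 << 1) + ((0x300 >> 8) & 1) = 3. That agrees with minsize_for_tree_index below: minsize_for_tree_index(3) = (1 << 9) | (1 << 8) = 0x300, i.e. treebin 3 holds sizes 0x300 <= s < 0x400.<br>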
> -<br>
> -/* Bit representing maximum resolved size in a treebin at i */<br>
> -#define bit_for_tree_index(i) \<br>
> - (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)<br>
> -<br>
> -/* Shift placing maximum resolved bit in a treebin at i as sign bit */<br>
> -#define leftshift_for_tree_index(i) \<br>
> - ((i == NTREEBINS-1)? 0 : \<br>
> - ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))<br>
> -<br>
> -/* The size of the smallest chunk held in bin with index i */<br>
> -#define minsize_for_tree_index(i) \<br>
> - ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \<br>
> - (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))<br>
> -<br>
> -/* ------------------------ Operations on bin maps -----------------------<br>
> */<br>
> -<br>
> -/* bit corresponding to given index */<br>
> -#define idx2bit(i) ((binmap_t)(1) << (i))<br>
> -<br>
> -/* Mark/Clear bits with given index */<br>
> -#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))<br>
> -#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))<br>
> -#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))<br>
> -<br>
> -#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))<br>
> -#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))<br>
> -#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))<br>
> -<br>
> -/* index corresponding to given bit */<br>
> -<br>
> -#if defined(__GNUC__) && defined(i386)<br>
> -#define compute_bit2idx(X, I)\<br>
> -{\<br>
> - unsigned int J;\<br>
> - __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\<br>
> - I = (bindex_t)J;\<br>
> -}<br>
> -<br>
> -#else /* GNUC */<br>
> -#if USE_BUILTIN_FFS<br>
> -#define compute_bit2idx(X, I) I = ffs(X)-1<br>
> -<br>
> -#else /* USE_BUILTIN_FFS */<br>
> -#define compute_bit2idx(X, I)\<br>
> -{\<br>
> - unsigned int Y = X - 1;\<br>
> - unsigned int K = Y >> (16-4) & 16;\<br>
> - unsigned int N = K; Y >>= K;\<br>
> - N += K = Y >> (8-3) & 8; Y >>= K;\<br>
> - N += K = Y >> (4-2) & 4; Y >>= K;\<br>
> - N += K = Y >> (2-1) & 2; Y >>= K;\<br>
> - N += K = Y >> (1-0) & 1; Y >>= K;\<br>
> - I = (bindex_t)(N + Y);\<br>
> -}<br>
> -#endif /* USE_BUILTIN_FFS */<br>
> -#endif /* GNUC */<br>
> -<br>
> -/* isolate the least set bit of a bitmap */<br>
> -#define least_bit(x) ((x) & -(x))<br>
> -<br>
> -/* mask with all bits to left of least bit of x on */<br>
> -#define left_bits(x) ((x<<1) | -(x<<1))<br>
> -<br>
> -/* mask with all bits to left of or equal to least bit of x on */<br>
> -#define same_or_left_bits(x) ((x) | -(x))<br>
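<br>
Aside, a numeric illustration in an 8-bit view: for x = 00010100b, least_bit(x) = 00000100b, left_bits(x) = 11111000b, and same_or_left_bits(x) = 11111100b. These drive the smallmap/treemap searches: starting from the bit of the ideal bin, left_bits selects every non-empty bin of a larger size class, and compute_bit2idx above turns the least such bit back into a bin index.<br>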
> -<br>
> -<br>
> -/* ----------------------- Runtime Check Support -------------------------<br>
> */<br>
> -<br>
> -/*<br>
> - For security, the main invariant is that malloc/free/etc never<br>
> - writes to a static address other than malloc_state, unless static<br>
> - malloc_state itself has been corrupted, which cannot occur via<br>
> - malloc (because of these checks). In essence this means that we<br>
> - believe all pointers, sizes, maps etc held in malloc_state, but<br>
> - check all of those linked or offsetted from other embedded data<br>
> - structures. These checks are interspersed with main code in a way<br>
> - that tends to minimize their run-time cost.<br>
> -<br>
> - When FOOTERS is defined, in addition to range checking, we also<br>
> - verify footer fields of inuse chunks, which can be used to guarantee<br>
> - that the mstate controlling malloc/free is intact. This is a<br>
> - streamlined version of the approach described by William Robertson<br>
> - et al in "Run-time Detection of Heap-based Overflows" LISA'03<br>
> - <a href="http://www.usenix.org/events/lisa03/tech/robertson.html" rel="noreferrer" target="_blank">http://www.usenix.org/events/lisa03/tech/robertson.html</a> The footer<br>
> - of an inuse chunk holds the xor of its mstate and a random seed,<br>
> - that is checked upon calls to free() and realloc(). This is<br>
> - (probabilistically) unguessable from outside the program, but can be<br>
> - computed by any code successfully malloc'ing any chunk, so does not<br>
> - itself provide protection against code that has already broken<br>
> - security through some other means. Unlike Robertson et al, we<br>
> - always dynamically check addresses of all offset chunks (previous,<br>
> - next, etc). This turns out to be cheaper than relying on hashes.<br>
> -*/<br>
> -<br>
> -#if !INSECURE<br>
> -/* Check if address a is at least as high as any from MORECORE or MMAP */<br>
> -#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)<br>
> -/* Check if address of next chunk n is higher than base chunk p */<br>
> -#define ok_next(p, n) ((char*)(p) < (char*)(n))<br>
> -/* Check if p has its cinuse bit on */<br>
> -#define ok_cinuse(p) cinuse(p)<br>
> -/* Check if p has its pinuse bit on */<br>
> -#define ok_pinuse(p) pinuse(p)<br>
> -<br>
> -#else /* !INSECURE */<br>
> -#define ok_address(M, a) (1)<br>
> -#define ok_next(b, n) (1)<br>
> -#define ok_cinuse(p) (1)<br>
> -#define ok_pinuse(p) (1)<br>
> -#endif /* !INSECURE */<br>
> -<br>
> -#if (FOOTERS && !INSECURE)<br>
> -/* Check if (alleged) mstate m has expected magic field */<br>
> -#define ok_magic(M) ((M)->magic == mparams.magic)<br>
> -#else /* (FOOTERS && !INSECURE) */<br>
> -#define ok_magic(M) (1)<br>
> -#endif /* (FOOTERS && !INSECURE) */<br>
> -<br>
> -<br>
> -/* In gcc, use __builtin_expect to minimize impact of checks */<br>
> -#if !INSECURE<br>
> -#if defined(__GNUC__) && __GNUC__ >= 3<br>
> -#define RTCHECK(e) __builtin_expect(e, 1)<br>
> -#else /* GNUC */<br>
> -#define RTCHECK(e) (e)<br>
> -#endif /* GNUC */<br>
> -#else /* !INSECURE */<br>
> -#define RTCHECK(e) (1)<br>
> -#endif /* !INSECURE */<br>
> -<br>
> -/* macros to set up inuse chunks with or without footers */<br>
> -<br>
> -#if !FOOTERS<br>
> -<br>
> -#define mark_inuse_foot(M,p,s)<br>
> -<br>
> -/* Set cinuse bit and pinuse bit of next chunk */<br>
> -#define set_inuse(M,p,s)\<br>
> - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\<br>
> - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)<br>
> -<br>
> -/* Set cinuse and pinuse of this chunk and pinuse of next chunk */<br>
> -#define set_inuse_and_pinuse(M,p,s)\<br>
> - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> - ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)<br>
> -<br>
> -/* Set size, cinuse and pinuse bit of this chunk */<br>
> -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\<br>
> - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))<br>
> -<br>
> -#else /* FOOTERS */<br>
> -<br>
> -/* Set foot of inuse chunk to be xor of mstate and seed */<br>
> -#define mark_inuse_foot(M,p,s)\<br>
> - (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^<br>
> mparams.magic))<br>
> -<br>
> -#define get_mstate_for(p)\<br>
> - ((mstate)(((mchunkptr)((char*)(p) +\<br>
> - (chunksize(p))))->prev_foot ^ mparams.magic))<br>
> -<br>
> -#define set_inuse(M,p,s)\<br>
> - ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\<br>
> - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \<br>
> - mark_inuse_foot(M,p,s))<br>
> -<br>
> -#define set_inuse_and_pinuse(M,p,s)\<br>
> - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> - (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\<br>
> - mark_inuse_foot(M,p,s))<br>
> -<br>
> -#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\<br>
> - ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> - mark_inuse_foot(M, p, s))<br>
> -<br>
> -#endif /* !FOOTERS */<br>
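<br>
Aside, for readers skimming the FOOTERS variant: the footer is just the mstate pointer xor'ed with the random magic, and xor with the same key is its own inverse. A self-contained, hypothetical sketch of the round trip (ex_footer_roundtrip is not in the patch):<br>
<br>
#include <assert.h><br>
#include <stddef.h><br>
<br>
static void ex_footer_roundtrip(void *m, size_t magic) {<br>
    size_t stored = (size_t)m ^ magic;     /* what mark_inuse_foot() writes */<br>
    void *back = (void *)(stored ^ magic); /* what get_mstate_for() computes */<br>
    assert(back == m);                     /* involution: (v ^ k) ^ k == v */<br>
}<br>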
> -<br>
> -/* ---------------------------- setting mparams --------------------------<br>
> */<br>
> -<br>
> -/* Initialize mparams */<br>
> -static int init_mparams(void) {<br>
> - if (mparams.page_size == 0) {<br>
> - size_t s;<br>
> -<br>
> - mparams.default_mflags = USE_LOCK_BIT;<br>
> -<br>
> -#if (FOOTERS && !INSECURE)<br>
> - {<br>
> -#if USE_DEV_RANDOM<br>
> - int fd;<br>
> - unsigned char buf[sizeof(size_t)];<br>
> - /* Try to use /dev/urandom, else fall back on using time */<br>
> - if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&<br>
> - read(fd, buf, sizeof(buf)) == sizeof(buf)) {<br>
> - s = *((size_t *) buf);<br>
> - close(fd);<br>
> - }<br>
> - else<br>
> -#endif /* USE_DEV_RANDOM */<br>
> - s = (size_t)(time(0) ^ (size_t)0x55555555U);<br>
> -<br>
> - s |= (size_t)8U; /* ensure nonzero */<br>
> - s &= ~(size_t)7U; /* improve chances of fault for bad values */<br>
> -<br>
> - }<br>
> -#else /* (FOOTERS && !INSECURE) */<br>
> - s = (size_t)0x58585858U;<br>
> -#endif /* (FOOTERS && !INSECURE) */<br>
> - ACQUIRE_MAGIC_INIT_LOCK();<br>
> - if (mparams.magic == 0) {<br>
> - mparams.magic = s;<br>
> - /* Set up lock for main malloc area */<br>
> - //INITIAL_LOCK(&gm->mutex);<br>
> - //gm->mflags = mparams.default_mflags;<br>
> - }<br>
> - RELEASE_MAGIC_INIT_LOCK();<br>
> -<br>
> -<br>
> - mparams.page_size = malloc_getpagesize;<br>
> - mparams.granularity = ((DEFAULT_GRANULARITY != 0)?<br>
> - DEFAULT_GRANULARITY : mparams.page_size);<br>
> -<br>
> - /* Sanity-check configuration:<br>
> - size_t must be unsigned and as wide as pointer type.<br>
> - ints must be at least 4 bytes.<br>
> - alignment must be at least 8.<br>
> - Alignment, min chunk size, and page size must all be powers of 2.<br>
> - */<br>
> - if ((sizeof(size_t) != sizeof(char*)) ||<br>
> - (MAX_SIZE_T < MIN_CHUNK_SIZE) ||<br>
> - (sizeof(int) < 4) ||<br>
> - (MALLOC_ALIGNMENT < (size_t)8U) ||<br>
> - ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||<br>
> - ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||<br>
> - ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||<br>
> - ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))<br>
> - ABORT(NULL);<br>
> - }<br>
> - return 0;<br>
> -}<br>
> -<br>
> -/* support for mallopt */<br>
> -static int change_mparam(int param_number, int value) {<br>
> - size_t val = (size_t)value;<br>
> - init_mparams();<br>
> - switch(param_number) {<br>
> - case M_GRANULARITY:<br>
> - if (val >= mparams.page_size && ((val & (val-1)) == 0)) {<br>
> - mparams.granularity = val;<br>
> - return 1;<br>
> - }<br>
> - else<br>
> - return 0;<br>
> - default:<br>
> - return 0;<br>
> - }<br>
> -}<br>
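<br>
Aside on usage, mirroring dlmalloc's mallopt contract (a hypothetical call, not in this patch): change_mparam(M_GRANULARITY, 64 * 1024) succeeds because 64 KiB is a power of two no smaller than the page size; any other parameter number falls through to the default case and returns 0, since this trimmed-down copy keeps only the granularity knob.<br>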
> -<br>
> -#if DEBUG<br>
> -/* ------------------------- Debugging Support ---------------------------<br>
> */<br>
> -<br>
> -/* Check properties of any chunk, whether free, inuse, mmapped etc */<br>
> -static void do_check_any_chunk(mstate m, mchunkptr p) {<br>
> - assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==<br>
> FENCEPOST_HEAD));<br>
> - assert(m->user_data, ok_address(m, p));<br>
> -}<br>
> -<br>
> -/* Check properties of top chunk */<br>
> -static void do_check_top_chunk(mstate m, mchunkptr p) {<br>
> - msegmentptr sp = segment_holding(m, (char*)p);<br>
> - size_t sz = chunksize(p);<br>
> - assert(m->user_data, sp != 0);<br>
> - assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head ==<br>
> FENCEPOST_HEAD));<br>
> - assert(m->user_data, ok_address(m, p));<br>
> - assert(m->user_data, sz == m->topsize);<br>
> - assert(m->user_data, sz > 0);<br>
> - assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) -<br>
> TOP_FOOT_SIZE);<br>
> - assert(m->user_data, pinuse(p));<br>
> - assert(m->user_data, !next_pinuse(p));<br>
> -}<br>
> -<br>
> -/* Check properties of inuse chunks */<br>
> -static void do_check_inuse_chunk(mstate m, mchunkptr p) {<br>
> - do_check_any_chunk(m, p);<br>
> - assert(m->user_data, cinuse(p));<br>
> - assert(m->user_data, next_pinuse(p));<br>
> - /* If not pinuse, previous chunk has OK offset */<br>
> - assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);<br>
> -}<br>
> -<br>
> -/* Check properties of free chunks */<br>
> -static void do_check_free_chunk(mstate m, mchunkptr p) {<br>
> - size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);<br>
> - mchunkptr next = chunk_plus_offset(p, sz);<br>
> - do_check_any_chunk(m, p);<br>
> - assert(m->user_data, !cinuse(p));<br>
> - assert(m->user_data, !next_pinuse(p));<br>
> - if (p != m->dv && p != m->top) {<br>
> - if (sz >= MIN_CHUNK_SIZE) {<br>
> - assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);<br>
> - assert(m->user_data, is_aligned(chunk2mem(p)));<br>
> - assert(m->user_data, next->prev_foot == sz);<br>
> - assert(m->user_data, pinuse(p));<br>
> - assert(m->user_data, next == m->top || cinuse(next));<br>
> - assert(m->user_data, p->fd->bk == p);<br>
> - assert(m->user_data, p->bk->fd == p);<br>
> - }<br>
> - else /* markers are always of size SIZE_T_SIZE */<br>
> - assert(m->user_data, sz == SIZE_T_SIZE);<br>
> - }<br>
> -}<br>
> -<br>
> -/* Check properties of malloced chunks at the point they are malloced */<br>
> -static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {<br>
> - if (mem != 0) {<br>
> - mchunkptr p = mem2chunk(mem);<br>
> - size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);<br>
> - do_check_inuse_chunk(m, p);<br>
> - assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);<br>
> - assert(m->user_data, sz >= MIN_CHUNK_SIZE);<br>
> - assert(m->user_data, sz >= s);<br>
> - /* size is less than MIN_CHUNK_SIZE more than request */<br>
> - assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));<br>
> - }<br>
> -}<br>
> -<br>
> -/* Check a tree and its subtrees. */<br>
> -static void do_check_tree(mstate m, tchunkptr t) {<br>
> - tchunkptr head = 0;<br>
> - tchunkptr u = t;<br>
> - bindex_t tindex = t->index;<br>
> - size_t tsize = chunksize(t);<br>
> - bindex_t idx;<br>
> - compute_tree_index(tsize, idx);<br>
> - assert(m->user_data, tindex == idx);<br>
> - assert(m->user_data, tsize >= MIN_LARGE_SIZE);<br>
> - assert(m->user_data, tsize >= minsize_for_tree_index(idx));<br>
> - assert(m->user_data, (idx == NTREEBINS-1) || (tsize <<br>
> - minsize_for_tree_index((idx+1))));<br>
> -<br>
> - do { /* traverse through chain of same-sized nodes */<br>
> - do_check_any_chunk(m, ((mchunkptr)u));<br>
> - assert(m->user_data, u->index == tindex);<br>
> - assert(m->user_data, chunksize(u) == tsize);<br>
> - assert(m->user_data, !cinuse(u));<br>
> - assert(m->user_data, !next_pinuse(u));<br>
> - assert(m->user_data, u->fd->bk == u);<br>
> - assert(m->user_data, u->bk->fd == u);<br>
> - if (u->parent == 0) {<br>
> - assert(m->user_data, u->child[0] == 0);<br>
> - assert(m->user_data, u->child[1] == 0);<br>
> - }<br>
> - else {<br>
> - assert(m->user_data, head == 0); /* only one node on chain has parent<br>
> */<br>
> - head = u;<br>
> - assert(m->user_data, u->parent != u);<br>
> - assert(m->user_data, u->parent->child[0] == u ||<br>
> - u->parent->child[1] == u ||<br>
> - *((tbinptr*)(u->parent)) == u);<br>
> - if (u->child[0] != 0) {<br>
> - assert(m->user_data, u->child[0]->parent == u);<br>
> - assert(m->user_data, u->child[0] != u);<br>
> - do_check_tree(m, u->child[0]);<br>
> - }<br>
> - if (u->child[1] != 0) {<br>
> - assert(m->user_data, u->child[1]->parent == u);<br>
> - assert(m->user_data, u->child[1] != u);<br>
> - do_check_tree(m, u->child[1]);<br>
> - }<br>
> - if (u->child[0] != 0 && u->child[1] != 0) {<br>
> - assert(m->user_data, chunksize(u->child[0]) <<br>
> chunksize(u->child[1]));<br>
> - }<br>
> - }<br>
> - u = u->fd;<br>
> - } while (u != t);<br>
> - assert(m->user_data, head != 0);<br>
> -}<br>
> -<br>
> -/* Check all the chunks in a treebin. */<br>
> -static void do_check_treebin(mstate m, bindex_t i) {<br>
> - tbinptr* tb = treebin_at(m, i);<br>
> - tchunkptr t = *tb;<br>
> - int empty = (m->treemap & (1U << i)) == 0;<br>
> - if (t == 0)<br>
> - assert(m->user_data, empty);<br>
> - if (!empty)<br>
> - do_check_tree(m, t);<br>
> -}<br>
> -<br>
> -/* Check all the chunks in a smallbin. */<br>
> -static void do_check_smallbin(mstate m, bindex_t i) {<br>
> - sbinptr b = smallbin_at(m, i);<br>
> - mchunkptr p = b->bk;<br>
> - unsigned int empty = (m->smallmap & (1U << i)) == 0;<br>
> - if (p == b)<br>
> - assert(m->user_data, empty);<br>
> - if (!empty) {<br>
> - for (; p != b; p = p->bk) {<br>
> - size_t size = chunksize(p);<br>
> - mchunkptr q;<br>
> - /* each chunk claims to be free */<br>
> - do_check_free_chunk(m, p);<br>
> - /* chunk belongs in bin */<br>
> - assert(m->user_data, small_index(size) == i);<br>
> - assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));<br>
> - /* chunk is followed by an inuse chunk */<br>
> - q = next_chunk(p);<br>
> - if (q->head != FENCEPOST_HEAD)<br>
> - do_check_inuse_chunk(m, q);<br>
> - }<br>
> - }<br>
> -}<br>
> -<br>
> -/* Find x in a bin. Used in other check functions. */<br>
> -static int bin_find(mstate m, mchunkptr x) {<br>
> - size_t size = chunksize(x);<br>
> - if (is_small(size)) {<br>
> - bindex_t sidx = small_index(size);<br>
> - sbinptr b = smallbin_at(m, sidx);<br>
> - if (smallmap_is_marked(m, sidx)) {<br>
> - mchunkptr p = b;<br>
> - do {<br>
> - if (p == x)<br>
> - return 1;<br>
> - } while ((p = p->fd) != b);<br>
> - }<br>
> - }<br>
> - else {<br>
> - bindex_t tidx;<br>
> - compute_tree_index(size, tidx);<br>
> - if (treemap_is_marked(m, tidx)) {<br>
> - tchunkptr t = *treebin_at(m, tidx);<br>
> - size_t sizebits = size << leftshift_for_tree_index(tidx);<br>
> - while (t != 0 && chunksize(t) != size) {<br>
> - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];<br>
> - sizebits <<= 1;<br>
> - }<br>
> - if (t != 0) {<br>
> - tchunkptr u = t;<br>
> - do {<br>
> - if (u == (tchunkptr)x)<br>
> - return 1;<br>
> - } while ((u = u->fd) != t);<br>
> - }<br>
> - }<br>
> - }<br>
> - return 0;<br>
> -}<br>
> -<br>
> -/* Traverse each chunk and check it; return total */<br>
> -static size_t traverse_and_check(mstate m) {<br>
> - size_t sum = 0;<br>
> - if (is_initialized(m)) {<br>
> - msegmentptr s = &m->seg;<br>
> - sum += m->topsize + TOP_FOOT_SIZE;<br>
> - while (s != 0) {<br>
> - mchunkptr q = align_as_chunk(s->base);<br>
> - mchunkptr lastq = 0;<br>
> - assert(m->user_data, pinuse(q));<br>
> - while (segment_holds(s, q) &&<br>
> - q != m->top && q->head != FENCEPOST_HEAD) {<br>
> - sum += chunksize(q);<br>
> - if (cinuse(q)) {<br>
> - assert(m->user_data, !bin_find(m, q));<br>
> - do_check_inuse_chunk(m, q);<br>
> - }<br>
> - else {<br>
> - assert(m->user_data, q == m->dv || bin_find(m, q));<br>
> - assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2<br>
> consecutive free */<br>
> - do_check_free_chunk(m, q);<br>
> - }<br>
> - lastq = q;<br>
> - q = next_chunk(q);<br>
> - }<br>
> - s = s->next;<br>
> - }<br>
> - }<br>
> - return sum;<br>
> -}<br>
> -<br>
> -/* Check all properties of malloc_state. */<br>
> -static void do_check_malloc_state(mstate m) {<br>
> - bindex_t i;<br>
> - size_t total;<br>
> - /* check bins */<br>
> - for (i = 0; i < NSMALLBINS; ++i)<br>
> - do_check_smallbin(m, i);<br>
> - for (i = 0; i < NTREEBINS; ++i)<br>
> - do_check_treebin(m, i);<br>
> -<br>
> - if (m->dvsize != 0) { /* check dv chunk */<br>
> - do_check_any_chunk(m, m->dv);<br>
> - assert(m->user_data, m->dvsize == chunksize(m->dv));<br>
> - assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);<br>
> - assert(m->user_data, bin_find(m, m->dv) == 0);<br>
> - }<br>
> -<br>
> - if (m->top != 0) { /* check top chunk */<br>
> - do_check_top_chunk(m, m->top);<br>
> - assert(m->user_data, m->topsize == chunksize(m->top));<br>
> - assert(m->user_data, m->topsize > 0);<br>
> - assert(m->user_data, bin_find(m, m->top) == 0);<br>
> - }<br>
> -<br>
> - total = traverse_and_check(m);<br>
> - assert(m->user_data, total <= m->footprint);<br>
> - assert(m->user_data, m->footprint <= m->max_footprint);<br>
> -}<br>
> -#endif /* DEBUG */<br>
> -<br>
> -/* ----------------------------- statistics ------------------------------<br>
> */<br>
> -<br>
> -#if !NO_MALLINFO<br>
> -static struct mallinfo internal_mallinfo(mstate m) {<br>
> - struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };<br>
> - if (!PREACTION(m)) {<br>
> - check_malloc_state(m);<br>
> - if (is_initialized(m)) {<br>
> - size_t nfree = SIZE_T_ONE; /* top always free */<br>
> - size_t mfree = m->topsize + TOP_FOOT_SIZE;<br>
> - size_t sum = mfree;<br>
> - msegmentptr s = &m->seg;<br>
> - while (s != 0) {<br>
> - mchunkptr q = align_as_chunk(s->base);<br>
> - while (segment_holds(s, q) &&<br>
> - q != m->top && q->head != FENCEPOST_HEAD) {<br>
> - size_t sz = chunksize(q);<br>
> - sum += sz;<br>
> - if (!cinuse(q)) {<br>
> - mfree += sz;<br>
> - ++nfree;<br>
> - }<br>
> - q = next_chunk(q);<br>
> - }<br>
> - s = s->next;<br>
> - }<br>
> -<br>
> - nm.arena = sum;<br>
> - nm.ordblks = nfree;<br>
> - nm.hblkhd = m->footprint - sum;<br>
> - nm.usmblks = m->max_footprint;<br>
> - nm.uordblks = m->footprint - mfree;<br>
> - nm.fordblks = mfree;<br>
> - nm.keepcost = m->topsize;<br>
> - }<br>
> -<br>
> - POSTACTION(m);<br>
> - }<br>
> - return nm;<br>
> -}<br>
> -#endif /* !NO_MALLINFO */<br>
> -<br>
> -static void internal_malloc_stats(mstate m) {<br>
> - if (!PREACTION(m)) {<br>
> - size_t maxfp = 0;<br>
> - size_t fp = 0;<br>
> - size_t used = 0;<br>
> - check_malloc_state(m);<br>
> - if (is_initialized(m)) {<br>
> - msegmentptr s = &m->seg;<br>
> - maxfp = m->max_footprint;<br>
> - fp = m->footprint;<br>
> - used = fp - (m->topsize + TOP_FOOT_SIZE);<br>
> -<br>
> - while (s != 0) {<br>
> - mchunkptr q = align_as_chunk(s->base);<br>
> - while (segment_holds(s, q) &&<br>
> - q != m->top && q->head != FENCEPOST_HEAD) {<br>
> - if (!cinuse(q))<br>
> - used -= chunksize(q);<br>
> - q = next_chunk(q);<br>
> - }<br>
> - s = s->next;<br>
> - }<br>
> - }<br>
> -<br>
> - PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned<br>
> long)(maxfp)));<br>
> - PRINT((m->user_data, "system bytes = %10lu\n", (unsigned<br>
> long)(fp)));<br>
> - PRINT((m->user_data, "in use bytes = %10lu\n", (unsigned<br>
> long)(used)));<br>
> -<br>
> - POSTACTION(m);<br>
> - }<br>
> -}<br>
> -<br>
> -/* ----------------------- Operations on smallbins -----------------------<br>
> */<br>
> -<br>
> -/*<br>
> - Various forms of linking and unlinking are defined as macros. Even<br>
> - the ones for trees, which are very long but have very short typical<br>
> - paths. This is ugly but reduces reliance on inlining support of<br>
> - compilers.<br>
> -*/<br>
> -<br>
> -/* Link a free chunk into a smallbin */<br>
> -#define insert_small_chunk(M, P, S) {\<br>
> - bindex_t I = small_index(S);\<br>
> - mchunkptr B = smallbin_at(M, I);\<br>
> - mchunkptr F = B;\<br>
> - assert((M)->user_data, S >= MIN_CHUNK_SIZE);\<br>
> - if (!smallmap_is_marked(M, I))\<br>
> - mark_smallmap(M, I);\<br>
> - else if (RTCHECK(ok_address(M, B->fd)))\<br>
> - F = B->fd;\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - B->fd = P;\<br>
> - F->bk = P;\<br>
> - P->fd = F;\<br>
> - P->bk = B;\<br>
> -}<br>
> -<br>
> -/* Unlink a chunk from a smallbin */<br>
> -#define unlink_small_chunk(M, P, S) {\<br>
> - mchunkptr F = P->fd;\<br>
> - mchunkptr B = P->bk;\<br>
> - bindex_t I = small_index(S);\<br>
> - assert((M)->user_data, P != B);\<br>
> - assert((M)->user_data, P != F);\<br>
> - assert((M)->user_data, chunksize(P) == small_index2size(I));\<br>
> - if (F == B)\<br>
> - clear_smallmap(M, I);\<br>
> - else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\<br>
> - (B == smallbin_at(M,I) || ok_address(M, B)))) {\<br>
> - F->bk = B;\<br>
> - B->fd = F;\<br>
> - }\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> -}<br>
> -<br>
> -/* Unlink the first chunk from a smallbin */<br>
> -#define unlink_first_small_chunk(M, B, P, I) {\<br>
> - mchunkptr F = P->fd;\<br>
> - assert((M)->user_data, P != B);\<br>
> - assert((M)->user_data, P != F);\<br>
> - assert((M)->user_data, chunksize(P) == small_index2size(I));\<br>
> - if (B == F)\<br>
> - clear_smallmap(M, I);\<br>
> - else if (RTCHECK(ok_address(M, F))) {\<br>
> - B->fd = F;\<br>
> - F->bk = B;\<br>
> - }\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> -}<br>
> -<br>
> -/* Replace dv node, binning the old one */<br>
> -/* Used only when dvsize known to be small */<br>
> -#define replace_dv(M, P, S) {\<br>
> - size_t DVS = M->dvsize;\<br>
> - if (DVS != 0) {\<br>
> - mchunkptr DV = M->dv;\<br>
> - assert((M)->user_data, is_small(DVS));\<br>
> - insert_small_chunk(M, DV, DVS);\<br>
> - }\<br>
> - M->dvsize = S;\<br>
> - M->dv = P;\<br>
> -}<br>
> -<br>
> -<br>
> -/* ------------------------- Operations on trees -------------------------<br>
> */<br>
> -<br>
> -/* Insert chunk into tree */<br>
> -#define insert_large_chunk(M, X, S) {\<br>
> - tbinptr* H;\<br>
> - bindex_t I;\<br>
> - compute_tree_index(S, I);\<br>
> - H = treebin_at(M, I);\<br>
> - X->index = I;\<br>
> - X->child[0] = X->child[1] = 0;\<br>
> - if (!treemap_is_marked(M, I)) {\<br>
> - mark_treemap(M, I);\<br>
> - *H = X;\<br>
> - X->parent = (tchunkptr)H;\<br>
> - X->fd = X->bk = X;\<br>
> - }\<br>
> - else {\<br>
> - tchunkptr T = *H;\<br>
> - size_t K = S << leftshift_for_tree_index(I);\<br>
> - for (;;) {\<br>
> - if (chunksize(T) != S) {\<br>
> - tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\<br>
> - K <<= 1;\<br>
> - if (*C != 0)\<br>
> - T = *C;\<br>
> - else if (RTCHECK(ok_address(M, C))) {\<br>
> - *C = X;\<br>
> - X->parent = T;\<br>
> - X->fd = X->bk = X;\<br>
> - break;\<br>
> - }\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - break;\<br>
> - }\<br>
> - }\<br>
> - else {\<br>
> - tchunkptr F = T->fd;\<br>
> - if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\<br>
> - T->fd = F->bk = X;\<br>
> - X->fd = F;\<br>
> - X->bk = T;\<br>
> - X->parent = 0;\<br>
> - break;\<br>
> - }\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - break;\<br>
> - }\<br>
> - }\<br>
> - }\<br>
> - }\<br>
> -}<br>
> -<br>
> -/*<br>
> - Unlink steps:<br>
> -<br>
> - 1. If x is a chained node, unlink it from its same-sized fd/bk links<br>
> - and choose its bk node as its replacement.<br>
> - 2. If x was the last node of its size, but not a leaf node, it must<br>
> - be replaced with a leaf node (not merely one with an open left or<br>
> - right), to make sure that lefts and rights of descendants<br>
> - correspond properly to bit masks. We use the rightmost descendent<br>
> - of x. We could use any other leaf, but this is easy to locate and<br>
> - tends to counteract removal of leftmosts elsewhere, and so keeps<br>
> - paths shorter than minimally guaranteed. This doesn't loop much<br>
> - because on average a node in a tree is near the bottom.<br>
> - 3. If x is the base of a chain (i.e., has parent links) relink<br>
> - x's parent and children to x's replacement (or null if none).<br>
> -*/<br>
> -<br>
> -#define unlink_large_chunk(M, X) {\<br>
> - tchunkptr XP = X->parent;\<br>
> - tchunkptr R;\<br>
> - if (X->bk != X) {\<br>
> - tchunkptr F = X->fd;\<br>
> - R = X->bk;\<br>
> - if (RTCHECK(ok_address(M, F))) {\<br>
> - F->bk = R;\<br>
> - R->fd = F;\<br>
> - }\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - }\<br>
> - else {\<br>
> - tchunkptr* RP;\<br>
> - if (((R = *(RP = &(X->child[1]))) != 0) ||\<br>
> - ((R = *(RP = &(X->child[0]))) != 0)) {\<br>
> - tchunkptr* CP;\<br>
> - while ((*(CP = &(R->child[1])) != 0) ||\<br>
> - (*(CP = &(R->child[0])) != 0)) {\<br>
> - R = *(RP = CP);\<br>
> - }\<br>
> - if (RTCHECK(ok_address(M, RP)))\<br>
> - *RP = 0;\<br>
> - else {\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - }\<br>
> - }\<br>
> - if (XP != 0) {\<br>
> - tbinptr* H = treebin_at(M, X->index);\<br>
> - if (X == *H) {\<br>
> - if ((*H = R) == 0) \<br>
> - clear_treemap(M, X->index);\<br>
> - }\<br>
> - else if (RTCHECK(ok_address(M, XP))) {\<br>
> - if (XP->child[0] == X) \<br>
> - XP->child[0] = R;\<br>
> - else \<br>
> - XP->child[1] = R;\<br>
> - }\<br>
> - else\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - if (R != 0) {\<br>
> - if (RTCHECK(ok_address(M, R))) {\<br>
> - tchunkptr C0, C1;\<br>
> - R->parent = XP;\<br>
> - if ((C0 = X->child[0]) != 0) {\<br>
> - if (RTCHECK(ok_address(M, C0))) {\<br>
> - R->child[0] = C0;\<br>
> - C0->parent = R;\<br>
> - }\<br>
> - else\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - if ((C1 = X->child[1]) != 0) {\<br>
> - if (RTCHECK(ok_address(M, C1))) {\<br>
> - R->child[1] = C1;\<br>
> - C1->parent = R;\<br>
> - }\<br>
> - else\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - }\<br>
> - else\<br>
> - CORRUPTION_ERROR_ACTION(M);\<br>
> - }\<br>
> - }\<br>
> -}<br>
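<br>
The three-step unlink comment above is dense, so as a reading aid, here is a<br>
minimal self-contained toy (not driver code, the node type is illustrative)<br>
of the step-2 search for a rightmost leaf, which is what the RP/CP walk in<br>
unlink_large_chunk performs:<br>
<br>
    #include <stddef.h><br>
<br>
    struct tnode { struct tnode *child[2]; };<br>
<br>
    /* Descend from r, preferring the right child at each level, until a<br>
       node with both children null is found; that leaf replaces x. */<br>
    static struct tnode *rightmost_leaf(struct tnode *r)<br>
    {<br>
        for (;;) {<br>
            struct tnode *c = r->child[1] ? r->child[1] : r->child[0];<br>
            if (c == NULL)<br>
                return r;      /* both children null: a true leaf */<br>
            r = c;<br>
        }<br>
    }<br>
<br>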
> -<br>
> -/* Relays to large vs small bin operations */<br>
> -<br>
> -#define insert_chunk(M, P, S)\<br>
> - if (is_small(S)) insert_small_chunk(M, P, S)\<br>
> - else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }<br>
> -<br>
> -#define unlink_chunk(M, P, S)\<br>
> - if (is_small(S)) unlink_small_chunk(M, P, S)\<br>
> - else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }<br>
> -<br>
> -<br>
> -/* Relays to internal calls to malloc/free from realloc, memalign etc */<br>
> -<br>
> -#define internal_malloc(m, b) mspace_malloc(m, b)<br>
> -#define internal_free(m, mem) mspace_free(m,mem);<br>
> -<br>
> -<br>
> -/* -------------------------- mspace management --------------------------<br>
> */<br>
> -<br>
> -/* Initialize top chunk and its size */<br>
> -static void init_top(mstate m, mchunkptr p, size_t psize) {<br>
> - /* Ensure alignment */<br>
> - size_t offset = align_offset(chunk2mem(p));<br>
> - p = (mchunkptr)((char*)p + offset);<br>
> - psize -= offset;<br>
> -<br>
> - m->top = p;<br>
> - m->topsize = psize;<br>
> - p->head = psize | PINUSE_BIT;<br>
> - /* set size of fake trailing chunk holding overhead space only once */<br>
> - chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;<br>
> -}<br>
> -<br>
> -/* Initialize bins for a new mstate that is otherwise zeroed out */<br>
> -static void init_bins(mstate m) {<br>
> - /* Establish circular links for smallbins */<br>
> - bindex_t i;<br>
> - for (i = 0; i < NSMALLBINS; ++i) {<br>
> - sbinptr bin = smallbin_at(m,i);<br>
> - bin->fd = bin->bk = bin;<br>
> - }<br>
> -}<br>
> -<br>
> -#if PROCEED_ON_ERROR<br>
> -<br>
> -/* default corruption action */<br>
> -static void reset_on_error(mstate m) {<br>
> - int i;<br>
> - ++malloc_corruption_error_count;<br>
> - /* Reinitialize fields to forget about all memory */<br>
> - m->smallbins = m->treebins = 0;<br>
> - m->dvsize = m->topsize = 0;<br>
> - m->seg.base = 0;<br>
> - m->seg.size = 0;<br>
> - m->seg.next = 0;<br>
> - m->top = m->dv = 0;<br>
> - for (i = 0; i < NTREEBINS; ++i)<br>
> - *treebin_at(m, i) = 0;<br>
> - init_bins(m);<br>
> -}<br>
> -#endif /* PROCEED_ON_ERROR */<br>
> -<br>
> -/* Allocate chunk and prepend remainder with chunk in successor base. */<br>
> -static void* prepend_alloc(mstate m, char* newbase, char* oldbase,<br>
> - size_t nb) {<br>
> - mchunkptr p = align_as_chunk(newbase);<br>
> - mchunkptr oldfirst = align_as_chunk(oldbase);<br>
> - size_t psize = (char*)oldfirst - (char*)p;<br>
> - mchunkptr q = chunk_plus_offset(p, nb);<br>
> - size_t qsize = psize - nb;<br>
> - set_size_and_pinuse_of_inuse_chunk(m, p, nb);<br>
> -<br>
> - assert(m->user_data, (char*)oldfirst > (char*)q);<br>
> - assert(m->user_data, pinuse(oldfirst));<br>
> - assert(m->user_data, qsize >= MIN_CHUNK_SIZE);<br>
> -<br>
> - /* consolidate remainder with first chunk of old base */<br>
> - if (oldfirst == m->top) {<br>
> - size_t tsize = m->topsize += qsize;<br>
> - m->top = q;<br>
> - q->head = tsize | PINUSE_BIT;<br>
> - check_top_chunk(m, q);<br>
> - }<br>
> - else if (oldfirst == m->dv) {<br>
> - size_t dsize = m->dvsize += qsize;<br>
> - m->dv = q;<br>
> - set_size_and_pinuse_of_free_chunk(q, dsize);<br>
> - }<br>
> - else {<br>
> - if (!cinuse(oldfirst)) {<br>
> - size_t nsize = chunksize(oldfirst);<br>
> - unlink_chunk(m, oldfirst, nsize);<br>
> - oldfirst = chunk_plus_offset(oldfirst, nsize);<br>
> - qsize += nsize;<br>
> - }<br>
> - set_free_with_pinuse(q, qsize, oldfirst);<br>
> - insert_chunk(m, q, qsize);<br>
> - check_free_chunk(m, q);<br>
> - }<br>
> -<br>
> - check_malloced_chunk(m, chunk2mem(p), nb);<br>
> - return chunk2mem(p);<br>
> -}<br>
> -<br>
> -/* -------------------------- System allocation --------------------------<br>
> */<br>
> -<br>
> -/* Get memory from system using MORECORE or MMAP */<br>
> -static void* sys_alloc(mstate m, size_t nb) {<br>
> - MALLOC_FAILURE_ACTION;<br>
> - return 0;<br>
> -}<br>
> -<br>
> -/* ---------------------------- malloc support ---------------------------<br>
> */<br>
> -<br>
> -/* allocate a large request from the best fitting chunk in a treebin */<br>
> -static void* tmalloc_large(mstate m, size_t nb) {<br>
> - tchunkptr v = 0;<br>
> - size_t rsize = -nb; /* Unsigned negation */<br>
> - tchunkptr t;<br>
> - bindex_t idx;<br>
> - compute_tree_index(nb, idx);<br>
> -<br>
> - if ((t = *treebin_at(m, idx)) != 0) {<br>
> - /* Traverse tree for this bin looking for node with size == nb */<br>
> - size_t sizebits = nb << leftshift_for_tree_index(idx);<br>
> - tchunkptr rst = 0; /* The deepest untaken right subtree */<br>
> - for (;;) {<br>
> - tchunkptr rt;<br>
> - size_t trem = chunksize(t) - nb;<br>
> - if (trem < rsize) {<br>
> - v = t;<br>
> - if ((rsize = trem) == 0)<br>
> - break;<br>
> - }<br>
> - rt = t->child[1];<br>
> - t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];<br>
> - if (rt != 0 && rt != t)<br>
> - rst = rt;<br>
> - if (t == 0) {<br>
> - t = rst; /* set t to least subtree holding sizes > nb */<br>
> - break;<br>
> - }<br>
> - sizebits <<= 1;<br>
> - }<br>
> - }<br>
> -<br>
> - if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */<br>
> - binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;<br>
> - if (leftbits != 0) {<br>
> - bindex_t i;<br>
> - binmap_t leastbit = least_bit(leftbits);<br>
> - compute_bit2idx(leastbit, i);<br>
> - t = *treebin_at(m, i);<br>
> - }<br>
> - }<br>
> -<br>
> - while (t != 0) { /* find smallest of tree or subtree */<br>
> - size_t trem = chunksize(t) - nb;<br>
> - if (trem < rsize) {<br>
> - rsize = trem;<br>
> - v = t;<br>
> - }<br>
> - t = leftmost_child(t);<br>
> - }<br>
> -<br>
> - /* If dv is a better fit, return 0 so malloc will use it */<br>
> - if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {<br>
> - if (RTCHECK(ok_address(m, v))) { /* split */<br>
> - mchunkptr r = chunk_plus_offset(v, nb);<br>
> - assert(m->user_data, chunksize(v) == rsize + nb);<br>
> - if (RTCHECK(ok_next(v, r))) {<br>
> - unlink_large_chunk(m, v);<br>
> - if (rsize < MIN_CHUNK_SIZE)<br>
> - set_inuse_and_pinuse(m, v, (rsize + nb));<br>
> - else {<br>
> - set_size_and_pinuse_of_inuse_chunk(m, v, nb);<br>
> - set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> - insert_chunk(m, r, rsize);<br>
> - }<br>
> - return chunk2mem(v);<br>
> - }<br>
> - }<br>
> - CORRUPTION_ERROR_ACTION(m);<br>
> - }<br>
> - return 0;<br>
> -}<br>
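<br>
One subtlety worth calling out in tmalloc_large: the "size_t rsize = -nb;"<br>
at the top is deliberate unsigned wrap-around, the initial "best remainder"<br>
is a huge value that any real candidate beats. A tiny standalone check<br>
(plain C, nothing from the driver):<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
<br>
    int main(void)<br>
    {<br>
        size_t nb = 512;<br>
        size_t rsize = -nb;       /* wraps to a huge value: acts as +infinity */<br>
        assert(rsize + nb == 0);  /* unsigned arithmetic is modular */<br>
        assert(100 < rsize);      /* any real remainder beats the initial one */<br>
        return 0;<br>
    }<br>
<br>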
> -<br>
> -/* allocate a small request from the best fitting chunk in a treebin */<br>
> -static void* tmalloc_small(mstate m, size_t nb) {<br>
> - tchunkptr t, v;<br>
> - size_t rsize;<br>
> - bindex_t i;<br>
> - binmap_t leastbit = least_bit(m->treemap);<br>
> - compute_bit2idx(leastbit, i);<br>
> -<br>
> - v = t = *treebin_at(m, i);<br>
> - rsize = chunksize(t) - nb;<br>
> -<br>
> - while ((t = leftmost_child(t)) != 0) {<br>
> - size_t trem = chunksize(t) - nb;<br>
> - if (trem < rsize) {<br>
> - rsize = trem;<br>
> - v = t;<br>
> - }<br>
> - }<br>
> -<br>
> - if (RTCHECK(ok_address(m, v))) {<br>
> - mchunkptr r = chunk_plus_offset(v, nb);<br>
> - assert(m->user_data, chunksize(v) == rsize + nb);<br>
> - if (RTCHECK(ok_next(v, r))) {<br>
> - unlink_large_chunk(m, v);<br>
> - if (rsize < MIN_CHUNK_SIZE)<br>
> - set_inuse_and_pinuse(m, v, (rsize + nb));<br>
> - else {<br>
> - set_size_and_pinuse_of_inuse_chunk(m, v, nb);<br>
> - set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> - replace_dv(m, r, rsize);<br>
> - }<br>
> - return chunk2mem(v);<br>
> - }<br>
> - }<br>
> -<br>
> - CORRUPTION_ERROR_ACTION(m);<br>
> - return 0;<br>
> -}<br>
> -<br>
> -/* --------------------------- realloc support ---------------------------<br>
> */<br>
> -<br>
> -static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {<br>
> - if (bytes >= MAX_REQUEST) {<br>
> - MALLOC_FAILURE_ACTION;<br>
> - return 0;<br>
> - }<br>
> - if (!PREACTION(m)) {<br>
> - mchunkptr oldp = mem2chunk(oldmem);<br>
> - size_t oldsize = chunksize(oldp);<br>
> - mchunkptr next = chunk_plus_offset(oldp, oldsize);<br>
> - mchunkptr newp = 0;<br>
> - void* extra = 0;<br>
> -<br>
> - /* Try to either shrink or extend into top. Else malloc-copy-free */<br>
> -<br>
> - if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&<br>
> - ok_next(oldp, next) && ok_pinuse(next))) {<br>
> - size_t nb = request2size(bytes);<br>
> - if (oldsize >= nb) { /* already big enough */<br>
> - size_t rsize = oldsize - nb;<br>
> - newp = oldp;<br>
> - if (rsize >= MIN_CHUNK_SIZE) {<br>
> - mchunkptr remainder = chunk_plus_offset(newp, nb);<br>
> - set_inuse(m, newp, nb);<br>
> - set_inuse(m, remainder, rsize);<br>
> - extra = chunk2mem(remainder);<br>
> - }<br>
> - }<br>
> - else if (next == m->top && oldsize + m->topsize > nb) {<br>
> - /* Expand into top */<br>
> - size_t newsize = oldsize + m->topsize;<br>
> - size_t newtopsize = newsize - nb;<br>
> - mchunkptr newtop = chunk_plus_offset(oldp, nb);<br>
> - set_inuse(m, oldp, nb);<br>
> - newtop->head = newtopsize |PINUSE_BIT;<br>
> - m->top = newtop;<br>
> - m->topsize = newtopsize;<br>
> - newp = oldp;<br>
> - }<br>
> - }<br>
> - else {<br>
> - USAGE_ERROR_ACTION(m, oldmem);<br>
> - POSTACTION(m);<br>
> - return 0;<br>
> - }<br>
> -<br>
> - POSTACTION(m);<br>
> -<br>
> - if (newp != 0) {<br>
> - if (extra != 0) {<br>
> - internal_free(m, extra);<br>
> - }<br>
> - check_inuse_chunk(m, newp);<br>
> - return chunk2mem(newp);<br>
> - }<br>
> - else {<br>
> - void* newmem = internal_malloc(m, bytes);<br>
> - if (newmem != 0) {<br>
> - size_t oc = oldsize - overhead_for(oldp);<br>
> - MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);<br>
> - internal_free(m, oldmem);<br>
> - }<br>
> - return newmem;<br>
> - }<br>
> - }<br>
> - return 0;<br>
> -}<br>
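<br>
For readers new to dlmalloc: internal_realloc tries three paths in order,<br>
keep the chunk and free the tail, grow into top when the chunk borders it,<br>
or fall back to malloc-copy-free. A toy mirror of the first decision,<br>
assuming the usual 32-bit MIN_CHUNK_SIZE of 16 (sketch only, not the<br>
driver's code):<br>
<br>
    #include <stddef.h><br>
<br>
    enum realloc_path { KEEP_WHOLE, KEEP_AND_FREE_TAIL, NEED_MOVE_OR_GROW };<br>
<br>
    /* A tail is only split off when it can stand alone as a free chunk. */<br>
    static enum realloc_path choose_path(size_t oldsize, size_t nb)<br>
    {<br>
        if (oldsize >= nb)<br>
            return (oldsize - nb >= 16) ? KEEP_AND_FREE_TAIL : KEEP_WHOLE;<br>
        return NEED_MOVE_OR_GROW;  /* grow into top, else malloc-copy-free */<br>
    }<br>
<br>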
> -<br>
> -/* --------------------------- memalign support --------------------------<br>
> */<br>
> -<br>
> -static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {<br>
> - if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */<br>
> - return internal_malloc(m, bytes);<br>
> - if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size<br>
> */<br>
> - alignment = MIN_CHUNK_SIZE;<br>
> - if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */<br>
> - size_t a = MALLOC_ALIGNMENT << 1;<br>
> - while (a < alignment) a <<= 1;<br>
> - alignment = a;<br>
> - }<br>
> -<br>
> - if (bytes >= MAX_REQUEST - alignment) {<br>
> - if (m != 0) { /* Test isn't needed but avoids compiler warning */<br>
> - MALLOC_FAILURE_ACTION;<br>
> - }<br>
> - }<br>
> - else {<br>
> - size_t nb = request2size(bytes);<br>
> - size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;<br>
> - char* mem = (char*)internal_malloc(m, req);<br>
> - if (mem != 0) {<br>
> - void* leader = 0;<br>
> - void* trailer = 0;<br>
> - mchunkptr p = mem2chunk(mem);<br>
> -<br>
> - if (PREACTION(m)) return 0;<br>
> - if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */<br>
> - /*<br>
> - Find an aligned spot inside chunk. Since we need to give<br>
> - back leading space in a chunk of at least MIN_CHUNK_SIZE, if<br>
> - the first calculation places us at a spot with less than<br>
> - MIN_CHUNK_SIZE leader, we can move to the next aligned spot.<br>
> - We've allocated enough total room so that this is always<br>
> - possible.<br>
> - */<br>
> - char* br = (char*)mem2chunk((size_t)(((size_t)(mem +<br>
> - alignment -<br>
> - SIZE_T_ONE)) &<br>
> - -alignment));<br>
> - char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?<br>
> - br : br+alignment;<br>
> - mchunkptr newp = (mchunkptr)pos;<br>
> - size_t leadsize = pos - (char*)(p);<br>
> - size_t newsize = chunksize(p) - leadsize;<br>
> -<br>
> - /* Otherwise, give back leader, use the rest */<br>
> - set_inuse(m, newp, newsize);<br>
> - set_inuse(m, p, leadsize);<br>
> - leader = chunk2mem(p);<br>
> -<br>
> - p = newp;<br>
> - }<br>
> -<br>
> - assert(m->user_data, chunksize(p) >= nb);<br>
> - assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);<br>
> - check_inuse_chunk(m, p);<br>
> - POSTACTION(m);<br>
> - if (leader != 0) {<br>
> - internal_free(m, leader);<br>
> - }<br>
> - if (trailer != 0) {<br>
> - internal_free(m, trailer);<br>
> - }<br>
> - return chunk2mem(p);<br>
> - }<br>
> - }<br>
> - return 0;<br>
> -}<br>
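<br>
The alignment fix-up near the top of internal_memalign is easy to misread:<br>
a non-power-of-two request is rounded up to the next power of two, starting<br>
from twice MALLOC_ALIGNMENT. An equivalent standalone helper (8 is the<br>
MALLOC_ALIGNMENT configured in this file; only inputs above it reach this<br>
code path):<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
<br>
    static size_t pow2_alignment(size_t alignment)<br>
    {<br>
        size_t a = 8u << 1;           /* MALLOC_ALIGNMENT << 1 */<br>
        while (a < alignment)<br>
            a <<= 1;<br>
        return a;<br>
    }<br>
<br>
    int main(void)<br>
    {<br>
        assert(pow2_alignment(24) == 32);   /* rounded up */<br>
        assert(pow2_alignment(64) == 64);   /* powers of two pass through */<br>
        return 0;<br>
    }<br>
<br>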
> -<br>
> -/* ----------------------------- user mspaces ----------------------------<br>
> */<br>
> -<br>
> -static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {<br>
> - size_t msize = pad_request(sizeof(struct malloc_state));<br>
> - mchunkptr mn;<br>
> - mchunkptr msp = align_as_chunk(tbase);<br>
> - mstate m = (mstate)(chunk2mem(msp));<br>
> - MEMCLEAR(m, msize);<br>
> - INITIAL_LOCK(&m->mutex);<br>
> - msp->head = (msize|PINUSE_BIT|CINUSE_BIT);<br>
> - m->seg.base = m->least_addr = tbase;<br>
> - m->seg.size = m->footprint = m->max_footprint = tsize;<br>
> - m->magic = mparams.magic;<br>
> - m->mflags = mparams.default_mflags;<br>
> - m->user_data = user_data;<br>
> - init_bins(m);<br>
> - mn = next_chunk(mem2chunk(m));<br>
> - init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);<br>
> - check_top_chunk(m, m->top);<br>
> - return m;<br>
> -}<br>
> -<br>
> -mspace create_mspace_with_base(void* base, size_t capacity, int locked, void<br>
> *user_data) {<br>
> - mstate m = 0;<br>
> - size_t msize = pad_request(sizeof(struct malloc_state));<br>
> - init_mparams(); /* Ensure pagesize etc initialized */<br>
> -<br>
> - if (capacity > msize + TOP_FOOT_SIZE &&<br>
> - capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {<br>
> - m = init_user_mstate((char*)base, capacity, user_data);<br>
> - set_lock(m, locked);<br>
> - }<br>
> - return (mspace)m;<br>
> -}<br>
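<br>
Since this is the entry point the rest of the driver uses, a quick<br>
user-mode sketch of the intended call pattern may help reviewers; the<br>
static buffer stands in for whatever fixed region the driver hands in, and<br>
the names here are illustrative, not taken from QxlDod.cpp:<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
    #include "mspace.h"<br>
<br>
    static char region[64 * 1024];   /* stand-in for device memory */<br>
<br>
    int main(void)<br>
    {<br>
        mspace pool = create_mspace_with_base(region, sizeof(region),<br>
                                                0 /* no locks */, NULL);<br>
        void *p = mspace_malloc(pool, 256);<br>
        assert(p != NULL);<br>
        mspace_free(pool, p);<br>
        return 0;<br>
    }<br>
<br>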
> -<br>
> -/*<br>
> - mspace versions of routines are near-clones of the global<br>
> - versions. This is not so nice but better than the alternatives.<br>
> -*/<br>
> -<br>
> -<br>
> -void* mspace_malloc(mspace msp, size_t bytes) {<br>
> - mstate ms = (mstate)msp;<br>
> - if (!ok_magic(ms)) {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - return 0;<br>
> - }<br>
> - if (!PREACTION(ms)) {<br>
> - void* mem;<br>
> - size_t nb;<br>
> - if (bytes <= MAX_SMALL_REQUEST) {<br>
> - bindex_t idx;<br>
> - binmap_t smallbits;<br>
> - nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);<br>
> - idx = small_index(nb);<br>
> - smallbits = ms->smallmap >> idx;<br>
> -<br>
> - if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */<br>
> - mchunkptr b, p;<br>
> - idx += ~smallbits & 1; /* Uses next bin if idx empty */<br>
> - b = smallbin_at(ms, idx);<br>
> - p = b->fd;<br>
> - assert(ms->user_data, chunksize(p) == small_index2size(idx));<br>
> - unlink_first_small_chunk(ms, b, p, idx);<br>
> - set_inuse_and_pinuse(ms, p, small_index2size(idx));<br>
> - mem = chunk2mem(p);<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> -<br>
> - else if (nb > ms->dvsize) {<br>
> - if (smallbits != 0) { /* Use chunk in next nonempty smallbin */<br>
> - mchunkptr b, p, r;<br>
> - size_t rsize;<br>
> - bindex_t i;<br>
> - binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));<br>
> - binmap_t leastbit = least_bit(leftbits);<br>
> - compute_bit2idx(leastbit, i);<br>
> - b = smallbin_at(ms, i);<br>
> - p = b->fd;<br>
> - assert(ms->user_data, chunksize(p) == small_index2size(i));<br>
> - unlink_first_small_chunk(ms, b, p, i);<br>
> - rsize = small_index2size(i) - nb;<br>
> - /* Fit here cannot be remainderless if 4byte sizes */<br>
> - if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)<br>
> - set_inuse_and_pinuse(ms, p, small_index2size(i));<br>
> - else {<br>
> - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> - r = chunk_plus_offset(p, nb);<br>
> - set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> - replace_dv(ms, r, rsize);<br>
> - }<br>
> - mem = chunk2mem(p);<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> -<br>
> - else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> - }<br>
> - }<br>
> - else if (bytes >= MAX_REQUEST)<br>
> - nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc)<br>
> */<br>
> - else {<br>
> - nb = pad_request(bytes);<br>
> - if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> - }<br>
> -<br>
> - if (nb <= ms->dvsize) {<br>
> - size_t rsize = ms->dvsize - nb;<br>
> - mchunkptr p = ms->dv;<br>
> - if (rsize >= MIN_CHUNK_SIZE) { /* split dv */<br>
> - mchunkptr r = ms->dv = chunk_plus_offset(p, nb);<br>
> - ms->dvsize = rsize;<br>
> - set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> - }<br>
> - else { /* exhaust dv */<br>
> - size_t dvs = ms->dvsize;<br>
> - ms->dvsize = 0;<br>
> - ms->dv = 0;<br>
> - set_inuse_and_pinuse(ms, p, dvs);<br>
> - }<br>
> - mem = chunk2mem(p);<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> -<br>
> - else if (nb < ms->topsize) { /* Split top */<br>
> - size_t rsize = ms->topsize -= nb;<br>
> - mchunkptr p = ms->top;<br>
> - mchunkptr r = ms->top = chunk_plus_offset(p, nb);<br>
> - r->head = rsize | PINUSE_BIT;<br>
> - set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> - mem = chunk2mem(p);<br>
> - check_top_chunk(ms, ms->top);<br>
> - check_malloced_chunk(ms, mem, nb);<br>
> - goto postaction;<br>
> - }<br>
> -<br>
> - mem = sys_alloc(ms, nb);<br>
> -<br>
> - postaction:<br>
> - POSTACTION(ms);<br>
> - return mem;<br>
> - }<br>
> -<br>
> - return 0;<br>
> -}<br>
> -<br>
> -void mspace_free(mspace msp, void* mem) {<br>
> - if (mem != 0) {<br>
> - mchunkptr p = mem2chunk(mem);<br>
> -#if FOOTERS<br>
> - mstate fm = get_mstate_for(p);<br>
> -#else /* FOOTERS */<br>
> - mstate fm = (mstate)msp;<br>
> -#endif /* FOOTERS */<br>
> - if (!ok_magic(fm)) {<br>
> - USAGE_ERROR_ACTION(fm, p);<br>
> - return;<br>
> - }<br>
> - if (!PREACTION(fm)) {<br>
> - check_inuse_chunk(fm, p);<br>
> - if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {<br>
> - size_t psize = chunksize(p);<br>
> - mchunkptr next = chunk_plus_offset(p, psize);<br>
> - if (!pinuse(p)) {<br>
> - size_t prevsize = p->prev_foot;<br>
> -<br>
> - mchunkptr prev = chunk_minus_offset(p, prevsize);<br>
> - psize += prevsize;<br>
> - p = prev;<br>
> - if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */<br>
> - if (p != fm->dv) {<br>
> - unlink_chunk(fm, p, prevsize);<br>
> - }<br>
> - else if ((next->head & INUSE_BITS) == INUSE_BITS) {<br>
> - fm->dvsize = psize;<br>
> - set_free_with_pinuse(p, psize, next);<br>
> - goto postaction;<br>
> - }<br>
> - }<br>
> - else<br>
> - goto erroraction;<br>
> - }<br>
> -<br>
> - if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {<br>
> - if (!cinuse(next)) { /* consolidate forward */<br>
> - if (next == fm->top) {<br>
> - size_t tsize = fm->topsize += psize;<br>
> - fm->top = p;<br>
> - p->head = tsize | PINUSE_BIT;<br>
> - if (p == fm->dv) {<br>
> - fm->dv = 0;<br>
> - fm->dvsize = 0;<br>
> - }<br>
> - goto postaction;<br>
> - }<br>
> - else if (next == fm->dv) {<br>
> - size_t dsize = fm->dvsize += psize;<br>
> - fm->dv = p;<br>
> - set_size_and_pinuse_of_free_chunk(p, dsize);<br>
> - goto postaction;<br>
> - }<br>
> - else {<br>
> - size_t nsize = chunksize(next);<br>
> - psize += nsize;<br>
> - unlink_chunk(fm, next, nsize);<br>
> - set_size_and_pinuse_of_free_chunk(p, psize);<br>
> - if (p == fm->dv) {<br>
> - fm->dvsize = psize;<br>
> - goto postaction;<br>
> - }<br>
> - }<br>
> - }<br>
> - else<br>
> - set_free_with_pinuse(p, psize, next);<br>
> - insert_chunk(fm, p, psize);<br>
> - check_free_chunk(fm, p);<br>
> - goto postaction;<br>
> - }<br>
> - }<br>
> - erroraction:<br>
> - USAGE_ERROR_ACTION(fm, p);<br>
> - postaction:<br>
> - POSTACTION(fm);<br>
> - }<br>
> - }<br>
> -}<br>
> -<br>
> -void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {<br>
> - void* mem;<br>
> - size_t req = 0;<br>
> - mstate ms = (mstate)msp;<br>
> - if (!ok_magic(ms)) {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - return 0;<br>
> - }<br>
> - if (n_elements != 0) {<br>
> - req = n_elements * elem_size;<br>
> - if (((n_elements | elem_size) & ~(size_t)0xffff) &&<br>
> - (req / n_elements != elem_size))<br>
> - req = MAX_SIZE_T; /* force downstream failure on overflow */<br>
> - }<br>
> - mem = internal_malloc(ms, req);<br>
> - if (mem != 0 && calloc_must_clear(mem2chunk(mem)))<br>
> - MEMCLEAR(mem, req);<br>
> - return mem;<br>
> -}<br>
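<br>
The overflow test in mspace_calloc is worth a second look: the<br>
(n_elements | elem_size) & ~0xffff mask skips the costly division whenever<br>
both factors fit in 16 bits, since their product then fits in a 32-bit<br>
size_t. A standalone equivalent, with the n != 0 guard made explicit<br>
(sketch, 32-bit size_t assumed):<br>
<br>
    #include <stddef.h><br>
<br>
    static size_t checked_mul(size_t n, size_t s)<br>
    {<br>
        size_t req = n * s;<br>
        if (n != 0 && ((n | s) & ~(size_t)0xffff) && (req / n != s))<br>
            req = ~(size_t)0;   /* force downstream allocation failure */<br>
        return req;<br>
    }<br>
<br>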
> -<br>
> -void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {<br>
> - if (oldmem == 0)<br>
> - return mspace_malloc(msp, bytes);<br>
> -#ifdef REALLOC_ZERO_BYTES_FREES<br>
> - if (bytes == 0) {<br>
> - mspace_free(msp, oldmem);<br>
> - return 0;<br>
> - }<br>
> -#endif /* REALLOC_ZERO_BYTES_FREES */<br>
> - else {<br>
> -#if FOOTERS<br>
> - mchunkptr p = mem2chunk(oldmem);<br>
> - mstate ms = get_mstate_for(p);<br>
> -#else /* FOOTERS */<br>
> - mstate ms = (mstate)msp;<br>
> -#endif /* FOOTERS */<br>
> - if (!ok_magic(ms)) {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - return 0;<br>
> - }<br>
> - return internal_realloc(ms, oldmem, bytes);<br>
> - }<br>
> -}<br>
> -<br>
> -void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {<br>
> - mstate ms = (mstate)msp;<br>
> - if (!ok_magic(ms)) {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - return 0;<br>
> - }<br>
> - return internal_memalign(ms, alignment, bytes);<br>
> -}<br>
> -<br>
> -void mspace_malloc_stats(mspace msp) {<br>
> - mstate ms = (mstate)msp;<br>
> - if (ok_magic(ms)) {<br>
> - internal_malloc_stats(ms);<br>
> - }<br>
> - else {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - }<br>
> -}<br>
> -<br>
> -size_t mspace_footprint(mspace msp) {<br>
> - size_t result;<br>
> - mstate ms = (mstate)msp;<br>
> - if (ok_magic(ms)) {<br>
> - result = ms->footprint;<br>
> - } else {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - }<br>
> - return result;<br>
> -}<br>
> -<br>
> -<br>
> -size_t mspace_max_footprint(mspace msp) {<br>
> - size_t result;<br>
> - mstate ms = (mstate)msp;<br>
> - if (ok_magic(ms)) {<br>
> - result = ms->max_footprint;<br>
> - } else {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - }<br>
> - return result;<br>
> -}<br>
> -<br>
> -<br>
> -#if !NO_MALLINFO<br>
> -struct mallinfo mspace_mallinfo(mspace msp) {<br>
> - mstate ms = (mstate)msp;<br>
> - if (!ok_magic(ms)) {<br>
> - USAGE_ERROR_ACTION(ms,ms);<br>
> - }<br>
> - return internal_mallinfo(ms);<br>
> -}<br>
> -#endif /* NO_MALLINFO */<br>
> -<br>
> -int mspace_mallopt(int param_number, int value) {<br>
> - return change_mparam(param_number, value);<br>
> -}<br>
> -<br>
> diff --git a/qxldod/mspace.cpp b/qxldod/mspace.cpp<br>
> new file mode 100644<br>
> index 0000000..28c9f96<br>
> --- /dev/null<br>
> +++ b/qxldod/mspace.cpp<br>
> @@ -0,0 +1,2439 @@<br>
> +// based on dlmalloc from Doug Lea<br>
> +<br>
> +<br>
> +// quote from the Doug Lea original file<br>
> + /*<br>
> + This is a version (aka dlmalloc) of malloc/free/realloc written by<br>
> + Doug Lea and released to the public domain, as explained at<br>
> + <a href="http://creativecommons.org/licenses/publicdomain" rel="noreferrer" target="_blank">http://creativecommons.org/licenses/publicdomain</a>. Send questions,<br>
> + comments, complaints, performance data, etc to <a href="mailto:dl@cs.oswego.edu">dl@cs.oswego.edu</a><br>
> +<br>
> + * Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)<br>
> +<br>
> + Note: There may be an updated version of this malloc obtainable at<br>
> + <a href="ftp://gee.cs.oswego.edu/pub/misc/malloc.c" rel="noreferrer" target="_blank">ftp://gee.cs.oswego.edu/pub/misc/malloc.c</a><br>
> + Check before installing!<br>
> + */<br>
> +<br>
> +<br>
> +#include <ntddk.h><br>
> +<br>
> +#include "mspace.h"<br>
> +<br>
> +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */<br>
> +<br>
> +#define MALLOC_ALIGNMENT ((size_t)8U)<br>
> +#define USE_LOCKS 0<br>
> +#define malloc_getpagesize ((size_t)4096U)<br>
> +#define DEFAULT_GRANULARITY malloc_getpagesize<br>
> +#define MAX_SIZE_T (~(size_t)0)<br>
> +#define MALLOC_FAILURE_ACTION<br>
> +#define MALLINFO_FIELD_TYPE size_t<br>
> +#define FOOTERS 0<br>
> +#define INSECURE 0<br>
> +#define PROCEED_ON_ERROR 0<br>
> +#define DEBUG 0<br>
> +#define ABORT_ON_ASSERT_FAILURE 1<br>
> +#define ABORT(user_data) abort_func(user_data)<br>
> +#define USE_BUILTIN_FFS 0<br>
> +#define USE_DEV_RANDOM 0<br>
> +#define PRINT(params) print_func params<br>
> +<br>
> +<br>
> +#define MEMCPY(dest, src, n) RtlCopyMemory(dest, src, n)<br>
> +#define MEMCLEAR(dest, n) RtlZeroMemory(dest, n)<br>
> +<br>
> +<br>
> +#define M_GRANULARITY (-1)<br>
> +<br>
> +void default_abort_func(void *user_data)<br>
> +{<br>
> + for (;;);<br>
> +}<br>
> +<br>
> +void default_print_func(void *user_data, char *format, ...)<br>
> +{<br>
> +}<br>
> +<br>
> +static mspace_abort_t abort_func = default_abort_func;<br>
> +static mspace_print_t print_func = default_print_func;<br>
> +<br>
> +void mspace_set_abort_func(mspace_abort_t f)<br>
> +{<br>
> + abort_func = f;<br>
> +}<br>
> +<br>
> +void mspace_set_print_func(mspace_print_t f)<br>
> +{<br>
> + print_func = f;<br>
> +}<br>
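<br>
Hooking these two is how a caller gets diagnostics out of the allocator. An<br>
illustrative user-mode pair follows; a real driver would route to<br>
vDbgPrintEx and a bugcheck/assert path instead of stdio, and the signatures<br>
here simply mirror the defaults above:<br>
<br>
    #include <stdarg.h><br>
    #include <stdio.h><br>
    #include "mspace.h"<br>
<br>
    static void my_abort(void *user_data)<br>
    {<br>
        (void)user_data;<br>
        for (;;);                      /* park, like the default */<br>
    }<br>
<br>
    static void my_print(void *user_data, char *format, ...)<br>
    {<br>
        va_list ap;<br>
        (void)user_data;<br>
        va_start(ap, format);<br>
        vprintf(format, ap);<br>
        va_end(ap);<br>
    }<br>
<br>
    void install_mspace_hooks(void)<br>
    {<br>
        mspace_set_abort_func(my_abort);<br>
        mspace_set_print_func(my_print);<br>
    }<br>
<br>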
> +<br>
> +/* ------------------------ Mallinfo declarations ------------------------<br>
> */<br>
> +<br>
> +#if !NO_MALLINFO<br>
> +/*<br>
> + This version of malloc supports the standard SVID/XPG mallinfo<br>
> + routine that returns a struct containing usage properties and<br>
> + statistics. It should work on any system that has a<br>
> + /usr/include/malloc.h defining struct mallinfo. The main<br>
> + declaration needed is the mallinfo struct that is returned (by-copy)<br>
> + by mallinfo(). The mallinfo struct contains a bunch of fields that<br>
> + are not even meaningful in this version of malloc. These fields<br>
> + are instead filled by mallinfo() with other numbers that might be of<br>
> + interest.<br>
> +<br>
> + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a<br>
> + /usr/include/malloc.h file that includes a declaration of struct<br>
> + mallinfo. If so, it is included; else a compliant version is<br>
> + declared below. These must be precisely the same for mallinfo() to<br>
> + work. The original SVID version of this struct, defined on most<br>
> + systems with mallinfo, declares all fields as ints. But some others<br>
> + define as unsigned long. If your system defines the fields using a<br>
> + type of different width than listed here, you MUST #include your<br>
> + system version and #define HAVE_USR_INCLUDE_MALLOC_H.<br>
> +*/<br>
> +<br>
> +/* #define HAVE_USR_INCLUDE_MALLOC_H */<br>
> +<br>
> +<br>
> +struct mallinfo {<br>
> + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system<br>
> */<br>
> + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */<br>
> + MALLINFO_FIELD_TYPE smblks; /* always 0 */<br>
> + MALLINFO_FIELD_TYPE hblks; /* always 0 */<br>
> + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */<br>
> + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */<br>
> + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */<br>
> + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */<br>
> + MALLINFO_FIELD_TYPE fordblks; /* total free space */<br>
> + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */<br>
> +};<br>
> +<br>
> +#endif /* NO_MALLINFO */<br>
> +<br>
> +<br>
> +<br>
> +#ifdef DEBUG<br>
> +#if ABORT_ON_ASSERT_FAILURE<br>
> +#define assert(user_data, x) if(!(x)) ABORT(user_data)<br>
> +#else /* ABORT_ON_ASSERT_FAILURE */<br>
> +#include <assert.h><br>
> +#endif /* ABORT_ON_ASSERT_FAILURE */<br>
> +#else /* DEBUG */<br>
> +#define assert(user_data, x)<br>
> +#endif /* DEBUG */<br>
> +<br>
> +/* ------------------- size_t and alignment properties --------------------<br>
> */<br>
> +<br>
> +/* The byte and bit size of a size_t */<br>
> +#define SIZE_T_SIZE (sizeof(size_t))<br>
> +#define SIZE_T_BITSIZE (sizeof(size_t) << 3)<br>
> +<br>
> +/* Some constants coerced to size_t */<br>
> +/* Annoying but necessary to avoid errors on some platforms */<br>
> +#define SIZE_T_ZERO ((size_t)0)<br>
> +#define SIZE_T_ONE ((size_t)1)<br>
> +#define SIZE_T_TWO ((size_t)2)<br>
> +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)<br>
> +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)<br>
> +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)<br>
> +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)<br>
> +<br>
> +/* The bit mask value corresponding to MALLOC_ALIGNMENT */<br>
> +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)<br>
> +<br>
> +/* True if address a has acceptable alignment */<br>
> +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)<br>
> +<br>
> +/* the number of bytes to offset an address to align it */<br>
> +#define align_offset(A)\<br>
> + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\<br>
> + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) &<br>
> CHUNK_ALIGN_MASK))<br>
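<br>
A worked example of align_offset with the 8-byte MALLOC_ALIGNMENT used in<br>
this file, as a self-contained check:<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
<br>
    static size_t off(size_t a)   /* same arithmetic as align_offset */<br>
    {<br>
        return ((a & 7) == 0) ? 0 : ((8 - (a & 7)) & 7);<br>
    }<br>
<br>
    int main(void)<br>
    {<br>
        assert(off(0x1000) == 0);   /* already aligned */<br>
        assert(off(0x1001) == 7);<br>
        assert(off(0x1004) == 4);<br>
        return 0;<br>
    }<br>
<br>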
> +<br>
> +/* --------------------------- Lock preliminaries ------------------------<br>
> */<br>
> +<br>
> +#if USE_LOCKS<br>
> +<br>
> +/*<br>
> + When locks are defined, there are up to two global locks:<br>
> +<br>
> + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to<br>
> + MORECORE. In many cases sys_alloc requires two calls, that should<br>
> + not be interleaved with calls by other threads. This does not<br>
> + protect against direct calls to MORECORE by other threads not<br>
> + using this lock, so there is still code to cope the best we can on<br>
> + interference.<br>
> +<br>
> + * magic_init_mutex ensures that mparams.magic and other<br>
> + unique mparams values are initialized only once.<br>
> +*/<br>
> +<br>
> +<br>
> +#define USE_LOCK_BIT (2U)<br>
> +#else /* USE_LOCKS */<br>
> +#define USE_LOCK_BIT (0U)<br>
> +#define INITIAL_LOCK(l)<br>
> +#endif /* USE_LOCKS */<br>
> +<br>
> +#if USE_LOCKS<br>
> +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);<br>
> +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);<br>
> +#else /* USE_LOCKS */<br>
> +#define ACQUIRE_MAGIC_INIT_LOCK()<br>
> +#define RELEASE_MAGIC_INIT_LOCK()<br>
> +#endif /* USE_LOCKS */<br>
> +<br>
> +<br>
> +<br>
> +/* ----------------------- Chunk representations ------------------------<br>
> */<br>
> +<br>
> +/*<br>
> + (The following includes lightly edited explanations by Colin Plumb.)<br>
> +<br>
> + The malloc_chunk declaration below is misleading (but accurate and<br>
> + necessary). It declares a "view" into memory allowing access to<br>
> + necessary fields at known offsets from a given base.<br>
> +<br>
> + Chunks of memory are maintained using a `boundary tag' method as<br>
> + originally described by Knuth. (See the paper by Paul Wilson<br>
> + <a href="ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps" rel="noreferrer" target="_blank">ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps</a> for a survey of such<br>
> + techniques.) Sizes of free chunks are stored both in the front of<br>
> + each chunk and at the end. This makes consolidating fragmented<br>
> + chunks into bigger chunks fast. The head fields also hold bits<br>
> + representing whether chunks are free or in use.<br>
> +<br>
> + Here are some pictures to make it clearer. They are "exploded" to<br>
> + show that the state of a chunk can be thought of as extending from<br>
> + the high 31 bits of the head field of its header through the<br>
> + prev_foot and PINUSE_BIT bit of the following chunk header.<br>
> +<br>
> + A chunk that's in use looks like:<br>
> +<br>
> + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Size of previous chunk (if P = 0) |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|<br>
> + | Size of this chunk 1| +-+<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | |<br>
> + +- -+<br>
> + | |<br>
> + +- -+<br>
> + | :<br>
> + +- size - sizeof(size_t) available payload bytes -+<br>
> + : |<br>
> + chunk-> +- -+<br>
> + | |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|<br>
> + | Size of next chunk (may or may not be in use) | +-+<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> +<br>
> + And if it's free, it looks like this:<br>
> +<br>
> + chunk-> +- -+<br>
> + | User payload (must be in use, or we would have merged!) |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|<br>
> + | Size of this chunk 0| +-+<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Next pointer |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Prev pointer |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | :<br>
> + +- size - sizeof(struct chunk) unused bytes -+<br>
> + : |<br>
> + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Size of this chunk |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|<br>
> + | Size of next chunk (must be in use, or we would have merged)| +-+<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | :<br>
> + +- User payload -+<br>
> + : |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + |0|<br>
> + +-+<br>
> + Note that since we always merge adjacent free chunks, the chunks<br>
> + adjacent to a free chunk must be in use.<br>
> +<br>
> + Given a pointer to a chunk (which can be derived trivially from the<br>
> + payload pointer) we can, in O(1) time, find out whether the adjacent<br>
> + chunks are free, and if so, unlink them from the lists that they<br>
> + are on and merge them with the current chunk.<br>
> +<br>
> + Chunks always begin on even word boundaries, so the mem portion<br>
> + (which is returned to the user) is also on an even word boundary, and<br>
> + thus at least double-word aligned.<br>
> +<br>
> + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the<br>
> + chunk size (which is always a multiple of two words), is an in-use<br>
> + bit for the *previous* chunk. If that bit is *clear*, then the<br>
> + word before the current chunk size contains the previous chunk<br>
> + size, and can be used to find the front of the previous chunk.<br>
> + The very first chunk allocated always has this bit set, preventing<br>
> + access to non-existent (or non-owned) memory. If pinuse is set for<br>
> + any given chunk, then you CANNOT determine the size of the<br>
> + previous chunk, and might even get a memory addressing fault when<br>
> + trying to do so.<br>
> +<br>
> + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of<br>
> + the chunk size redundantly records whether the current chunk is<br>
> + inuse. This redundancy enables usage checks within free and realloc,<br>
> + and reduces indirection when freeing and consolidating chunks.<br>
> +<br>
> + Each freshly allocated chunk must have both cinuse and pinuse set.<br>
> + That is, each allocated chunk borders either a previously allocated<br>
> + and still in-use chunk, or the base of its memory arena. This is<br>
> + ensured by making all allocations from the `lowest' part of any<br>
> + found chunk. Further, no free chunk physically borders another one,<br>
> + so each free chunk is known to be preceded and followed by either<br>
> + inuse chunks or the ends of memory.<br>
> +<br>
> + Note that the `foot' of the current chunk is actually represented<br>
> + as the prev_foot of the NEXT chunk. This makes it easier to<br>
> + deal with alignments etc but can be very confusing when trying<br>
> + to extend or adapt this code.<br>
> +<br>
> + The exceptions to all this are<br>
> +<br>
> + 1. The special chunk `top' is the top-most available chunk (i.e.,<br>
> + the one bordering the end of available memory). It is treated<br>
> + specially. Top is never included in any bin, is used only if<br>
> + no other chunk is available, and is released back to the<br>
> + system if it is very large (see M_TRIM_THRESHOLD). In effect,<br>
> + the top chunk is treated as larger (and thus less well<br>
> + fitting) than any other available chunk. The top chunk<br>
> + doesn't update its trailing size field since there is no next<br>
> + contiguous chunk that would have to index off it. However,<br>
> + space is still allocated for it (TOP_FOOT_SIZE) to enable<br>
> + separation or merging when space is extended.<br>
> +<br>
> + 2. Chunks allocated via mmap, which have the lowest-order bit<br>
> + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set<br>
> + PINUSE_BIT in their head fields. Because they are allocated<br>
> + one-by-one, each must carry its own prev_foot field, which is<br>
> + also used to hold the offset this chunk has within its mmapped<br>
> + region, which is needed to preserve alignment. Each mmapped<br>
> + chunk is trailed by the first two fields of a fake next-chunk<br>
> + for sake of usage checks.<br>
> +<br>
> +*/<br>
> +<br>
> +struct malloc_chunk {<br>
> + size_t prev_foot; /* Size of previous chunk (if free). */<br>
> + size_t head; /* Size and inuse bits. */<br>
> + struct malloc_chunk* fd; /* double links -- used only if free. */<br>
> + struct malloc_chunk* bk;<br>
> +};<br>
> +<br>
> +typedef struct malloc_chunk mchunk;<br>
> +typedef struct malloc_chunk* mchunkptr;<br>
> +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */<br>
> +typedef unsigned int bindex_t; /* Described below */<br>
> +typedef unsigned int binmap_t; /* Described below */<br>
> +typedef unsigned int flag_t; /* The type of various bit flag sets<br>
> */<br>
> +<br>
> +<br>
> +/* ------------------- Chunks sizes and alignments -----------------------<br>
> */<br>
> +<br>
> +#define MCHUNK_SIZE (sizeof(mchunk))<br>
> +<br>
> +#if FOOTERS<br>
> +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)<br>
> +#else /* FOOTERS */<br>
> +#define CHUNK_OVERHEAD (SIZE_T_SIZE)<br>
> +#endif /* FOOTERS */<br>
> +<br>
> +/* The smallest size we can malloc is an aligned minimal chunk */<br>
> +#define MIN_CHUNK_SIZE\<br>
> + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)<br>
> +<br>
> +/* conversion from malloc headers to user pointers, and back */<br>
> +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))<br>
> +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))<br>
> +/* chunk associated with aligned address A */<br>
> +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))<br>
> +<br>
> +/* Bounds on request (not chunk) sizes. */<br>
> +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)<br>
> +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)<br>
> +<br>
> +/* pad request bytes into a usable size */<br>
> +#define pad_request(req) \<br>
> + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)<br>
> +<br>
> +/* pad request, checking for minimum (but not maximum) */<br>
> +#define request2size(req) \<br>
> + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))<br>
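<br>
Concrete numbers make these two macros clearer. With the configuration in<br>
this file (32-bit size_t, FOOTERS == 0) CHUNK_OVERHEAD is 4 and<br>
MIN_CHUNK_SIZE is 16, so:<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
<br>
    static size_t req2size(size_t req)       /* mirror of request2size */<br>
    {<br>
        return (req < 16 - 4 - 1) ? 16 : ((req + 4 + 7) & ~(size_t)7);<br>
    }<br>
<br>
    int main(void)<br>
    {<br>
        assert(req2size(1)  == 16);   /* minimum chunk */<br>
        assert(req2size(12) == 16);   /* 12 + 4 overhead rounds to 16 */<br>
        assert(req2size(13) == 24);   /* next 8-byte step */<br>
        return 0;<br>
    }<br>
<br>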
> +<br>
> +/* ------------------ Operations on head and foot fields -----------------<br>
> */<br>
> +<br>
> +/*<br>
> + The head field of a chunk is or'ed with PINUSE_BIT when previous<br>
> + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in<br>
> + use. If the chunk was obtained with mmap, the prev_foot field has<br>
> + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the<br>
> + mmapped region to the base of the chunk.<br>
> +*/<br>
> +<br>
> +#define PINUSE_BIT (SIZE_T_ONE)<br>
> +#define CINUSE_BIT (SIZE_T_TWO)<br>
> +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)<br>
> +<br>
> +/* Head value for fenceposts */<br>
> +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)<br>
> +<br>
> +/* extraction of fields from head words */<br>
> +#define cinuse(p) ((p)->head & CINUSE_BIT)<br>
> +#define pinuse(p) ((p)->head & PINUSE_BIT)<br>
> +#define chunksize(p) ((p)->head & ~(INUSE_BITS))<br>
> +<br>
> +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)<br>
> +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)<br>
> +<br>
> +/* Treat space at ptr +/- offset as a chunk */<br>
> +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))<br>
> +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))<br>
> +<br>
> +/* Ptr to next or previous physical malloc_chunk. */<br>
> +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head &<br>
> ~INUSE_BITS)))<br>
> +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))<br>
> +<br>
> +/* extract next chunk's pinuse bit */<br>
> +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)<br>
> +<br>
> +/* Get/set size at footer */<br>
> +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)<br>
> +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))<br>
> +<br>
> +/* Set size, pinuse bit, and foot */<br>
> +#define set_size_and_pinuse_of_free_chunk(p, s)\<br>
> + ((p)->head = (s|PINUSE_BIT), set_foot(p, s))<br>
> +<br>
> +/* Set size, pinuse bit, foot, and clear next pinuse */<br>
> +#define set_free_with_pinuse(p, s, n)\<br>
> + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))<br>
> +<br>
> +/* Get the internal overhead associated with chunk p */<br>
> +#define overhead_for(p) CHUNK_OVERHEAD<br>
> +<br>
> +/* Return true if malloced space is not necessarily cleared */<br>
> +#define calloc_must_clear(p) (1)<br>
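<br>
The head-word encoding behind all of these macros, in one self-contained<br>
check: chunk sizes are multiples of 8, so the two low bits are free for<br>
PINUSE (bit 0) and CINUSE (bit 1):<br>
<br>
    #include <assert.h><br>
    #include <stddef.h><br>
<br>
    int main(void)<br>
    {<br>
        size_t head = 48 | 1 | 2;            /* 48-byte chunk, P and C set */<br>
        assert((head & ~(size_t)3) == 48);   /* chunksize() */<br>
        assert((head & 1) != 0);             /* pinuse() */<br>
        assert((head & 2) != 0);             /* cinuse() */<br>
        return 0;<br>
    }<br>
<br>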
> +<br>
> +<br>
> +/* ---------------------- Overlaid data structures -----------------------<br>
> */<br>
> +<br>
> +/*<br>
> + When chunks are not in use, they are treated as nodes of either<br>
> + lists or trees.<br>
> +<br>
> + "Small" chunks are stored in circular doubly-linked lists, and look<br>
> + like this:<br>
> +<br>
> + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Size of previous chunk |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + `head:' | Size of chunk, in bytes |P|<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Forward pointer to next chunk in list |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Back pointer to previous chunk in list |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Unused space (may be 0 bytes long) .<br>
> + . .<br>
> + . |<br>
> + nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + `foot:' | Size of chunk, in bytes |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> +<br>
> + Larger chunks are kept in a form of bitwise digital trees (aka<br>
> + tries) keyed on chunksizes. Because malloc_tree_chunks are only for<br>
> + free chunks greater than 256 bytes, their size doesn't impose any<br>
> + constraints on user chunk sizes. Each node looks like:<br>
> +<br>
> + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Size of previous chunk |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + `head:' | Size of chunk, in bytes |P|<br>
> + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Forward pointer to next chunk of same size |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Back pointer to previous chunk of same size |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Pointer to left child (child[0]) |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Pointer to right child (child[1]) |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Pointer to parent |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | bin index of this chunk |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + | Unused space .<br>
> + . |<br>
> + nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> + `foot:' | Size of chunk, in bytes |<br>
> + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+<br>
> +<br>
> + Each tree holding treenodes is a tree of unique chunk sizes. Chunks<br>
> + of the same size are arranged in a circularly-linked list, with only<br>
> + the oldest chunk (the next to be used, in our FIFO ordering)<br>
> + actually in the tree. (Tree members are distinguished by a non-null<br>
> + parent pointer.) If a chunk with the same size as an existing node<br>
> + is inserted, it is linked off the existing node using pointers that<br>
> + work in the same way as fd/bk pointers of small chunks.<br>
> +<br>
> + Each tree contains a power of 2 sized range of chunk sizes (the<br>
> + smallest is 0x100 <= x < 0x180), which is divided in half at each<br>
> + tree level, with the chunks in the smaller half of the range (0x100<br>
> + <= x < 0x140 for the top node) in the left subtree and the larger<br>
> + half (0x140 <= x < 0x180) in the right subtree. This is, of course,<br>
> + done by inspecting individual bits.<br>
> +<br>
> + Using these rules, each node's left subtree contains all smaller<br>
> + sizes than its right subtree. However, the node at the root of each<br>
> + subtree has no particular ordering relationship to either. (The<br>
> + dividing line between the subtree sizes is based on trie relation.)<br>
> + If we remove the last chunk of a given size from the interior of the<br>
> + tree, we need to replace it with a leaf node. The tree ordering<br>
> + rules permit a node to be replaced by any leaf below it.<br>
> +<br>
> + The smallest chunk in a tree (a common operation in a best-fit<br>
> + allocator) can be found by walking a path to the leftmost leaf in<br>
> + the tree. Unlike a usual binary tree, where we follow left child<br>
> + pointers until we reach a null, here we follow the right child<br>
> + pointer any time the left one is null, until we reach a leaf with<br>
> + both child pointers null. The smallest chunk in the tree will be<br>
> + somewhere along that path.<br>
> +<br>
> + The worst case number of steps to add, find, or remove a node is<br>
> + bounded by the number of bits differentiating chunks within<br>
> + bins. Under current bin calculations, this ranges from 6 up to 21<br>
> + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case<br>
> + is of course much better.<br>
> +*/<br>
> +<br>
> +struct malloc_tree_chunk {<br>
> + /* The first four fields must be compatible with malloc_chunk */<br>
> + size_t prev_foot;<br>
> + size_t head;<br>
> + struct malloc_tree_chunk* fd;<br>
> + struct malloc_tree_chunk* bk;<br>
> +<br>
> + struct malloc_tree_chunk* child[2];<br>
> + struct malloc_tree_chunk* parent;<br>
> + bindex_t index;<br>
> +};<br>
> +<br>
> +typedef struct malloc_tree_chunk tchunk;<br>
> +typedef struct malloc_tree_chunk* tchunkptr;<br>
> +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */<br>
> +<br>
> +/* A little helper macro for trees */<br>
> +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] :<br>
> (t)->child[1])<br>
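<br>
The "leftmost leaf" walk described in the big comment above, spelled out on<br>
a toy node type (illustrative only; this is the loop tmalloc_small performs<br>
by applying leftmost_child repeatedly):<br>
<br>
    #include <stddef.h><br>
<br>
    struct tnode { struct tnode *child[2]; };<br>
<br>
    static struct tnode *leftmost_leaf(struct tnode *t)<br>
    {<br>
        /* follow the left child, falling back to the right one when the<br>
           left is null, until both are null */<br>
        while (t->child[0] != NULL || t->child[1] != NULL)<br>
            t = (t->child[0] != NULL) ? t->child[0] : t->child[1];<br>
        return t;<br>
    }<br>
<br>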
> +<br>
> +/* ----------------------------- Segments --------------------------------<br>
> */<br>
> +<br>
> +/*<br>
> + Each malloc space may include non-contiguous segments, held in a<br>
> + list headed by an embedded malloc_segment record representing the<br>
> + top-most space. Segments also include flags holding properties of<br>
> + the space. Large chunks that are directly allocated by mmap are not<br>
> + included in this list. They are instead independently created and<br>
> + destroyed without otherwise keeping track of them.<br>
> +<br>
> + Segment management mainly comes into play for spaces allocated by<br>
> + MMAP. Any call to MMAP might or might not return memory that is<br>
> + adjacent to an existing segment. MORECORE normally contiguously<br>
> + extends the current space, so this space is almost always adjacent,<br>
> + which is simpler and faster to deal with. (This is why MORECORE is<br>
> + used preferentially to MMAP when both are available -- see<br>
> + sys_alloc.) When allocating using MMAP, we don't use any of the<br>
> + hinting mechanisms (inconsistently) supported in various<br>
> + implementations of unix mmap, or distinguish reserving from<br>
> + committing memory. Instead, we just ask for space, and exploit<br>
> + contiguity when we get it. It is probably possible to do<br>
> + better than this on some systems, but no general scheme seems<br>
> + to be significantly better.<br>
> +<br>
> + Management entails a simpler variant of the consolidation scheme<br>
> + used for chunks to reduce fragmentation -- new adjacent memory is<br>
> + normally prepended or appended to an existing segment. However,<br>
> + there are limitations compared to chunk consolidation that mostly<br>
> + reflect the fact that segment processing is relatively infrequent<br>
> + (occurring only when getting memory from system) and that we<br>
> + don't expect to have huge numbers of segments:<br>
> +<br>
> + * Segments are not indexed, so traversal requires linear scans. (It<br>
> + would be possible to index these, but is not worth the extra<br>
> + overhead and complexity for most programs on most platforms.)<br>
> + * New segments are only appended to old ones when holding top-most<br>
> + memory; if they cannot be prepended to others, they are held in<br>
> + different segments.<br>
> +<br>
> + Except for the top-most segment of an mstate, each segment record<br>
> + is kept at the tail of its segment. Segments are added by pushing<br>
> + segment records onto the list headed by &mstate.seg for the<br>
> + containing mstate.<br>
> +<br>
> + Segment flags control allocation/merge/deallocation policies:<br>
> + * If EXTERN_BIT set, then we did not allocate this segment,<br>
> + and so should not try to deallocate or merge with others.<br>
> + (This currently holds only for the initial segment passed<br>
> + into create_mspace_with_base.)<br>
> + * If IS_MMAPPED_BIT set, the segment may be merged with<br>
> + other surrounding mmapped segments and trimmed/de-allocated<br>
> + using munmap.<br>
> + * If neither bit is set, then the segment was obtained using<br>
> + MORECORE so can be merged with surrounding MORECORE'd segments<br>
> + and deallocated/trimmed using MORECORE with negative arguments.<br>
> +*/<br>
> +<br>
> +struct malloc_segment {<br>
> + char* base; /* base address */<br>
> + size_t size; /* allocated size */<br>
> + struct malloc_segment* next; /* ptr to next segment */<br>
> +};<br>
> +<br>
> +typedef struct malloc_segment msegment;<br>
> +typedef struct malloc_segment* msegmentptr;<br>
> +<br>
> +/* ---------------------------- malloc_state -----------------------------<br>
> */<br>
> +<br>
> +/*<br>
> + A malloc_state holds all of the bookkeeping for a space.<br>
> + The main fields are:<br>
> +<br>
> + Top<br>
> + The topmost chunk of the currently active segment. Its size is<br>
> + cached in topsize. The actual size of topmost space is<br>
> + topsize+TOP_FOOT_SIZE, which includes space reserved for adding<br>
> + fenceposts and segment records if necessary when getting more<br>
> + space from the system. The size at which to autotrim top is<br>
> + cached from mparams in trim_check, except that it is disabled if<br>
> + an autotrim fails.<br>
> +<br>
> + Designated victim (dv)<br>
> + This is the preferred chunk for servicing small requests that<br>
> + don't have exact fits. It is normally the chunk split off most<br>
> + recently to service another small request. Its size is cached in<br>
> + dvsize. The link fields of this chunk are not maintained since it<br>
> + is not kept in a bin.<br>
> +<br>
> + SmallBins<br>
> + An array of bin headers for free chunks. These bins hold chunks<br>
> + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains<br>
> + chunks of all the same size, spaced 8 bytes apart. To simplify<br>
> + use in double-linked lists, each bin header acts as a malloc_chunk<br>
> + pointing to the real first node, if it exists (else pointing to<br>
> + itself). This avoids special-casing for headers. But to avoid<br>
> + waste, we allocate only the fd/bk pointers of bins, and then use<br>
> + repositioning tricks to treat these as the fields of a chunk.<br>
> +<br>
> + TreeBins<br>
> + Treebins are pointers to the roots of trees holding a range of<br>
> + sizes. There are 2 equally spaced treebins for each power of two<br>
> + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything<br>
> + larger.<br>
> +<br>
> + Bin maps<br>
> + There is one bit map for small bins ("smallmap") and one for<br>
> + treebins ("treemap"). Each bin sets its bit when non-empty, and<br>
> + clears the bit when empty. Bit operations are then used to avoid<br>
> + bin-by-bin searching -- nearly all "search" is done without ever<br>
> + looking at bins that won't be selected. The bit maps<br>
> + conservatively use 32 bits per map word, even on a 64-bit system.<br>
> + For a good description of some of the bit-based techniques used<br>
> + here, see Henry S. Warren Jr's book "Hacker's Delight" (and<br>
> + supplement at <a href="http://hackersdelight.org/" rel="noreferrer" target="_blank">http://hackersdelight.org/</a>). Many of these are<br>
> + intended to reduce the branchiness of paths through malloc etc, as<br>
> + well as to reduce the number of memory locations read or written.<br>
> +<br>
> + Segments<br>
> + A list of segments headed by an embedded malloc_segment record<br>
> + representing the initial space.<br>
> +<br>
> + Address check support<br>
> + The least_addr field is the least address ever obtained from<br>
> + MORECORE or MMAP. Attempted frees and reallocs of any address less<br>
> + than this are trapped (unless INSECURE is defined).<br>
> +<br>
> + Magic tag<br>
> + A cross-check field that should always hold the same value as mparams.magic.<br>
> +<br>
> + Flags<br>
> + Bits recording whether to use MMAP, locks, or contiguous MORECORE<br>
> +<br>
> + Statistics<br>
> + Each space keeps track of current and maximum system memory<br>
> + obtained via MORECORE or MMAP.<br>
> +<br>
> + Locking<br>
> + If USE_LOCKS is defined, the "mutex" lock is acquired and released<br>
> + around every public call using this mspace.<br>
> +*/<br>
> +<br>
> +/* Bin types, widths and sizes */<br>
> +#define NSMALLBINS (32U)<br>
> +#define NTREEBINS (32U)<br>
> +#define SMALLBIN_SHIFT (3U)<br>
> +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)<br>
> +#define TREEBIN_SHIFT (8U)<br>
> +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)<br>
> +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)<br>
> +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)<br>
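<br>
For orientation, the constants above fix the bin geometry: 32 small bins spaced 8 bytes apart, with everything of size MIN_LARGE_SIZE (256) and up handled by the treebins. A minimal standalone sketch of the arithmetic; the concrete CHUNK_OVERHEAD (8) and CHUNK_ALIGN_MASK (7) values are assumptions here (two size_t of overhead on a 32-bit build), not taken from this file:<br>
<br>
#include <stdio.h><br>
<br>
int main(void) {<br>
    unsigned long width     = 1UL << 3;        /* SIZE_T_ONE << SMALLBIN_SHIFT */<br>
    unsigned long min_large = 1UL << 8;        /* SIZE_T_ONE << TREEBIN_SHIFT  */<br>
    unsigned long max_small = min_large - 1;   /* MAX_SMALL_SIZE               */<br>
    printf("SMALLBIN_WIDTH    = %lu\n", width);             /* 8   */<br>
    printf("MIN_LARGE_SIZE    = %lu\n", min_large);         /* 256 */<br>
    printf("MAX_SMALL_SIZE    = %lu\n", max_small);         /* 255 */<br>
    printf("MAX_SMALL_REQUEST = %lu\n", max_small - 7 - 8); /* 240 with assumed masks */<br>
    return 0;<br>
}<br>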
> +<br>
> +struct malloc_state {<br>
> + binmap_t smallmap;<br>
> + binmap_t treemap;<br>
> + size_t dvsize;<br>
> + size_t topsize;<br>
> + char* least_addr;<br>
> + mchunkptr dv;<br>
> + mchunkptr top;<br>
> + size_t magic;<br>
> + mchunkptr smallbins[(NSMALLBINS+1)*2];<br>
> + tbinptr treebins[NTREEBINS];<br>
> + size_t footprint;<br>
> + size_t max_footprint;<br>
> + flag_t mflags;<br>
> + void *user_data;<br>
> +#if USE_LOCKS<br>
> + MLOCK_T mutex; /* locate lock among fields that rarely change */<br>
> +#endif /* USE_LOCKS */<br>
> + msegment seg;<br>
> +};<br>
> +<br>
> +typedef struct malloc_state* mstate;<br>
> +<br>
> +/* ------------- Global malloc_state and malloc_params ------------------- */<br>
> +<br>
> +/*<br>
> + malloc_params holds global properties, including those that can be<br>
> + dynamically set using mallopt. There is a single instance, mparams,<br>
> + initialized in init_mparams.<br>
> +*/<br>
> +<br>
> +struct malloc_params {<br>
> + size_t magic;<br>
> + size_t page_size;<br>
> + size_t granularity;<br>
> + flag_t default_mflags;<br>
> +};<br>
> +<br>
> +static struct malloc_params mparams;<br>
> +<br>
> +/* The global malloc_state used for all non-"mspace" calls */<br>
> +//static struct malloc_state _gm_;<br>
> +//#define gm (&_gm_)<br>
> +//#define is_global(M) ((M) == &_gm_)<br>
> +#define is_initialized(M) ((M)->top != 0)<br>
> +<br>
> +/* -------------------------- system alloc setup ------------------------- */<br>
> +<br>
> +/* Operations on mflags */<br>
> +<br>
> +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)<br>
> +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)<br>
> +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)<br>
> +<br>
> +#define set_lock(M,L)\<br>
> + ((M)->mflags = (L)?\<br>
> + ((M)->mflags | USE_LOCK_BIT) :\<br>
> + ((M)->mflags & ~USE_LOCK_BIT))<br>
> +<br>
> +/* page-align a size */<br>
> +#define page_align(S)\<br>
> + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))<br>
> +<br>
> +/* granularity-align a size */<br>
> +#define granularity_align(S)\<br>
> + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))<br>
> +<br>
> +#define is_page_aligned(S)\<br>
> + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)<br>
> +#define is_granularity_aligned(S)\<br>
> + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)<br>
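<br>
One subtlety worth flagging: page_align adds a full page_size before masking, so a size that is already page-aligned still grows by one page. A minimal sketch with an assumed 4096-byte page (the real value is read into mparams by init_mparams):<br>
<br>
#include <stdio.h><br>
#include <stddef.h><br>
<br>
/* Same shape as page_align(S) above, page size passed in explicitly. */<br>
static size_t page_align_demo(size_t s, size_t page) {<br>
    return (s + page) & ~(page - (size_t)1);<br>
}<br>
<br>
int main(void) {<br>
    size_t page = 4096;   /* assumption for the demo */<br>
    printf("1    -> %lu\n", (unsigned long)page_align_demo(1, page));    /* 4096 */<br>
    printf("4096 -> %lu\n", (unsigned long)page_align_demo(4096, page)); /* 8192 */<br>
    return 0;<br>
}<br>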
> +<br>
> +/* True if segment S holds address A */<br>
> +#define segment_holds(S, A)\<br>
> + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)<br>
> +<br>
> +/* Return segment holding given address */<br>
> +static msegmentptr segment_holding(mstate m, char* addr) {<br>
> + msegmentptr sp = &m->seg;<br>
> + for (;;) {<br>
> + if (addr >= sp->base && addr < sp->base + sp->size)<br>
> + return sp;<br>
> + if ((sp = sp->next) == 0)<br>
> + return 0;<br>
> + }<br>
> +}<br>
> +<br>
> +/* Return true if segment contains a segment link */<br>
> +static int has_segment_link(mstate m, msegmentptr ss) {<br>
> + msegmentptr sp = &m->seg;<br>
> + for (;;) {<br>
> + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)<br>
> + return 1;<br>
> + if ((sp = sp->next) == 0)<br>
> + return 0;<br>
> + }<br>
> +}<br>
> +<br>
> +<br>
> +<br>
> +/*<br>
> + TOP_FOOT_SIZE is padding at the end of a segment, including space<br>
> + that may be needed to place segment records and fenceposts when new<br>
> + noncontiguous segments are added.<br>
> +*/<br>
> +#define TOP_FOOT_SIZE\<br>
> + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)<br>
> +<br>
> +<br>
> +/* ------------------------------- Hooks -------------------------------- */<br>
> +<br>
> +/*<br>
> + PREACTION should be defined to return 0 on success, and nonzero on<br>
> + failure. If you are not using locking, you can redefine these to do<br>
> + anything you like.<br>
> +*/<br>
> +<br>
> +#if USE_LOCKS<br>
> +<br>
> +/* Ensure locks are initialized */<br>
> +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())<br>
> +<br>
> +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)<br>
> +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }<br>
> +#else /* USE_LOCKS */<br>
> +<br>
> +#ifndef PREACTION<br>
> +#define PREACTION(M) (0)<br>
> +#endif /* PREACTION */<br>
> +<br>
> +#ifndef POSTACTION<br>
> +#define POSTACTION(M)<br>
> +#endif /* POSTACTION */<br>
> +<br>
> +#endif /* USE_LOCKS */<br>
> +<br>
> +/*<br>
> + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.<br>
> + USAGE_ERROR_ACTION is triggered on detected bad frees and<br>
> + reallocs. The argument p is an address that might have triggered the<br>
> + fault. It is ignored by the two predefined actions, but might be<br>
> + useful in custom actions that try to help diagnose errors.<br>
> +*/<br>
> +<br>
> +#if PROCEED_ON_ERROR<br>
> +<br>
> +/* A count of the number of corruption errors causing resets */<br>
> +int malloc_corruption_error_count;<br>
> +<br>
> +/* default corruption action */<br>
> +static void reset_on_error(mstate m);<br>
> +<br>
> +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)<br>
> +#define USAGE_ERROR_ACTION(m, p)<br>
> +<br>
> +#else /* PROCEED_ON_ERROR */<br>
> +<br>
> +#ifndef CORRUPTION_ERROR_ACTION<br>
> +#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)<br>
> +#endif /* CORRUPTION_ERROR_ACTION */<br>
> +<br>
> +#ifndef USAGE_ERROR_ACTION<br>
> +#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)<br>
> +#endif /* USAGE_ERROR_ACTION */<br>
> +<br>
> +#endif /* PROCEED_ON_ERROR */<br>
> +<br>
> +/* -------------------------- Debugging setup ---------------------------- */<br>
> +<br>
> +#if ! DEBUG<br>
> +<br>
> +#define check_free_chunk(M,P)<br>
> +#define check_inuse_chunk(M,P)<br>
> +#define check_malloced_chunk(M,P,N)<br>
> +#define check_malloc_state(M)<br>
> +#define check_top_chunk(M,P)<br>
> +<br>
> +#else /* DEBUG */<br>
> +#define check_free_chunk(M,P) do_check_free_chunk(M,P)<br>
> +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)<br>
> +#define check_top_chunk(M,P) do_check_top_chunk(M,P)<br>
> +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)<br>
> +#define check_malloc_state(M) do_check_malloc_state(M)<br>
> +<br>
> +static void do_check_any_chunk(mstate m, mchunkptr p);<br>
> +static void do_check_top_chunk(mstate m, mchunkptr p);<br>
> +static void do_check_inuse_chunk(mstate m, mchunkptr p);<br>
> +static void do_check_free_chunk(mstate m, mchunkptr p);<br>
> +static void do_check_malloced_chunk(mstate m, void* mem, size_t s);<br>
> +static void do_check_tree(mstate m, tchunkptr t);<br>
> +static void do_check_treebin(mstate m, bindex_t i);<br>
> +static void do_check_smallbin(mstate m, bindex_t i);<br>
> +static void do_check_malloc_state(mstate m);<br>
> +static int bin_find(mstate m, mchunkptr x);<br>
> +static size_t traverse_and_check(mstate m);<br>
> +#endif /* DEBUG */<br>
> +<br>
> +/* ---------------------------- Indexing Bins ---------------------------- */<br>
> +<br>
> +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)<br>
> +#define small_index(s) ((s) >> SMALLBIN_SHIFT)<br>
> +#define small_index2size(i) ((i) << SMALLBIN_SHIFT)<br>
> +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))<br>
> +<br>
> +/* addressing by index. See above about smallbin repositioning */<br>
> +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))<br>
> +#define treebin_at(M,i) (&((M)->treebins[i]))<br>
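<br>
This is the repositioning trick promised in the malloc_state comment: only the fd/bk slots of each bin are stored, and smallbin_at aims the fake chunk header so that its fd/bk fields land on them. A sketch of the offset arithmetic, assuming the usual malloc_chunk field order (prev_foot, head, fd, bk) and size_t the same width as a pointer, which init_mparams asserts below:<br>
<br>
#include <stdio.h><br>
#include <stddef.h><br>
<br>
struct chunk_demo {                  /* stand-in with malloc_chunk's layout */<br>
    size_t prev_foot, head;<br>
    struct chunk_demo *fd, *bk;<br>
};<br>
<br>
int main(void) {<br>
    unsigned i = 5;                  /* an arbitrary small-bin index */<br>
    /* &smallbins[i<<1] viewed as a chunk puts fd/bk two and three<br>
       pointer slots further along: */<br>
    size_t fd_slot = (i << 1) + offsetof(struct chunk_demo, fd) / sizeof(void *);<br>
    size_t bk_slot = (i << 1) + offsetof(struct chunk_demo, bk) / sizeof(void *);<br>
    printf("bin %u: fd in slot %lu, bk in slot %lu\n", i,<br>
           (unsigned long)fd_slot, (unsigned long)bk_slot);  /* slots 12 and 13 */<br>
    return 0;<br>
}<br>
<br>
Bin 31's fd/bk land on slots 64 and 65, which is why smallbins is declared with (NSMALLBINS+1)*2 entries.<br>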
> +<br>
> +/* assign tree index for size S to variable I */<br>
> +#if defined(__GNUC__) && defined(i386)<br>
> +#define compute_tree_index(S, I)\<br>
> +{\<br>
> + size_t X = S >> TREEBIN_SHIFT;\<br>
> + if (X == 0)\<br>
> + I = 0;\<br>
> + else if (X > 0xFFFF)\<br>
> + I = NTREEBINS-1;\<br>
> + else {\<br>
> + unsigned int K;\<br>
> + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\<br>
> + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\<br>
> + }\<br>
> +}<br>
> +#else /* GNUC */<br>
> +#define compute_tree_index(S, I)\<br>
> +{\<br>
> + size_t X = S >> TREEBIN_SHIFT;\<br>
> + if (X == 0)\<br>
> + I = 0;\<br>
> + else if (X > 0xFFFF)\<br>
> + I = NTREEBINS-1;\<br>
> + else {\<br>
> + unsigned int Y = (unsigned int)X;\<br>
> + unsigned int N = ((Y - 0x100) >> 16) & 8;\<br>
> + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\<br>
> + N += K;\<br>
> + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\<br>
> + K = 14 - N + ((Y <<= K) >> 15);\<br>
> + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\<br>
> + }\<br>
> +}<br>
> +#endif /* GNUC */<br>
> +<br>
> +/* Bit representing maximum resolved size in a treebin at i */<br>
> +#define bit_for_tree_index(i) \<br>
> + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)<br>
> +<br>
> +/* Shift placing maximum resolved bit in a treebin at i as sign bit */<br>
> +#define leftshift_for_tree_index(i) \<br>
> + ((i == NTREEBINS-1)? 0 : \<br>
> + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))<br>
> +<br>
> +/* The size of the smallest chunk held in bin with index i */<br>
> +#define minsize_for_tree_index(i) \<br>
> + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \<br>
> + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))<br>
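<br>
To see the two-bins-per-power-of-two layout fall out of the bit fiddling, here is the portable branch of compute_tree_index lifted into a standalone function (constants inlined), checked against minsize_for_tree_index by hand: bin 3's minimum size is (1<<9)|(1<<8) = 768, and 768 indeed maps to index 3:<br>
<br>
#include <stdio.h><br>
#include <stddef.h><br>
<br>
static unsigned tree_index(size_t S) {      /* portable branch, verbatim */<br>
    size_t X = S >> 8;                      /* TREEBIN_SHIFT */<br>
    if (X == 0) return 0;<br>
    if (X > 0xFFFF) return 31;              /* NTREEBINS-1 */<br>
    {<br>
        unsigned int Y = (unsigned int)X;<br>
        unsigned int N = ((Y - 0x100) >> 16) & 8;<br>
        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;<br>
        N += K;<br>
        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;<br>
        K = 14 - N + ((Y <<= K) >> 15);<br>
        return (K << 1) + ((S >> (K + 7)) & 1);<br>
    }<br>
}<br>
<br>
int main(void) {<br>
    printf("tree_index(256)  = %u\n", tree_index(256));   /* 0 */<br>
    printf("tree_index(768)  = %u\n", tree_index(768));   /* 3 */<br>
    printf("tree_index(1024) = %u\n", tree_index(1024));  /* 4 */<br>
    return 0;<br>
}<br>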
> +<br>
> +/* ------------------------ Operations on bin maps ----------------------- */<br>
> +<br>
> +/* bit corresponding to given index */<br>
> +#define idx2bit(i) ((binmap_t)(1) << (i))<br>
> +<br>
> +/* Mark/Clear bits with given index */<br>
> +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))<br>
> +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))<br>
> +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))<br>
> +<br>
> +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))<br>
> +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))<br>
> +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))<br>
> +<br>
> +/* index corresponding to given bit */<br>
> +<br>
> +#if defined(__GNUC__) && defined(i386)<br>
> +#define compute_bit2idx(X, I)\<br>
> +{\<br>
> + unsigned int J;\<br>
> + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\<br>
> + I = (bindex_t)J;\<br>
> +}<br>
> +<br>
> +#else /* GNUC */<br>
> +#if USE_BUILTIN_FFS<br>
> +#define compute_bit2idx(X, I) I = ffs(X)-1<br>
> +<br>
> +#else /* USE_BUILTIN_FFS */<br>
> +#define compute_bit2idx(X, I)\<br>
> +{\<br>
> + unsigned int Y = X - 1;\<br>
> + unsigned int K = Y >> (16-4) & 16;\<br>
> + unsigned int N = K; Y >>= K;\<br>
> + N += K = Y >> (8-3) & 8; Y >>= K;\<br>
> + N += K = Y >> (4-2) & 4; Y >>= K;\<br>
> + N += K = Y >> (2-1) & 2; Y >>= K;\<br>
> + N += K = Y >> (1-0) & 1; Y >>= K;\<br>
> + I = (bindex_t)(N + Y);\<br>
> +}<br>
> +#endif /* USE_BUILTIN_FFS */<br>
> +#endif /* GNUC */<br>
> +<br>
> +/* isolate the least set bit of a bitmap */<br>
> +#define least_bit(x) ((x) & -(x))<br>
> +<br>
> +/* mask with all bits to left of least bit of x on */<br>
> +#define left_bits(x) ((x<<1) | -(x<<1))<br>
> +<br>
> +/* mask with all bits to left of or equal to least bit of x on */<br>
> +#define same_or_left_bits(x) ((x) | -(x))<br>
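<br>
A quick standalone demo of these bitmap tricks: least_bit isolates the lowest set bit of a map, left_bits masks everything above it, and the USE_BUILTIN_FFS fallback of compute_bit2idx turns the isolated bit back into a bin index (the map value 0x58 is an arbitrary example):<br>
<br>
#include <stdio.h><br>
<br>
static unsigned bit2idx(unsigned int X) {    /* the non-ffs fallback above */<br>
    unsigned int Y = X - 1;<br>
    unsigned int K = Y >> (16-4) & 16;<br>
    unsigned int N = K; Y >>= K;<br>
    N += K = Y >> (8-3) & 8; Y >>= K;<br>
    N += K = Y >> (4-2) & 4; Y >>= K;<br>
    N += K = Y >> (2-1) & 2; Y >>= K;<br>
    N += K = Y >> (1-0) & 1; Y >>= K;<br>
    return N + Y;<br>
}<br>
<br>
int main(void) {<br>
    unsigned int map = 0x58;                 /* bins 3, 4 and 6 non-empty */<br>
    unsigned int least = map & (0u - map);   /* least_bit -> 0x08 */<br>
    printf("least bit : 0x%x\n", least);<br>
    printf("bin index : %u\n", bit2idx(least));                   /* 3 */<br>
    printf("left_bits : 0x%x\n", (map << 1) | (0u - (map << 1))); /* 0xfffffff0 */<br>
    return 0;<br>
}<br>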
> +<br>
> +<br>
> +/* ----------------------- Runtime Check Support ------------------------- */<br>
> +<br>
> +/*<br>
> + For security, the main invariant is that malloc/free/etc never<br>
> + writes to a static address other than malloc_state, unless static<br>
> + malloc_state itself has been corrupted, which cannot occur via<br>
> + malloc (because of these checks). In essence this means that we<br>
> + believe all pointers, sizes, maps etc held in malloc_state, but<br>
> + check all of those linked or offsetted from other embedded data<br>
> + structures. These checks are interspersed with main code in a way<br>
> + that tends to minimize their run-time cost.<br>
> +<br>
> + When FOOTERS is defined, in addition to range checking, we also<br>
> + verify footer fields of inuse chunks, which can be used to guarantee<br>
> + that the mstate controlling malloc/free is intact. This is a<br>
> + streamlined version of the approach described by William Robertson<br>
> + et al in "Run-time Detection of Heap-based Overflows" LISA'03<br>
> + http://www.usenix.org/events/lisa03/tech/robertson.html The footer<br>
> + of an inuse chunk holds the xor of its mstate and a random seed,<br>
> + that is checked upon calls to free() and realloc(). This is<br>
> + (probabilistically) unguessable from outside the program, but can be<br>
> + computed by any code successfully malloc'ing any chunk, so does not<br>
> + itself provide protection against code that has already broken<br>
> + security through some other means. Unlike Robertson et al, we<br>
> + always dynamically check addresses of all offset chunks (previous,<br>
> + next, etc). This turns out to be cheaper than relying on hashes.<br>
> +*/<br>
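<br>
The FOOTERS check boils down to one xor in each direction: mark_inuse_foot stores mstate ^ magic past the chunk, and get_mstate_for xors the footer with the same magic to recover (and thereby validate) the owning mstate. A minimal sketch of the round trip with stand-in values; nothing here is driver code:<br>
<br>
#include <stdio.h><br>
#include <stddef.h><br>
<br>
int main(void) {<br>
    size_t magic = (size_t)0x5851f42d;        /* stand-in for mparams.magic */<br>
    int state;                                /* stand-in for the mstate    */<br>
    size_t foot = (size_t)&state ^ magic;     /* what mark_inuse_foot stores */<br>
    void *recovered = (void *)(foot ^ magic); /* what get_mstate_for reads  */<br>
    printf("round trip ok: %d\n", recovered == (void *)&state);<br>
    return 0;<br>
}<br>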
> +<br>
> +#if !INSECURE<br>
> +/* Check if address a is at least as high as any from MORECORE or MMAP */<br>
> +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)<br>
> +/* Check if address of next chunk n is higher than base chunk p */<br>
> +#define ok_next(p, n) ((char*)(p) < (char*)(n))<br>
> +/* Check if p has its cinuse bit on */<br>
> +#define ok_cinuse(p) cinuse(p)<br>
> +/* Check if p has its pinuse bit on */<br>
> +#define ok_pinuse(p) pinuse(p)<br>
> +<br>
> +#else /* !INSECURE */<br>
> +#define ok_address(M, a) (1)<br>
> +#define ok_next(b, n) (1)<br>
> +#define ok_cinuse(p) (1)<br>
> +#define ok_pinuse(p) (1)<br>
> +#endif /* !INSECURE */<br>
> +<br>
> +#if (FOOTERS && !INSECURE)<br>
> +/* Check if (alleged) mstate m has expected magic field */<br>
> +#define ok_magic(M) ((M)->magic == mparams.magic)<br>
> +#else /* (FOOTERS && !INSECURE) */<br>
> +#define ok_magic(M) (1)<br>
> +#endif /* (FOOTERS && !INSECURE) */<br>
> +<br>
> +<br>
> +/* In gcc, use __builtin_expect to minimize impact of checks */<br>
> +#if !INSECURE<br>
> +#if defined(__GNUC__) && __GNUC__ >= 3<br>
> +#define RTCHECK(e) __builtin_expect(e, 1)<br>
> +#else /* GNUC */<br>
> +#define RTCHECK(e) (e)<br>
> +#endif /* GNUC */<br>
> +#else /* !INSECURE */<br>
> +#define RTCHECK(e) (1)<br>
> +#endif /* !INSECURE */<br>
> +<br>
> +/* macros to set up inuse chunks with or without footers */<br>
> +<br>
> +#if !FOOTERS<br>
> +<br>
> +#define mark_inuse_foot(M,p,s)<br>
> +<br>
> +/* Set cinuse bit and pinuse bit of next chunk */<br>
> +#define set_inuse(M,p,s)\<br>
> + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\<br>
> + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)<br>
> +<br>
> +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */<br>
> +#define set_inuse_and_pinuse(M,p,s)\<br>
> + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)<br>
> +<br>
> +/* Set size, cinuse and pinuse bit of this chunk */<br>
> +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\<br>
> + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))<br>
> +<br>
> +#else /* FOOTERS */<br>
> +<br>
> +/* Set foot of inuse chunk to be xor of mstate and seed */<br>
> +#define mark_inuse_foot(M,p,s)\<br>
> + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))<br>
> +<br>
> +#define get_mstate_for(p)\<br>
> + ((mstate)(((mchunkptr)((char*)(p) +\<br>
> + (chunksize(p))))->prev_foot ^ mparams.magic))<br>
> +<br>
> +#define set_inuse(M,p,s)\<br>
> + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\<br>
> + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \<br>
> + mark_inuse_foot(M,p,s))<br>
> +<br>
> +#define set_inuse_and_pinuse(M,p,s)\<br>
> + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\<br>
> + mark_inuse_foot(M,p,s))<br>
> +<br>
> +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\<br>
> + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\<br>
> + mark_inuse_foot(M, p, s))<br>
> +<br>
> +#endif /* !FOOTERS */<br>
> +<br>
> +/* ---------------------------- setting mparams -------------------------- */<br>
> +<br>
> +/* Initialize mparams */<br>
> +static int init_mparams(void) {<br>
> + if (mparams.page_size == 0) {<br>
> + size_t s;<br>
> +<br>
> + mparams.default_mflags = USE_LOCK_BIT;<br>
> +<br>
> +#if (FOOTERS && !INSECURE)<br>
> + {<br>
> +#if USE_DEV_RANDOM<br>
> + int fd;<br>
> + unsigned char buf[sizeof(size_t)];<br>
> + /* Try to use /dev/urandom, else fall back on using time */<br>
> + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&<br>
> + read(fd, buf, sizeof(buf)) == sizeof(buf)) {<br>
> + s = *((size_t *) buf);<br>
> + close(fd);<br>
> + }<br>
> + else<br>
> +#endif /* USE_DEV_RANDOM */<br>
> + s = (size_t)(time(0) ^ (size_t)0x55555555U);<br>
> +<br>
> + s |= (size_t)8U; /* ensure nonzero */<br>
> + s &= ~(size_t)7U; /* improve chances of fault for bad values */<br>
> +<br>
> + }<br>
> +#else /* (FOOTERS && !INSECURE) */<br>
> + s = (size_t)0x58585858U;<br>
> +#endif /* (FOOTERS && !INSECURE) */<br>
> + ACQUIRE_MAGIC_INIT_LOCK();<br>
> + if (mparams.magic == 0) {<br>
> + mparams.magic = s;<br>
> + /* Set up lock for main malloc area */<br>
> + //INITIAL_LOCK(&gm->mutex);<br>
> + //gm->mflags = mparams.default_mflags;<br>
> + }<br>
> + RELEASE_MAGIC_INIT_LOCK();<br>
> +<br>
> +<br>
> + mparams.page_size = malloc_getpagesize;<br>
> + mparams.granularity = ((DEFAULT_GRANULARITY != 0)?<br>
> + DEFAULT_GRANULARITY : mparams.page_size);<br>
> +<br>
> + /* Sanity-check configuration:<br>
> + size_t must be unsigned and as wide as pointer type.<br>
> + ints must be at least 4 bytes.<br>
> + alignment must be at least 8.<br>
> + Alignment, min chunk size, and page size must all be powers of 2.<br>
> + */<br>
> + if ((sizeof(size_t) != sizeof(char*)) ||<br>
> + (MAX_SIZE_T < MIN_CHUNK_SIZE) ||<br>
> + (sizeof(int) < 4) ||<br>
> + (MALLOC_ALIGNMENT < (size_t)8U) ||<br>
> + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||<br>
> + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||<br>
> + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||<br>
> + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))<br>
> + ABORT(NULL);<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* support for mallopt */<br>
> +static int change_mparam(int param_number, int value) {<br>
> + size_t val = (size_t)value;<br>
> + init_mparams();<br>
> + switch(param_number) {<br>
> + case M_GRANULARITY:<br>
> + if (val >= mparams.page_size && ((val & (val-1)) == 0)) {<br>
> + mparams.granularity = val;<br>
> + return 1;<br>
> + }<br>
> + else<br>
> + return 0;<br>
> + default:<br>
> + return 0;<br>
> + }<br>
> +}<br>
> +<br>
> +#if DEBUG<br>
> +/* ------------------------- Debugging Support --------------------------- */<br>
> +<br>
> +/* Check properties of any chunk, whether free, inuse, mmapped etc */<br>
> +static void do_check_any_chunk(mstate m, mchunkptr p) {<br>
> + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));<br>
> + assert(m->user_data, ok_address(m, p));<br>
> +}<br>
> +<br>
> +/* Check properties of top chunk */<br>
> +static void do_check_top_chunk(mstate m, mchunkptr p) {<br>
> + msegmentptr sp = segment_holding(m, (char*)p);<br>
> + size_t sz = chunksize(p);<br>
> + assert(m->user_data, sp != 0);<br>
> + assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));<br>
> + assert(m->user_data, ok_address(m, p));<br>
> + assert(m->user_data, sz == m->topsize);<br>
> + assert(m->user_data, sz > 0);<br>
> + assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);<br>
> + assert(m->user_data, pinuse(p));<br>
> + assert(m->user_data, !next_pinuse(p));<br>
> +}<br>
> +<br>
> +/* Check properties of inuse chunks */<br>
> +static void do_check_inuse_chunk(mstate m, mchunkptr p) {<br>
> + do_check_any_chunk(m, p);<br>
> + assert(m->user_data, cinuse(p));<br>
> + assert(m->user_data, next_pinuse(p));<br>
> + /* If not pinuse, previous chunk has OK offset */<br>
> + assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);<br>
> +}<br>
> +<br>
> +/* Check properties of free chunks */<br>
> +static void do_check_free_chunk(mstate m, mchunkptr p) {<br>
> + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);<br>
> + mchunkptr next = chunk_plus_offset(p, sz);<br>
> + do_check_any_chunk(m, p);<br>
> + assert(m->user_data, !cinuse(p));<br>
> + assert(m->user_data, !next_pinuse(p));<br>
> + if (p != m->dv && p != m->top) {<br>
> + if (sz >= MIN_CHUNK_SIZE) {<br>
> + assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);<br>
> + assert(m->user_data, is_aligned(chunk2mem(p)));<br>
> + assert(m->user_data, next->prev_foot == sz);<br>
> + assert(m->user_data, pinuse(p));<br>
> + assert(m->user_data, next == m->top || cinuse(next));<br>
> + assert(m->user_data, p->fd->bk == p);<br>
> + assert(m->user_data, p->bk->fd == p);<br>
> + }<br>
> + else /* markers are always of size SIZE_T_SIZE */<br>
> + assert(m->user_data, sz == SIZE_T_SIZE);<br>
> + }<br>
> +}<br>
> +<br>
> +/* Check properties of malloced chunks at the point they are malloced */<br>
> +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {<br>
> + if (mem != 0) {<br>
> + mchunkptr p = mem2chunk(mem);<br>
> + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);<br>
> + do_check_inuse_chunk(m, p);<br>
> + assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);<br>
> + assert(m->user_data, sz >= MIN_CHUNK_SIZE);<br>
> + assert(m->user_data, sz >= s);<br>
> + /* size is less than MIN_CHUNK_SIZE more than request */<br>
> + assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));<br>
> + }<br>
> +}<br>
> +<br>
> +/* Check a tree and its subtrees. */<br>
> +static void do_check_tree(mstate m, tchunkptr t) {<br>
> + tchunkptr head = 0;<br>
> + tchunkptr u = t;<br>
> + bindex_t tindex = t->index;<br>
> + size_t tsize = chunksize(t);<br>
> + bindex_t idx;<br>
> + compute_tree_index(tsize, idx);<br>
> + assert(m->user_data, tindex == idx);<br>
> + assert(m->user_data, tsize >= MIN_LARGE_SIZE);<br>
> + assert(m->user_data, tsize >= minsize_for_tree_index(idx));<br>
> + assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));<br>
> +<br>
> + do { /* traverse through chain of same-sized nodes */<br>
> + do_check_any_chunk(m, ((mchunkptr)u));<br>
> + assert(m->user_data, u->index == tindex);<br>
> + assert(m->user_data, chunksize(u) == tsize);<br>
> + assert(m->user_data, !cinuse(u));<br>
> + assert(m->user_data, !next_pinuse(u));<br>
> + assert(m->user_data, u->fd->bk == u);<br>
> + assert(m->user_data, u->bk->fd == u);<br>
> + if (u->parent == 0) {<br>
> + assert(m->user_data, u->child[0] == 0);<br>
> + assert(m->user_data, u->child[1] == 0);<br>
> + }<br>
> + else {<br>
> + assert(m->user_data, head == 0); /* only one node on chain has parent */<br>
> + head = u;<br>
> + assert(m->user_data, u->parent != u);<br>
> + assert(m->user_data, u->parent->child[0] == u ||<br>
> + u->parent->child[1] == u ||<br>
> + *((tbinptr*)(u->parent)) == u);<br>
> + if (u->child[0] != 0) {<br>
> + assert(m->user_data, u->child[0]->parent == u);<br>
> + assert(m->user_data, u->child[0] != u);<br>
> + do_check_tree(m, u->child[0]);<br>
> + }<br>
> + if (u->child[1] != 0) {<br>
> + assert(m->user_data, u->child[1]->parent == u);<br>
> + assert(m->user_data, u->child[1] != u);<br>
> + do_check_tree(m, u->child[1]);<br>
> + }<br>
> + if (u->child[0] != 0 && u->child[1] != 0) {<br>
> + assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1]));<br>
> + }<br>
> + }<br>
> + u = u->fd;<br>
> + } while (u != t);<br>
> + assert(m->user_data, head != 0);<br>
> +}<br>
> +<br>
> +/* Check all the chunks in a treebin. */<br>
> +static void do_check_treebin(mstate m, bindex_t i) {<br>
> + tbinptr* tb = treebin_at(m, i);<br>
> + tchunkptr t = *tb;<br>
> + int empty = (m->treemap & (1U << i)) == 0;<br>
> + if (t == 0)<br>
> + assert(m->user_data, empty);<br>
> + if (!empty)<br>
> + do_check_tree(m, t);<br>
> +}<br>
> +<br>
> +/* Check all the chunks in a smallbin. */<br>
> +static void do_check_smallbin(mstate m, bindex_t i) {<br>
> + sbinptr b = smallbin_at(m, i);<br>
> + mchunkptr p = b->bk;<br>
> + unsigned int empty = (m->smallmap & (1U << i)) == 0;<br>
> + if (p == b)<br>
> + assert(m->user_data, empty);<br>
> + if (!empty) {<br>
> + for (; p != b; p = p->bk) {<br>
> + size_t size = chunksize(p);<br>
> + mchunkptr q;<br>
> + /* each chunk claims to be free */<br>
> + do_check_free_chunk(m, p);<br>
> + /* chunk belongs in bin */<br>
> + assert(m->user_data, small_index(size) == i);<br>
> + assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));<br>
> + /* chunk is followed by an inuse chunk */<br>
> + q = next_chunk(p);<br>
> + if (q->head != FENCEPOST_HEAD)<br>
> + do_check_inuse_chunk(m, q);<br>
> + }<br>
> + }<br>
> +}<br>
> +<br>
> +/* Find x in a bin. Used in other check functions. */<br>
> +static int bin_find(mstate m, mchunkptr x) {<br>
> + size_t size = chunksize(x);<br>
> + if (is_small(size)) {<br>
> + bindex_t sidx = small_index(size);<br>
> + sbinptr b = smallbin_at(m, sidx);<br>
> + if (smallmap_is_marked(m, sidx)) {<br>
> + mchunkptr p = b;<br>
> + do {<br>
> + if (p == x)<br>
> + return 1;<br>
> + } while ((p = p->fd) != b);<br>
> + }<br>
> + }<br>
> + else {<br>
> + bindex_t tidx;<br>
> + compute_tree_index(size, tidx);<br>
> + if (treemap_is_marked(m, tidx)) {<br>
> + tchunkptr t = *treebin_at(m, tidx);<br>
> + size_t sizebits = size << leftshift_for_tree_index(tidx);<br>
> + while (t != 0 && chunksize(t) != size) {<br>
> + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];<br>
> + sizebits <<= 1;<br>
> + }<br>
> + if (t != 0) {<br>
> + tchunkptr u = t;<br>
> + do {<br>
> + if (u == (tchunkptr)x)<br>
> + return 1;<br>
> + } while ((u = u->fd) != t);<br>
> + }<br>
> + }<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* Traverse each chunk and check it; return total */<br>
> +static size_t traverse_and_check(mstate m) {<br>
> + size_t sum = 0;<br>
> + if (is_initialized(m)) {<br>
> + msegmentptr s = &m->seg;<br>
> + sum += m->topsize + TOP_FOOT_SIZE;<br>
> + while (s != 0) {<br>
> + mchunkptr q = align_as_chunk(s->base);<br>
> + mchunkptr lastq = 0;<br>
> + assert(m->user_data, pinuse(q));<br>
> + while (segment_holds(s, q) &&<br>
> + q != m->top && q->head != FENCEPOST_HEAD) {<br>
> + sum += chunksize(q);<br>
> + if (cinuse(q)) {<br>
> + assert(m->user_data, !bin_find(m, q));<br>
> + do_check_inuse_chunk(m, q);<br>
> + }<br>
> + else {<br>
> + assert(m->user_data, q == m->dv || bin_find(m, q));<br>
> + assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */<br>
> + do_check_free_chunk(m, q);<br>
> + }<br>
> + lastq = q;<br>
> + q = next_chunk(q);<br>
> + }<br>
> + s = s->next;<br>
> + }<br>
> + }<br>
> + return sum;<br>
> +}<br>
> +<br>
> +/* Check all properties of malloc_state. */<br>
> +static void do_check_malloc_state(mstate m) {<br>
> + bindex_t i;<br>
> + size_t total;<br>
> + /* check bins */<br>
> + for (i = 0; i < NSMALLBINS; ++i)<br>
> + do_check_smallbin(m, i);<br>
> + for (i = 0; i < NTREEBINS; ++i)<br>
> + do_check_treebin(m, i);<br>
> +<br>
> + if (m->dvsize != 0) { /* check dv chunk */<br>
> + do_check_any_chunk(m, m->dv);<br>
> + assert(m->user_data, m->dvsize == chunksize(m->dv));<br>
> + assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);<br>
> + assert(m->user_data, bin_find(m, m->dv) == 0);<br>
> + }<br>
> +<br>
> + if (m->top != 0) { /* check top chunk */<br>
> + do_check_top_chunk(m, m->top);<br>
> + assert(m->user_data, m->topsize == chunksize(m->top));<br>
> + assert(m->user_data, m->topsize > 0);<br>
> + assert(m->user_data, bin_find(m, m->top) == 0);<br>
> + }<br>
> +<br>
> + total = traverse_and_check(m);<br>
> + assert(m->user_data, total <= m->footprint);<br>
> + assert(m->user_data, m->footprint <= m->max_footprint);<br>
> +}<br>
> +#endif /* DEBUG */<br>
> +<br>
> +/* ----------------------------- statistics ------------------------------ */<br>
> +<br>
> +#if !NO_MALLINFO<br>
> +static struct mallinfo internal_mallinfo(mstate m) {<br>
> + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };<br>
> + if (!PREACTION(m)) {<br>
> + check_malloc_state(m);<br>
> + if (is_initialized(m)) {<br>
> + size_t nfree = SIZE_T_ONE; /* top always free */<br>
> + size_t mfree = m->topsize + TOP_FOOT_SIZE;<br>
> + size_t sum = mfree;<br>
> + msegmentptr s = &m->seg;<br>
> + while (s != 0) {<br>
> + mchunkptr q = align_as_chunk(s->base);<br>
> + while (segment_holds(s, q) &&<br>
> + q != m->top && q->head != FENCEPOST_HEAD) {<br>
> + size_t sz = chunksize(q);<br>
> + sum += sz;<br>
> + if (!cinuse(q)) {<br>
> + mfree += sz;<br>
> + ++nfree;<br>
> + }<br>
> + q = next_chunk(q);<br>
> + }<br>
> + s = s->next;<br>
> + }<br>
> +<br>
> + nm.arena = sum;<br>
> + nm.ordblks = nfree;<br>
> + nm.hblkhd = m->footprint - sum;<br>
> + nm.usmblks = m->max_footprint;<br>
> + nm.uordblks = m->footprint - mfree;<br>
> + nm.fordblks = mfree;<br>
> + nm.keepcost = m->topsize;<br>
> + }<br>
> +<br>
> + POSTACTION(m);<br>
> + }<br>
> + return nm;<br>
> +}<br>
> +#endif /* !NO_MALLINFO */<br>
> +<br>
> +static void internal_malloc_stats(mstate m) {<br>
> + if (!PREACTION(m)) {<br>
> + size_t maxfp = 0;<br>
> + size_t fp = 0;<br>
> + size_t used = 0;<br>
> + check_malloc_state(m);<br>
> + if (is_initialized(m)) {<br>
> + msegmentptr s = &m->seg;<br>
> + maxfp = m->max_footprint;<br>
> + fp = m->footprint;<br>
> + used = fp - (m->topsize + TOP_FOOT_SIZE);<br>
> +<br>
> + while (s != 0) {<br>
> + mchunkptr q = align_as_chunk(s->base);<br>
> + while (segment_holds(s, q) &&<br>
> + q != m->top && q->head != FENCEPOST_HEAD) {<br>
> + if (!cinuse(q))<br>
> + used -= chunksize(q);<br>
> + q = next_chunk(q);<br>
> + }<br>
> + s = s->next;<br>
> + }<br>
> + }<br>
> +<br>
> + PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned<br>
> long)(maxfp)));<br>
> + PRINT((m->user_data, "system bytes = %10lu\n", (unsigned<br>
> long)(fp)));<br>
> + PRINT((m->user_data, "in use bytes = %10lu\n", (unsigned<br>
> long)(used)));<br>
> +<br>
> + POSTACTION(m);<br>
> + }<br>
> +}<br>
> +<br>
> +/* ----------------------- Operations on smallbins ----------------------- */<br>
> +<br>
> +/*<br>
> + Various forms of linking and unlinking are defined as macros. Even<br>
> + the ones for trees, which are very long but have very short typical<br>
> + paths. This is ugly but reduces reliance on inlining support of<br>
> + compilers.<br>
> +*/<br>
> +<br>
> +/* Link a free chunk into a smallbin */<br>
> +#define insert_small_chunk(M, P, S) {\<br>
> + bindex_t I = small_index(S);\<br>
> + mchunkptr B = smallbin_at(M, I);\<br>
> + mchunkptr F = B;\<br>
> + assert((M)->user_data, S >= MIN_CHUNK_SIZE);\<br>
> + if (!smallmap_is_marked(M, I))\<br>
> + mark_smallmap(M, I);\<br>
> + else if (RTCHECK(ok_address(M, B->fd)))\<br>
> + F = B->fd;\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + B->fd = P;\<br>
> + F->bk = P;\<br>
> + P->fd = F;\<br>
> + P->bk = B;\<br>
> +}<br>
> +<br>
> +/* Unlink a chunk from a smallbin */<br>
> +#define unlink_small_chunk(M, P, S) {\<br>
> + mchunkptr F = P->fd;\<br>
> + mchunkptr B = P->bk;\<br>
> + bindex_t I = small_index(S);\<br>
> + assert((M)->user_data, P != B);\<br>
> + assert((M)->user_data, P != F);\<br>
> + assert((M)->user_data, chunksize(P) == small_index2size(I));\<br>
> + if (F == B)\<br>
> + clear_smallmap(M, I);\<br>
> + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\<br>
> + (B == smallbin_at(M,I) || ok_address(M, B)))) {\<br>
> + F->bk = B;\<br>
> + B->fd = F;\<br>
> + }\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> +}<br>
> +<br>
> +/* Unlink the first chunk from a smallbin */<br>
> +#define unlink_first_small_chunk(M, B, P, I) {\<br>
> + mchunkptr F = P->fd;\<br>
> + assert((M)->user_data, P != B);\<br>
> + assert((M)->user_data, P != F);\<br>
> + assert((M)->user_data, chunksize(P) == small_index2size(I));\<br>
> + if (B == F)\<br>
> + clear_smallmap(M, I);\<br>
> + else if (RTCHECK(ok_address(M, F))) {\<br>
> + B->fd = F;\<br>
> + F->bk = B;\<br>
> + }\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> +}<br>
> +<br>
> +/* Replace dv node, binning the old one */<br>
> +/* Used only when dvsize known to be small */<br>
> +#define replace_dv(M, P, S) {\<br>
> + size_t DVS = M->dvsize;\<br>
> + if (DVS != 0) {\<br>
> + mchunkptr DV = M->dv;\<br>
> + assert((M)->user_data, is_small(DVS));\<br>
> + insert_small_chunk(M, DV, DVS);\<br>
> + }\<br>
> + M->dvsize = S;\<br>
> + M->dv = P;\<br>
> +}<br>
> +<br>
> +<br>
> +/* ------------------------- Operations on trees ------------------------- */<br>
> +<br>
> +/* Insert chunk into tree */<br>
> +#define insert_large_chunk(M, X, S) {\<br>
> + tbinptr* H;\<br>
> + bindex_t I;\<br>
> + compute_tree_index(S, I);\<br>
> + H = treebin_at(M, I);\<br>
> + X->index = I;\<br>
> + X->child[0] = X->child[1] = 0;\<br>
> + if (!treemap_is_marked(M, I)) {\<br>
> + mark_treemap(M, I);\<br>
> + *H = X;\<br>
> + X->parent = (tchunkptr)H;\<br>
> + X->fd = X->bk = X;\<br>
> + }\<br>
> + else {\<br>
> + tchunkptr T = *H;\<br>
> + size_t K = S << leftshift_for_tree_index(I);\<br>
> + for (;;) {\<br>
> + if (chunksize(T) != S) {\<br>
> + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\<br>
> + K <<= 1;\<br>
> + if (*C != 0)\<br>
> + T = *C;\<br>
> + else if (RTCHECK(ok_address(M, C))) {\<br>
> + *C = X;\<br>
> + X->parent = T;\<br>
> + X->fd = X->bk = X;\<br>
> + break;\<br>
> + }\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + break;\<br>
> + }\<br>
> + }\<br>
> + else {\<br>
> + tchunkptr F = T->fd;\<br>
> + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\<br>
> + T->fd = F->bk = X;\<br>
> + X->fd = F;\<br>
> + X->bk = T;\<br>
> + X->parent = 0;\<br>
> + break;\<br>
> + }\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + break;\<br>
> + }\<br>
> + }\<br>
> + }\<br>
> + }\<br>
> +}<br>
> +<br>
> +/*<br>
> + Unlink steps:<br>
> +<br>
> + 1. If x is a chained node, unlink it from its same-sized fd/bk links<br>
> + and choose its bk node as its replacement.<br>
> + 2. If x was the last node of its size, but not a leaf node, it must<br>
> + be replaced with a leaf node (not merely one with an open left or<br>
> + right), to make sure that lefts and rights of descendants<br>
> + correspond properly to bit masks. We use the rightmost descendant<br>
> + of x. We could use any other leaf, but this is easy to locate and<br>
> + tends to counteract removal of leftmosts elsewhere, and so keeps<br>
> + paths shorter than minimally guaranteed. This doesn't loop much<br>
> + because on average a node in a tree is near the bottom.<br>
> + 3. If x is the base of a chain (i.e., has parent links) relink<br>
> + x's parent and children to x's replacement (or null if none).<br>
> +*/<br>
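<br>
Step 2 is the only subtle part. Unrolled from the macro below into a plain function (tnode is an illustrative stand-in, not the real malloc_tree_chunk), the walk keeps a pointer to the slot holding the current node so the chosen leaf can be detached exactly as *RP = 0 does:<br>
<br>
#include <stdio.h><br>
#include <stddef.h><br>
<br>
struct tnode { struct tnode *child[2]; };<br>
<br>
/* Returns the slot holding x's rightmost descendant, or NULL if x is a leaf. */<br>
static struct tnode **rightmost_slot(struct tnode *x) {<br>
    struct tnode **rp = &x->child[1];<br>
    if (*rp == NULL) rp = &x->child[0];<br>
    if (*rp == NULL) return NULL;<br>
    for (;;) {<br>
        struct tnode **cp = &(*rp)->child[1];<br>
        if (*cp == NULL) cp = &(*rp)->child[0];<br>
        if (*cp == NULL) return rp;          /* *rp is a leaf: done */<br>
        rp = cp;<br>
    }<br>
}<br>
<br>
int main(void) {<br>
    struct tnode a = {{0,0}}, b = {{0,0}}, x = {{0,0}};<br>
    x.child[0] = &a; a.child[1] = &b;        /* b is x's rightmost leaf */<br>
    struct tnode **slot = rightmost_slot(&x);<br>
    printf("found b: %d\n", slot != NULL && *slot == &b);<br>
    if (slot) *slot = NULL;                  /* detach, as *RP = 0 */<br>
    return 0;<br>
}<br>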
> +<br>
> +#define unlink_large_chunk(M, X) {\<br>
> + tchunkptr XP = X->parent;\<br>
> + tchunkptr R;\<br>
> + if (X->bk != X) {\<br>
> + tchunkptr F = X->fd;\<br>
> + R = X->bk;\<br>
> + if (RTCHECK(ok_address(M, F))) {\<br>
> + F->bk = R;\<br>
> + R->fd = F;\<br>
> + }\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + }\<br>
> + else {\<br>
> + tchunkptr* RP;\<br>
> + if (((R = *(RP = &(X->child[1]))) != 0) ||\<br>
> + ((R = *(RP = &(X->child[0]))) != 0)) {\<br>
> + tchunkptr* CP;\<br>
> + while ((*(CP = &(R->child[1])) != 0) ||\<br>
> + (*(CP = &(R->child[0])) != 0)) {\<br>
> + R = *(RP = CP);\<br>
> + }\<br>
> + if (RTCHECK(ok_address(M, RP)))\<br>
> + *RP = 0;\<br>
> + else {\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + }\<br>
> + }\<br>
> + if (XP != 0) {\<br>
> + tbinptr* H = treebin_at(M, X->index);\<br>
> + if (X == *H) {\<br>
> + if ((*H = R) == 0) \<br>
> + clear_treemap(M, X->index);\<br>
> + }\<br>
> + else if (RTCHECK(ok_address(M, XP))) {\<br>
> + if (XP->child[0] == X) \<br>
> + XP->child[0] = R;\<br>
> + else \<br>
> + XP->child[1] = R;\<br>
> + }\<br>
> + else\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + if (R != 0) {\<br>
> + if (RTCHECK(ok_address(M, R))) {\<br>
> + tchunkptr C0, C1;\<br>
> + R->parent = XP;\<br>
> + if ((C0 = X->child[0]) != 0) {\<br>
> + if (RTCHECK(ok_address(M, C0))) {\<br>
> + R->child[0] = C0;\<br>
> + C0->parent = R;\<br>
> + }\<br>
> + else\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + if ((C1 = X->child[1]) != 0) {\<br>
> + if (RTCHECK(ok_address(M, C1))) {\<br>
> + R->child[1] = C1;\<br>
> + C1->parent = R;\<br>
> + }\<br>
> + else\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + }\<br>
> + else\<br>
> + CORRUPTION_ERROR_ACTION(M);\<br>
> + }\<br>
> + }\<br>
> +}<br>
> +<br>
> +/* Relays to large vs small bin operations */<br>
> +<br>
> +#define insert_chunk(M, P, S)\<br>
> + if (is_small(S)) insert_small_chunk(M, P, S)\<br>
> + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }<br>
> +<br>
> +#define unlink_chunk(M, P, S)\<br>
> + if (is_small(S)) unlink_small_chunk(M, P, S)\<br>
> + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }<br>
> +<br>
> +<br>
> +/* Relays to internal calls to malloc/free from realloc, memalign etc */<br>
> +<br>
> +#define internal_malloc(m, b) mspace_malloc(m, b)<br>
> +#define internal_free(m, mem) mspace_free(m,mem);<br>
> +<br>
> +<br>
> +/* -------------------------- mspace management -------------------------- */<br>
> +<br>
> +/* Initialize top chunk and its size */<br>
> +static void init_top(mstate m, mchunkptr p, size_t psize) {<br>
> + /* Ensure alignment */<br>
> + size_t offset = align_offset(chunk2mem(p));<br>
> + p = (mchunkptr)((char*)p + offset);<br>
> + psize -= offset;<br>
> +<br>
> + m->top = p;<br>
> + m->topsize = psize;<br>
> + p->head = psize | PINUSE_BIT;<br>
> + /* set size of fake trailing chunk holding overhead space only once */<br>
> + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;<br>
> +}<br>
> +<br>
> +/* Initialize bins for a new mstate that is otherwise zeroed out */<br>
> +static void init_bins(mstate m) {<br>
> + /* Establish circular links for smallbins */<br>
> + bindex_t i;<br>
> + for (i = 0; i < NSMALLBINS; ++i) {<br>
> + sbinptr bin = smallbin_at(m,i);<br>
> + bin->fd = bin->bk = bin;<br>
> + }<br>
> +}<br>
> +<br>
> +#if PROCEED_ON_ERROR<br>
> +<br>
> +/* default corruption action */<br>
> +static void reset_on_error(mstate m) {<br>
> + int i;<br>
> + ++malloc_corruption_error_count;<br>
> + /* Reinitialize fields to forget about all memory */<br>
> + m->smallmap = m->treemap = 0;<br>
> + m->dvsize = m->topsize = 0;<br>
> + m->seg.base = 0;<br>
> + m->seg.size = 0;<br>
> + m->seg.next = 0;<br>
> + m->top = m->dv = 0;<br>
> + for (i = 0; i < NTREEBINS; ++i)<br>
> + *treebin_at(m, i) = 0;<br>
> + init_bins(m);<br>
> +}<br>
> +#endif /* PROCEED_ON_ERROR */<br>
> +<br>
> +/* Allocate chunk and prepend remainder with chunk in successor base. */<br>
> +static void* prepend_alloc(mstate m, char* newbase, char* oldbase,<br>
> + size_t nb) {<br>
> + mchunkptr p = align_as_chunk(newbase);<br>
> + mchunkptr oldfirst = align_as_chunk(oldbase);<br>
> + size_t psize = (char*)oldfirst - (char*)p;<br>
> + mchunkptr q = chunk_plus_offset(p, nb);<br>
> + size_t qsize = psize - nb;<br>
> + set_size_and_pinuse_of_inuse_chunk(m, p, nb);<br>
> +<br>
> + assert(m->user_data, (char*)oldfirst > (char*)q);<br>
> + assert(m->user_data, pinuse(oldfirst));<br>
> + assert(m->user_data, qsize >= MIN_CHUNK_SIZE);<br>
> +<br>
> + /* consolidate remainder with first chunk of old base */<br>
> + if (oldfirst == m->top) {<br>
> + size_t tsize = m->topsize += qsize;<br>
> + m->top = q;<br>
> + q->head = tsize | PINUSE_BIT;<br>
> + check_top_chunk(m, q);<br>
> + }<br>
> + else if (oldfirst == m->dv) {<br>
> + size_t dsize = m->dvsize += qsize;<br>
> + m->dv = q;<br>
> + set_size_and_pinuse_of_free_chunk(q, dsize);<br>
> + }<br>
> + else {<br>
> + if (!cinuse(oldfirst)) {<br>
> + size_t nsize = chunksize(oldfirst);<br>
> + unlink_chunk(m, oldfirst, nsize);<br>
> + oldfirst = chunk_plus_offset(oldfirst, nsize);<br>
> + qsize += nsize;<br>
> + }<br>
> + set_free_with_pinuse(q, qsize, oldfirst);<br>
> +#pragma warning(suppress: 28182) /* code analysis noise*/<br>
> + insert_chunk(m, q, qsize);<br>
> + check_free_chunk(m, q);<br>
> + }<br>
> +<br>
> + check_malloced_chunk(m, chunk2mem(p), nb);<br>
> + return chunk2mem(p);<br>
> +}<br>
> +<br>
> +/* -------------------------- System allocation -------------------------- */<br>
> +<br>
> +/* Get memory from system using MORECORE or MMAP */<br>
> +static void* sys_alloc(mstate m, size_t nb) {<br>
> + MALLOC_FAILURE_ACTION;<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* ---------------------------- malloc support --------------------------- */<br>
> +<br>
> +/* allocate a large request from the best fitting chunk in a treebin */<br>
> +static void* tmalloc_large(mstate m, size_t nb) {<br>
> + tchunkptr v = 0;<br>
> + size_t rsize = -nb; /* Unsigned negation */<br>
> + tchunkptr t;<br>
> + bindex_t idx;<br>
> + compute_tree_index(nb, idx);<br>
> +<br>
> + if ((t = *treebin_at(m, idx)) != 0) {<br>
> + /* Traverse tree for this bin looking for node with size == nb */<br>
> + size_t sizebits = nb << leftshift_for_tree_index(idx);<br>
> + tchunkptr rst = 0; /* The deepest untaken right subtree */<br>
> + for (;;) {<br>
> + tchunkptr rt;<br>
> + size_t trem = chunksize(t) - nb;<br>
> + if (trem < rsize) {<br>
> + v = t;<br>
> + if ((rsize = trem) == 0)<br>
> + break;<br>
> + }<br>
> + rt = t->child[1];<br>
> + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];<br>
> + if (rt != 0 && rt != t)<br>
> + rst = rt;<br>
> + if (t == 0) {<br>
> + t = rst; /* set t to least subtree holding sizes > nb */<br>
> + break;<br>
> + }<br>
> + sizebits <<= 1;<br>
> + }<br>
> + }<br>
> +<br>
> + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */<br>
> + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;<br>
> + if (leftbits != 0) {<br>
> + bindex_t i;<br>
> + binmap_t leastbit = least_bit(leftbits);<br>
> + compute_bit2idx(leastbit, i);<br>
> + t = *treebin_at(m, i);<br>
> + }<br>
> + }<br>
> +<br>
> + while (t != 0) { /* find smallest of tree or subtree */<br>
> + size_t trem = chunksize(t) - nb;<br>
> + if (trem < rsize) {<br>
> + rsize = trem;<br>
> + v = t;<br>
> + }<br>
> + t = leftmost_child(t);<br>
> + }<br>
> +<br>
> + /* If dv is a better fit, return 0 so malloc will use it */<br>
> + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {<br>
> + if (RTCHECK(ok_address(m, v))) { /* split */<br>
> + mchunkptr r = chunk_plus_offset(v, nb);<br>
> + assert(m->user_data, chunksize(v) == rsize + nb);<br>
> + if (RTCHECK(ok_next(v, r))) {<br>
> + unlink_large_chunk(m, v);<br>
> + if (rsize < MIN_CHUNK_SIZE)<br>
> + set_inuse_and_pinuse(m, v, (rsize + nb));<br>
> + else {<br>
> + set_size_and_pinuse_of_inuse_chunk(m, v, nb);<br>
> + set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> +#pragma warning(suppress: 28182) /* code analysis noise*/<br>
> + insert_chunk(m, r, rsize);<br>
> + }<br>
> + return chunk2mem(v);<br>
> + }<br>
> + }<br>
> + CORRUPTION_ERROR_ACTION(m);<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* allocate a small request from the best fitting chunk in a treebin */<br>
> +static void* tmalloc_small(mstate m, size_t nb) {<br>
> + tchunkptr t, v;<br>
> + size_t rsize;<br>
> + bindex_t i;<br>
> + binmap_t leastbit = least_bit(m->treemap);<br>
> + compute_bit2idx(leastbit, i);<br>
> +<br>
> + v = t = *treebin_at(m, i);<br>
> + rsize = chunksize(t) - nb;<br>
> +<br>
> + while ((t = leftmost_child(t)) != 0) {<br>
> + size_t trem = chunksize(t) - nb;<br>
> + if (trem < rsize) {<br>
> + rsize = trem;<br>
> + v = t;<br>
> + }<br>
> + }<br>
> +<br>
> + if (RTCHECK(ok_address(m, v))) {<br>
> + mchunkptr r = chunk_plus_offset(v, nb);<br>
> + assert(m->user_data, chunksize(v) == rsize + nb);<br>
> + if (RTCHECK(ok_next(v, r))) {<br>
> + unlink_large_chunk(m, v);<br>
> + if (rsize < MIN_CHUNK_SIZE)<br>
> + set_inuse_and_pinuse(m, v, (rsize + nb));<br>
> + else {<br>
> + set_size_and_pinuse_of_inuse_chunk(m, v, nb);<br>
> + set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> + replace_dv(m, r, rsize);<br>
> + }<br>
> + return chunk2mem(v);<br>
> + }<br>
> + }<br>
> +<br>
> + CORRUPTION_ERROR_ACTION(m);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* --------------------------- realloc support --------------------------- */<br>
> +<br>
> +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {<br>
> + if (bytes >= MAX_REQUEST) {<br>
> + MALLOC_FAILURE_ACTION;<br>
> + return 0;<br>
> + }<br>
> + if (!PREACTION(m)) {<br>
> + mchunkptr oldp = mem2chunk(oldmem);<br>
> + size_t oldsize = chunksize(oldp);<br>
> + mchunkptr next = chunk_plus_offset(oldp, oldsize);<br>
> + mchunkptr newp = 0;<br>
> + void* extra = 0;<br>
> +<br>
> + /* Try to either shrink or extend into top. Else malloc-copy-free */<br>
> +<br>
> + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&<br>
> + ok_next(oldp, next) && ok_pinuse(next))) {<br>
> + size_t nb = request2size(bytes);<br>
> + if (oldsize >= nb) { /* already big enough */<br>
> + size_t rsize = oldsize - nb;<br>
> + newp = oldp;<br>
> + if (rsize >= MIN_CHUNK_SIZE) {<br>
> + mchunkptr remainder = chunk_plus_offset(newp, nb);<br>
> + set_inuse(m, newp, nb);<br>
> + set_inuse(m, remainder, rsize);<br>
> + extra = chunk2mem(remainder);<br>
> + }<br>
> + }<br>
> + else if (next == m->top && oldsize + m->topsize > nb) {<br>
> + /* Expand into top */<br>
> + size_t newsize = oldsize + m->topsize;<br>
> + size_t newtopsize = newsize - nb;<br>
> + mchunkptr newtop = chunk_plus_offset(oldp, nb);<br>
> + set_inuse(m, oldp, nb);<br>
> + newtop->head = newtopsize |PINUSE_BIT;<br>
> + m->top = newtop;<br>
> + m->topsize = newtopsize;<br>
> + newp = oldp;<br>
> + }<br>
> + }<br>
> + else {<br>
> + USAGE_ERROR_ACTION(m, oldmem);<br>
> + POSTACTION(m);<br>
> + return 0;<br>
> + }<br>
> +<br>
> + POSTACTION(m);<br>
> +<br>
> + if (newp != 0) {<br>
> + if (extra != 0) {<br>
> + internal_free(m, extra);<br>
> + }<br>
> + check_inuse_chunk(m, newp);<br>
> + return chunk2mem(newp);<br>
> + }<br>
> + else {<br>
> + void* newmem = internal_malloc(m, bytes);<br>
> + if (newmem != 0) {<br>
> + size_t oc = oldsize - overhead_for(oldp);<br>
> + MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);<br>
> + internal_free(m, oldmem);<br>
> + }<br>
> + return newmem;<br>
> + }<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* --------------------------- memalign support -------------------------- */<br>
> +<br>
> +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {<br>
> + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */<br>
> + return internal_malloc(m, bytes);<br>
> + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */<br>
> + alignment = MIN_CHUNK_SIZE;<br>
> + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */<br>
> + size_t a = MALLOC_ALIGNMENT << 1;<br>
> + while (a < alignment) a <<= 1;<br>
> + alignment = a;<br>
> + }<br>
> +<br>
> + if (bytes >= MAX_REQUEST - alignment) {<br>
> + if (m != 0) { /* Test isn't needed but avoids compiler warning */<br>
> + MALLOC_FAILURE_ACTION;<br>
> + }<br>
> + }<br>
> + else {<br>
> + size_t nb = request2size(bytes);<br>
> + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;<br>
> + char* mem = (char*)internal_malloc(m, req);<br>
> + if (mem != 0) {<br>
> + void* leader = 0;<br>
> + void* trailer = 0;<br>
> + mchunkptr p = mem2chunk(mem);<br>
> +<br>
> + if (PREACTION(m)) return 0;<br>
> + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */<br>
> + /*<br>
> + Find an aligned spot inside chunk. Since we need to give<br>
> + back leading space in a chunk of at least MIN_CHUNK_SIZE, if<br>
> + the first calculation places us at a spot with less than<br>
> + MIN_CHUNK_SIZE leader, we can move to the next aligned spot.<br>
> + We've allocated enough total room so that this is always<br>
> + possible.<br>
> + */<br>
> + char* br = (char*)mem2chunk((size_t)(((size_t)(mem +<br>
> + alignment -<br>
> + SIZE_T_ONE)) &<br>
> + -alignment));<br>
> + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?<br>
> + br : br+alignment;<br>
> + mchunkptr newp = (mchunkptr)pos;<br>
> + size_t leadsize = pos - (char*)(p);<br>
> + size_t newsize = chunksize(p) - leadsize;<br>
> +<br>
> + /* Otherwise, give back leader, use the rest */<br>
> + set_inuse(m, newp, newsize);<br>
> + set_inuse(m, p, leadsize);<br>
> + leader = chunk2mem(p);<br>
> +<br>
> + p = newp;<br>
> + }<br>
> +<br>
> + assert(m->user_data, chunksize(p) >= nb);<br>
> + assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);<br>
> + check_inuse_chunk(m, p);<br>
> + POSTACTION(m);<br>
> + if (leader != 0) {<br>
> + internal_free(m, leader);<br>
> + }<br>
> + if (trailer != 0) {<br>
> + internal_free(m, trailer);<br>
> + }<br>
> + return chunk2mem(p);<br>
> + }<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* ----------------------------- user mspaces ---------------------------- */<br>
> +<br>
> +static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {<br>
> + size_t msize = pad_request(sizeof(struct malloc_state));<br>
> + mchunkptr mn;<br>
> + mchunkptr msp = align_as_chunk(tbase);<br>
> + mstate m = (mstate)(chunk2mem(msp));<br>
> + MEMCLEAR(m, msize);<br>
> + INITIAL_LOCK(&m->mutex);<br>
> + msp->head = (msize|PINUSE_BIT|CINUSE_BIT);<br>
> + m->seg.base = m->least_addr = tbase;<br>
> + m->seg.size = m->footprint = m->max_footprint = tsize;<br>
> + m->magic = mparams.magic;<br>
> + m->mflags = mparams.default_mflags;<br>
> + m->user_data = user_data;<br>
> + init_bins(m);<br>
> + mn = next_chunk(mem2chunk(m));<br>
> + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);<br>
> + check_top_chunk(m, m->top);<br>
> + return m;<br>
> +}<br>
> +<br>
> +mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) {<br>
> + mstate m = 0;<br>
> + size_t msize = pad_request(sizeof(struct malloc_state));<br>
> + init_mparams(); /* Ensure pagesize etc initialized */<br>
> +<br>
> + if (capacity > msize + TOP_FOOT_SIZE &&<br>
> + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {<br>
> + m = init_user_mstate((char*)base, capacity, user_data);<br>
> + set_lock(m, locked);<br>
> + }<br>
> + return (mspace)m;<br>
> +}<br>
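<br>
Since this is the only constructor the qxldod code uses, a hypothetical usage sketch may help: carve a private heap out of a caller-supplied buffer and allocate within it. In the driver the buffer is QXL device memory; here it is just a static array, heap_buf and example() are made-up names, and user_data is unused:<br>
<br>
static char heap_buf[64 * 1024];<br>
<br>
void example(void) {<br>
    /* Returns 0 if capacity cannot hold the mstate plus TOP_FOOT_SIZE. */<br>
    mspace ms = create_mspace_with_base(heap_buf, sizeof(heap_buf),<br>
                                        0 /* no locking */, 0 /* user_data */);<br>
    if (ms != 0) {<br>
        void *p = mspace_malloc(ms, 128);<br>
        if (p != 0)<br>
            mspace_free(ms, p);<br>
    }<br>
}<br>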
> +<br>
> +/*<br>
> + mspace versions of routines are near-clones of the global<br>
> + versions. This is not so nice but better than the alternatives.<br>
> +*/<br>
> +<br>
> +<br>
> +void* mspace_malloc(mspace msp, size_t bytes) {<br>
> + mstate ms = (mstate)msp;<br>
> + if (!ok_magic(ms)) {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + return 0;<br>
> + }<br>
> + if (!PREACTION(ms)) {<br>
> + void* mem;<br>
> + size_t nb;<br>
> + if (bytes <= MAX_SMALL_REQUEST) {<br>
> + bindex_t idx;<br>
> + binmap_t smallbits;<br>
> + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);<br>
> + idx = small_index(nb);<br>
> + smallbits = ms->smallmap >> idx;<br>
> +<br>
> + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */<br>
> + mchunkptr b, p;<br>
> + idx += ~smallbits & 1; /* Uses next bin if idx empty */<br>
> + b = smallbin_at(ms, idx);<br>
> + p = b->fd;<br>
> + assert(ms->user_data, chunksize(p) == small_index2size(idx));<br>
> + unlink_first_small_chunk(ms, b, p, idx);<br>
> + set_inuse_and_pinuse(ms, p, small_index2size(idx));<br>
> + mem = chunk2mem(p);<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> +<br>
> + else if (nb > ms->dvsize) {<br>
> + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */<br>
> + mchunkptr b, p, r;<br>
> + size_t rsize;<br>
> + bindex_t i;<br>
> + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));<br>
> + binmap_t leastbit = least_bit(leftbits);<br>
> + compute_bit2idx(leastbit, i);<br>
> + b = smallbin_at(ms, i);<br>
> + p = b->fd;<br>
> + assert(ms->user_data, chunksize(p) == small_index2size(i));<br>
> + unlink_first_small_chunk(ms, b, p, i);<br>
> + rsize = small_index2size(i) - nb;<br>
> + /* Fit here cannot be remainderless if 4-byte sizes */<br>
> + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)<br>
> + set_inuse_and_pinuse(ms, p, small_index2size(i));<br>
> + else {<br>
> + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> + r = chunk_plus_offset(p, nb);<br>
> + set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> + replace_dv(ms, r, rsize);<br>
> + }<br>
> + mem = chunk2mem(p);<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> +<br>
> + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> + }<br>
> + }<br>
> + else if (bytes >= MAX_REQUEST)<br>
> + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */<br>
> + else {<br>
> + nb = pad_request(bytes);<br>
> + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> + }<br>
> +<br>
> + if (nb <= ms->dvsize) {<br>
> + size_t rsize = ms->dvsize - nb;<br>
> + mchunkptr p = ms->dv;<br>
> + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */<br>
> + mchunkptr r = ms->dv = chunk_plus_offset(p, nb);<br>
> + ms->dvsize = rsize;<br>
> + set_size_and_pinuse_of_free_chunk(r, rsize);<br>
> + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> + }<br>
> + else { /* exhaust dv */<br>
> + size_t dvs = ms->dvsize;<br>
> + ms->dvsize = 0;<br>
> + ms->dv = 0;<br>
> + set_inuse_and_pinuse(ms, p, dvs);<br>
> + }<br>
> + mem = chunk2mem(p);<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> +<br>
> + else if (nb < ms->topsize) { /* Split top */<br>
> + size_t rsize = ms->topsize -= nb;<br>
> + mchunkptr p = ms->top;<br>
> + mchunkptr r = ms->top = chunk_plus_offset(p, nb);<br>
> + r->head = rsize | PINUSE_BIT;<br>
> + set_size_and_pinuse_of_inuse_chunk(ms, p, nb);<br>
> + mem = chunk2mem(p);<br>
> + check_top_chunk(ms, ms->top);<br>
> + check_malloced_chunk(ms, mem, nb);<br>
> + goto postaction;<br>
> + }<br>
> +<br>
> + mem = sys_alloc(ms, nb);<br>
> +<br>
> + postaction:<br>
> + POSTACTION(ms);<br>
> + return mem;<br>
> + }<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +void mspace_free(mspace msp, void* mem) {<br>
> + if (mem != 0) {<br>
> + mchunkptr p = mem2chunk(mem);<br>
> +#if FOOTERS<br>
> + mstate fm = get_mstate_for(p);<br>
> +#else /* FOOTERS */<br>
> + mstate fm = (mstate)msp;<br>
> +#endif /* FOOTERS */<br>
> + if (!ok_magic(fm)) {<br>
> + USAGE_ERROR_ACTION(fm, p);<br>
> + return;<br>
> + }<br>
> + if (!PREACTION(fm)) {<br>
> + check_inuse_chunk(fm, p);<br>
> + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {<br>
> + size_t psize = chunksize(p);<br>
> + mchunkptr next = chunk_plus_offset(p, psize);<br>
> + if (!pinuse(p)) {<br>
> + size_t prevsize = p->prev_foot;<br>
> +<br>
> + mchunkptr prev = chunk_minus_offset(p, prevsize);<br>
> + psize += prevsize;<br>
> + p = prev;<br>
> + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */<br>
> + if (p != fm->dv) {<br>
> + unlink_chunk(fm, p, prevsize);<br>
> + }<br>
> + else if ((next->head & INUSE_BITS) == INUSE_BITS) {<br>
> + fm->dvsize = psize;<br>
> + set_free_with_pinuse(p, psize, next);<br>
> + goto postaction;<br>
> + }<br>
> + }<br>
> + else<br>
> + goto erroraction;<br>
> + }<br>
> +<br>
> + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {<br>
> + if (!cinuse(next)) { /* consolidate forward */<br>
> + if (next == fm->top) {<br>
> + size_t tsize = fm->topsize += psize;<br>
> + fm->top = p;<br>
> + p->head = tsize | PINUSE_BIT;<br>
> + if (p == fm->dv) {<br>
> + fm->dv = 0;<br>
> + fm->dvsize = 0;<br>
> + }<br>
> + goto postaction;<br>
> + }<br>
> + else if (next == fm->dv) {<br>
> + size_t dsize = fm->dvsize += psize;<br>
> + fm->dv = p;<br>
> + set_size_and_pinuse_of_free_chunk(p, dsize);<br>
> + goto postaction;<br>
> + }<br>
> + else {<br>
> + size_t nsize = chunksize(next);<br>
> + psize += nsize;<br>
> + unlink_chunk(fm, next, nsize);<br>
> + set_size_and_pinuse_of_free_chunk(p, psize);<br>
> + if (p == fm->dv) {<br>
> + fm->dvsize = psize;<br>
> + goto postaction;<br>
> + }<br>
> + }<br>
> + }<br>
> + else<br>
> + set_free_with_pinuse(p, psize, next);<br>
> + insert_chunk(fm, p, psize);<br>
> + check_free_chunk(fm, p);<br>
> + goto postaction;<br>
> + }<br>
> + }<br>
> + erroraction:<br>
> + USAGE_ERROR_ACTION(fm, p);<br>
> + postaction:<br>
> + POSTACTION(fm);<br>
> + }<br>
> + }<br>
> +}<br>
> +<br>
> +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {<br>
> + void* mem;<br>
> + size_t req = 0;<br>
> + mstate ms = (mstate)msp;<br>
> + if (!ok_magic(ms)) {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + return 0;<br>
> + }<br>
> + if (n_elements != 0) {<br>
> + req = n_elements * elem_size;<br>
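> + /* overflow check: when both operands fit in 16 bits the product cannot<br>
> + wrap a (32-bit or wider) size_t, so the division test is skipped */<br>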
> + if (((n_elements | elem_size) & ~(size_t)0xffff) &&<br>
> + (req / n_elements != elem_size))<br>
> + req = MAX_SIZE_T; /* force downstream failure on overflow */<br>
> + }<br>
> + mem = internal_malloc(ms, req);<br>
> + if (mem != 0 && calloc_must_clear(mem2chunk(mem)))<br>
> + MEMCLEAR(mem, req);<br>
> + return mem;<br>
> +}<br>
> +<br>
> +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {<br>
> + if (oldmem == 0)<br>
> + return mspace_malloc(msp, bytes);<br>
> +#ifdef REALLOC_ZERO_BYTES_FREES<br>
> + if (bytes == 0) {<br>
> + mspace_free(msp, oldmem);<br>
> + return 0;<br>
> + }<br>
> +#endif /* REALLOC_ZERO_BYTES_FREES */<br>
> + else {<br>
> +#if FOOTERS<br>
> + mchunkptr p = mem2chunk(oldmem);<br>
> + mstate ms = get_mstate_for(p);<br>
> +#else /* FOOTERS */<br>
> + mstate ms = (mstate)msp;<br>
> +#endif /* FOOTERS */<br>
> + if (!ok_magic(ms)) {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + return 0;<br>
> + }<br>
> + return internal_realloc(ms, oldmem, bytes);<br>
> + }<br>
> +}<br>
> +<br>
> +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {<br>
> + mstate ms = (mstate)msp;<br>
> + if (!ok_magic(ms)) {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + return 0;<br>
> + }<br>
> + return internal_memalign(ms, alignment, bytes);<br>
> +}<br>
> +<br>
> +void mspace_malloc_stats(mspace msp) {<br>
> + mstate ms = (mstate)msp;<br>
> + if (ok_magic(ms)) {<br>
> + internal_malloc_stats(ms);<br>
> + }<br>
> + else {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + }<br>
> +}<br>
> +<br>
> +size_t mspace_footprint(mspace msp) {<br>
> + size_t result = 0; /* stay defined if the magic check fails */<br>
> + mstate ms = (mstate)msp;<br>
> + if (ok_magic(ms)) {<br>
> + result = ms->footprint;<br>
> + } else {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + }<br>
> + return result;<br>
> +}<br>
> +<br>
> +<br>
> +size_t mspace_max_footprint(mspace msp) {<br>
> + size_t result = 0; /* stay defined if the magic check fails */<br>
> + mstate ms = (mstate)msp;<br>
> + if (ok_magic(ms)) {<br>
> + result = ms->max_footprint;<br>
> + } else {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + }<br>
> + return result;<br>
> +}<br>
> +<br>
> +<br>
> +#if !NO_MALLINFO<br>
> +struct mallinfo mspace_mallinfo(mspace msp) {<br>
> + mstate ms = (mstate)msp;<br>
> + if (!ok_magic(ms)) {<br>
> + USAGE_ERROR_ACTION(ms,ms);<br>
> + }<br>
> + return internal_mallinfo(ms);<br>
> +}<br>
> +#endif /* NO_MALLINFO */<br>
> +<br>
> +int mspace_mallopt(int param_number, int value) {<br>
> + return change_mparam(param_number, value);<br>
> +}<br>
> +<br>
> diff --git a/qxldod/qxldod.vcxproj b/qxldod/qxldod.vcxproj<br>
> index 15b116c..be4bb42 100755<br>
> --- a/qxldod/qxldod.vcxproj<br>
> +++ b/qxldod/qxldod.vcxproj<br>
> @@ -279,7 +279,7 @@<br>
> <ItemGroup><br>
> <ClCompile Include="BaseObject.cpp" /><br>
> <ClCompile Include="driver.cpp" /><br>
> - <ClCompile Include="mspace.c" /><br>
> + <ClCompile Include="mspace.cpp" /><br>
> <ClCompile Include="QxlDod.cpp" /><br>
> </ItemGroup><br>
> <ItemGroup><br>
> diff --git a/qxldod/qxldod.vcxproj.filters b/qxldod/qxldod.vcxproj.filters<br>
> index 1ba05af..6e241a2 100755<br>
> --- a/qxldod/qxldod.vcxproj.filters<br>
> +++ b/qxldod/qxldod.vcxproj.filters<br>
> @@ -42,7 +42,7 @@<br>
> <ClCompile Include="QxlDod.cpp"><br>
> <Filter>Source Files</Filter><br>
> </ClCompile><br>
> - <ClCompile Include="mspace.c"><br>
> + <ClCompile Include="mspace.cpp"><br>
> <Filter>Source Files</Filter><br>
> </ClCompile><br>
> </ItemGroup><br>
<br>
This patch cannot be accepted in this form; it mixes several unrelated changes:<br>
- many whitespace/indentation changes;<br>
- code movements;<br>
- paged/non-paged section movements;<br>
- the mspace.c file rename.<br></blockquote><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
All these changes should be split into separate patches. Also, it's better to use -M to reduce the patch size for the file rename.</blockquote><div>What's -M? Can you please explain?</div>
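<div><br></div><div>From a quick look at the git-format-patch man page, I guess -M turns on rename detection. Assuming the rename sits in the latest commit, something like:<br><br>    git format-patch -M -1<br><br>should then record mspace.c as renamed to mspace.cpp, roughly:<br><br>    similarity index 99%<br>    rename from qxldod/mspace.c<br>    rename to qxldod/mspace.cpp<br><br>(the exact similarity percentage depends on how much of the file content changed), instead of emitting the whole file as a delete/add pair. Is that the idea?</div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">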
<span class="HOEnZb"><font color="#888888"><br>
Frediano<br>
</font></span></blockquote></div><br><br clear="all"><div><br></div>-- <br><div class="gmail_signature" data-smartmail="gmail_signature"><div dir="ltr"><div><div dir="ltr"><div><div dir="ltr"><font size="4" color="#0b5394" face="times new roman, serif">Respectfully,<br></font><div style="font-size:12.8px;color:rgb(136,136,136)"><font size="4" color="#0b5394" face="times new roman, serif"><b><i>Sameeh Jubran</i></b></font></div><div style="font-size:12.8px;color:rgb(136,136,136)"><i style="color:rgb(7,55,99);font-family:"times new roman",serif;font-size:large"><span style="line-height:15px"><a href="https://il.linkedin.com/pub/sameeh-jubran/87/747/a8a" title="View public profile" name="UNIQUE_ID_SafeHtmlFilter_UNIQUE_ID_SafeHtmlFilter_UNIQUE_ID_SafeHtmlFilter_UNIQUE_ID_SafeHtmlFilter_14e2c1de96f8c195_UNIQUE_ID_SafeHtmlFilter_SafeHtmlFilter_SafeHtmlFilter_webProfileURL" style="color:rgb(17,85,204);margin:0px;padding:0px;border-width:0px;outline:none;vertical-align:baseline;text-decoration:none" target="_blank">Linkedin</a></span></i><br></div><div style="font-size:12.8px;color:rgb(136,136,136)"><font size="4" face="times new roman, serif" color="#073763"><i>Junior Software Engineer @ <a href="http://www.daynix.com" target="_blank">Daynix</a>.</i></font></div></div></div></div></div></div></div>
</div></div>