#define WIN32_LEAN_AND_MEAN
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define MALLOC_FAILURE_ACTION
#define HAVE_MORECORE 0
#define LACKS_SYS_MMAN_H
#if defined(DARWIN) || defined(_DARWIN)
#define HAVE_MORECORE 0
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#define MAX_SIZE_T           (~(size_t)0)
#define ONLY_MSPACES 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)8U)
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#define HAVE_MORECORE 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY (0)
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#define _STRUCT_MALLINFO
#define dlcalloc               calloc
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
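/*
  Note (added for clarity): when USE_DL_PREFIX is not defined, the dl*
  names above collapse to the standard allocator names, so linking this
  file replaces the C library's malloc family. For example, a call such
  as calloc(4, 16) then resolves to dlcalloc(4, 16) from this file.
*/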
typedef void* mspace;

mspace create_mspace(size_t capacity, int locked);
size_t destroy_mspace(mspace msp);
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
void* mspace_malloc(mspace msp, size_t bytes);
void mspace_free(mspace msp, void* mem);
void* mspace_realloc(mspace msp, void* mem, size_t newsize);
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);
size_t mspace_footprint(mspace msp);
size_t mspace_max_footprint(mspace msp);
struct mallinfo mspace_mallinfo(mspace msp);
void mspace_malloc_stats(mspace msp);
int mspace_trim(mspace msp, size_t pad);
int mspace_mallopt(int, int);
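/*
  Usage sketch (added, illustrative only; assumes MSPACES is enabled
  and this file is linked in):
*/
#if 0
#include <stdio.h>

static int example(void) {
  mspace ms = create_mspace(0, 0);   /* default capacity, no locking */
  if (ms == 0)
    return 1;
  void* p = mspace_malloc(ms, 128);  /* served from this mspace only */
  p = mspace_realloc(ms, p, 256);    /* mem must come from the same msp */
  mspace_free(ms, p);
  printf("footprint = %lu\n", (unsigned long)mspace_footprint(ms));
  destroy_mspace(ms);                /* releases everything at once */
  return 0;
}
#endif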
#pragma warning( disable : 4146 )
#ifndef LACKS_ERRNO_H
#ifndef LACKS_STDLIB_H
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#ifndef LACKS_STRING_H
#ifndef LACKS_STRINGS_H
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#ifndef LACKS_FCNTL_H
#ifndef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
#ifndef malloc_getpagesize
#  ifndef _SC_PAGE_SIZE
#    define _SC_PAGE_SIZE _SC_PAGESIZE
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
#    define malloc_getpagesize getpagesize()
#    define malloc_getpagesize getpagesize()
#  ifndef LACKS_SYS_PARAM_H
#    include <sys/param.h>
#  ifdef EXEC_PAGESIZE
#    define malloc_getpagesize EXEC_PAGESIZE
#    define malloc_getpagesize NBPG
#    define malloc_getpagesize (NBPG * CLSIZE)
#    define malloc_getpagesize NBPC
#    define malloc_getpagesize PAGESIZE
#    define malloc_getpagesize ((size_t)4096U)
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
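/*
  Worked example (added): with MALLOC_ALIGNMENT == 8, CHUNK_ALIGN_MASK
  is 7, so align_offset(0x1003) == (8 - 3) & 7 == 5: five bytes of
  padding bring 0x1003 up to the aligned address 0x1008, while
  align_offset(0x1008) == 0.
*/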
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL))

#define IS_MMAPPED_BIT       (SIZE_T_ZERO)
#define USE_MMAP_BIT         (SIZE_T_ZERO)
#define CALL_MMAP(s)         MFAIL
#define CALL_MUNMAP(a, s)    (-1)
#define DIRECT_MMAP(s)       MFAIL
#define IS_MMAPPED_BIT       (SIZE_T_ONE)
#define USE_MMAP_BIT         (SIZE_T_ONE)

#if !defined(WIN32) && !defined (__OS2__)
#define CALL_MUNMAP(a, s)    munmap((a), (s))
#define MMAP_PROT            (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1;
#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP(s)       CALL_MMAP(s)
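/*
  Note (added): on systems lacking MAP_ANONYMOUS, the CALL_MMAP above
  maps /dev/zero instead. The first call opens the device and caches
  the descriptor in dev_zero_fd; later calls reuse it, so the
  descriptor is deliberately never closed.
*/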
#elif defined(__OS2__)

static void* os2mmap(size_t size) {
  void* ptr;
  if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
      DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
    return MFAIL;
  return ptr;
}

#define os2direct_mmap(n)     os2mmap(n)

/* This function supports releasing coalesced segments */
static int os2munmap(void* ptr, size_t size) {
  while (size) {
    ULONG ulSize = size;
    ULONG ulFlags = 0;
    if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
      return -1;
    if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 ||
        ulSize > size)
      return -1;
    if (DosFreeMem(ptr) != 0)
      return -1;
    ptr = ( void * ) ( ( char * ) ptr + ulSize );
    size -= ulSize;
  }
  return 0;
}

#define CALL_MMAP(s)         os2mmap(s)
#define CALL_MUNMAP(a, s)    os2munmap((a), (s))
#define DIRECT_MMAP(s)       os2direct_mmap(s)
static void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_EXECUTE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

/* This function supports releasing coalesced segments */
static int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#define CALL_MMAP(s)         win32mmap(s)
#define CALL_MUNMAP(a, s)    win32munmap((a), (s))
#define DIRECT_MMAP(s)       win32direct_mmap(s)
#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL

#define CALL_MORECORE(S)     MORECORE(S)
#define CALL_MORECORE(S)     MFAIL

#define USE_NONCONTIGUOUS_BIT (4U)
#define EXTERN_BIT            (8U)
#if !defined(WIN32) && !defined(__OS2__)
#define MLOCK_T pthread_mutex_t
#define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
#define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
#define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
#elif defined(__OS2__)
#define INITIAL_LOCK(l)      DosCreateMutexSem(0, l, 0, FALSE)
#define ACQUIRE_LOCK(l)      DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
#define RELEASE_LOCK(l)      DosReleaseMutexSem(*l)
static MLOCK_T morecore_mutex;
static MLOCK_T magic_init_mutex;
static int win32_acquire_lock (MLOCK_T *sl) {
  for (;;) {
#ifdef InterlockedCompareExchangePointer
    if (!InterlockedCompareExchange(sl, 1, 0))
      return 0;
#else  /* Use older void* version */
    if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
      return 0;
#endif
    Sleep (0);
  }
}

static void win32_release_lock (MLOCK_T *sl) {
  InterlockedExchange (sl, 0);
}

#define INITIAL_LOCK(l)      *(l)=0
#define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
#define RELEASE_LOCK(l)      win32_release_lock(l)
static MLOCK_T morecore_mutex;
static MLOCK_T magic_init_mutex;

#define USE_LOCK_BIT         (2U)
#define USE_LOCK_BIT         (0U)
#define INITIAL_LOCK(l)

#if USE_LOCKS && HAVE_MORECORE
#define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
#define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
#define ACQUIRE_MORECORE_LOCK()
#define RELEASE_MORECORE_LOCK()

#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
#define ACQUIRE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK()
#define MCHUNK_SIZE         (sizeof(mchunk))

#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)

#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)

#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))

#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
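/*
  Worked example (added, assuming 32-bit size_t with FOOTERS off, so
  CHUNK_OVERHEAD == 4 and MALLOC_ALIGNMENT == 8): request2size(1)
  returns MIN_CHUNK_SIZE (16), and request2size(25) pads 25 + 4 up to
  the next multiple of 8, giving a 32-byte chunk including overhead.
*/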
#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)

#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define chunksize(p)        ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)

#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_mmapped(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))

#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#define calloc_must_clear(p) (!is_mmapped(p))
#define calloc_must_clear(p) (1)
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

#if FFI_MMAP_EXEC_WRIT
# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t)))
# define check_segment_merge(S,b,s) \
  (mmap_exec_offset((b),(s)) == (S)->exec_offset)
# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset)
# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset)
# define get_segment_flags(S)   (IS_MMAPPED_BIT)
# define set_segment_flags(S,v) \
  (((v) != IS_MMAPPED_BIT) ? (ABORT, (v)) : \
   (((S)->exec_offset = \
     mmap_exec_offset((S)->base, (S)->size)), \
    (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \
     (S)->exec_offset) ? (ABORT, (v)) : \
   (mmap_exec_offset((S)->base, (S)->size) = 0), (v)))
# define get_segment_flags(S)   ((S)->sflags)
# define set_segment_flags(S,v) ((S)->sflags = (v))
# define check_segment_merge(S,b,s) (1)

#define is_mmapped_segment(S)  (get_segment_flags(S) & IS_MMAPPED_BIT)
#define is_extern_segment(S)   (get_segment_flags(S) & EXTERN_BIT)
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
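/*
  Illustration (added): with these defaults, chunks below MIN_LARGE_SIZE
  (256 bytes) live in one of the 32 smallbins, each an exact size class
  spaced SMALLBIN_WIDTH (8) bytes apart; chunks of 256 bytes and up go
  to one of the 32 treebins, each covering a range of sizes.
*/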
#define is_global(M)       ((M) == &_gm_)
#define is_initialized(M)  ((M)->top != 0)

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

#define page_align(S)\
 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))

#define granularity_align(S)\
  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))

#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
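/*
  Worked example (added): with a 4096-byte page size, page_align(5000)
  yields 8192, and is_page_aligned(8192) is true. Because the macro adds
  a full page_size rather than page_size - 1 before masking, an exactly
  aligned input is also bumped up by one whole page.
*/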
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

    if (addr >= sp->base && addr < sp->base + sp->size)
    if ((sp = sp->next) == 0)

    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
    if ((sp = sp->next) == 0)
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s)  ((s) > (M)->trim_check)
#define should_trim(M,s)  (0)

#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#define PREACTION(M) (0)
#define POSTACTION(M)
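/*
  Pattern note (added): the public entry points bracket their critical
  sections as

    if (!PREACTION(M)) {
      ... operate on M ...
      POSTACTION(M);
    }

  so when locking is compiled out, PREACTION(M) is the constant (0) and
  POSTACTION(M) expands to nothing, making the wrapper cost-free.
*/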
int malloc_corruption_error_count;

static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)

#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT

#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
#define check_malloc_state(M)       do_check_malloc_state(M)

static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_malloc_state(mstate m);
static size_t traverse_and_check(mstate m);
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))

#if defined(__GNUC__) && defined(i386)
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
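/*
  Worked example (added): for a request of S == 0x300 (768 bytes),
  X == S >> TREEBIN_SHIFT == 3, whose highest set bit is bit 1 (K == 1),
  so I == (1 << 1) + ((0x300 >> 8) & 1) == 3. Each pair of treebins
  splits a power-of-two range in half: index 2 holds [512, 768) and
  index 3 holds [768, 1024).
*/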
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

#define idx2bit(i)              ((binmap_t)(1) << (i))

#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}
#define compute_bit2idx(X, I) I = ffs(X)-1
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}

#define least_bit(x)         ((x) & -(x))
#define left_bits(x)         ((x<<1) | -(x<<1))
#define same_or_left_bits(x) ((x) | -(x))
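/*
  Illustration (added): these rely on two's-complement negation. For
  x == 0x28, least_bit(x) == 0x08 isolates the lowest set bit. For a
  single-bit map entry x == idx2bit(2), left_bits(x) sets every bit
  strictly above bit 2, which is how the allocator masks the binmap to
  find the next nonempty bin at or above a given index.
*/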
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
#define ok_cinuse(p)     cinuse(p)
#define ok_pinuse(p)     pinuse(p)

#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_cinuse(p)     (1)
#define ok_pinuse(p)     (1)

#if (FOOTERS && !INSECURE)
#define ok_magic(M)      ((M)->magic == mparams.magic)
#define ok_magic(M)      (1)

#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#define RTCHECK(e)  (e)
#define RTCHECK(e)  (1)

#define mark_inuse_foot(M,p,s)

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))
static int init_mparams(void) {
#if MORECORE_CONTIGUOUS
#if (FOOTERS && !INSECURE)
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
        s = *((size_t *) buf);
  if (mparams.magic == 0) {
#if !defined(WIN32) && !defined(__OS2__)
#elif defined (__OS2__)
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      mparams.page_size = system_info.dwPageSize;
      mparams.granularity = system_info.dwAllocationGranularity;
    if ((sizeof(size_t) != sizeof(char*)) ||
        (sizeof(int) < 4) ||

static int change_mparam(int param_number, int value) {
  size_t val = (size_t)value;
  switch(param_number) {
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
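/*
  Usage sketch (added): change_mparam is the backend of mallopt and
  mspace_mallopt. For instance mallopt(M_GRANULARITY, 131072) succeeds
  only because 128 KiB is at least page_size and a power of two; the
  (val & (val-1)) == 0 test above rejects any non-power-of-two value.
*/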
  do_check_any_chunk(m, p);
  do_check_mmapped_chunk(m, p);
  do_check_any_chunk(m, p);
  if (p != m->dv && p != m->top) {

static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
    do_check_inuse_chunk(m, p);

    if (u->child[0] != 0) {
      do_check_tree(m, u->child[0]);
    if (u->child[1] != 0) {
      do_check_tree(m, u->child[1]);

  int empty = (m->treemap & (1U << i)) == 0;
    do_check_tree(m, t);

  unsigned int empty = (m->smallmap & (1U << i)) == 0;
    for (; p != b; p = p->bk) {
      do_check_free_chunk(m, p);
        do_check_inuse_chunk(m, q);

  } while ((p = p->fd) != b);
  } while ((u = u->fd) != t);

static size_t traverse_and_check(mstate m) {
          do_check_inuse_chunk(m, q);
          assert(q == m->dv || bin_find(m, q));
          do_check_free_chunk(m, q);

static void do_check_malloc_state(mstate m) {
    do_check_smallbin(m, i);
    do_check_treebin(m, i);
    do_check_any_chunk(m, m->dv);
    do_check_top_chunk(m, m->top);
  total = traverse_and_check(m);

  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

static void internal_malloc_stats(mstate m) {
    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
#define insert_large_chunk(M, X, S) {\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    X->parent = (tchunkptr)H;\
    size_t K = S << leftshift_for_tree_index(I);\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        else if (RTCHECK(ok_address(M, C))) {\
          CORRUPTION_ERROR_ACTION(M);\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          CORRUPTION_ERROR_ACTION(M);\
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
    tchunkptr F = X->fd;\
    if (RTCHECK(ok_address(M, F))) {\
      CORRUPTION_ERROR_ACTION(M);\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
      if (RTCHECK(ok_address(M, RP)))\
        CORRUPTION_ERROR_ACTION(M);\
    tbinptr* H = treebin_at(M, X->index);\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        CORRUPTION_ERROR_ACTION(M);\
      if (RTCHECK(ok_address(M, R))) {\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            CORRUPTION_ERROR_ACTION(M);\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            CORRUPTION_ERROR_ACTION(M);\
          CORRUPTION_ERROR_ACTION(M);\
#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }

#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);

#define internal_malloc(m, b)\
   (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
#define internal_free(m, mem)\
   if (m == gm) dlfree(mem); else mspace_free(m,mem);

#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
static void* mmap_alloc(mstate m, size_t nb) {
    if (mm < m->least_addr)

    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
      char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                    oldmmsize, newmmsize, 1);
        if (cp < m->least_addr)

static void init_bins(mstate m) {

static void reset_on_error(mstate m) {
  ++malloc_corruption_error_count;

static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  size_t psize = (char*)oldfirst - (char*)p;
  size_t qsize = psize - nb;
  assert((char*)oldfirst > (char*)q);
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;

static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  char* old_top = (char*)m->top;
  char* old_end = oldsp->base + oldsp->size;
  char* asp = rawsp + offset;
    if ((char*)(&(nextp->head)) < old_end)
  if (csp != old_top) {
    size_t psize = csp - old_top;

static void* sys_alloc(mstate m, size_t nb) {
    void* mem = mmap_alloc(m, nb);
        size_t ssize = end - br;
    init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
      while (sp != 0 && tbase != sp->base + sp->size)
      if (tbase < m->least_addr)
      while (sp != 0 && sp->base != tbase + tsize)
        char* oldbase = sp->base;
        return prepend_alloc(m, tbase, oldbase, nb);
        add_segment(m, tbase, tsize, mmap_flag);
  if (nb < m->topsize) {
    size_t rsize = m->topsize -= nb;

static size_t release_unused_segments(mstate m) {
  size_t released = 0;
    char* base = sp->base;

static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
          sp->size >= extra &&
          !has_segment_link(m, sp)) {
        size_t newsize = sp->size - extra;
          if (old_br == sp->base + sp->size) {
            if (rel_br != CMFAIL && new_br < old_br)
              released = old_br - new_br;
    if (released != 0) {
      sp->size -= released;
    released += release_unused_segments(m);
  return (released != 0)? 1 : 0;

static void* tmalloc_large(mstate m, size_t nb) {
      if ((rsize = trem) == 0)
      if (rt != 0 && rt != t)
  if (t == 0 && v == 0) {
    if (leftbits != 0) {
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
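/*
  Note (added): tmalloc_large performs a best-fit search over the
  treebin for the computed index, tracking the candidate v with the
  smallest remainder rsize; the final test above only uses v when
  splitting it would waste less than splitting the designated victim
  (m->dv).
*/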
static void* tmalloc_small(mstate m, size_t nb) {

static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
      newp = mmap_resize(m, oldp, nb);
    else if (oldsize >= nb) {
      size_t rsize = oldsize - nb;
    else if (next == m->top && oldsize + m->topsize > nb) {
      size_t newsize = oldsize + m->topsize;
      size_t newtopsize = newsize - nb;
        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);

static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
    while (a < alignment) a <<= 1;
    if ((((size_t)(mem)) % alignment) != 0) {
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;
        size_t remainder_size = size - nb;

static void** ialloc(mstate m,
  size_t element_size;
  size_t contents_size;
  size_t remainder_size;
  if (n_elements == 0)
    if (n_elements == 0)
    array_size = request2size(n_elements * (sizeof(void*)));
    contents_size = n_elements * element_size;
    for (i = 0; i != n_elements; ++i)
  size = contents_size + array_size;
    size_t array_chunk_size;
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    remainder_size = contents_size;
  for (i = 0; ; ++i) {
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      remainder_size -= size;
  if (marray != chunks) {
    if (element_size != 0) {
      assert(remainder_size == element_size);
    for (i = 0; i != n_elements; ++i)

  smallbits = gm->smallmap >> idx;
  if ((smallbits & 0x3U) != 0) {
    idx += ~smallbits & 1;
  else if (nb > gm->dvsize) {
    if (smallbits != 0) {
    else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
    if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
        size_t dvs = gm->dvsize;
    else if (nb < gm->topsize) {
      size_t rsize = gm->topsize -= nb;
    mem = sys_alloc(gm, nb);

      prevsize &= ~IS_MMAPPED_BIT;
      fm->footprint -= psize;
        if (next == fm->top) {
          size_t tsize = fm->topsize += psize;
        else if (next == fm->dv) {
          size_t dsize = fm->dvsize += psize;

  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
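/*
  Worked example (added): the guard above cheaply detects multiplication
  overflow in calloc. Only if either operand has bits above 0xffff can
  the product exceed size_t, so the division check runs only then. On
  32-bit, n_elements == 0x10000 and elem_size == 0x10001 wrap to
  req == 0x10000, and req / n_elements != elem_size exposes the
  overflow, forcing the allocation to fail.
*/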
#ifdef REALLOC_ZERO_BYTES_FREES
    return internal_realloc(m, oldmem, bytes);

  return internal_memalign(gm, alignment, bytes);

  size_t sz = elem_size;
  return ialloc(gm, n_elements, &sz, 3, chunks);

  return ialloc(gm, n_elements, sizes, 0, chunks);

    result = sys_trim(gm, pad);

  return gm->footprint;

  return gm->max_footprint;

  return internal_mallinfo(gm);

  internal_malloc_stats(gm);

  return change_mparam(param_number, value);

static mstate init_user_mstate(char* tbase, size_t tsize) {
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);

mspace create_mspace(size_t capacity, int locked) {
    size_t rs = ((capacity == 0)? mparams.granularity :
    char* tbase = (char*)(CALL_MMAP(tsize));
      m = init_user_mstate(tbase, tsize);

mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
    m = init_user_mstate((char*)base, capacity);

size_t destroy_mspace(mspace msp) {
      char* base = sp->base;

void* mspace_malloc(mspace msp, size_t bytes) {
      if ((smallbits & 0x3U) != 0) {
        idx += ~smallbits & 1;
      else if (nb > ms->dvsize) {
        if (smallbits != 0) {
        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
      if (nb <= ms->dvsize) {
        size_t rsize = ms->dvsize - nb;
      else if (nb < ms->topsize) {
        size_t rsize = ms->topsize -= nb;
      mem = sys_alloc(ms, nb);

void mspace_free(mspace msp, void* mem) {
        prevsize &= ~IS_MMAPPED_BIT;
        fm->footprint -= psize;
          if (next == fm->top) {
            size_t tsize = fm->topsize += psize;
          else if (next == fm->dv) {
            size_t dsize = fm->dvsize += psize;

void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))

void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
    return mspace_malloc(msp, bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
    mspace_free(msp, oldmem);
    mstate ms = get_mstate_for(p);
    return internal_realloc(ms, oldmem, bytes);

void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  return internal_memalign(ms, alignment, bytes);

void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]) {
  size_t sz = elem_size;
  return ialloc(ms, n_elements, &sz, 3, chunks);

void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]) {
  return ialloc(ms, n_elements, sizes, 0, chunks);

int mspace_trim(mspace msp, size_t pad) {
    result = sys_trim(ms, pad);

void mspace_malloc_stats(mspace msp) {
    internal_malloc_stats(ms);

size_t mspace_footprint(mspace msp) {

size_t mspace_max_footprint(mspace msp) {

struct mallinfo mspace_mallinfo(mspace msp) {
  return internal_mallinfo(ms);

int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);