• Main Page
  • Related Pages
  • Modules
  • Namespaces
  • Classes
  • Files
  • File List
  • File Members

rdlmalloc.cpp

Go to the documentation of this file.
00001 #ifdef _RAKNET_SUPPORT_DL_MALLOC
00002 
00003 #include "rdlmalloc.h"
00004 
00005 /* --------------------------- Lock preliminaries ------------------------ */
00006 
00007 /*
00008 When locks are defined, there is one global lock, plus
00009 one per-mspace lock.
00010 
00011 The global lock_ensures that mparams.magic and other unique
00012 mparams values are initialized only once. It also protects
00013 sequences of calls to MORECORE.  In many cases sys_alloc requires
00014 two calls, that should not be interleaved with calls by other
00015 threads.  This does not protect against direct calls to MORECORE
00016 by other threads not using this lock, so there is still code to
00017 cope the best we can on interference.
00018 
00019 Per-mspace locks surround calls to malloc, free, etc.  To enable use
00020 in layered extensions, per-mspace locks are reentrant.
00021 
00022 Because lock-protected regions generally have bounded times, it is
00023 OK to use the supplied simple spinlocks in the custom versions for
00024 x86. Spinlocks are likely to improve performance for lightly
00025 contended applications, but worsen performance under heavy
00026 contention.
00027 
00028 If USE_LOCKS is > 1, the definitions of lock routines here are
00029 bypassed, in which case you will need to define the type MLOCK_T,
00030 and at least INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly
00031 TRY_LOCK (which is not used in this malloc, but commonly needed in
00032 extensions.)  You must also declare a
00033 static MLOCK_T malloc_global_mutex = { initialization values };.
00034 
00035 */
00036 
00037 #if USE_LOCKS == 1
00038 
00039 #if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
00040 #if defined(_XBOX) || defined(X360)
00041                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                
00042 #elif !defined(DL_PLATFORM_WIN32)
00043 
00044 /* Custom pthread-style spin locks on x86 and x64 for gcc */
00045 struct pthread_mlock_t {
00046         volatile unsigned int l;
00047         unsigned int c;
00048         pthread_t threadid;
00049 };
00050 #define MLOCK_T               struct pthread_mlock_t
00051 #define CURRENT_THREAD        pthread_self()
00052 #define INITIAL_LOCK(sl)      ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
00053 #define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
00054 #define RELEASE_LOCK(sl)      pthread_release_lock(sl)
00055 #define TRY_LOCK(sl)          pthread_try_lock(sl)
00056 #define SPINS_PER_YIELD       63
00057 
00058 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
00059 
00060 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
00061         int spins = 0;
00062         volatile unsigned int* lp = &sl->l;
00063         for (;;) {
00064                 if (*lp != 0) {
00065                         if (sl->threadid == CURRENT_THREAD) {
00066                                 ++sl->c;
00067                                 return 0;
00068                         }
00069                 }
00070                 else {
00071                         /* place args to cmpxchgl in locals to evade oddities in some gccs */
00072                         int cmp = 0;
00073                         int val = 1;
00074                         int ret;
00075                         __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
00076                                 : "=a" (ret)
00077                                 : "r" (val), "m" (*(lp)), "0"(cmp)
00078                                 : "memory", "cc");
00079                         if (!ret) {
00080                                 assert(!sl->threadid);
00081                                 sl->threadid = CURRENT_THREAD;
00082                                 sl->c = 1;
00083                                 return 0;
00084                         }
00085                 }
00086                 if ((++spins & SPINS_PER_YIELD) == 0) {
00087 #if defined (__SVR4) && defined (__sun) /* solaris */
00088                         thr_yield();
00089 #else
00090 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
00091                         sched_yield();
00092 #else  /* no-op yield on unknown systems */
00093                         ;
00094 #endif /* __linux__ || __FreeBSD__ || __APPLE__ */
00095 #endif /* solaris */
00096                 }
00097         }
00098 }
00099 
00100 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
00101         volatile unsigned int* lp = &sl->l;
00102         assert(*lp != 0);
00103         assert(sl->threadid == CURRENT_THREAD);
00104         if (--sl->c == 0) {
00105                 sl->threadid = 0;
00106                 int prev = 0;
00107                 int ret;
00108                 __asm__ __volatile__ ("lock; xchgl %0, %1"
00109                         : "=r" (ret)
00110                         : "m" (*(lp)), "0"(prev)
00111                         : "memory");
00112         }
00113 }
00114 
00115 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
00116         volatile unsigned int* lp = &sl->l;
00117         if (*lp != 0) {
00118                 if (sl->threadid == CURRENT_THREAD) {
00119                         ++sl->c;
00120                         return 1;
00121                 }
00122         }
00123         else {
00124                 int cmp = 0;
00125                 int val = 1;
00126                 int ret;
00127                 __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
00128                         : "=a" (ret)
00129                         : "r" (val), "m" (*(lp)), "0"(cmp)
00130                         : "memory", "cc");
00131                 if (!ret) {
00132                         assert(!sl->threadid);
00133                         sl->threadid = CURRENT_THREAD;
00134                         sl->c = 1;
00135                         return 1;
00136                 }
00137         }
00138         return 0;
00139 }
00140 
00141 
00142 #else /* DL_PLATFORM_WIN32 */
00143 /* Custom win32-style spin locks on x86 and x64 for MSC */
00144 struct win32_mlock_t {
00145         volatile long l;
00146         unsigned int c;
00147         long threadid;
00148 };
00149 
00150 #define MLOCK_T               struct win32_mlock_t
00151 #define CURRENT_THREAD        GetCurrentThreadId()
00152 #define INITIAL_LOCK(sl)      ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
00153 #define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
00154 #define RELEASE_LOCK(sl)      win32_release_lock(sl)
00155 #define TRY_LOCK(sl)          win32_try_lock(sl)
00156 #define SPINS_PER_YIELD       63
00157 
00158 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
00159 
00160 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
00161         int spins = 0;
00162         for (;;) {
00163                 if (sl->l != 0) {
00164                         if (sl->threadid == CURRENT_THREAD) {
00165                                 ++sl->c;
00166                                 return 0;
00167                         }
00168                 }
00169                 else {
00170                         if (!interlockedexchange(&sl->l, 1)) {
00171                                 assert(!sl->threadid);
00172                                 sl->threadid = CURRENT_THREAD;
00173                                 sl->c = 1;
00174                                 return 0;
00175                         }
00176                 }
00177                 if ((++spins & SPINS_PER_YIELD) == 0)
00178                         SleepEx(0, FALSE);
00179         }
00180 }
00181 
00182 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
00183         assert(sl->threadid == CURRENT_THREAD);
00184         assert(sl->l != 0);
00185         if (--sl->c == 0) {
00186                 sl->threadid = 0;
00187                 interlockedexchange (&sl->l, 0);
00188         }
00189 }
00190 
00191 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
00192         if (sl->l != 0) {
00193                 if (sl->threadid == CURRENT_THREAD) {
00194                         ++sl->c;
00195                         return 1;
00196                 }
00197         }
00198         else {
00199                 if (!interlockedexchange(&sl->l, 1)){
00200                         assert(!sl->threadid);
00201                         sl->threadid = CURRENT_THREAD;
00202                         sl->c = 1;
00203                         return 1;
00204                 }
00205         }
00206         return 0;
00207 }
00208 
00209 #endif /* DL_PLATFORM_WIN32 */
00210 #else /* USE_SPIN_LOCKS */
00211 
00212 #ifndef DL_PLATFORM_WIN32
00213 /* pthreads-based locks */
00214 
00215 #define MLOCK_T               pthread_mutex_t
00216 #define CURRENT_THREAD        pthread_self()
00217 #define INITIAL_LOCK(sl)      pthread_init_lock(sl)
00218 #define ACQUIRE_LOCK(sl)      pthread_mutex_lock(sl)
00219 #define RELEASE_LOCK(sl)      pthread_mutex_unlock(sl)
00220 #define TRY_LOCK(sl)          (!pthread_mutex_trylock(sl))
00221 
00222 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
00223 
00224 /* Cope with old-style linux recursive lock initialization by adding */
00225 /* skipped internal declaration from pthread.h */
00226 #ifdef linux
00227 #ifndef PTHREAD_MUTEX_RECURSIVE
00228 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
00229                                                                                          int __kind));
00230 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
00231 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
00232 #endif
00233 #endif
00234 
00235 static int pthread_init_lock (MLOCK_T *sl) {
00236         pthread_mutexattr_t attr;
00237         if (pthread_mutexattr_init(&attr)) return 1;
00238         if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
00239         if (pthread_mutex_init(sl, &attr)) return 1;
00240         if (pthread_mutexattr_destroy(&attr)) return 1;
00241         return 0;
00242 }
00243 
00244 #else /* DL_PLATFORM_WIN32 */
00245 /* Win32 critical sections */
00246 #define MLOCK_T               CRITICAL_SECTION
00247 #define CURRENT_THREAD        GetCurrentThreadId()
00248 #define INITIAL_LOCK(s)       (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
00249 #define ACQUIRE_LOCK(s)       (EnterCriticalSection(sl), 0)
00250 #define RELEASE_LOCK(s)       LeaveCriticalSection(sl)
00251 #define TRY_LOCK(s)           TryEnterCriticalSection(sl)
00252 #define NEED_GLOBAL_LOCK_INIT
00253 
00254 static MLOCK_T malloc_global_mutex;
00255 static volatile long malloc_global_mutex_status;
00256 
00257 /* Use spin loop to initialize global lock */
00258 static void init_malloc_global_mutex() {
00259         for (;;) {
00260                 long stat = malloc_global_mutex_status;
00261                 if (stat > 0)
00262                         return;
00263                 /* transition to < 0 while initializing, then to > 0) */
00264                 if (stat == 0 &&
00265                         interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
00266                                 InitializeCriticalSection(&malloc_global_mutex);
00267                                 interlockedexchange(&malloc_global_mutex_status,1);
00268                                 return;
00269                 }
00270                 SleepEx(0, FALSE);
00271         }
00272 }
00273 
00274 #endif /* DL_PLATFORM_WIN32 */
00275 #endif /* USE_SPIN_LOCKS */
00276 #endif /* USE_LOCKS == 1 */
00277 
00278 /* -----------------------  User-defined locks ------------------------ */
00279 
00280 #if USE_LOCKS > 1
00281 /* Define your own lock implementation here */
00282 /* #define INITIAL_LOCK(sl)  ... */
00283 /* #define ACQUIRE_LOCK(sl)  ... */
00284 /* #define RELEASE_LOCK(sl)  ... */
00285 /* #define TRY_LOCK(sl) ... */
00286 /* static MLOCK_T malloc_global_mutex = ... */
00287 #endif /* USE_LOCKS > 1 */
00288 
00289 /* -----------------------  Lock-based state ------------------------ */
00290 
00291 #if USE_LOCKS
00292 #define USE_LOCK_BIT               (2U)
00293 #else  /* USE_LOCKS */
00294 #define USE_LOCK_BIT               (0U)
00295 #define INITIAL_LOCK(l)
00296 #endif /* USE_LOCKS */
00297 
00298 #if USE_LOCKS
00299 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
00300 #define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
00301 #endif
00302 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
00303 #define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
00304 #endif
00305 #else  /* USE_LOCKS */
00306 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
00307 #define RELEASE_MALLOC_GLOBAL_LOCK()
00308 #endif /* USE_LOCKS */
00309 
00310 
00311 /* -----------------------  Chunk representations ------------------------ */
00312 
00313 /*
00314 (The following includes lightly edited explanations by Colin Plumb.)
00315 
00316 The malloc_chunk declaration below is misleading (but accurate and
00317 necessary).  It declares a "view" into memory allowing access to
00318 necessary fields at known offsets from a given base.
00319 
00320 Chunks of memory are maintained using a `boundary tag' method as
00321 originally described by Knuth.  (See the paper by Paul Wilson
00322 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
00323 techniques.)  Sizes of free chunks are stored both in the front of
00324 each chunk and at the end.  This makes consolidating fragmented
00325 chunks into bigger chunks fast.  The head fields also hold bits
00326 representing whether chunks are free or in use.
00327 
00328 Here are some pictures to make it clearer.  They are "exploded" to
00329 show that the state of a chunk can be thought of as extending from
00330 the high 31 bits of the head field of its header through the
00331 prev_foot and PINUSE_BIT bit of the following chunk header.
00332 
00333 A chunk that's in use looks like:
00334 
00335 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00336 | Size of previous chunk (if P = 0)                             |
00337 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00338 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
00339 | Size of this chunk                                         1| +-+
00340 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00341 |                                                               |
00342 +-                                                             -+
00343 |                                                               |
00344 +-                                                             -+
00345 |                                                               :
00346 +-      size - sizeof(size_t) available payload bytes          -+
00347 :                                                               |
00348 chunk-> +-                                                             -+
00349 |                                                               |
00350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00351 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
00352 | Size of next chunk (may or may not be in use)               | +-+
00353 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00354 
00355 And if it's free, it looks like this:
00356 
00357 chunk-> +-                                                             -+
00358 | User payload (must be in use, or we would have merged!)       |
00359 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00360 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
00361 | Size of this chunk                                         0| +-+
00362 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00363 | Next pointer                                                  |
00364 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00365 | Prev pointer                                                  |
00366 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00367 |                                                               :
00368 +-      size - sizeof(struct chunk) unused bytes               -+
00369 :                                                               |
00370 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00371 | Size of this chunk                                            |
00372 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00373 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
00374 | Size of next chunk (must be in use, or we would have merged)| +-+
00375 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00376 |                                                               :
00377 +- User payload                                                -+
00378 :                                                               |
00379 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00380 |0|
00381 +-+
00382 Note that since we always merge adjacent free chunks, the chunks
00383 adjacent to a free chunk must be in use.
00384 
00385 Given a pointer to a chunk (which can be derived trivially from the
00386 payload pointer) we can, in O(1) time, find out whether the adjacent
00387 chunks are free, and if so, unlink them from the lists that they
00388 are on and merge them with the current chunk.
00389 
00390 Chunks always begin on even word boundaries, so the mem portion
00391 (which is returned to the user) is also on an even word boundary, and
00392 thus at least double-word aligned.
00393 
00394 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
00395 chunk size (which is always a multiple of two words), is an in-use
00396 bit for the *previous* chunk.  If that bit is *clear*, then the
00397 word before the current chunk size contains the previous chunk
00398 size, and can be used to find the front of the previous chunk.
00399 The very first chunk allocated always has this bit set, preventing
00400 access to non-existent (or non-owned) memory. If pinuse is set for
00401 any given chunk, then you CANNOT determine the size of the
00402 previous chunk, and might even get a memory addressing fault when
00403 trying to do so.
00404 
00405 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
00406 the chunk size redundantly records whether the current chunk is
00407 inuse (unless the chunk is mmapped). This redundancy enables usage
00408 checks within free and realloc, and reduces indirection when freeing
00409 and consolidating chunks.
00410 
00411 Each freshly allocated chunk must have both cinuse and pinuse set.
00412 That is, each allocated chunk borders either a previously allocated
00413 and still in-use chunk, or the base of its memory arena. This is
00414 ensured by making all allocations from the the `lowest' part of any
00415 found chunk.  Further, no free chunk physically borders another one,
00416 so each free chunk is known to be preceded and followed by either
00417 inuse chunks or the ends of memory.
00418 
00419 Note that the `foot' of the current chunk is actually represented
00420 as the prev_foot of the NEXT chunk. This makes it easier to
00421 deal with alignments etc but can be very confusing when trying
00422 to extend or adapt this code.
00423 
00424 The exceptions to all this are
00425 
00426 1. The special chunk `top' is the top-most available chunk (i.e.,
00427 the one bordering the end of available memory). It is treated
00428 specially.  Top is never included in any bin, is used only if
00429 no other chunk is available, and is released back to the
00430 system if it is very large (see M_TRIM_THRESHOLD).  In effect,
00431 the top chunk is treated as larger (and thus less well
00432 fitting) than any other available chunk.  The top chunk
00433 doesn't update its trailing size field since there is no next
00434 contiguous chunk that would have to index off it. However,
00435 space is still allocated for it (TOP_FOOT_SIZE) to enable
00436 separation or merging when space is extended.
00437 
00438 3. Chunks allocated via mmap, have both cinuse and pinuse bits
00439 cleared in their head fields.  Because they are allocated
00440 one-by-one, each must carry its own prev_foot field, which is
00441 also used to hold the offset this chunk has within its mmapped
00442 region, which is needed to preserve alignment. Each mmapped
00443 chunk is trailed by the first two fields of a fake next-chunk
00444 for sake of usage checks.
00445 
00446 */
00447 
00448 struct malloc_chunk {
00449         size_t               prev_foot;  /* Size of previous chunk (if free).  */
00450         size_t               head;       /* Size and inuse bits. */
00451         struct malloc_chunk* fd;         /* double links -- used only if free. */
00452         struct malloc_chunk* bk;
00453 };
00454 
00455 typedef struct malloc_chunk  mchunk;
00456 typedef struct malloc_chunk* mchunkptr;
00457 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
00458 typedef unsigned int bindex_t;         /* Described below */
00459 typedef unsigned int binmap_t;         /* Described below */
00460 typedef unsigned int flag_t;           /* The type of various bit flag sets */
00461 
00462 /* ------------------- Chunks sizes and alignments ----------------------- */
00463 
00464 #define MCHUNK_SIZE         (sizeof(mchunk))
00465 
00466 #if FOOTERS
00467 #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
00468 #else /* FOOTERS */
00469 #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
00470 #endif /* FOOTERS */
00471 
00472 /* MMapped chunks need a second word of overhead ... */
00473 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
00474 /* ... and additional padding for fake next-chunk at foot */
00475 #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
00476 
00477 /* The smallest size we can malloc is an aligned minimal chunk */
00478 #define MIN_CHUNK_SIZE\
00479         ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
00480 
00481 /* conversion from malloc headers to user pointers, and back */
00482 #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
00483 #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
00484 /* chunk associated with aligned address A */
00485 #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
00486 
00487 /* Bounds on request (not chunk) sizes. */
00488 #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
00489 #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
00490 
00491 /* pad request bytes into a usable size */
00492 #define pad_request(req) \
00493         (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
00494 
00495 /* pad request, checking for minimum (but not maximum) */
00496 #define request2size(req) \
00497         (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
00498 
00499 
00500 /* ------------------ Operations on head and foot fields ----------------- */
00501 
00502 /*
00503 The head field of a chunk is or'ed with PINUSE_BIT when previous
00504 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
00505 use, unless mmapped, in which case both bits are cleared.
00506 
00507 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
00508 */
00509 
00510 #define PINUSE_BIT          (SIZE_T_ONE)
00511 #define CINUSE_BIT          (SIZE_T_TWO)
00512 #define FLAG4_BIT           (SIZE_T_FOUR)
00513 #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
00514 #define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
00515 
00516 /* Head value for fenceposts */
00517 #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
00518 
00519 /* extraction of fields from head words */
00520 #define cinuse(p)           ((p)->head & CINUSE_BIT)
00521 #define pinuse(p)           ((p)->head & PINUSE_BIT)
00522 #define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
00523 #define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
00524 
00525 #define chunksize(p)        ((p)->head & ~(FLAG_BITS))
00526 
00527 #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
00528 
00529 /* Treat space at ptr +/- offset as a chunk */
00530 #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
00531 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
00532 
00533 /* Ptr to next or previous physical malloc_chunk. */
00534 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
00535 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
00536 
00537 /* extract next chunk's pinuse bit */
00538 #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
00539 
00540 /* Get/set size at footer */
00541 #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
00542 #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
00543 
00544 /* Set size, pinuse bit, and foot */
00545 #define set_size_and_pinuse_of_free_chunk(p, s)\
00546         ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
00547 
00548 /* Set size, pinuse bit, foot, and clear next pinuse */
00549 #define set_free_with_pinuse(p, s, n)\
00550         (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
00551 
00552 /* Get the internal overhead associated with chunk p */
00553 #define overhead_for(p)\
00554         (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
00555 
00556 /* Return true if malloced space is not necessarily cleared */
00557 #if MMAP_CLEARS
00558 #define calloc_must_clear(p) (!is_mmapped(p))
00559 #else /* MMAP_CLEARS */
00560 #define calloc_must_clear(p) (1)
00561 #endif /* MMAP_CLEARS */
00562 
00563 /* ---------------------- Overlaid data structures ----------------------- */
00564 
00565 /*
00566 When chunks are not in use, they are treated as nodes of either
00567 lists or trees.
00568 
00569 "Small"  chunks are stored in circular doubly-linked lists, and look
00570 like this:
00571 
00572 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00573 |             Size of previous chunk                            |
00574 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00575 `head:' |             Size of chunk, in bytes                         |P|
00576 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00577 |             Forward pointer to next chunk in list             |
00578 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00579 |             Back pointer to previous chunk in list            |
00580 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00581 |             Unused space (may be 0 bytes long)                .
00582 .                                                               .
00583 .                                                               |
00584 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00585 `foot:' |             Size of chunk, in bytes                           |
00586 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00587 
00588 Larger chunks are kept in a form of bitwise digital trees (aka
00589 tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
00590 free chunks greater than 256 bytes, their size doesn't impose any
00591 constraints on user chunk sizes.  Each node looks like:
00592 
00593 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00594 |             Size of previous chunk                            |
00595 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00596 `head:' |             Size of chunk, in bytes                         |P|
00597 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00598 |             Forward pointer to next chunk of same size        |
00599 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00600 |             Back pointer to previous chunk of same size       |
00601 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00602 |             Pointer to left child (child[0])                  |
00603 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00604 |             Pointer to right child (child[1])                 |
00605 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00606 |             Pointer to parent                                 |
00607 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00608 |             bin index of this chunk                           |
00609 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00610 |             Unused space                                      .
00611 .                                                               |
00612 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00613 `foot:' |             Size of chunk, in bytes                           |
00614 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
00615 
00616 Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
00617 of the same size are arranged in a circularly-linked list, with only
00618 the oldest chunk (the next to be used, in our FIFO ordering)
00619 actually in the tree.  (Tree members are distinguished by a non-null
00620 parent pointer.)  If a chunk with the same size an an existing node
00621 is inserted, it is linked off the existing node using pointers that
00622 work in the same way as fd/bk pointers of small chunks.
00623 
00624 Each tree contains a power of 2 sized range of chunk sizes (the
00625 smallest is 0x100 <= x < 0x180), which is is divided in half at each
00626 tree level, with the chunks in the smaller half of the range (0x100
00627 <= x < 0x140 for the top nose) in the left subtree and the larger
00628 half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
00629 done by inspecting individual bits.
00630 
00631 Using these rules, each node's left subtree contains all smaller
00632 sizes than its right subtree.  However, the node at the root of each
00633 subtree has no particular ordering relationship to either.  (The
00634 dividing line between the subtree sizes is based on trie relation.)
00635 If we remove the last chunk of a given size from the interior of the
00636 tree, we need to replace it with a leaf node.  The tree ordering
00637 rules permit a node to be replaced by any leaf below it.
00638 
00639 The smallest chunk in a tree (a common operation in a best-fit
00640 allocator) can be found by walking a path to the leftmost leaf in
00641 the tree.  Unlike a usual binary tree, where we follow left child
00642 pointers until we reach a null, here we follow the right child
00643 pointer any time the left one is null, until we reach a leaf with
00644 both child pointers null. The smallest chunk in the tree will be
00645 somewhere along that path.
00646 
00647 The worst case number of steps to add, find, or remove a node is
00648 bounded by the number of bits differentiating chunks within
00649 bins. Under current bin calculations, this ranges from 6 up to 21
00650 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
00651 is of course much better.
00652 */
00653 
00654 struct malloc_tree_chunk {
00655         /* The first four fields must be compatible with malloc_chunk */
00656         size_t                    prev_foot;
00657         size_t                    head;
00658         struct malloc_tree_chunk* fd;
00659         struct malloc_tree_chunk* bk;
00660 
00661         struct malloc_tree_chunk* child[2];
00662         struct malloc_tree_chunk* parent;
00663         bindex_t                  index;
00664 };
00665 
00666 typedef struct malloc_tree_chunk  tchunk;
00667 typedef struct malloc_tree_chunk* tchunkptr;
00668 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
00669 
00670 /* A little helper macro for trees */
00671 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
00672 
00673 /* ----------------------------- Segments -------------------------------- */
00674 
00675 /*
00676 Each malloc space may include non-contiguous segments, held in a
00677 list headed by an embedded malloc_segment record representing the
00678 top-most space. Segments also include flags holding properties of
00679 the space. Large chunks that are directly allocated by mmap are not
00680 included in this list. They are instead independently created and
00681 destroyed without otherwise keeping track of them.
00682 
00683 Segment management mainly comes into play for spaces allocated by
00684 MMAP.  Any call to MMAP might or might not return memory that is
00685 adjacent to an existing segment.  MORECORE normally contiguously
00686 extends the current space, so this space is almost always adjacent,
00687 which is simpler and faster to deal with. (This is why MORECORE is
00688 used preferentially to MMAP when both are available -- see
00689 sys_alloc.)  When allocating using MMAP, we don't use any of the
00690 hinting mechanisms (inconsistently) supported in various
00691 implementations of unix mmap, or distinguish reserving from
00692 committing memory. Instead, we just ask for space, and exploit
00693 contiguity when we get it.  It is probably possible to do
00694 better than this on some systems, but no general scheme seems
00695 to be significantly better.
00696 
00697 Management entails a simpler variant of the consolidation scheme
00698 used for chunks to reduce fragmentation -- new adjacent memory is
00699 normally prepended or appended to an existing segment. However,
00700 there are limitations compared to chunk consolidation that mostly
00701 reflect the fact that segment processing is relatively infrequent
00702 (occurring only when getting memory from system) and that we
00703 don't expect to have huge numbers of segments:
00704 
00705 * Segments are not indexed, so traversal requires linear scans.  (It
00706 would be possible to index these, but is not worth the extra
00707 overhead and complexity for most programs on most platforms.)
00708 * New segments are only appended to old ones when holding top-most
00709 memory; if they cannot be prepended to others, they are held in
00710 different segments.
00711 
00712 Except for the top-most segment of an mstate, each segment record
00713 is kept at the tail of its segment. Segments are added by pushing
00714 segment records onto the list headed by &mstate.seg for the
00715 containing mstate.
00716 
00717 Segment flags control allocation/merge/deallocation policies:
00718 * If EXTERN_BIT set, then we did not allocate this segment,
00719 and so should not try to deallocate or merge with others.
00720 (This currently holds only for the initial segment passed
00721 into rak_create_mspace_with_base.)
00722 * If USE_MMAP_BIT set, the segment may be merged with
00723 other surrounding mmapped segments and trimmed/de-allocated
00724 using munmap.
00725 * If neither bit is set, then the segment was obtained using
00726 MORECORE so can be merged with surrounding MORECORE'd segments
00727 and deallocated/trimmed using MORECORE with negative arguments.
00728 */
00729 
00730 struct malloc_segment {
00731         char*        base;             /* base address */
00732         size_t       size;             /* allocated size */
00733         struct malloc_segment* next;   /* ptr to next segment */
00734         flag_t       sflags;           /* mmap and extern flag */
00735 };
00736 
00737 #define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
00738 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
00739 
00740 typedef struct malloc_segment  msegment;
00741 typedef struct malloc_segment* msegmentptr;
00742 
00743 /* ---------------------------- malloc_state ----------------------------- */
00744 
00745 /*
00746 A malloc_state holds all of the bookkeeping for a space.
00747 The main fields are:
00748 
00749 Top
00750 The topmost chunk of the currently active segment. Its size is
00751 cached in topsize.  The actual size of topmost space is
00752 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
00753 fenceposts and segment records if necessary when getting more
00754 space from the system.  The size at which to autotrim top is
00755 cached from mparams in trim_check, except that it is disabled if
00756 an autotrim fails.
00757 
00758 Designated victim (dv)
00759 This is the preferred chunk for servicing small requests that
00760 don't have exact fits.  It is normally the chunk split off most
00761 recently to service another small request.  Its size is cached in
00762 dvsize. The link fields of this chunk are not maintained since it
00763 is not kept in a bin.
00764 
00765 SmallBins
00766 An array of bin headers for free chunks.  These bins hold chunks
00767 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
00768 chunks of all the same size, spaced 8 bytes apart.  To simplify
00769 use in double-linked lists, each bin header acts as a malloc_chunk
00770 pointing to the real first node, if it exists (else pointing to
00771 itself).  This avoids special-casing for headers.  But to avoid
00772 waste, we allocate only the fd/bk pointers of bins, and then use
00773 repositioning tricks to treat these as the fields of a chunk.
00774 
00775 TreeBins
00776 Treebins are pointers to the roots of trees holding a range of
00777 sizes. There are 2 equally spaced treebins for each power of two
00778 from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
00779 larger.
00780 
00781 Bin maps
00782 There is one bit map for small bins ("smallmap") and one for
00783 treebins ("treemap).  Each bin sets its bit when non-empty, and
00784 clears the bit when empty.  Bit operations are then used to avoid
00785 bin-by-bin searching -- nearly all "search" is done without ever
00786 looking at bins that won't be selected.  The bit maps
00787 conservatively use 32 bits per map word, even if on 64bit system.
00788 For a good description of some of the bit-based techniques used
00789 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
00790 supplement at http://hackersdelight.org/). Many of these are
00791 intended to reduce the branchiness of paths through malloc etc, as
00792 well as to reduce the number of memory locations read or written.
00793 
00794 Segments
00795 A list of segments headed by an embedded malloc_segment record
00796 representing the initial space.
00797 
00798 Address check support
00799 The least_addr field is the least address ever obtained from
00800 MORECORE or MMAP. Attempted frees and reallocs of any address less
00801 than this are trapped (unless INSECURE is defined).
00802 
00803 Magic tag
00804 A cross-check field that should always hold same value as mparams.magic.
00805 
00806 Flags
00807 Bits recording whether to use MMAP, locks, or contiguous MORECORE
00808 
00809 Statistics
00810 Each space keeps track of current and maximum system memory
00811 obtained via MORECORE or MMAP.
00812 
00813 Trim support
00814 Fields holding the amount of unused topmost memory that should trigger
00815 timming, and a counter to force periodic scanning to release unused
00816 non-topmost segments.
00817 
00818 Locking
00819 If USE_LOCKS is defined, the "mutex" lock is acquired and released
00820 around every public call using this mspace.
00821 
00822 Extension support
00823 A void* pointer and a size_t field that can be used to help implement
00824 extensions to this malloc.
00825 */
00826 
00827 /* Bin types, widths and sizes */
00828 #define NSMALLBINS        (32U)
00829 #define NTREEBINS         (32U)
00830 #define SMALLBIN_SHIFT    (3U)
00831 #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
00832 #define TREEBIN_SHIFT     (8U)
00833 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
00834 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
00835 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
00836 
00837 struct malloc_state {
00838         binmap_t   smallmap;
00839         binmap_t   treemap;
00840         size_t     dvsize;
00841         size_t     topsize;
00842         char*      least_addr;
00843         mchunkptr  dv;
00844         mchunkptr  top;
00845         size_t     trim_check;
00846         size_t     release_checks;
00847         size_t     magic;
00848         mchunkptr  smallbins[(NSMALLBINS+1)*2];
00849         tbinptr    treebins[NTREEBINS];
00850         size_t     footprint;
00851         size_t     max_footprint;
00852         flag_t     mflags;
00853 #if USE_LOCKS
00854         MLOCK_T    mutex;     /* locate lock among fields that rarely change */
00855 #endif /* USE_LOCKS */
00856         msegment   seg;
00857         void*      extp;      /* Unused but available for extensions */
00858         size_t     exts;
00859 };
00860 
00861 typedef struct malloc_state*    mstate;
00862 
00863 /* ------------- Global malloc_state and malloc_params ------------------- */
00864 
00865 /*
00866 malloc_params holds global properties, including those that can be
00867 dynamically set using mallopt. There is a single instance, mparams,
00868 initialized in init_mparams. Note that the non-zeroness of "magic"
00869 also serves as an initialization flag.
00870 */
00871 
00872 struct malloc_params {
00873         volatile size_t magic;
00874         size_t page_size;
00875         size_t granularity;
00876         size_t mmap_threshold;
00877         size_t trim_threshold;
00878         flag_t default_mflags;
00879 };
00880 
00881 static struct malloc_params mparams;
00882 
00883 /* Ensure mparams initialized */
00884 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
00885 
00886 #if !ONLY_MSPACES
00887 
00888 /* The global malloc_state used for all non-"mspace" calls */
00889 static struct malloc_state _gm_;
00890 #define gm                 (&_gm_)
00891 #define is_global(M)       ((M) == &_gm_)
00892 
00893 #endif /* !ONLY_MSPACES */
00894 
00895 #define is_initialized(M)  ((M)->top != 0)
00896 
00897 /* -------------------------- system alloc setup ------------------------- */
00898 
00899 /* Operations on mflags */
00900 
00901 #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
00902 #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
00903 #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
00904 
00905 #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
00906 #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
00907 #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
00908 
00909 #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
00910 #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
00911 
00912 #define set_lock(M,L)\
00913         ((M)->mflags = (L)?\
00914         ((M)->mflags | USE_LOCK_BIT) :\
00915         ((M)->mflags & ~USE_LOCK_BIT))
00916 
00917 /* page-align a size */
00918 #define page_align(S)\
00919         (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
00920 
00921 /* granularity-align a size */
00922 #define granularity_align(S)\
00923         (((S) + (mparams.granularity - SIZE_T_ONE))\
00924         & ~(mparams.granularity - SIZE_T_ONE))
00925 
00926 
00927 /* For mmap, use granularity alignment on windows, else page-align */
00928 #ifdef DL_PLATFORM_WIN32
00929 #define mmap_align(S) granularity_align(S)
00930 #else
00931 #define mmap_align(S) page_align(S)
00932 #endif
00933 
00934 /* For sys_alloc, enough padding to ensure can malloc request on success */
00935 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
00936 
00937 #define is_page_aligned(S)\
00938         (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
00939 #define is_granularity_aligned(S)\
00940         (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
00941 
00942 /*  True if segment S holds address A */
00943 #define segment_holds(S, A)\
00944         ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
00945 
00946 /* Return segment holding given address */
00947 static msegmentptr segment_holding(mstate m, char* addr) {
00948         msegmentptr sp = &m->seg;
00949         for (;;) {
00950                 if (addr >= sp->base && addr < sp->base + sp->size)
00951                         return sp;
00952                 if ((sp = sp->next) == 0)
00953                         return 0;
00954         }
00955 }
00956 
00957 /* Return true if segment contains a segment link */
00958 static int has_segment_link(mstate m, msegmentptr ss) {
00959         msegmentptr sp = &m->seg;
00960         for (;;) {
00961                 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
00962                         return 1;
00963                 if ((sp = sp->next) == 0)
00964                         return 0;
00965         }
00966 }
00967 
00968 #ifndef MORECORE_CANNOT_TRIM
00969 #define should_trim(M,s)  ((s) > (M)->trim_check)
00970 #else  /* MORECORE_CANNOT_TRIM */
00971 #define should_trim(M,s)  (0)
00972 #endif /* MORECORE_CANNOT_TRIM */
00973 
00974 /*
00975 TOP_FOOT_SIZE is padding at the end of a segment, including space
00976 that may be needed to place segment records and fenceposts when new
00977 noncontiguous segments are added.
00978 */
00979 #define TOP_FOOT_SIZE\
00980         (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
00981 
00982 
00983 /* -------------------------------  Hooks -------------------------------- */
00984 
00985 /*
00986 PREACTION should be defined to return 0 on success, and nonzero on
00987 failure. If you are not using locking, you can redefine these to do
00988 anything you like.
00989 */
00990 
00991 #if USE_LOCKS
00992 
00993 #define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
00994 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
00995 #else /* USE_LOCKS */
00996 
00997 #ifndef PREACTION
00998 #define PREACTION(M) (0)
00999 #endif  /* PREACTION */
01000 
01001 #ifndef POSTACTION
01002 #define POSTACTION(M)
01003 #endif  /* POSTACTION */
01004 
01005 #endif /* USE_LOCKS */
01006 
01007 /*
01008 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
01009 USAGE_ERROR_ACTION is triggered on detected bad frees and
01010 reallocs. The argument p is an address that might have triggered the
01011 fault. It is ignored by the two predefined actions, but might be
01012 useful in custom actions that try to help diagnose errors.
01013 */
01014 
01015 #if PROCEED_ON_ERROR
01016 
01017 /* A count of the number of corruption errors causing resets */
01018 int malloc_corruption_error_count;
01019 
01020 /* default corruption action */
01021 static void reset_on_error(mstate m);
01022 
01023 #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
01024 #define USAGE_ERROR_ACTION(m, p)
01025 
01026 #else /* PROCEED_ON_ERROR */
01027 
01028 #ifndef CORRUPTION_ERROR_ACTION
01029 #define CORRUPTION_ERROR_ACTION(m) ABORT
01030 #endif /* CORRUPTION_ERROR_ACTION */
01031 
01032 #ifndef USAGE_ERROR_ACTION
01033 #define USAGE_ERROR_ACTION(m,p) ABORT
01034 #endif /* USAGE_ERROR_ACTION */
01035 
01036 #endif /* PROCEED_ON_ERROR */
01037 
01038 /* -------------------------- Debugging setup ---------------------------- */
01039 
01040 #if ! DEBUG
01041 
01042 #define check_free_chunk(M,P)
01043 #define check_inuse_chunk(M,P)
01044 #define check_malloced_chunk(M,P,N)
01045 #define check_mmapped_chunk(M,P)
01046 #define check_malloc_state(M)
01047 #define check_top_chunk(M,P)
01048 
01049 #else /* DEBUG */
01050 #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
01051 #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
01052 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
01053 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
01054 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
01055 #define check_malloc_state(M)       do_check_malloc_state(M)
01056 
01057 static void   do_check_any_chunk(mstate m, mchunkptr p);
01058 static void   do_check_top_chunk(mstate m, mchunkptr p);
01059 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
01060 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
01061 static void   do_check_free_chunk(mstate m, mchunkptr p);
01062 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
01063 static void   do_check_tree(mstate m, tchunkptr t);
01064 static void   do_check_treebin(mstate m, bindex_t i);
01065 static void   do_check_smallbin(mstate m, bindex_t i);
01066 static void   do_check_malloc_state(mstate m);
01067 static int    bin_find(mstate m, mchunkptr x);
01068 static size_t traverse_and_check(mstate m);
01069 #endif /* DEBUG */
01070 
01071 /* ---------------------------- Indexing Bins ---------------------------- */
01072 
01073 #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
01074 #define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
01075 #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
01076 #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
01077 
01078 /* addressing by index. See above about smallbin repositioning */
01079 #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
01080 #define treebin_at(M,i)     (&((M)->treebins[i]))
01081 
01082 /* assign tree index for size S to variable I. Use x86 asm if possible  */
01083 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
01084 #define compute_tree_index(S, I)\
01085 {\
01086         unsigned int X = S >> TREEBIN_SHIFT;\
01087         if (X == 0)\
01088         I = 0;\
01089   else if (X > 0xFFFF)\
01090   I = NTREEBINS-1;\
01091   else {\
01092   unsigned int K;\
01093   __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g"  (X));\
01094   I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01095 }\
01096 }
01097 
01098 #elif defined (__INTEL_COMPILER)
01099 #define compute_tree_index(S, I)\
01100 {\
01101         size_t X = S >> TREEBIN_SHIFT;\
01102         if (X == 0)\
01103         I = 0;\
01104   else if (X > 0xFFFF)\
01105   I = NTREEBINS-1;\
01106   else {\
01107   unsigned int K = _bit_scan_reverse (X); \
01108   I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01109 }\
01110 }
01111 
01112 #elif defined(_MSC_VER) && _MSC_VER>=1300 && defined(DL_PLATFORM_WIN32)
01113 #define compute_tree_index(S, I)\
01114 {\
01115         size_t X = S >> TREEBIN_SHIFT;\
01116         if (X == 0)\
01117         I = 0;\
01118   else if (X > 0xFFFF)\
01119   I = NTREEBINS-1;\
01120   else {\
01121   unsigned int K;\
01122   _BitScanReverse((DWORD *) &K, X);\
01123   I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01124 }\
01125 }
01126 
01127 #else /* GNUC */
01128 #define compute_tree_index(S, I)\
01129 {\
01130         size_t X = S >> TREEBIN_SHIFT;\
01131         if (X == 0)\
01132         I = 0;\
01133   else if (X > 0xFFFF)\
01134   I = NTREEBINS-1;\
01135   else {\
01136   unsigned int Y = (unsigned int)X;\
01137   unsigned int N = ((Y - 0x100) >> 16) & 8;\
01138   unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
01139   N += K;\
01140   N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
01141   K = 14 - N + ((Y <<= K) >> 15);\
01142   I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
01143 }\
01144 }
01145 #endif /* GNUC */
01146 
01147 /* Bit representing maximum resolved size in a treebin at i */
01148 #define bit_for_tree_index(i) \
01149         (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
01150 
01151 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
01152 #define leftshift_for_tree_index(i) \
01153         ((i == NTREEBINS-1)? 0 : \
01154         ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
01155 
01156 /* The size of the smallest chunk held in bin with index i */
01157 #define minsize_for_tree_index(i) \
01158         ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
01159         (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
01160 
01161 
01162 /* ------------------------ Operations on bin maps ----------------------- */
01163 
01164 /* bit corresponding to given index */
01165 #define idx2bit(i)              ((binmap_t)(1) << (i))
01166 
01167 /* Mark/Clear bits with given index */
01168 #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
01169 #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
01170 #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
01171 
01172 #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
01173 #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
01174 #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
01175 
01176 /* isolate the least set bit of a bitmap */
01177 #define least_bit(x)         ((x) & -(x))
01178 
01179 /* mask with all bits to left of least bit of x on */
01180 #define left_bits(x)         ((x<<1) | -(x<<1))
01181 
01182 /* mask with all bits to left of or equal to least bit of x on */
01183 #define same_or_left_bits(x) ((x) | -(x))
01184 
01185 /* index corresponding to given bit. Use x86 asm if possible */
01186 
01187 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
01188 #define compute_bit2idx(X, I)\
01189 {\
01190         unsigned int J;\
01191         __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
01192         I = (bindex_t)J;\
01193 }
01194 
01195 #elif defined (__INTEL_COMPILER)
01196 #define compute_bit2idx(X, I)\
01197 {\
01198         unsigned int J;\
01199         J = _bit_scan_forward (X); \
01200         I = (bindex_t)J;\
01201 }
01202 
01203 #elif defined(_MSC_VER) && _MSC_VER>=1300 && defined(DL_PLATFORM_WIN32)
01204 #define compute_bit2idx(X, I)\
01205 {\
01206         unsigned int J;\
01207         _BitScanForward((DWORD *) &J, X);\
01208         I = (bindex_t)J;\
01209 }
01210 
01211 #elif USE_BUILTIN_FFS
01212 #define compute_bit2idx(X, I) I = ffs(X)-1
01213 
01214 #else
01215 #define compute_bit2idx(X, I)\
01216 {\
01217         unsigned int Y = X - 1;\
01218         unsigned int K = Y >> (16-4) & 16;\
01219         unsigned int N = K;        Y >>= K;\
01220         N += K = Y >> (8-3) &  8;  Y >>= K;\
01221         N += K = Y >> (4-2) &  4;  Y >>= K;\
01222         N += K = Y >> (2-1) &  2;  Y >>= K;\
01223         N += K = Y >> (1-0) &  1;  Y >>= K;\
01224         I = (bindex_t)(N + Y);\
01225 }
01226 #endif /* GNUC */
01227 
01228 
01229 /* ----------------------- Runtime Check Support ------------------------- */
01230 
01231 /*
01232 For security, the main invariant is that malloc/free/etc never
01233 writes to a static address other than malloc_state, unless static
01234 malloc_state itself has been corrupted, which cannot occur via
01235 malloc (because of these checks). In essence this means that we
01236 believe all pointers, sizes, maps etc held in malloc_state, but
01237 check all of those linked or offsetted from other embedded data
01238 structures.  These checks are interspersed with main code in a way
01239 that tends to minimize their run-time cost.
01240 
01241 When FOOTERS is defined, in addition to range checking, we also
01242 verify footer fields of inuse chunks, which can be used guarantee
01243 that the mstate controlling malloc/free is intact.  This is a
01244 streamlined version of the approach described by William Robertson
01245 et al in "Run-time Detection of Heap-based Overflows" LISA'03
01246 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
01247 of an inuse chunk holds the xor of its mstate and a random seed,
01248 that is checked upon calls to free() and realloc().  This is
01249 (probablistically) unguessable from outside the program, but can be
01250 computed by any code successfully malloc'ing any chunk, so does not
01251 itself provide protection against code that has already broken
01252 security through some other means.  Unlike Robertson et al, we
01253 always dynamically check addresses of all offset chunks (previous,
01254 next, etc). This turns out to be cheaper than relying on hashes.
01255 */
01256 
01257 #if !INSECURE
01258 /* Check if address a is at least as high as any from MORECORE or MMAP */
01259 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
01260 /* Check if address of next chunk n is higher than base chunk p */
01261 #define ok_next(p, n)    ((char*)(p) < (char*)(n))
01262 /* Check if p has inuse status */
01263 #define ok_inuse(p)     is_inuse(p)
01264 /* Check if p has its pinuse bit on */
01265 #define ok_pinuse(p)     pinuse(p)
01266 
01267 #else /* !INSECURE */
01268 #define ok_address(M, a) (1)
01269 #define ok_next(b, n)    (1)
01270 #define ok_inuse(p)      (1)
01271 #define ok_pinuse(p)     (1)
01272 #endif /* !INSECURE */
01273 
01274 #if (FOOTERS && !INSECURE)
01275 /* Check if (alleged) mstate m has expected magic field */
01276 #define ok_magic(M)      ((M)->magic == mparams.magic)
01277 #else  /* (FOOTERS && !INSECURE) */
01278 #define ok_magic(M)      (1)
01279 #endif /* (FOOTERS && !INSECURE) */
01280 
01281 
01282 /* In gcc, use __builtin_expect to minimize impact of checks */
01283 #if !INSECURE
01284 #if defined(__GNUC__) && __GNUC__ >= 3
01285 #define RTCHECK(e)  __builtin_expect(e, 1)
01286 #else /* GNUC */
01287 #define RTCHECK(e)  (e)
01288 #endif /* GNUC */
01289 #else /* !INSECURE */
01290 #define RTCHECK(e)  (1)
01291 #endif /* !INSECURE */
01292 
01293 /* macros to set up inuse chunks with or without footers */
01294 
01295 #if !FOOTERS
01296 
01297 #define mark_inuse_foot(M,p,s)
01298 
01299 /* Macros for setting head/foot of non-mmapped chunks */
01300 
01301 /* Set cinuse bit and pinuse bit of next chunk */
01302 #define set_inuse(M,p,s)\
01303         ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01304         ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01305 
01306 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
01307 #define set_inuse_and_pinuse(M,p,s)\
01308         ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01309         ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01310 
01311 /* Set size, cinuse and pinuse bit of this chunk */
01312 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01313         ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
01314 
01315 #else /* FOOTERS */
01316 
01317 /* Set foot of inuse chunk to be xor of mstate and seed */
01318 #define mark_inuse_foot(M,p,s)\
01319         (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
01320 
01321 #define get_mstate_for(p)\
01322         ((mstate)(((mchunkptr)((char*)(p) +\
01323         (chunksize(p))))->prev_foot ^ mparams.magic))
01324 
01325 #define set_inuse(M,p,s)\
01326         ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01327         (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
01328         mark_inuse_foot(M,p,s))
01329 
01330 #define set_inuse_and_pinuse(M,p,s)\
01331         ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01332         (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
01333         mark_inuse_foot(M,p,s))
01334 
01335 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01336         ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01337         mark_inuse_foot(M, p, s))
01338 
01339 #endif /* !FOOTERS */
01340 
01341 /* ---------------------------- setting mparams -------------------------- */
01342 
01343 /* Initialize mparams */
01344 static int init_mparams(void) {
01345 #ifdef NEED_GLOBAL_LOCK_INIT
01346         if (malloc_global_mutex_status <= 0)
01347                 init_malloc_global_mutex();
01348 #endif
01349 
01350         ACQUIRE_MALLOC_GLOBAL_LOCK();
01351         if (mparams.magic == 0) {
01352                 size_t magic;
01353                 size_t psize;
01354                 size_t gsize;
01355 
01356 #ifndef DL_PLATFORM_WIN32
01357                 psize = malloc_getpagesize;
01358                 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
01359 #else /* DL_PLATFORM_WIN32 */
01360                 {
01361                         SYSTEM_INFO system_info;
01362                         GetSystemInfo(&system_info);
01363                         psize = system_info.dwPageSize;
01364                         gsize = ((DEFAULT_GRANULARITY != 0)?
01365 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
01366                 }
01367 #endif /* DL_PLATFORM_WIN32 */
01368 
01369                 /* Sanity-check configuration:
01370                 size_t must be unsigned and as wide as pointer type.
01371                 ints must be at least 4 bytes.
01372                 alignment must be at least 8.
01373                 Alignment, min chunk size, and page size must all be powers of 2.
01374                 */
01375                 if ((sizeof(size_t) != sizeof(char*)) ||
01376                         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
01377                         (sizeof(int) < 4)  ||
01378                         (MALLOC_ALIGNMENT < (size_t)8U) ||
01379                         ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
01380                         ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
01381                         ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
01382                         ((psize            & (psize-SIZE_T_ONE))            != 0))
01383                         ABORT;
01384 
01385                 mparams.granularity = gsize;
01386                 mparams.page_size = psize;
01387                 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
01388                 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
01389 #if MORECORE_CONTIGUOUS
01390                 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
01391 #else  /* MORECORE_CONTIGUOUS */
01392                 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
01393 #endif /* MORECORE_CONTIGUOUS */
01394 
01395 #if !ONLY_MSPACES
01396                 /* Set up lock for main malloc area */
01397                 gm->mflags = mparams.default_mflags;
01398                 INITIAL_LOCK(&gm->mutex);
01399 #endif
01400 
01401                 {
01402 #if USE_DEV_RANDOM
01403                         int fd;
01404                         unsigned char buf[sizeof(size_t)];
01405                         /* Try to use /dev/urandom, else fall back on using time */
01406                         if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
01407                                 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
01408                                         magic = *((size_t *) buf);
01409                                         close(fd);
01410                         }
01411                         else
01412 #endif /* USE_DEV_RANDOM */
01413 
01414 #if defined(_XBOX) || defined(X360)
01415                                                            
01416 #elif defined(DL_PLATFORM_WIN32)
01417                                 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
01418 #else
01419                                 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
01420 #endif
01421                         magic |= (size_t)8U;    /* ensure nonzero */
01422                         magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
01423                         mparams.magic = magic;
01424                 }
01425         }
01426 
01427         RELEASE_MALLOC_GLOBAL_LOCK();
01428         return 1;
01429 }
01430 
01431 /* support for mallopt */
01432 static int change_mparam(int param_number, int value) {
01433         size_t val;
01434         ensure_initialization();
01435         val = (value == -1)? MAX_SIZE_T : (size_t)value;
01436         switch(param_number) {
01437   case M_TRIM_THRESHOLD:
01438           mparams.trim_threshold = val;
01439           return 1;
01440   case M_GRANULARITY:
01441           if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
01442                   mparams.granularity = val;
01443                   return 1;
01444           }
01445           else
01446                   return 0;
01447   case M_MMAP_THRESHOLD:
01448           mparams.mmap_threshold = val;
01449           return 1;
01450   default:
01451           return 0;
01452         }
01453 }
01454 
01455 #if DEBUG
01456 /* ------------------------- Debugging Support --------------------------- */
01457 
01458 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
01459 static void do_check_any_chunk(mstate m, mchunkptr p) {
01460         assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01461         assert(ok_address(m, p));
01462 }
01463 
01464 /* Check properties of top chunk */
01465 static void do_check_top_chunk(mstate m, mchunkptr p) {
01466         msegmentptr sp = segment_holding(m, (char*)p);
01467         size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
01468         assert(sp != 0);
01469         assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01470         assert(ok_address(m, p));
01471         assert(sz == m->topsize);
01472         assert(sz > 0);
01473         assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
01474         assert(pinuse(p));
01475         assert(!pinuse(chunk_plus_offset(p, sz)));
01476 }
01477 
01478 /* Check properties of (inuse) mmapped chunks */
01479 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
01480         size_t  sz = chunksize(p);
01481         size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
01482         assert(is_mmapped(p));
01483         assert(use_mmap(m));
01484         assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01485         assert(ok_address(m, p));
01486         assert(!is_small(sz));
01487         assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
01488         assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
01489         assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
01490 }
01491 
01492 /* Check properties of inuse chunks */
01493 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
01494         do_check_any_chunk(m, p);
01495         assert(is_inuse(p));
01496         assert(next_pinuse(p));
01497         /* If not pinuse and not mmapped, previous chunk has OK offset */
01498         assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
01499         if (is_mmapped(p))
01500                 do_check_mmapped_chunk(m, p);
01501 }
01502 
01503 /* Check properties of free chunks */
01504 static void do_check_free_chunk(mstate m, mchunkptr p) {
01505         size_t sz = chunksize(p);
01506         mchunkptr next = chunk_plus_offset(p, sz);
01507         do_check_any_chunk(m, p);
01508         assert(!is_inuse(p));
01509         assert(!next_pinuse(p));
01510         assert (!is_mmapped(p));
01511         if (p != m->dv && p != m->top) {
01512                 if (sz >= MIN_CHUNK_SIZE) {
01513                         assert((sz & CHUNK_ALIGN_MASK) == 0);
01514                         assert(is_aligned(chunk2mem(p)));
01515                         assert(next->prev_foot == sz);
01516                         assert(pinuse(p));
01517                         assert (next == m->top || is_inuse(next));
01518                         assert(p->fd->bk == p);
01519                         assert(p->bk->fd == p);
01520                 }
01521                 else  /* markers are always of size SIZE_T_SIZE */
01522                         assert(sz == SIZE_T_SIZE);
01523         }
01524 }
01525 
01526 /* Check properties of malloced chunks at the point they are malloced */
01527 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
01528         if (mem != 0) {
01529                 mchunkptr p = mem2chunk(mem);
01530                 size_t sz = p->head & ~INUSE_BITS;
01531                 do_check_inuse_chunk(m, p);
01532                 assert((sz & CHUNK_ALIGN_MASK) == 0);
01533                 assert(sz >= MIN_CHUNK_SIZE);
01534                 assert(sz >= s);
01535                 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
01536                 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
01537         }
01538 }
01539 
01540 /* Check a tree and its subtrees.  */
01541 static void do_check_tree(mstate m, tchunkptr t) {
01542         tchunkptr head = 0;
01543         tchunkptr u = t;
01544         bindex_t tindex = t->index;
01545         size_t tsize = chunksize(t);
01546         bindex_t idx;
01547         compute_tree_index(tsize, idx);
01548         assert(tindex == idx);
01549         assert(tsize >= MIN_LARGE_SIZE);
01550         assert(tsize >= minsize_for_tree_index(idx));
01551         assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
01552 
01553         do { /* traverse through chain of same-sized nodes */
01554                 do_check_any_chunk(m, ((mchunkptr)u));
01555                 assert(u->index == tindex);
01556                 assert(chunksize(u) == tsize);
01557                 assert(!is_inuse(u));
01558                 assert(!next_pinuse(u));
01559                 assert(u->fd->bk == u);
01560                 assert(u->bk->fd == u);
01561                 if (u->parent == 0) {
01562                         assert(u->child[0] == 0);
01563                         assert(u->child[1] == 0);
01564                 }
01565                 else {
01566                         assert(head == 0); /* only one node on chain has parent */
01567                         head = u;
01568                         assert(u->parent != u);
01569                         assert (u->parent->child[0] == u ||
01570                                 u->parent->child[1] == u ||
01571                                 *((tbinptr*)(u->parent)) == u);
01572                         if (u->child[0] != 0) {
01573                                 assert(u->child[0]->parent == u);
01574                                 assert(u->child[0] != u);
01575                                 do_check_tree(m, u->child[0]);
01576                         }
01577                         if (u->child[1] != 0) {
01578                                 assert(u->child[1]->parent == u);
01579                                 assert(u->child[1] != u);
01580                                 do_check_tree(m, u->child[1]);
01581                         }
01582                         if (u->child[0] != 0 && u->child[1] != 0) {
01583                                 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
01584                         }
01585                 }
01586                 u = u->fd;
01587         } while (u != t);
01588         assert(head != 0);
01589 }
01590 
01591 /*  Check all the chunks in a treebin.  */
01592 static void do_check_treebin(mstate m, bindex_t i) {
01593         tbinptr* tb = treebin_at(m, i);
01594         tchunkptr t = *tb;
01595         int empty = (m->treemap & (1U << i)) == 0;
01596         if (t == 0)
01597                 assert(empty);
01598         if (!empty)
01599                 do_check_tree(m, t);
01600 }
01601 
01602 /*  Check all the chunks in a smallbin.  */
01603 static void do_check_smallbin(mstate m, bindex_t i) {
01604         sbinptr b = smallbin_at(m, i);
01605         mchunkptr p = b->bk;
01606         unsigned int empty = (m->smallmap & (1U << i)) == 0;
01607         if (p == b)
01608                 assert(empty);
01609         if (!empty) {
01610                 for (; p != b; p = p->bk) {
01611                         size_t size = chunksize(p);
01612                         mchunkptr q;
01613                         /* each chunk claims to be free */
01614                         do_check_free_chunk(m, p);
01615                         /* chunk belongs in bin */
01616                         assert(small_index(size) == i);
01617                         assert(p->bk == b || chunksize(p->bk) == chunksize(p));
01618                         /* chunk is followed by an inuse chunk */
01619                         q = next_chunk(p);
01620                         if (q->head != FENCEPOST_HEAD)
01621                                 do_check_inuse_chunk(m, q);
01622                 }
01623         }
01624 }
01625 
01626 /* Find x in a bin. Used in other check functions. */
01627 static int bin_find(mstate m, mchunkptr x) {
01628         size_t size = chunksize(x);
01629         if (is_small(size)) {
01630                 bindex_t sidx = small_index(size);
01631                 sbinptr b = smallbin_at(m, sidx);
01632                 if (smallmap_is_marked(m, sidx)) {
01633                         mchunkptr p = b;
01634                         do {
01635                                 if (p == x)
01636                                         return 1;
01637                         } while ((p = p->fd) != b);
01638                 }
01639         }
01640         else {
01641                 bindex_t tidx;
01642                 compute_tree_index(size, tidx);
01643                 if (treemap_is_marked(m, tidx)) {
01644                         tchunkptr t = *treebin_at(m, tidx);
01645                         size_t sizebits = size << leftshift_for_tree_index(tidx);
01646                         while (t != 0 && chunksize(t) != size) {
01647                                 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
01648                                 sizebits <<= 1;
01649                         }
01650                         if (t != 0) {
01651                                 tchunkptr u = t;
01652                                 do {
01653                                         if (u == (tchunkptr)x)
01654                                                 return 1;
01655                                 } while ((u = u->fd) != t);
01656                         }
01657                 }
01658         }
01659         return 0;
01660 }
01661 
01662 /* Traverse each chunk and check it; return total */
01663 static size_t traverse_and_check(mstate m) {
01664         size_t sum = 0;
01665         if (is_initialized(m)) {
01666                 msegmentptr s = &m->seg;
01667                 sum += m->topsize + TOP_FOOT_SIZE;
01668                 while (s != 0) {
01669                         mchunkptr q = align_as_chunk(s->base);
01670                         mchunkptr lastq = 0;
01671                         assert(pinuse(q));
01672                         while (segment_holds(s, q) &&
01673                                 q != m->top && q->head != FENCEPOST_HEAD) {
01674                                         sum += chunksize(q);
01675                                         if (is_inuse(q)) {
01676                                                 assert(!bin_find(m, q));
01677                                                 do_check_inuse_chunk(m, q);
01678                                         }
01679                                         else {
01680                                                 assert(q == m->dv || bin_find(m, q));
01681                                                 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
01682                                                 do_check_free_chunk(m, q);
01683                                         }
01684                                         lastq = q;
01685                                         q = next_chunk(q);
01686                         }
01687                         s = s->next;
01688                 }
01689         }
01690         return sum;
01691 }
01692 
01693 /* Check all properties of malloc_state. */
01694 static void do_check_malloc_state(mstate m) {
01695         bindex_t i;
01696         size_t total;
01697         /* check bins */
01698         for (i = 0; i < NSMALLBINS; ++i)
01699                 do_check_smallbin(m, i);
01700         for (i = 0; i < NTREEBINS; ++i)
01701                 do_check_treebin(m, i);
01702 
01703         if (m->dvsize != 0) { /* check dv chunk */
01704                 do_check_any_chunk(m, m->dv);
01705                 assert(m->dvsize == chunksize(m->dv));
01706                 assert(m->dvsize >= MIN_CHUNK_SIZE);
01707                 assert(bin_find(m, m->dv) == 0);
01708         }
01709 
01710         if (m->top != 0) {   /* check top chunk */
01711                 do_check_top_chunk(m, m->top);
01712                 /*assert(m->topsize == chunksize(m->top)); redundant */
01713                 assert(m->topsize > 0);
01714                 assert(bin_find(m, m->top) == 0);
01715         }
01716 
01717         total = traverse_and_check(m);
01718         assert(total <= m->footprint);
01719         assert(m->footprint <= m->max_footprint);
01720 }
01721 #endif /* DEBUG */
01722 
01723 /* ----------------------------- statistics ------------------------------ */
01724 
01725 #if !NO_MALLINFO
01726 static struct mallinfo internal_mallinfo(mstate m) {
01727         struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
01728         ensure_initialization();
01729         if (!PREACTION(m)) {
01730                 check_malloc_state(m);
01731                 if (is_initialized(m)) {
01732                         size_t nfree = SIZE_T_ONE; /* top always free */
01733                         size_t mfree = m->topsize + TOP_FOOT_SIZE;
01734                         size_t sum = mfree;
01735                         msegmentptr s = &m->seg;
01736                         while (s != 0) {
01737                                 mchunkptr q = align_as_chunk(s->base);
01738                                 while (segment_holds(s, q) &&
01739                                         q != m->top && q->head != FENCEPOST_HEAD) {
01740                                                 size_t sz = chunksize(q);
01741                                                 sum += sz;
01742                                                 if (!is_inuse(q)) {
01743                                                         mfree += sz;
01744                                                         ++nfree;
01745                                                 }
01746                                                 q = next_chunk(q);
01747                                 }
01748                                 s = s->next;
01749                         }
01750 
01751                         nm.arena    = sum;
01752                         nm.ordblks  = nfree;
01753                         nm.hblkhd   = m->footprint - sum;
01754                         nm.usmblks  = m->max_footprint;
01755                         nm.uordblks = m->footprint - mfree;
01756                         nm.fordblks = mfree;
01757                         nm.keepcost = m->topsize;
01758                 }
01759 
01760                 POSTACTION(m);
01761         }
01762         return nm;
01763 }
01764 #endif /* !NO_MALLINFO */
01765 
01766 static void internal_malloc_stats(mstate m) {
01767         ensure_initialization();
01768         if (!PREACTION(m)) {
01769                 size_t maxfp = 0;
01770                 size_t fp = 0;
01771                 size_t used = 0;
01772                 check_malloc_state(m);
01773                 if (is_initialized(m)) {
01774                         msegmentptr s = &m->seg;
01775                         maxfp = m->max_footprint;
01776                         fp = m->footprint;
01777                         used = fp - (m->topsize + TOP_FOOT_SIZE);
01778 
01779                         while (s != 0) {
01780                                 mchunkptr q = align_as_chunk(s->base);
01781                                 while (segment_holds(s, q) &&
01782                                         q != m->top && q->head != FENCEPOST_HEAD) {
01783                                                 if (!is_inuse(q))
01784                                                         used -= chunksize(q);
01785                                                 q = next_chunk(q);
01786                                 }
01787                                 s = s->next;
01788                         }
01789                 }
01790 
01791                 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
01792                 fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
01793                 fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
01794 
01795                 POSTACTION(m);
01796         }
01797 }
01798 
01799 /* ----------------------- Operations on smallbins ----------------------- */
01800 
01801 /*
01802 Various forms of linking and unlinking are defined as macros.  Even
01803 the ones for trees, which are very long but have very short typical
01804 paths.  This is ugly but reduces reliance on inlining support of
01805 compilers.
01806 */
01807 
01808 /* Link a free chunk into a smallbin  */
01809 #define insert_small_chunk(M, P, S) {\
01810         bindex_t I  = small_index(S);\
01811         mchunkptr B = smallbin_at(M, I);\
01812         mchunkptr F = B;\
01813         assert(S >= MIN_CHUNK_SIZE);\
01814         if (!smallmap_is_marked(M, I))\
01815         mark_smallmap(M, I);\
01816   else if (RTCHECK(ok_address(M, B->fd)))\
01817   F = B->fd;\
01818   else {\
01819   CORRUPTION_ERROR_ACTION(M);\
01820 }\
01821         B->fd = P;\
01822         F->bk = P;\
01823         P->fd = F;\
01824         P->bk = B;\
01825 }
01826 
01827 /* Unlink a chunk from a smallbin  */
01828 #define unlink_small_chunk(M, P, S) {\
01829         mchunkptr F = P->fd;\
01830         mchunkptr B = P->bk;\
01831         bindex_t I = small_index(S);\
01832         assert(P != B);\
01833         assert(P != F);\
01834         assert(chunksize(P) == small_index2size(I));\
01835         if (F == B)\
01836         clear_smallmap(M, I);\
01837   else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
01838   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
01839   F->bk = B;\
01840   B->fd = F;\
01841 }\
01842   else {\
01843   CORRUPTION_ERROR_ACTION(M);\
01844 }\
01845 }
01846 
01847 /* Unlink the first chunk from a smallbin */
01848 #define unlink_first_small_chunk(M, B, P, I) {\
01849         mchunkptr F = P->fd;\
01850         assert(P != B);\
01851         assert(P != F);\
01852         assert(chunksize(P) == small_index2size(I));\
01853         if (B == F)\
01854         clear_smallmap(M, I);\
01855   else if (RTCHECK(ok_address(M, F))) {\
01856   B->fd = F;\
01857   F->bk = B;\
01858 }\
01859   else {\
01860   CORRUPTION_ERROR_ACTION(M);\
01861 }\
01862 }
01863 
01864 
01865 
01866 /* Replace dv node, binning the old one */
01867 /* Used only when dvsize known to be small */
01868 #define replace_dv(M, P, S) {\
01869         size_t DVS = M->dvsize;\
01870         if (DVS != 0) {\
01871         mchunkptr DV = M->dv;\
01872         assert(is_small(DVS));\
01873         insert_small_chunk(M, DV, DVS);\
01874         }\
01875         M->dvsize = S;\
01876         M->dv = P;\
01877 }
01878 
01879 /* ------------------------- Operations on trees ------------------------- */
01880 
01881 /* Insert chunk into tree */
01882 #define insert_large_chunk(M, X, S) {\
01883         tbinptr* H;\
01884         bindex_t I;\
01885         compute_tree_index(S, I);\
01886         H = treebin_at(M, I);\
01887         X->index = I;\
01888         X->child[0] = X->child[1] = 0;\
01889         if (!treemap_is_marked(M, I)) {\
01890         mark_treemap(M, I);\
01891         *H = X;\
01892         X->parent = (tchunkptr)H;\
01893         X->fd = X->bk = X;\
01894         }\
01895   else {\
01896   tchunkptr T = *H;\
01897   size_t K = S << leftshift_for_tree_index(I);\
01898   for (;;) {\
01899   if (chunksize(T) != S) {\
01900   tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
01901   K <<= 1;\
01902   if (*C != 0)\
01903   T = *C;\
01904                 else if (RTCHECK(ok_address(M, C))) {\
01905                 *C = X;\
01906                 X->parent = T;\
01907                 X->fd = X->bk = X;\
01908                 break;\
01909 }\
01910                 else {\
01911                 CORRUPTION_ERROR_ACTION(M);\
01912                 break;\
01913 }\
01914   }\
01915           else {\
01916           tchunkptr F = T->fd;\
01917           if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
01918           T->fd = F->bk = X;\
01919           X->fd = F;\
01920           X->bk = T;\
01921           X->parent = 0;\
01922           break;\
01923           }\
01924                 else {\
01925                 CORRUPTION_ERROR_ACTION(M);\
01926                 break;\
01927 }\
01928 }\
01929   }\
01930 }\
01931 }
01932 
01933 /*
01934 Unlink steps:
01935 
01936 1. If x is a chained node, unlink it from its same-sized fd/bk links
01937 and choose its bk node as its replacement.
01938 2. If x was the last node of its size, but not a leaf node, it must
01939 be replaced with a leaf node (not merely one with an open left or
01940 right), to make sure that lefts and rights of descendents
01941 correspond properly to bit masks.  We use the rightmost descendent
01942 of x.  We could use any other leaf, but this is easy to locate and
01943 tends to counteract removal of leftmosts elsewhere, and so keeps
01944 paths shorter than minimally guaranteed.  This doesn't loop much
01945 because on average a node in a tree is near the bottom.
01946 3. If x is the base of a chain (i.e., has parent links) relink
01947 x's parent and children to x's replacement (or null if none).
01948 */
01949 
01950 #define unlink_large_chunk(M, X) {\
01951         tchunkptr XP = X->parent;\
01952         tchunkptr R;\
01953         if (X->bk != X) {\
01954         tchunkptr F = X->fd;\
01955         R = X->bk;\
01956         if (RTCHECK(ok_address(M, F))) {\
01957         F->bk = R;\
01958         R->fd = F;\
01959         }\
01960         else {\
01961         CORRUPTION_ERROR_ACTION(M);\
01962 }\
01963         }\
01964   else {\
01965   tchunkptr* RP;\
01966   if (((R = *(RP = &(X->child[1]))) != 0) ||\
01967   ((R = *(RP = &(X->child[0]))) != 0)) {\
01968   tchunkptr* CP;\
01969   while ((*(CP = &(R->child[1])) != 0) ||\
01970   (*(CP = &(R->child[0])) != 0)) {\
01971   R = *(RP = CP);\
01972 }\
01973         if (RTCHECK(ok_address(M, RP)))\
01974         *RP = 0;\
01975           else {\
01976           CORRUPTION_ERROR_ACTION(M);\
01977 }\
01978 }\
01979 }\
01980         if (XP != 0) {\
01981         tbinptr* H = treebin_at(M, X->index);\
01982         if (X == *H) {\
01983         if ((*H = R) == 0) \
01984         clear_treemap(M, X->index);\
01985         }\
01986         else if (RTCHECK(ok_address(M, XP))) {\
01987         if (XP->child[0] == X) \
01988         XP->child[0] = R;\
01989           else \
01990           XP->child[1] = R;\
01991 }\
01992         else\
01993         CORRUPTION_ERROR_ACTION(M);\
01994         if (R != 0) {\
01995         if (RTCHECK(ok_address(M, R))) {\
01996         tchunkptr C0, C1;\
01997         R->parent = XP;\
01998         if ((C0 = X->child[0]) != 0) {\
01999         if (RTCHECK(ok_address(M, C0))) {\
02000         R->child[0] = C0;\
02001         C0->parent = R;\
02002         }\
02003                   else\
02004                   CORRUPTION_ERROR_ACTION(M);\
02005         }\
02006         if ((C1 = X->child[1]) != 0) {\
02007         if (RTCHECK(ok_address(M, C1))) {\
02008         R->child[1] = C1;\
02009         C1->parent = R;\
02010         }\
02011                   else\
02012                   CORRUPTION_ERROR_ACTION(M);\
02013         }\
02014         }\
02015           else\
02016           CORRUPTION_ERROR_ACTION(M);\
02017         }\
02018         }\
02019 }
02020 
02021 /* Relays to large vs small bin operations */
02022 
02023 #define insert_chunk(M, P, S)\
02024         if (is_small(S)) insert_small_chunk(M, P, S)\
02025   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
02026 
02027 #define unlink_chunk(M, P, S)\
02028         if (is_small(S)) unlink_small_chunk(M, P, S)\
02029   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
02030 
02031 
02032 /* Relays to internal calls to malloc/free from realloc, memalign etc */
02033 
02034 #if ONLY_MSPACES
02035 #define internal_malloc(m, b) rak_mspace_malloc(m, b)
02036 #define internal_free(m, mem) rak_mspace_free(m,mem);
02037 #else /* ONLY_MSPACES */
02038 #if MSPACES
02039 #define internal_malloc(m, b)\
02040         (m == gm)? rdlmalloc(b) : rak_mspace_malloc(m, b)
02041 #define internal_free(m, mem)\
02042         if (m == gm) rdlfree(mem); else rak_mspace_free(m,mem);
02043 #else /* MSPACES */
02044 #define internal_malloc(m, b) rdlmalloc(b)
02045 #define internal_free(m, mem) rdlfree(mem)
02046 #endif /* MSPACES */
02047 #endif /* ONLY_MSPACES */
02048 
02049 /* -----------------------  Direct-mmapping chunks ----------------------- */
02050 
02051 /*
02052 Directly mmapped chunks are set up with an offset to the start of
02053 the mmapped region stored in the prev_foot field of the chunk. This
02054 allows reconstruction of the required argument to MUNMAP when freed,
02055 and also allows adjustment of the returned chunk to meet alignment
02056 requirements (especially in memalign).
02057 */
02058 
02059 /* Malloc using mmap */
02060 static void* mmap_alloc(mstate m, size_t nb) {
02061         size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02062         if (mmsize > nb) {     /* Check for wrap around 0 */
02063                 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
02064                 if (mm != CMFAIL) {
02065                         size_t offset = align_offset(chunk2mem(mm));
02066                         size_t psize = mmsize - offset - MMAP_FOOT_PAD;
02067                         mchunkptr p = (mchunkptr)(mm + offset);
02068                         p->prev_foot = offset;
02069                         p->head = psize;
02070                         mark_inuse_foot(m, p, psize);
02071                         chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
02072                         chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
02073 
02074                         if (m->least_addr == 0 || mm < m->least_addr)
02075                                 m->least_addr = mm;
02076                         if ((m->footprint += mmsize) > m->max_footprint)
02077                                 m->max_footprint = m->footprint;
02078                         assert(is_aligned(chunk2mem(p)));
02079                         check_mmapped_chunk(m, p);
02080                         return chunk2mem(p);
02081                 }
02082         }
02083         return 0;
02084 }
02085 
02086 /* Realloc using mmap */
02087 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
02088         size_t oldsize = chunksize(oldp);
02089         if (is_small(nb)) /* Can't shrink mmap regions below small size */
02090                 return 0;
02091         /* Keep old chunk if big enough but not too big */
02092         if (oldsize >= nb + SIZE_T_SIZE &&
02093                 (oldsize - nb) <= (mparams.granularity << 1))
02094                 return oldp;
02095         else {
02096                 size_t offset = oldp->prev_foot;
02097                 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
02098                 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02099                 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
02100                         oldmmsize, newmmsize, 1);
02101                 if (cp != CMFAIL) {
02102                         mchunkptr newp = (mchunkptr)(cp + offset);
02103                         size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
02104                         newp->head = psize;
02105                         mark_inuse_foot(m, newp, psize);
02106                         chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
02107                         chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
02108 
02109                         if (cp < m->least_addr)
02110                                 m->least_addr = cp;
02111                         if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
02112                                 m->max_footprint = m->footprint;
02113                         check_mmapped_chunk(m, newp);
02114                         return newp;
02115                 }
02116         }
02117         return 0;
02118 }
02119 
02120 /* -------------------------- mspace management -------------------------- */
02121 
02122 /* Initialize top chunk and its size */
02123 static void init_top(mstate m, mchunkptr p, size_t psize) {
02124         /* Ensure alignment */
02125         size_t offset = align_offset(chunk2mem(p));
02126         p = (mchunkptr)((char*)p + offset);
02127         psize -= offset;
02128 
02129         m->top = p;
02130         m->topsize = psize;
02131         p->head = psize | PINUSE_BIT;
02132         /* set size of fake trailing chunk holding overhead space only once */
02133         chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
02134         m->trim_check = mparams.trim_threshold; /* reset on each update */
02135 }
02136 
02137 /* Initialize bins for a new mstate that is otherwise zeroed out */
02138 static void init_bins(mstate m) {
02139         /* Establish circular links for smallbins */
02140         bindex_t i;
02141         for (i = 0; i < NSMALLBINS; ++i) {
02142                 sbinptr bin = smallbin_at(m,i);
02143                 bin->fd = bin->bk = bin;
02144         }
02145 }
02146 
02147 #if PROCEED_ON_ERROR
02148 
02149 /* default corruption action */
02150 static void reset_on_error(mstate m) {
02151         int i;
02152         ++malloc_corruption_error_count;
02153         /* Reinitialize fields to forget about all memory */
02154         m->smallbins = m->treebins = 0;
02155         m->dvsize = m->topsize = 0;
02156         m->seg.base = 0;
02157         m->seg.size = 0;
02158         m->seg.next = 0;
02159         m->top = m->dv = 0;
02160         for (i = 0; i < NTREEBINS; ++i)
02161                 *treebin_at(m, i) = 0;
02162         init_bins(m);
02163 }
02164 #endif /* PROCEED_ON_ERROR */
02165 
02166 /* Allocate chunk and prepend remainder with chunk in successor base. */
02167 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
02168                                                    size_t nb) {
02169                                                            mchunkptr p = align_as_chunk(newbase);
02170                                                            mchunkptr oldfirst = align_as_chunk(oldbase);
02171                                                            size_t psize = (char*)oldfirst - (char*)p;
02172                                                            mchunkptr q = chunk_plus_offset(p, nb);
02173                                                            size_t qsize = psize - nb;
02174                                                            set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02175 
02176                                                            assert((char*)oldfirst > (char*)q);
02177                                                            assert(pinuse(oldfirst));
02178                                                            assert(qsize >= MIN_CHUNK_SIZE);
02179 
02180                                                            /* consolidate remainder with first chunk of old base */
02181                                                            if (oldfirst == m->top) {
02182                                                                    size_t tsize = m->topsize += qsize;
02183                                                                    m->top = q;
02184                                                                    q->head = tsize | PINUSE_BIT;
02185                                                                    check_top_chunk(m, q);
02186                                                            }
02187                                                            else if (oldfirst == m->dv) {
02188                                                                    size_t dsize = m->dvsize += qsize;
02189                                                                    m->dv = q;
02190                                                                    set_size_and_pinuse_of_free_chunk(q, dsize);
02191                                                            }
02192                                                            else {
02193                                                                    if (!is_inuse(oldfirst)) {
02194                                                                            size_t nsize = chunksize(oldfirst);
02195                                                                            unlink_chunk(m, oldfirst, nsize);
02196                                                                            oldfirst = chunk_plus_offset(oldfirst, nsize);
02197                                                                            qsize += nsize;
02198                                                                    }
02199                                                                    set_free_with_pinuse(q, qsize, oldfirst);
02200                                                                    insert_chunk(m, q, qsize);
02201                                                                    check_free_chunk(m, q);
02202                                                            }
02203 
02204                                                            check_malloced_chunk(m, chunk2mem(p), nb);
02205                                                            return chunk2mem(p);
02206 }
02207 
02208 /* Add a segment to hold a new noncontiguous region */
02209 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
02210         /* Determine locations and sizes of segment, fenceposts, old top */
02211         char* old_top = (char*)m->top;
02212         msegmentptr oldsp = segment_holding(m, old_top);
02213         char* old_end = oldsp->base + oldsp->size;
02214         size_t ssize = pad_request(sizeof(struct malloc_segment));
02215         char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02216         size_t offset = align_offset(chunk2mem(rawsp));
02217         char* asp = rawsp + offset;
02218         char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
02219         mchunkptr sp = (mchunkptr)csp;
02220         msegmentptr ss = (msegmentptr)(chunk2mem(sp));
02221         mchunkptr tnext = chunk_plus_offset(sp, ssize);
02222         mchunkptr p = tnext;
02223         int nfences = 0;
02224 
02225         /* reset top to new space */
02226         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02227 
02228         /* Set up segment record */
02229         assert(is_aligned(ss));
02230         set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
02231         *ss = m->seg; /* Push current record */
02232         m->seg.base = tbase;
02233         m->seg.size = tsize;
02234         m->seg.sflags = mmapped;
02235         m->seg.next = ss;
02236 
02237         /* Insert trailing fenceposts */
02238         for (;;) {
02239                 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
02240                 p->head = FENCEPOST_HEAD;
02241                 ++nfences;
02242                 if ((char*)(&(nextp->head)) < old_end)
02243                         p = nextp;
02244                 else
02245                         break;
02246         }
02247         assert(nfences >= 2);
02248 
02249         /* Insert the rest of old top into a bin as an ordinary free chunk */
02250         if (csp != old_top) {
02251                 mchunkptr q = (mchunkptr)old_top;
02252                 size_t psize = csp - old_top;
02253                 mchunkptr tn = chunk_plus_offset(q, psize);
02254                 set_free_with_pinuse(q, psize, tn);
02255                 insert_chunk(m, q, psize);
02256         }
02257 
02258         check_top_chunk(m, m->top);
02259 }
02260 
02261 /* -------------------------- System allocation -------------------------- */
02262 
02263 /* Get memory from system using MORECORE or MMAP */
02264 static void* sys_alloc(mstate m, size_t nb) {
02265         char* tbase = CMFAIL;
02266         size_t tsize = 0;
02267         flag_t mmap_flag = 0;
02268 
02269         ensure_initialization();
02270 
02271         /* Directly map large chunks, but only if already initialized */
02272         if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
02273                 void* mem = mmap_alloc(m, nb);
02274                 if (mem != 0)
02275                         return mem;
02276         }
02277 
02278         /*
02279         Try getting memory in any of three ways (in most-preferred to
02280         least-preferred order):
02281         1. A call to MORECORE that can normally contiguously extend memory.
02282         (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
02283         or main space is mmapped or a previous contiguous call failed)
02284         2. A call to MMAP new space (disabled if not HAVE_MMAP).
02285         Note that under the default settings, if MORECORE is unable to
02286         fulfill a request, and HAVE_MMAP is true, then mmap is
02287         used as a noncontiguous system allocator. This is a useful backup
02288         strategy for systems with holes in address spaces -- in this case
02289         sbrk cannot contiguously expand the heap, but mmap may be able to
02290         find space.
02291         3. A call to MORECORE that cannot usually contiguously extend memory.
02292         (disabled if not HAVE_MORECORE)
02293 
02294         In all cases, we need to request enough bytes from system to ensure
02295         we can malloc nb bytes upon success, so pad with enough space for
02296         top_foot, plus alignment-pad to make sure we don't lose bytes if
02297         not on boundary, and round this up to a granularity unit.
02298         */
02299 
02300         if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
02301                 char* br = CMFAIL;
02302                 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
02303                 size_t asize = 0;
02304                 ACQUIRE_MALLOC_GLOBAL_LOCK();
02305 
02306                 if (ss == 0) {  /* First time through or recovery */
02307                         char* base = (char*)CALL_MORECORE(0);
02308                         if (base != CMFAIL) {
02309                                 asize = granularity_align(nb + SYS_ALLOC_PADDING);
02310                                 /* Adjust to end on a page boundary */
02311                                 if (!is_page_aligned(base))
02312                                         asize += (page_align((size_t)base) - (size_t)base);
02313                                 /* Can't call MORECORE if size is negative when treated as signed */
02314                                 if (asize < HALF_MAX_SIZE_T &&
02315                                         (br = (char*)(CALL_MORECORE(asize))) == base) {
02316                                                 tbase = base;
02317                                                 tsize = asize;
02318                                 }
02319                         }
02320                 }
02321                 else {
02322                         /* Subtract out existing available top space from MORECORE request. */
02323                         asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
02324                         /* Use mem here only if it did continuously extend old space */
02325                         if (asize < HALF_MAX_SIZE_T &&
02326                                 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
02327                                         tbase = br;
02328                                         tsize = asize;
02329                         }
02330                 }
02331 
02332                 if (tbase == CMFAIL) {    /* Cope with partial failure */
02333                         if (br != CMFAIL) {    /* Try to use/extend the space we did get */
02334                                 if (asize < HALF_MAX_SIZE_T &&
02335                                         asize < nb + SYS_ALLOC_PADDING) {
02336                                                 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize);
02337                                                 if (esize < HALF_MAX_SIZE_T) {
02338                                                         char* end = (char*)CALL_MORECORE(esize);
02339                                                         if (end != CMFAIL)
02340                                                                 asize += esize;
02341                                                         else {            /* Can't use; try to release */
02342                                                                 (void) CALL_MORECORE(-asize);
02343                                                                 br = CMFAIL;
02344                                                         }
02345                                                 }
02346                                 }
02347                         }
02348                         if (br != CMFAIL) {    /* Use the space we did get */
02349                                 tbase = br;
02350                                 tsize = asize;
02351                         }
02352                         else
02353                                 disable_contiguous(m); /* Don't try contiguous path in the future */
02354                 }
02355 
02356                 RELEASE_MALLOC_GLOBAL_LOCK();
02357         }
02358 
02359         if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
02360                 size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
02361                 if (rsize > nb) { /* Fail if wraps around zero */
02362                         char* mp = (char*)(CALL_MMAP(rsize));
02363                         if (mp != CMFAIL) {
02364                                 tbase = mp;
02365                                 tsize = rsize;
02366                                 mmap_flag = USE_MMAP_BIT;
02367                         }
02368                 }
02369         }
02370 
02371         if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
02372                 size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
02373                 if (asize < HALF_MAX_SIZE_T) {
02374                         char* br = CMFAIL;
02375                         char* end = CMFAIL;
02376                         ACQUIRE_MALLOC_GLOBAL_LOCK();
02377                         br = (char*)(CALL_MORECORE(asize));
02378                         end = (char*)(CALL_MORECORE(0));
02379                         RELEASE_MALLOC_GLOBAL_LOCK();
02380                         if (br != CMFAIL && end != CMFAIL && br < end) {
02381                                 size_t ssize = end - br;
02382                                 if (ssize > nb + TOP_FOOT_SIZE) {
02383                                         tbase = br;
02384                                         tsize = ssize;
02385                                 }
02386                         }
02387                 }
02388         }
02389 
02390         if (tbase != CMFAIL) {
02391 
02392                 if ((m->footprint += tsize) > m->max_footprint)
02393                         m->max_footprint = m->footprint;
02394 
02395                 if (!is_initialized(m)) { /* first-time initialization */
02396                         if (m->least_addr == 0 || tbase < m->least_addr)
02397                                 m->least_addr = tbase;
02398                         m->seg.base = tbase;
02399                         m->seg.size = tsize;
02400                         m->seg.sflags = mmap_flag;
02401                         m->magic = mparams.magic;
02402                         m->release_checks = MAX_RELEASE_CHECK_RATE;
02403                         init_bins(m);
02404 #if !ONLY_MSPACES
02405                         if (is_global(m))
02406                                 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02407                         else
02408 #endif
02409                         {
02410                                 /* Offset top by embedded malloc_state */
02411                                 mchunkptr mn = next_chunk(mem2chunk(m));
02412                                 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
02413                         }
02414                 }
02415 
02416                 else {
02417                         /* Try to merge with an existing segment */
02418                         msegmentptr sp = &m->seg;
02419                         /* Only consider most recent segment if traversal suppressed */
02420                         while (sp != 0 && tbase != sp->base + sp->size)
02421                                 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
02422                         if (sp != 0 &&
02423                                 !is_extern_segment(sp) &&
02424                                 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
02425                                 segment_holds(sp, m->top)) { /* append */
02426                                         sp->size += tsize;
02427                                         init_top(m, m->top, m->topsize + tsize);
02428                         }
02429                         else {
02430                                 if (tbase < m->least_addr)
02431                                         m->least_addr = tbase;
02432                                 sp = &m->seg;
02433                                 while (sp != 0 && sp->base != tbase + tsize)
02434                                         sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
02435                                 if (sp != 0 &&
02436                                         !is_extern_segment(sp) &&
02437                                         (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
02438                                                 char* oldbase = sp->base;
02439                                                 sp->base = tbase;
02440                                                 sp->size += tsize;
02441                                                 return prepend_alloc(m, tbase, oldbase, nb);
02442                                 }
02443                                 else
02444                                         add_segment(m, tbase, tsize, mmap_flag);
02445                         }
02446                 }
02447 
02448                 if (nb < m->topsize) { /* Allocate from new or extended top space */
02449                         size_t rsize = m->topsize -= nb;
02450                         mchunkptr p = m->top;
02451                         mchunkptr r = m->top = chunk_plus_offset(p, nb);
02452                         r->head = rsize | PINUSE_BIT;
02453                         set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02454                         check_top_chunk(m, m->top);
02455                         check_malloced_chunk(m, chunk2mem(p), nb);
02456                         return chunk2mem(p);
02457                 }
02458         }
02459 
02460         MALLOC_FAILURE_ACTION;
02461         return 0;
02462 }
02463 
02464 /* -----------------------  system deallocation -------------------------- */
02465 
02466 /* Unmap and unlink any mmapped segments that don't contain used chunks */
02467 static size_t release_unused_segments(mstate m) {
02468         size_t released = 0;
02469         int nsegs = 0;
02470         msegmentptr pred = &m->seg;
02471         msegmentptr sp = pred->next;
02472         while (sp != 0) {
02473                 char* base = sp->base;
02474                 size_t size = sp->size;
02475                 msegmentptr next = sp->next;
02476                 ++nsegs;
02477                 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
02478                         mchunkptr p = align_as_chunk(base);
02479                         size_t psize = chunksize(p);
02480                         /* Can unmap if first chunk holds entire segment and not pinned */
02481                         if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
02482                                 tchunkptr tp = (tchunkptr)p;
02483                                 assert(segment_holds(sp, (char*)sp));
02484                                 if (p == m->dv) {
02485                                         m->dv = 0;
02486                                         m->dvsize = 0;
02487                                 }
02488                                 else {
02489                                         unlink_large_chunk(m, tp);
02490                                 }
02491                                 if (CALL_MUNMAP(base, size) == 0) {
02492                                         released += size;
02493                                         m->footprint -= size;
02494                                         /* unlink obsoleted record */
02495                                         sp = pred;
02496                                         sp->next = next;
02497                                 }
02498                                 else { /* back out if cannot unmap */
02499                                         insert_large_chunk(m, tp, psize);
02500                                 }
02501                         }
02502                 }
02503                 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
02504                         break;
02505                 pred = sp;
02506                 sp = next;
02507         }
02508         /* Reset check counter */
02509         m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
02510 nsegs : MAX_RELEASE_CHECK_RATE);
02511         return released;
02512 }
02513 
02514 static int sys_trim(mstate m, size_t pad) {
02515         size_t released = 0;
02516         ensure_initialization();
02517         if (pad < MAX_REQUEST && is_initialized(m)) {
02518                 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
02519 
02520                 if (m->topsize > pad) {
02521                         /* Shrink top space in granularity-size units, keeping at least one */
02522                         size_t unit = mparams.granularity;
02523                         size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
02524                                 SIZE_T_ONE) * unit;
02525                         msegmentptr sp = segment_holding(m, (char*)m->top);
02526 
02527                         if (!is_extern_segment(sp)) {
02528                                 if (is_mmapped_segment(sp)) {
02529                                         if (HAVE_MMAP &&
02530                                                 sp->size >= extra &&
02531                                                 !has_segment_link(m, sp)) { /* can't shrink if pinned */
02532                                                         size_t newsize = sp->size - extra;
02533                                                         /* Prefer mremap, fall back to munmap */
02534                                                         if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
02535                                                                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
02536                                                                         released = extra;
02537                                                         }
02538                                         }
02539                                 }
02540                                 else if (HAVE_MORECORE) {
02541                                         if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
02542                                                 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
02543                                         ACQUIRE_MALLOC_GLOBAL_LOCK();
02544                                         {
02545                                                 /* Make sure end of memory is where we last set it. */
02546                                                 char* old_br = (char*)(CALL_MORECORE(0));
02547                                                 if (old_br == sp->base + sp->size) {
02548                                                         char* rel_br = (char*)(CALL_MORECORE(-extra));
02549                                                         char* new_br = (char*)(CALL_MORECORE(0));
02550                                                         if (rel_br != CMFAIL && new_br < old_br)
02551                                                                 released = old_br - new_br;
02552                                                 }
02553                                         }
02554                                         RELEASE_MALLOC_GLOBAL_LOCK();
02555                                 }
02556                         }
02557 
02558                         if (released != 0) {
02559                                 sp->size -= released;
02560                                 m->footprint -= released;
02561                                 init_top(m, m->top, m->topsize - released);
02562                                 check_top_chunk(m, m->top);
02563                         }
02564                 }
02565 
02566                 /* Unmap any unused mmapped segments */
02567                 if (HAVE_MMAP)
02568                         released += release_unused_segments(m);
02569 
02570                 /* On failure, disable autotrim to avoid repeated failed future calls */
02571                 if (released == 0 && m->topsize > m->trim_check)
02572                         m->trim_check = MAX_SIZE_T;
02573         }
02574 
02575         return (released != 0)? 1 : 0;
02576 }
02577 
02578 
02579 /* ---------------------------- malloc support --------------------------- */
02580 
02581 /* allocate a large request from the best fitting chunk in a treebin */
02582 static void* tmalloc_large(mstate m, size_t nb) {
02583         tchunkptr v = 0;
02584         size_t rsize = -nb; /* Unsigned negation */
02585         tchunkptr t;
02586         bindex_t idx;
02587         compute_tree_index(nb, idx);
02588         if ((t = *treebin_at(m, idx)) != 0) {
02589                 /* Traverse tree for this bin looking for node with size == nb */
02590                 size_t sizebits = nb << leftshift_for_tree_index(idx);
02591                 tchunkptr rst = 0;  /* The deepest untaken right subtree */
02592                 for (;;) {
02593                         tchunkptr rt;
02594                         size_t trem = chunksize(t) - nb;
02595                         if (trem < rsize) {
02596                                 v = t;
02597                                 if ((rsize = trem) == 0)
02598                                         break;
02599                         }
02600                         rt = t->child[1];
02601                         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
02602                         if (rt != 0 && rt != t)
02603                                 rst = rt;
02604                         if (t == 0) {
02605                                 t = rst; /* set t to least subtree holding sizes > nb */
02606                                 break;
02607                         }
02608                         sizebits <<= 1;
02609                 }
02610         }
02611         if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
02612                 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
02613                 if (leftbits != 0) {
02614                         bindex_t i;
02615                         binmap_t leastbit = least_bit(leftbits);
02616                         compute_bit2idx(leastbit, i);
02617                         t = *treebin_at(m, i);
02618                 }
02619         }
02620 
02621         while (t != 0) { /* find smallest of tree or subtree */
02622                 size_t trem = chunksize(t) - nb;
02623                 if (trem < rsize) {
02624                         rsize = trem;
02625                         v = t;
02626                 }
02627                 t = leftmost_child(t);
02628         }
02629 
02630         /*  If dv is a better fit, return 0 so malloc will use it */
02631         if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
02632                 if (RTCHECK(ok_address(m, v))) { /* split */
02633                         mchunkptr r = chunk_plus_offset(v, nb);
02634                         assert(chunksize(v) == rsize + nb);
02635                         if (RTCHECK(ok_next(v, r))) {
02636                                 unlink_large_chunk(m, v);
02637                                 if (rsize < MIN_CHUNK_SIZE)
02638                                         set_inuse_and_pinuse(m, v, (rsize + nb));
02639                                 else {
02640                                         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
02641                                         set_size_and_pinuse_of_free_chunk(r, rsize);
02642                                         insert_chunk(m, r, rsize);
02643                                 }
02644                                 return chunk2mem(v);
02645                         }
02646                 }
02647                 CORRUPTION_ERROR_ACTION(m);
02648         }
02649         return 0;
02650 }
02651 
02652 /* allocate a small request from the best fitting chunk in a treebin */
02653 static void* tmalloc_small(mstate m, size_t nb) {
02654         tchunkptr t, v;
02655         size_t rsize;
02656         bindex_t i;
02657         binmap_t leastbit = least_bit(m->treemap);
02658         compute_bit2idx(leastbit, i);
02659         v = t = *treebin_at(m, i);
02660         rsize = chunksize(t) - nb;
02661 
02662         while ((t = leftmost_child(t)) != 0) {
02663                 size_t trem = chunksize(t) - nb;
02664                 if (trem < rsize) {
02665                         rsize = trem;
02666                         v = t;
02667                 }
02668         }
02669 
02670         if (RTCHECK(ok_address(m, v))) {
02671                 mchunkptr r = chunk_plus_offset(v, nb);
02672                 assert(chunksize(v) == rsize + nb);
02673                 if (RTCHECK(ok_next(v, r))) {
02674                         unlink_large_chunk(m, v);
02675                         if (rsize < MIN_CHUNK_SIZE)
02676                                 set_inuse_and_pinuse(m, v, (rsize + nb));
02677                         else {
02678                                 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
02679                                 set_size_and_pinuse_of_free_chunk(r, rsize);
02680                                 replace_dv(m, r, rsize);
02681                         }
02682                         return chunk2mem(v);
02683                 }
02684         }
02685 
02686         CORRUPTION_ERROR_ACTION(m);
02687         return 0;
02688 }
02689 
02690 /* --------------------------- realloc support --------------------------- */
02691 
02692 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
02693         if (bytes >= MAX_REQUEST) {
02694                 MALLOC_FAILURE_ACTION;
02695                 return 0;
02696         }
02697         if (!PREACTION(m)) {
02698                 mchunkptr oldp = mem2chunk(oldmem);
02699                 size_t oldsize = chunksize(oldp);
02700                 mchunkptr next = chunk_plus_offset(oldp, oldsize);
02701                 mchunkptr newp = 0;
02702                 void* extra = 0;
02703 
02704                 /* Try to either shrink or extend into top. Else malloc-copy-free */
02705 
02706                 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
02707                         ok_next(oldp, next) && ok_pinuse(next))) {
02708                                 size_t nb = request2size(bytes);
02709                                 if (is_mmapped(oldp))
02710                                         newp = mmap_resize(m, oldp, nb);
02711                                 else if (oldsize >= nb) { /* already big enough */
02712                                         size_t rsize = oldsize - nb;
02713                                         newp = oldp;
02714                                         if (rsize >= MIN_CHUNK_SIZE) {
02715                                                 mchunkptr remainder = chunk_plus_offset(newp, nb);
02716                                                 set_inuse(m, newp, nb);
02717                                                 set_inuse_and_pinuse(m, remainder, rsize);
02718                                                 extra = chunk2mem(remainder);
02719                                         }
02720                                 }
02721                                 else if (next == m->top && oldsize + m->topsize > nb) {
02722                                         /* Expand into top */
02723                                         size_t newsize = oldsize + m->topsize;
02724                                         size_t newtopsize = newsize - nb;
02725                                         mchunkptr newtop = chunk_plus_offset(oldp, nb);
02726                                         set_inuse(m, oldp, nb);
02727                                         newtop->head = newtopsize |PINUSE_BIT;
02728                                         m->top = newtop;
02729                                         m->topsize = newtopsize;
02730                                         newp = oldp;
02731                                 }
02732                 }
02733                 else {
02734                         USAGE_ERROR_ACTION(m, oldmem);
02735                         POSTACTION(m);
02736                         return 0;
02737                 }
02738 #if DEBUG
02739                 if (newp != 0) {
02740                         check_inuse_chunk(m, newp); /* Check requires lock */
02741                 }
02742 #endif
02743 
02744                 POSTACTION(m);
02745 
02746                 if (newp != 0) {
02747                         if (extra != 0) {
02748                                 internal_free(m, extra);
02749                         }
02750                         return chunk2mem(newp);
02751                 }
02752                 else {
02753                         void* newmem = internal_malloc(m, bytes);
02754                         if (newmem != 0) {
02755                                 size_t oc = oldsize - overhead_for(oldp);
02756                                 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
02757                                 internal_free(m, oldmem);
02758                         }
02759                         return newmem;
02760                 }
02761         }
02762         return 0;
02763 }
02764 
02765 /* --------------------------- memalign support -------------------------- */
02766 
02767 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
02768         if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
02769                 return internal_malloc(m, bytes);
02770         if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
02771                 alignment = MIN_CHUNK_SIZE;
02772         if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
02773                 size_t a = MALLOC_ALIGNMENT << 1;
02774                 while (a < alignment) a <<= 1;
02775                 alignment = a;
02776         }
02777 
02778         if (bytes >= MAX_REQUEST - alignment) {
02779                 if (m != 0)  { /* Test isn't needed but avoids compiler warning */
02780                         MALLOC_FAILURE_ACTION;
02781                 }
02782         }
02783         else {
02784                 size_t nb = request2size(bytes);
02785                 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
02786                 char* mem = (char*)internal_malloc(m, req);
02787                 if (mem != 0) {
02788                         void* leader = 0;
02789                         void* trailer = 0;
02790                         mchunkptr p = mem2chunk(mem);
02791 
02792                         if (PREACTION(m)) return 0;
02793                         if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
02794                                 /*
02795                                 Find an aligned spot inside chunk.  Since we need to give
02796                                 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
02797                                 the first calculation places us at a spot with less than
02798                                 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
02799                                 We've allocated enough total room so that this is always
02800                                 possible.
02801                                 */
02802                                 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
02803                                         alignment -
02804                                         SIZE_T_ONE)) &
02805                                         -alignment));
02806                                 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
02807 br : br+alignment;
02808                                 mchunkptr newp = (mchunkptr)pos;
02809                                 size_t leadsize = pos - (char*)(p);
02810                                 size_t newsize = chunksize(p) - leadsize;
02811 
02812                                 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
02813                                         newp->prev_foot = p->prev_foot + leadsize;
02814                                         newp->head = newsize;
02815                                 }
02816                                 else { /* Otherwise, give back leader, use the rest */
02817                                         set_inuse(m, newp, newsize);
02818                                         set_inuse(m, p, leadsize);
02819                                         leader = chunk2mem(p);
02820                                 }
02821                                 p = newp;
02822                         }
02823 
02824                         /* Give back spare room at the end */
02825                         if (!is_mmapped(p)) {
02826                                 size_t size = chunksize(p);
02827                                 if (size > nb + MIN_CHUNK_SIZE) {
02828                                         size_t remainder_size = size - nb;
02829                                         mchunkptr remainder = chunk_plus_offset(p, nb);
02830                                         set_inuse(m, p, nb);
02831                                         set_inuse(m, remainder, remainder_size);
02832                                         trailer = chunk2mem(remainder);
02833                                 }
02834                         }
02835 
02836                         assert (chunksize(p) >= nb);
02837                         assert((((size_t)(chunk2mem(p))) % alignment) == 0);
02838                         check_inuse_chunk(m, p);
02839                         POSTACTION(m);
02840                         if (leader != 0) {
02841                                 internal_free(m, leader);
02842                         }
02843                         if (trailer != 0) {
02844                                 internal_free(m, trailer);
02845                         }
02846                         return chunk2mem(p);
02847                 }
02848         }
02849         return 0;
02850 }
02851 
02852 /* ------------------------ comalloc/coalloc support --------------------- */
02853 
02854 static void** ialloc(mstate m,
02855                                          size_t n_elements,
02856                                          size_t* sizes,
02857                                          int opts,
02858                                          void* chunks[]) {
02859                                                  /*
02860                                                  This provides common support for independent_X routines, handling
02861                                                  all of the combinations that can result.
02862 
02863                                                  The opts arg has:
02864                                                  bit 0 set if all elements are same size (using sizes[0])
02865                                                  bit 1 set if elements should be zeroed
02866                                                  */
02867 
02868                                                  size_t    element_size;   /* chunksize of each element, if all same */
02869                                                  size_t    contents_size;  /* total size of elements */
02870                                                  size_t    array_size;     /* request size of pointer array */
02871                                                  void*     mem;            /* malloced aggregate space */
02872                                                  mchunkptr p;              /* corresponding chunk */
02873                                                  size_t    remainder_size; /* remaining bytes while splitting */
02874                                                  void**    marray;         /* either "chunks" or malloced ptr array */
02875                                                  mchunkptr array_chunk;    /* chunk for malloced ptr array */
02876                                                  flag_t    was_enabled;    /* to disable mmap */
02877                                                  size_t    size;
02878                                                  size_t    i;
02879 
02880                                                  ensure_initialization();
02881                                                  /* compute array length, if needed */
02882                                                  if (chunks != 0) {
02883                                                          if (n_elements == 0)
02884                                                                  return chunks; /* nothing to do */
02885                                                          marray = chunks;
02886                                                          array_size = 0;
02887                                                  }
02888                                                  else {
02889                                                          /* if empty req, must still return chunk representing empty array */
02890                                                          if (n_elements == 0)
02891                                                                  return (void**)internal_malloc(m, 0);
02892                                                          marray = 0;
02893                                                          array_size = request2size(n_elements * (sizeof(void*)));
02894                                                  }
02895 
02896                                                  /* compute total element size */
02897                                                  if (opts & 0x1) { /* all-same-size */
02898                                                          element_size = request2size(*sizes);
02899                                                          contents_size = n_elements * element_size;
02900                                                  }
02901                                                  else { /* add up all the sizes */
02902                                                          element_size = 0;
02903                                                          contents_size = 0;
02904                                                          for (i = 0; i != n_elements; ++i)
02905                                                                  contents_size += request2size(sizes[i]);
02906                                                  }
02907 
02908                                                  size = contents_size + array_size;
02909 
02910                                                  /*
02911                                                  Allocate the aggregate chunk.  First disable direct-mmapping so
02912                                                  malloc won't use it, since we would not be able to later
02913                                                  free/realloc space internal to a segregated mmap region.
02914                                                  */
02915                                                  was_enabled = use_mmap(m);
02916                                                  disable_mmap(m);
02917                                                  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
02918                                                  if (was_enabled)
02919                                                          enable_mmap(m);
02920                                                  if (mem == 0)
02921                                                          return 0;
02922 
02923                                                  if (PREACTION(m)) return 0;
02924                                                  p = mem2chunk(mem);
02925                                                  remainder_size = chunksize(p);
02926 
02927                                                  assert(!is_mmapped(p));
02928 
02929                                                  if (opts & 0x2) {       /* optionally clear the elements */
02930                                                          memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
02931                                                  }
02932 
02933                                                  /* If not provided, allocate the pointer array as final part of chunk */
02934                                                  if (marray == 0) {
02935                                                          size_t  array_chunk_size;
02936                                                          array_chunk = chunk_plus_offset(p, contents_size);
02937                                                          array_chunk_size = remainder_size - contents_size;
02938                                                          marray = (void**) (chunk2mem(array_chunk));
02939                                                          set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
02940                                                          remainder_size = contents_size;
02941                                                  }
02942 
02943                                                  /* split out elements */
02944                                                  for (i = 0; ; ++i) {
02945                                                          marray[i] = chunk2mem(p);
02946                                                          if (i != n_elements-1) {
02947                                                                  if (element_size != 0)
02948                                                                          size = element_size;
02949                                                                  else
02950                                                                          size = request2size(sizes[i]);
02951                                                                  remainder_size -= size;
02952                                                                  set_size_and_pinuse_of_inuse_chunk(m, p, size);
02953                                                                  p = chunk_plus_offset(p, size);
02954                                                          }
02955                                                          else { /* the final element absorbs any overallocation slop */
02956                                                                  set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
02957                                                                  break;
02958                                                          }
02959                                                  }
02960 
02961 #if DEBUG
02962                                                  if (marray != chunks) {
02963                                                          /* final element must have exactly exhausted chunk */
02964                                                          if (element_size != 0) {
02965                                                                  assert(remainder_size == element_size);
02966                                                          }
02967                                                          else {
02968                                                                  assert(remainder_size == request2size(sizes[i]));
02969                                                          }
02970                                                          check_inuse_chunk(m, mem2chunk(marray));
02971                                                  }
02972                                                  for (i = 0; i != n_elements; ++i)
02973                                                          check_inuse_chunk(m, mem2chunk(marray[i]));
02974 
02975 #endif /* DEBUG */
02976 
02977                                                  POSTACTION(m);
02978                                                  return marray;
02979 }
02980 
02981 
02982 /* -------------------------- public routines ---------------------------- */
02983 
02984 #if !ONLY_MSPACES
02985 
02986 void* rdlmalloc(size_t bytes) {
02987         /*
02988         Basic algorithm:
02989         If a small request (< 256 bytes minus per-chunk overhead):
02990         1. If one exists, use a remainderless chunk in associated smallbin.
02991         (Remainderless means that there are too few excess bytes to
02992         represent as a chunk.)
02993         2. If it is big enough, use the dv chunk, which is normally the
02994         chunk adjacent to the one used for the most recent small request.
02995         3. If one exists, split the smallest available chunk in a bin,
02996         saving remainder in dv.
02997         4. If it is big enough, use the top chunk.
02998         5. If available, get memory from system and use it
02999         Otherwise, for a large request:
03000         1. Find the smallest available binned chunk that fits, and use it
03001         if it is better fitting than dv chunk, splitting if necessary.
03002         2. If better fitting than any binned chunk, use the dv chunk.
03003         3. If it is big enough, use the top chunk.
03004         4. If request size >= mmap threshold, try to directly mmap this chunk.
03005         5. If available, get memory from system and use it
03006 
03007         The ugly goto's here ensure that postaction occurs along all paths.
03008         */
03009 
03010 #if USE_LOCKS
03011         ensure_initialization(); /* initialize in sys_alloc if not using locks */
03012 #endif
03013 
03014         if (!PREACTION(gm)) {
03015                 void* mem;
03016                 size_t nb;
03017                 if (bytes <= MAX_SMALL_REQUEST) {
03018                         bindex_t idx;
03019                         binmap_t smallbits;
03020                         nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03021                         idx = small_index(nb);
03022                         smallbits = gm->smallmap >> idx;
03023 
03024                         if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
03025                                 mchunkptr b, p;
03026                                 idx += ~smallbits & 1;       /* Uses next bin if idx empty */
03027                                 b = smallbin_at(gm, idx);
03028                                 p = b->fd;
03029                                 assert(chunksize(p) == small_index2size(idx));
03030                                 unlink_first_small_chunk(gm, b, p, idx);
03031                                 set_inuse_and_pinuse(gm, p, small_index2size(idx));
03032                                 mem = chunk2mem(p);
03033                                 check_malloced_chunk(gm, mem, nb);
03034                                 goto postaction;
03035                         }
03036 
03037                         else if (nb > gm->dvsize) {
03038                                 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
03039                                         mchunkptr b, p, r;
03040                                         size_t rsize;
03041                                         bindex_t i;
03042                                         binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03043                                         binmap_t leastbit = least_bit(leftbits);
03044                                         compute_bit2idx(leastbit, i);
03045                                         b = smallbin_at(gm, i);
03046                                         p = b->fd;
03047                                         assert(chunksize(p) == small_index2size(i));
03048                                         unlink_first_small_chunk(gm, b, p, i);
03049                                         rsize = small_index2size(i) - nb;
03050                                         /* Fit here cannot be remainderless if 4byte sizes */
03051                                         if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
03052                                                 set_inuse_and_pinuse(gm, p, small_index2size(i));
03053                                         else {
03054                                                 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03055                                                 r = chunk_plus_offset(p, nb);
03056                                                 set_size_and_pinuse_of_free_chunk(r, rsize);
03057                                                 replace_dv(gm, r, rsize);
03058                                         }
03059                                         mem = chunk2mem(p);
03060                                         check_malloced_chunk(gm, mem, nb);
03061                                         goto postaction;
03062                                 }
03063 
03064                                 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
03065                                         check_malloced_chunk(gm, mem, nb);
03066                                         goto postaction;
03067                                 }
03068                         }
03069                 }
03070                 else if (bytes >= MAX_REQUEST)
03071                         nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
03072                 else {
03073                         nb = pad_request(bytes);
03074                         if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
03075                                 check_malloced_chunk(gm, mem, nb);
03076                                 goto postaction;
03077                         }
03078                 }
03079 
03080                 if (nb <= gm->dvsize) {
03081                         size_t rsize = gm->dvsize - nb;
03082                         mchunkptr p = gm->dv;
03083                         if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
03084                                 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
03085                                 gm->dvsize = rsize;
03086                                 set_size_and_pinuse_of_free_chunk(r, rsize);
03087                                 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03088                         }
03089                         else { /* exhaust dv */
03090                                 size_t dvs = gm->dvsize;
03091                                 gm->dvsize = 0;
03092                                 gm->dv = 0;
03093                                 set_inuse_and_pinuse(gm, p, dvs);
03094                         }
03095                         mem = chunk2mem(p);
03096                         check_malloced_chunk(gm, mem, nb);
03097                         goto postaction;
03098                 }
03099 
03100                 else if (nb < gm->topsize) { /* Split top */
03101                         size_t rsize = gm->topsize -= nb;
03102                         mchunkptr p = gm->top;
03103                         mchunkptr r = gm->top = chunk_plus_offset(p, nb);
03104                         r->head = rsize | PINUSE_BIT;
03105                         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03106                         mem = chunk2mem(p);
03107                         check_top_chunk(gm, gm->top);
03108                         check_malloced_chunk(gm, mem, nb);
03109                         goto postaction;
03110                 }
03111 
03112                 mem = sys_alloc(gm, nb);
03113 
03114 postaction:
03115                 POSTACTION(gm);
03116                 return mem;
03117         }
03118 
03119         return 0;
03120 }
03121 
03122 void rdlfree(void* mem) {
03123         /*
03124         Consolidate freed chunks with preceeding or succeeding bordering
03125         free chunks, if they exist, and then place in a bin.  Intermixed
03126         with special cases for top, dv, mmapped chunks, and usage errors.
03127         */
03128 
03129         if (mem != 0) {
03130                 mchunkptr p  = mem2chunk(mem);
03131 #if FOOTERS
03132                 mstate fm = get_mstate_for(p);
03133                 if (!ok_magic(fm)) {
03134                         USAGE_ERROR_ACTION(fm, p);
03135                         return;
03136                 }
03137 #else /* FOOTERS */
03138 #define fm gm
03139 #endif /* FOOTERS */
03140                 if (!PREACTION(fm)) {
03141                         check_inuse_chunk(fm, p);
03142                         if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
03143                                 size_t psize = chunksize(p);
03144                                 mchunkptr next = chunk_plus_offset(p, psize);
03145                                 if (!pinuse(p)) {
03146                                         size_t prevsize = p->prev_foot;
03147                                         if (is_mmapped(p)) {
03148                                                 psize += prevsize + MMAP_FOOT_PAD;
03149                                                 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03150                                                         fm->footprint -= psize;
03151                                                 goto postaction;
03152                                         }
03153                                         else {
03154                                                 mchunkptr prev = chunk_minus_offset(p, prevsize);
03155                                                 psize += prevsize;
03156                                                 p = prev;
03157                                                 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
03158                                                         if (p != fm->dv) {
03159                                                                 unlink_chunk(fm, p, prevsize);
03160                                                         }
03161                                                         else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03162                                                                 fm->dvsize = psize;
03163                                                                 set_free_with_pinuse(p, psize, next);
03164                                                                 goto postaction;
03165                                                         }
03166                                                 }
03167                                                 else
03168                                                         goto erroraction;
03169                                         }
03170                                 }
03171 
03172                                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03173                                         if (!cinuse(next)) {  /* consolidate forward */
03174                                                 if (next == fm->top) {
03175                                                         size_t tsize = fm->topsize += psize;
03176                                                         fm->top = p;
03177                                                         p->head = tsize | PINUSE_BIT;
03178                                                         if (p == fm->dv) {
03179                                                                 fm->dv = 0;
03180                                                                 fm->dvsize = 0;
03181                                                         }
03182                                                         if (should_trim(fm, tsize))
03183                                                                 sys_trim(fm, 0);
03184                                                         goto postaction;
03185                                                 }
03186                                                 else if (next == fm->dv) {
03187                                                         size_t dsize = fm->dvsize += psize;
03188                                                         fm->dv = p;
03189                                                         set_size_and_pinuse_of_free_chunk(p, dsize);
03190                                                         goto postaction;
03191                                                 }
03192                                                 else {
03193                                                         size_t nsize = chunksize(next);
03194                                                         psize += nsize;
03195                                                         unlink_chunk(fm, next, nsize);
03196                                                         set_size_and_pinuse_of_free_chunk(p, psize);
03197                                                         if (p == fm->dv) {
03198                                                                 fm->dvsize = psize;
03199                                                                 goto postaction;
03200                                                         }
03201                                                 }
03202                                         }
03203                                         else
03204                                                 set_free_with_pinuse(p, psize, next);
03205 
03206                                         if (is_small(psize)) {
03207                                                 insert_small_chunk(fm, p, psize);
03208                                                 check_free_chunk(fm, p);
03209                                         }
03210                                         else {
03211                                                 tchunkptr tp = (tchunkptr)p;
03212                                                 insert_large_chunk(fm, tp, psize);
03213                                                 check_free_chunk(fm, p);
03214                                                 if (--fm->release_checks == 0)
03215                                                         release_unused_segments(fm);
03216                                         }
03217                                         goto postaction;
03218                                 }
03219                         }
03220 erroraction:
03221                         USAGE_ERROR_ACTION(fm, p);
03222 postaction:
03223                         POSTACTION(fm);
03224                 }
03225         }
03226 #if !FOOTERS
03227 #undef fm
03228 #endif /* FOOTERS */
03229 }
03230 
03231 void* rdlcalloc(size_t n_elements, size_t elem_size) {
03232         void* mem;
03233         size_t req = 0;
03234         if (n_elements != 0) {
03235                 req = n_elements * elem_size;
03236                 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
03237                         (req / n_elements != elem_size))
03238                         req = MAX_SIZE_T; /* force downstream failure on overflow */
03239         }
03240         mem = rdlmalloc(req);
03241         if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
03242                 memset(mem, 0, req);
03243         return mem;
03244 }
03245 
03246 void* rdlrealloc(void* oldmem, size_t bytes) {
03247         if (oldmem == 0)
03248                 return rdlmalloc(bytes);
03249 #ifdef REALLOC_ZERO_BYTES_FREES
03250         if (bytes == 0) {
03251                 rdlfree(oldmem);
03252                 return 0;
03253         }
03254 #endif /* REALLOC_ZERO_BYTES_FREES */
03255         else {
03256 #if ! FOOTERS
03257                 mstate m = gm;
03258 #else /* FOOTERS */
03259                 mstate m = get_mstate_for(mem2chunk(oldmem));
03260                 if (!ok_magic(m)) {
03261                         USAGE_ERROR_ACTION(m, oldmem);
03262                         return 0;
03263                 }
03264 #endif /* FOOTERS */
03265                 return internal_realloc(m, oldmem, bytes);
03266         }
03267 }
03268 
03269 void* rdlmemalign(size_t alignment, size_t bytes) {
03270         return internal_memalign(gm, alignment, bytes);
03271 }
03272 
03273 void** rdlindependent_calloc(size_t n_elements, size_t elem_size,
03274                                                         void* chunks[]) {
03275                                                                 size_t sz = elem_size; /* serves as 1-element array */
03276                                                                 return ialloc(gm, n_elements, &sz, 3, chunks);
03277 }
03278 
03279 void** rdlindependent_comalloc(size_t n_elements, size_t sizes[],
03280                                                           void* chunks[]) {
03281                                                                   return ialloc(gm, n_elements, sizes, 0, chunks);
03282 }
03283 
03284 void* rdlvalloc(size_t bytes) {
03285         size_t pagesz;
03286         ensure_initialization();
03287         pagesz = mparams.page_size;
03288         return rdlmemalign(pagesz, bytes);
03289 }
03290 
03291 void* rdlpvalloc(size_t bytes) {
03292         size_t pagesz;
03293         ensure_initialization();
03294         pagesz = mparams.page_size;
03295         return rdlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
03296 }
03297 
03298 int rdlmalloc_trim(size_t pad) {
03299         int result = 0;
03300         ensure_initialization();
03301         if (!PREACTION(gm)) {
03302                 result = sys_trim(gm, pad);
03303                 POSTACTION(gm);
03304         }
03305         return result;
03306 }
03307 
03308 size_t rdlmalloc_footprint(void) {
03309         return gm->footprint;
03310 }
03311 
03312 size_t dlmalloc_max_footprint(void) {
03313         return gm->max_footprint;
03314 }
03315 
03316 #if !NO_MALLINFO
03317 struct mallinfo rdlmallinfo(void) {
03318         return internal_mallinfo(gm);
03319 }
03320 #endif /* NO_MALLINFO */
03321 
03322 void rdlmalloc_stats() {
03323         internal_malloc_stats(gm);
03324 }
03325 
03326 int rdlmallopt(int param_number, int value) {
03327         return change_mparam(param_number, value);
03328 }
03329 
03330 #endif /* !ONLY_MSPACES */
03331 
03332 size_t rdlmalloc_usable_size(void* mem) {
03333         if (mem != 0) {
03334                 mchunkptr p = mem2chunk(mem);
03335                 if (is_inuse(p))
03336                         return chunksize(p) - overhead_for(p);
03337         }
03338         return 0;
03339 }
03340 
03341 /* ----------------------------- user mspaces ---------------------------- */
03342 
03343 #if MSPACES
03344 
03345 static mstate init_user_mstate(char* tbase, size_t tsize) {
03346         size_t msize = pad_request(sizeof(struct malloc_state));
03347         mchunkptr mn;
03348         mchunkptr msp = align_as_chunk(tbase);
03349         mstate m = (mstate)(chunk2mem(msp));
03350         memset(m, 0, msize);
03351         INITIAL_LOCK(&m->mutex);
03352         msp->head = (msize|INUSE_BITS);
03353         m->seg.base = m->least_addr = tbase;
03354         m->seg.size = m->footprint = m->max_footprint = tsize;
03355         m->magic = mparams.magic;
03356         m->release_checks = MAX_RELEASE_CHECK_RATE;
03357         m->mflags = mparams.default_mflags;
03358         m->extp = 0;
03359         m->exts = 0;
03360         disable_contiguous(m);
03361         init_bins(m);
03362         mn = next_chunk(mem2chunk(m));
03363         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
03364         check_top_chunk(m, m->top);
03365         return m;
03366 }
03367 
03368 mspace rak_create_mspace(size_t capacity, int locked) {
03369         mstate m = 0;
03370         size_t msize;
03371         ensure_initialization();
03372         msize = pad_request(sizeof(struct malloc_state));
03373         if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03374                 size_t rs = ((capacity == 0)? mparams.granularity :
03375                         (capacity + TOP_FOOT_SIZE + msize));
03376         size_t tsize = granularity_align(rs);
03377         char* tbase = (char*)(CALL_MMAP(tsize));
03378         if (tbase != CMFAIL) {
03379                 m = init_user_mstate(tbase, tsize);
03380                 m->seg.sflags = USE_MMAP_BIT;
03381                 set_lock(m, locked);
03382         }
03383         }
03384         return (mspace)m;
03385 }
03386 
03387 mspace rak_create_mspace_with_base(void* base, size_t capacity, int locked) {
03388         mstate m = 0;
03389         size_t msize;
03390         ensure_initialization();
03391         msize = pad_request(sizeof(struct malloc_state));
03392         if (capacity > msize + TOP_FOOT_SIZE &&
03393                 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03394                         m = init_user_mstate((char*)base, capacity);
03395                         m->seg.sflags = EXTERN_BIT;
03396                         set_lock(m, locked);
03397         }
03398         return (mspace)m;
03399 }
03400 
03401 int rak_mspace_track_large_chunks(mspace msp, int enable) {
03402         int ret = 0;
03403         mstate ms = (mstate)msp;
03404         if (!PREACTION(ms)) {
03405                 if (!use_mmap(ms))
03406                         ret = 1;
03407                 if (!enable)
03408                         enable_mmap(ms);
03409                 else
03410                         disable_mmap(ms);
03411                 POSTACTION(ms);
03412         }
03413         return ret;
03414 }
03415 
03416 size_t rak_destroy_mspace(mspace msp) {
03417         size_t freed = 0;
03418         mstate ms = (mstate)msp;
03419         if (ok_magic(ms)) {
03420                 msegmentptr sp = &ms->seg;
03421                 while (sp != 0) {
03422                         char* base = sp->base;
03423                         size_t size = sp->size;
03424                         flag_t flag = sp->sflags;
03425                         sp = sp->next;
03426                         if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
03427                                 CALL_MUNMAP(base, size) == 0)
03428                                 freed += size;
03429                 }
03430         }
03431         else {
03432                 USAGE_ERROR_ACTION(ms,ms);
03433         }
03434         return freed;
03435 }
03436 
03437 /*
03438 mspace versions of routines are near-clones of the global
03439 versions. This is not so nice but better than the alternatives.
03440 */
03441 
03442 
03443 void* rak_mspace_malloc(mspace msp, size_t bytes) {
03444         mstate ms = (mstate)msp;
03445         if (!ok_magic(ms)) {
03446                 USAGE_ERROR_ACTION(ms,ms);
03447                 return 0;
03448         }
03449         if (!PREACTION(ms)) {
03450                 void* mem;
03451                 size_t nb;
03452                 if (bytes <= MAX_SMALL_REQUEST) {
03453                         bindex_t idx;
03454                         binmap_t smallbits;
03455                         nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03456                         idx = small_index(nb);
03457                         smallbits = ms->smallmap >> idx;
03458 
03459                         if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
03460                                 mchunkptr b, p;
03461                                 idx += ~smallbits & 1;       /* Uses next bin if idx empty */
03462                                 b = smallbin_at(ms, idx);
03463                                 p = b->fd;
03464                                 assert(chunksize(p) == small_index2size(idx));
03465                                 unlink_first_small_chunk(ms, b, p, idx);
03466                                 set_inuse_and_pinuse(ms, p, small_index2size(idx));
03467                                 mem = chunk2mem(p);
03468                                 check_malloced_chunk(ms, mem, nb);
03469                                 goto postaction;
03470                         }
03471 
03472                         else if (nb > ms->dvsize) {
03473                                 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
03474                                         mchunkptr b, p, r;
03475                                         size_t rsize;
03476                                         bindex_t i;
03477                                         binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03478                                         binmap_t leastbit = least_bit(leftbits);
03479                                         compute_bit2idx(leastbit, i);
03480                                         b = smallbin_at(ms, i);
03481                                         p = b->fd;
03482                                         assert(chunksize(p) == small_index2size(i));
03483                                         unlink_first_small_chunk(ms, b, p, i);
03484                                         rsize = small_index2size(i) - nb;
03485                                         /* Fit here cannot be remainderless if 4byte sizes */
03486                                         if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
03487                                                 set_inuse_and_pinuse(ms, p, small_index2size(i));
03488                                         else {
03489                                                 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03490                                                 r = chunk_plus_offset(p, nb);
03491                                                 set_size_and_pinuse_of_free_chunk(r, rsize);
03492                                                 replace_dv(ms, r, rsize);
03493                                         }
03494                                         mem = chunk2mem(p);
03495                                         check_malloced_chunk(ms, mem, nb);
03496                                         goto postaction;
03497                                 }
03498 
03499                                 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
03500                                         check_malloced_chunk(ms, mem, nb);
03501                                         goto postaction;
03502                                 }
03503                         }
03504                 }
03505                 else if (bytes >= MAX_REQUEST)
03506                         nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
03507                 else {
03508                         nb = pad_request(bytes);
03509                         if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
03510                                 check_malloced_chunk(ms, mem, nb);
03511                                 goto postaction;
03512                         }
03513                 }
03514 
03515                 if (nb <= ms->dvsize) {
03516                         size_t rsize = ms->dvsize - nb;
03517                         mchunkptr p = ms->dv;
03518                         if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
03519                                 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
03520                                 ms->dvsize = rsize;
03521                                 set_size_and_pinuse_of_free_chunk(r, rsize);
03522                                 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03523                         }
03524                         else { /* exhaust dv */
03525                                 size_t dvs = ms->dvsize;
03526                                 ms->dvsize = 0;
03527                                 ms->dv = 0;
03528                                 set_inuse_and_pinuse(ms, p, dvs);
03529                         }
03530                         mem = chunk2mem(p);
03531                         check_malloced_chunk(ms, mem, nb);
03532                         goto postaction;
03533                 }
03534 
03535                 else if (nb < ms->topsize) { /* Split top */
03536                         size_t rsize = ms->topsize -= nb;
03537                         mchunkptr p = ms->top;
03538                         mchunkptr r = ms->top = chunk_plus_offset(p, nb);
03539                         r->head = rsize | PINUSE_BIT;
03540                         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03541                         mem = chunk2mem(p);
03542                         check_top_chunk(ms, ms->top);
03543                         check_malloced_chunk(ms, mem, nb);
03544                         goto postaction;
03545                 }
03546 
03547                 mem = sys_alloc(ms, nb);
03548 
03549 postaction:
03550                 POSTACTION(ms);
03551                 return mem;
03552         }
03553 
03554         return 0;
03555 }
03556 
03557 void rak_mspace_free(mspace msp, void* mem) {
03558         if (mem != 0) {
03559                 mchunkptr p  = mem2chunk(mem);
03560 #if FOOTERS
03561                 mstate fm = get_mstate_for(p);
03562                 msp = msp; /* placate people compiling -Wunused */
03563 #else /* FOOTERS */
03564                 mstate fm = (mstate)msp;
03565 #endif /* FOOTERS */
03566                 if (!ok_magic(fm)) {
03567                         USAGE_ERROR_ACTION(fm, p);
03568                         return;
03569                 }
03570                 if (!PREACTION(fm)) {
03571                         check_inuse_chunk(fm, p);
03572                         if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
03573                                 size_t psize = chunksize(p);
03574                                 mchunkptr next = chunk_plus_offset(p, psize);
03575                                 if (!pinuse(p)) {
03576                                         size_t prevsize = p->prev_foot;
03577                                         if (is_mmapped(p)) {
03578                                                 psize += prevsize + MMAP_FOOT_PAD;
03579                                                 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03580                                                         fm->footprint -= psize;
03581                                                 goto postaction;
03582                                         }
03583                                         else {
03584                                                 mchunkptr prev = chunk_minus_offset(p, prevsize);
03585                                                 psize += prevsize;
03586                                                 p = prev;
03587                                                 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
03588                                                         if (p != fm->dv) {
03589                                                                 unlink_chunk(fm, p, prevsize);
03590                                                         }
03591                                                         else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03592                                                                 fm->dvsize = psize;
03593                                                                 set_free_with_pinuse(p, psize, next);
03594                                                                 goto postaction;
03595                                                         }
03596                                                 }
03597                                                 else
03598                                                         goto erroraction;
03599                                         }
03600                                 }
03601 
03602                                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03603                                         if (!cinuse(next)) {  /* consolidate forward */
03604                                                 if (next == fm->top) {
03605                                                         size_t tsize = fm->topsize += psize;
03606                                                         fm->top = p;
03607                                                         p->head = tsize | PINUSE_BIT;
03608                                                         if (p == fm->dv) {
03609                                                                 fm->dv = 0;
03610                                                                 fm->dvsize = 0;
03611                                                         }
03612                                                         if (should_trim(fm, tsize))
03613                                                                 sys_trim(fm, 0);
03614                                                         goto postaction;
03615                                                 }
03616                                                 else if (next == fm->dv) {
03617                                                         size_t dsize = fm->dvsize += psize;
03618                                                         fm->dv = p;
03619                                                         set_size_and_pinuse_of_free_chunk(p, dsize);
03620                                                         goto postaction;
03621                                                 }
03622                                                 else {
03623                                                         size_t nsize = chunksize(next);
03624                                                         psize += nsize;
03625                                                         unlink_chunk(fm, next, nsize);
03626                                                         set_size_and_pinuse_of_free_chunk(p, psize);
03627                                                         if (p == fm->dv) {
03628                                                                 fm->dvsize = psize;
03629                                                                 goto postaction;
03630                                                         }
03631                                                 }
03632                                         }
03633                                         else
03634                                                 set_free_with_pinuse(p, psize, next);
03635 
03636                                         if (is_small(psize)) {
03637                                                 insert_small_chunk(fm, p, psize);
03638                                                 check_free_chunk(fm, p);
03639                                         }
03640                                         else {
03641                                                 tchunkptr tp = (tchunkptr)p;
03642                                                 insert_large_chunk(fm, tp, psize);
03643                                                 check_free_chunk(fm, p);
03644                                                 if (--fm->release_checks == 0)
03645                                                         release_unused_segments(fm);
03646                                         }
03647                                         goto postaction;
03648                                 }
03649                         }
03650 erroraction:
03651                         USAGE_ERROR_ACTION(fm, p);
03652 postaction:
03653                         POSTACTION(fm);
03654                 }
03655         }
03656 }
03657 
03658 void* rak_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
03659         void* mem;
03660         size_t req = 0;
03661         mstate ms = (mstate)msp;
03662         if (!ok_magic(ms)) {
03663                 USAGE_ERROR_ACTION(ms,ms);
03664                 return 0;
03665         }
03666         if (n_elements != 0) {
03667                 req = n_elements * elem_size;
03668                 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
03669                         (req / n_elements != elem_size))
03670                         req = MAX_SIZE_T; /* force downstream failure on overflow */
03671         }
03672         mem = internal_malloc(ms, req);
03673         if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
03674                 memset(mem, 0, req);
03675         return mem;
03676 }
03677 
03678 void* rak_mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
03679         if (oldmem == 0)
03680                 return rak_mspace_malloc(msp, bytes);
03681 #ifdef REALLOC_ZERO_BYTES_FREES
03682         if (bytes == 0) {
03683                 rak_mspace_free(msp, oldmem);
03684                 return 0;
03685         }
03686 #endif /* REALLOC_ZERO_BYTES_FREES */
03687         else {
03688 #if FOOTERS
03689                 mchunkptr p  = mem2chunk(oldmem);
03690                 mstate ms = get_mstate_for(p);
03691 #else /* FOOTERS */
03692                 mstate ms = (mstate)msp;
03693 #endif /* FOOTERS */
03694                 if (!ok_magic(ms)) {
03695                         USAGE_ERROR_ACTION(ms,ms);
03696                         return 0;
03697                 }
03698                 return internal_realloc(ms, oldmem, bytes);
03699         }
03700 }
03701 
03702 void* rak_mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
03703         mstate ms = (mstate)msp;
03704         if (!ok_magic(ms)) {
03705                 USAGE_ERROR_ACTION(ms,ms);
03706                 return 0;
03707         }
03708         return internal_memalign(ms, alignment, bytes);
03709 }
03710 
03711 void** rak_mspace_independent_calloc(mspace msp, size_t n_elements,
03712                                                                  size_t elem_size, void* chunks[]) {
03713                                                                          size_t sz = elem_size; /* serves as 1-element array */
03714                                                                          mstate ms = (mstate)msp;
03715                                                                          if (!ok_magic(ms)) {
03716                                                                                  USAGE_ERROR_ACTION(ms,ms);
03717                                                                                  return 0;
03718                                                                          }
03719                                                                          return ialloc(ms, n_elements, &sz, 3, chunks);
03720 }
03721 
03722 void** rak_mspace_independent_comalloc(mspace msp, size_t n_elements,
03723                                                                    size_t sizes[], void* chunks[]) {
03724                                                                            mstate ms = (mstate)msp;
03725                                                                            if (!ok_magic(ms)) {
03726                                                                                    USAGE_ERROR_ACTION(ms,ms);
03727                                                                                    return 0;
03728                                                                            }
03729                                                                            return ialloc(ms, n_elements, sizes, 0, chunks);
03730 }
03731 
03732 int rak_mspace_trim(mspace msp, size_t pad) {
03733         int result = 0;
03734         mstate ms = (mstate)msp;
03735         if (ok_magic(ms)) {
03736                 if (!PREACTION(ms)) {
03737                         result = sys_trim(ms, pad);
03738                         POSTACTION(ms);
03739                 }
03740         }
03741         else {
03742                 USAGE_ERROR_ACTION(ms,ms);
03743         }
03744         return result;
03745 }
03746 
03747 void rak_mspace_malloc_stats(mspace msp) {
03748         mstate ms = (mstate)msp;
03749         if (ok_magic(ms)) {
03750                 internal_malloc_stats(ms);
03751         }
03752         else {
03753                 USAGE_ERROR_ACTION(ms,ms);
03754         }
03755 }
03756 
03757 size_t rak_mspace_footprint(mspace msp) {
03758         size_t result = 0;
03759         mstate ms = (mstate)msp;
03760         if (ok_magic(ms)) {
03761                 result = ms->footprint;
03762         }
03763         else {
03764                 USAGE_ERROR_ACTION(ms,ms);
03765         }
03766         return result;
03767 }
03768 
03769 
03770 size_t mspace_max_footprint(mspace msp) {
03771         size_t result = 0;
03772         mstate ms = (mstate)msp;
03773         if (ok_magic(ms)) {
03774                 result = ms->max_footprint;
03775         }
03776         else {
03777                 USAGE_ERROR_ACTION(ms,ms);
03778         }
03779         return result;
03780 }
03781 
03782 
03783 #if !NO_MALLINFO
03784 struct mallinfo rak_mspace_mallinfo(mspace msp) {
03785         mstate ms = (mstate)msp;
03786         if (!ok_magic(ms)) {
03787                 USAGE_ERROR_ACTION(ms,ms);
03788         }
03789         return internal_mallinfo(ms);
03790 }
03791 #endif /* NO_MALLINFO */
03792 
03793 size_t rak_mspace_usable_size(void* mem) {
03794         if (mem != 0) {
03795                 mchunkptr p = mem2chunk(mem);
03796                 if (is_inuse(p))
03797                         return chunksize(p) - overhead_for(p);
03798         }
03799         return 0;
03800 }
03801 
03802 int rak_mspace_mallopt(int param_number, int value) {
03803         return change_mparam(param_number, value);
03804 }
03805 
03806 #endif /* MSPACES */
03807 
03808 
03809 /* -------------------- Alternative MORECORE functions ------------------- */
03810 
03811 /*
03812 Guidelines for creating a custom version of MORECORE:
03813 
03814 * For best performance, MORECORE should allocate in multiples of pagesize.
03815 * MORECORE may allocate more memory than requested. (Or even less,
03816 but this will usually result in a malloc failure.)
03817 * MORECORE must not allocate memory when given argument zero, but
03818 instead return one past the end address of memory from previous
03819 nonzero call.
03820 * For best performance, consecutive calls to MORECORE with positive
03821 arguments should return increasing addresses, indicating that
03822 space has been contiguously extended.
03823 * Even though consecutive calls to MORECORE need not return contiguous
03824 addresses, it must be OK for malloc'ed chunks to span multiple
03825 regions in those cases where they do happen to be contiguous.
03826 * MORECORE need not handle negative arguments -- it may instead
03827 just return MFAIL when given negative arguments.
03828 Negative arguments are always multiples of pagesize. MORECORE
03829 must not misinterpret negative args as large positive unsigned
03830 args. You can suppress all such calls from even occurring by defining
03831 MORECORE_CANNOT_TRIM,
03832 
03833 As an example alternative MORECORE, here is a custom allocator
03834 kindly contributed for pre-OSX macOS.  It uses virtually but not
03835 necessarily physically contiguous non-paged memory (locked in,
03836 present and won't get swapped out).  You can use it by uncommenting
03837 this section, adding some #includes, and setting up the appropriate
03838 defines above:
03839 
03840 #define MORECORE osMoreCore
03841 
03842 There is also a shutdown routine that should somehow be called for
03843 cleanup upon program exit.
03844 
03845 #define MAX_POOL_ENTRIES 100
03846 #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
03847 static int next_os_pool;
03848 void *our_os_pools[MAX_POOL_ENTRIES];
03849 
03850 void *osMoreCore(int size)
03851 {
03852 void *ptr = 0;
03853 static void *sbrk_top = 0;
03854 
03855 if (size > 0)
03856 {
03857 if (size < MINIMUM_MORECORE_SIZE)
03858 size = MINIMUM_MORECORE_SIZE;
03859 if (CurrentExecutionLevel() == kTaskLevel)
03860 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
03861 if (ptr == 0)
03862 {
03863 return (void *) MFAIL;
03864 }
03865 // save ptrs so they can be freed during cleanup
03866 our_os_pools[next_os_pool] = ptr;
03867 next_os_pool++;
03868 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
03869 sbrk_top = (char *) ptr + size;
03870 return ptr;
03871 }
03872 else if (size < 0)
03873 {
03874 // we don't currently support shrink behavior
03875 return (void *) MFAIL;
03876 }
03877 else
03878 {
03879 return sbrk_top;
03880 }
03881 }
03882 
03883 // cleanup any allocated memory pools
03884 // called as last thing before shutting down driver
03885 
03886 void osCleanupMem(void)
03887 {
03888 void **ptr;
03889 
03890 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
03891 if (*ptr)
03892 {
03893 PoolDeallocate(*ptr);
03894 *ptr = 0;
03895 }
03896 }
03897 
03898 */
03899 
03900 
03901 /* -----------------------------------------------------------------------
03902 History:
03903 V2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
03904 * Use zeros instead of prev foot for is_mmapped
03905 * Add rak_mspace_track_large_chunks; thanks to Jean Brouwers
03906 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
03907 * Fix insufficient sys_alloc padding when using 16byte alignment
03908 * Fix bad error check in rak_mspace_footprint
03909 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
03910 * Reentrant spin locks; thanks to Earl Chew and others
03911 * Win32 improvements; thanks to Niall Douglas and Earl Chew
03912 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
03913 * Extension hook in malloc_state
03914 * Various small adjustments to reduce warnings on some compilers
03915 * Various configuration extensions/changes for more platforms. Thanks
03916 to all who contributed these.
03917 
03918 V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
03919 * Add max_footprint functions
03920 * Ensure all appropriate literals are size_t
03921 * Fix conditional compilation problem for some #define settings
03922 * Avoid concatenating segments with the one provided
03923 in rak_create_mspace_with_base
03924 * Rename some variables to avoid compiler shadowing warnings
03925 * Use explicit lock initialization.
03926 * Better handling of sbrk interference.
03927 * Simplify and fix segment insertion, trimming and mspace_destroy
03928 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
03929 * Thanks especially to Dennis Flanagan for help on these.
03930 
03931 V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
03932 * Fix memalign brace error.
03933 
03934 V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
03935 * Fix improper #endif nesting in C++
03936 * Add explicit casts needed for C++
03937 
03938 V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
03939 * Use trees for large bins
03940 * Support mspaces
03941 * Use segments to unify sbrk-based and mmap-based system allocation,
03942 removing need for emulation on most platforms without sbrk.
03943 * Default safety checks
03944 * Optional footer checks. Thanks to William Robertson for the idea.
03945 * Internal code refactoring
03946 * Incorporate suggestions and platform-specific changes.
03947 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
03948 Aaron Bachmann,  Emery Berger, and others.
03949 * Speed up non-fastbin processing enough to remove fastbins.
03950 * Remove useless cfree() to avoid conflicts with other apps.
03951 * Remove internal memcpy, memset. Compilers handle builtins better.
03952 * Remove some options that no one ever used and rename others.
03953 
03954 V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
03955 * Fix malloc_state bitmap array misdeclaration
03956 
03957 V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
03958 * Allow tuning of FIRST_SORTED_BIN_SIZE
03959 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
03960 * Better detection and support for non-contiguousness of MORECORE.
03961 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
03962 * Bypass most of malloc if no frees. Thanks To Emery Berger.
03963 * Fix freeing of old top non-contiguous chunk im sysmalloc.
03964 * Raised default trim and map thresholds to 256K.
03965 * Fix mmap-related #defines. Thanks to Lubos Lunak.
03966 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
03967 * Branch-free bin calculation
03968 * Default trim and mmap thresholds now 256K.
03969 
03970 V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
03971 * Introduce independent_comalloc and independent_calloc.
03972 Thanks to Michael Pachos for motivation and help.
03973 * Make optional .h file available
03974 * Allow > 2GB requests on 32bit systems.
03975 * new DL_PLATFORM_WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
03976 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
03977 and Anonymous.
03978 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
03979 helping test this.)
03980 * memalign: check alignment arg
03981 * realloc: don't try to shift chunks backwards, since this
03982 leads to  more fragmentation in some programs and doesn't
03983 seem to help in any others.
03984 * Collect all cases in malloc requiring system memory into sysmalloc
03985 * Use mmap as backup to sbrk
03986 * Place all internal state in malloc_state
03987 * Introduce fastbins (although similar to 2.5.1)
03988 * Many minor tunings and cosmetic improvements
03989 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
03990 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
03991 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
03992 * Include errno.h to support default failure action.
03993 
03994 V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
03995 * return null for negative arguments
03996 * Added Several DL_PLATFORM_WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
03997 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
03998 (e.g. DL_PLATFORM_WIN32 platforms)
03999 * Cleanup header file inclusion for DL_PLATFORM_WIN32 platforms
04000 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
04001 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
04002 memory allocation routines
04003 * Set 'malloc_getpagesize' for DL_PLATFORM_WIN32 platforms (needs more work)
04004 * Use 'assert' rather than 'ASSERT' in DL_PLATFORM_WIN32 code to conform to
04005 usage of 'assert' in non-DL_PLATFORM_WIN32 code
04006 * Improve DL_PLATFORM_WIN32 'sbrk()' emulation's 'findRegion()' routine to
04007 avoid infinite loop
04008 * Always call 'fREe()' rather than 'free()'
04009 
04010 V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
04011 * Fixed ordering problem with boundary-stamping
04012 
04013 V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
04014 * Added pvalloc, as recommended by H.J. Liu
04015 * Added 64bit pointer support mainly from Wolfram Gloger
04016 * Added anonymously donated DL_PLATFORM_WIN32 sbrk emulation
04017 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
04018 * malloc_extend_top: fix mask error that caused wastage after
04019 foreign sbrks
04020 * Add linux mremap support code from HJ Liu
04021 
04022 V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
04023 * Integrated most documentation with the code.
04024 * Add support for mmap, with help from
04025 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
04026 * Use last_remainder in more cases.
04027 * Pack bins using idea from  colin@nyx10.cs.du.edu
04028 * Use ordered bins instead of best-fit threshhold
04029 * Eliminate block-local decls to simplify tracing and debugging.
04030 * Support another case of realloc via move into top
04031 * Fix error occuring when initial sbrk_base not word-aligned.
04032 * Rely on page size for units instead of SBRK_UNIT to
04033 avoid surprises about sbrk alignment conventions.
04034 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
04035 (raymond@es.ele.tue.nl) for the suggestion.
04036 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
04037 * More precautions for cases where other routines call sbrk,
04038 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
04039 * Added macros etc., allowing use in linux libc from
04040 H.J. Lu (hjl@gnu.ai.mit.edu)
04041 * Inverted this history list
04042 
04043 V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
04044 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
04045 * Removed all preallocation code since under current scheme
04046 the work required to undo bad preallocations exceeds
04047 the work saved in good cases for most test programs.
04048 * No longer use return list or unconsolidated bins since
04049 no scheme using them consistently outperforms those that don't
04050 given above changes.
04051 * Use best fit for very large chunks to prevent some worst-cases.
04052 * Added some support for debugging
04053 
04054 V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
04055 * Removed footers when chunks are in use. Thanks to
04056 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
04057 
04058 V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
04059 * Added malloc_trim, with help from Wolfram Gloger
04060 (wmglo@Dent.MED.Uni-Muenchen.DE).
04061 
04062 V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
04063 
04064 V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
04065 * realloc: try to expand in both directions
04066 * malloc: swap order of clean-bin strategy;
04067 * realloc: only conditionally expand backwards
04068 * Try not to scavenge used bins
04069 * Use bin counts as a guide to preallocation
04070 * Occasionally bin return list chunks in first scan
04071 * Add a few optimizations from colin@nyx10.cs.du.edu
04072 
04073 V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
04074 * faster bin computation & slightly different binning
04075 * merged all consolidations to one part of malloc proper
04076 (eliminating old malloc_find_space & malloc_clean_bin)
04077 * Scan 2 returns chunks (not just 1)
04078 * Propagate failure in realloc if malloc returns 0
04079 * Add stuff to allow compilation on non-ANSI compilers
04080 from kpv@research.att.com
04081 
04082 V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
04083 * removed potential for odd address access in prev_chunk
04084 * removed dependency on getpagesize.h
04085 * misc cosmetics and a bit more internal documentation
04086 * anticosmetics: mangled names in macros to evade debugger strangeness
04087 * tested on sparc, hp-700, dec-mips, rs6000
04088 with gcc & native cc (hp, dec only) allowing
04089 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
04090 
04091 Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
04092 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
04093 structure of old version,  but most details differ.)
04094 
04095 */
04096 
04097 #endif // _RAKNET_SUPPORT_DL_MALLOC

Generated on Thu Sep 30 2010 01:27:27 for RakNet by  doxygen 1.7.1