00001 #ifdef _RAKNET_SUPPORT_DL_MALLOC
00002
00003 #include "rdlmalloc.h"
00004
00036
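/*
  Locking: when USE_LOCKS is 1, a single global lock protects one-time
  parameter initialization, and a per-mspace lock serializes all
  malloc/free/realloc calls on that space.  If spin locks are available they
  are preferred (a small custom recursive spin lock below); otherwise
  recursive pthread mutexes or Win32 critical sections are used.
  USE_LOCKS > 1 is reserved for user-supplied lock definitions, as in
  stock dlmalloc.
*/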
00037 #if USE_LOCKS == 1
00038
00039 #if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
00040 #if defined(_XBOX) || defined(X360)
00041
00042 #elif !defined(DL_PLATFORM_WIN32)
00043
00044
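/*
  Recursive spin lock built on a GCC x86 cmpxchg.  `l` is the raw lock word,
  `c` counts recursive acquisitions by the owner, and `threadid` records the
  owning thread so re-entry by the same thread just bumps the count.
*/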
struct pthread_mlock_t {
  volatile unsigned int l;   /* 0 = unlocked, 1 = locked */
  unsigned int c;            /* recursion count */
  pthread_t threadid;        /* owning thread while locked */
};
00050 #define MLOCK_T struct pthread_mlock_t
00051 #define CURRENT_THREAD pthread_self()
00052 #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
00053 #define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
00054 #define RELEASE_LOCK(sl) pthread_release_lock(sl)
00055 #define TRY_LOCK(sl) pthread_try_lock(sl)
00056 #define SPINS_PER_YIELD 63
00057
00058 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
00059
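/*
  Acquire: if the lock word is set and we already own it, this is a recursive
  acquisition.  Otherwise try to flip 0 -> 1 with a locked cmpxchg; on failure
  keep spinning, yielding the CPU every 64 spins (SPINS_PER_YIELD + 1).
*/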
00060 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
00061 int spins = 0;
00062 volatile unsigned int* lp = &sl->l;
00063 for (;;) {
00064 if (*lp != 0) {
00065 if (sl->threadid == CURRENT_THREAD) {
00066 ++sl->c;
00067 return 0;
00068 }
00069 }
00070 else {
00071
00072 int cmp = 0;
00073 int val = 1;
00074 int ret;
00075 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
00076 : "=a" (ret)
00077 : "r" (val), "m" (*(lp)), "0"(cmp)
00078 : "memory", "cc");
00079 if (!ret) {
00080 assert(!sl->threadid);
00081 sl->threadid = CURRENT_THREAD;
00082 sl->c = 1;
00083 return 0;
00084 }
00085 }
00086 if ((++spins & SPINS_PER_YIELD) == 0) {
00087 #if defined (__SVR4) && defined (__sun)
00088 thr_yield();
00089 #else
00090 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
00091 sched_yield();
00092 #else
; /* no portable yield primitive on this platform; keep spinning */
00094 #endif
00095 #endif
00096 }
00097 }
00098 }
00099
00100 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
00101 volatile unsigned int* lp = &sl->l;
00102 assert(*lp != 0);
00103 assert(sl->threadid == CURRENT_THREAD);
00104 if (--sl->c == 0) {
00105 sl->threadid = 0;
00106 int prev = 0;
00107 int ret;
00108 __asm__ __volatile__ ("lock; xchgl %0, %1"
00109 : "=r" (ret)
00110 : "m" (*(lp)), "0"(prev)
00111 : "memory");
00112 }
00113 }
00114
00115 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
00116 volatile unsigned int* lp = &sl->l;
00117 if (*lp != 0) {
00118 if (sl->threadid == CURRENT_THREAD) {
00119 ++sl->c;
00120 return 1;
00121 }
00122 }
00123 else {
00124 int cmp = 0;
00125 int val = 1;
00126 int ret;
00127 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
00128 : "=a" (ret)
00129 : "r" (val), "m" (*(lp)), "0"(cmp)
00130 : "memory", "cc");
00131 if (!ret) {
00132 assert(!sl->threadid);
00133 sl->threadid = CURRENT_THREAD;
00134 sl->c = 1;
00135 return 1;
00136 }
00137 }
00138 return 0;
00139 }
00140
00141
00142 #else
00143
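/*
  Win32 flavor of the same recursive spin lock, using an interlocked exchange
  on `l` instead of inline assembly.  The lower-case interlockedexchange() is
  presumably a wrapper macro provided by rdlmalloc.h.
*/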
struct win32_mlock_t {
  volatile long l;           /* 0 = unlocked, 1 = locked */
  unsigned int c;            /* recursion count */
  long threadid;             /* owning thread id (GetCurrentThreadId) while locked */
};
00149
00150 #define MLOCK_T struct win32_mlock_t
00151 #define CURRENT_THREAD GetCurrentThreadId()
00152 #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
00153 #define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
00154 #define RELEASE_LOCK(sl) win32_release_lock(sl)
00155 #define TRY_LOCK(sl) win32_try_lock(sl)
00156 #define SPINS_PER_YIELD 63
00157
00158 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
00159
00160 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
00161 int spins = 0;
00162 for (;;) {
00163 if (sl->l != 0) {
00164 if (sl->threadid == CURRENT_THREAD) {
00165 ++sl->c;
00166 return 0;
00167 }
00168 }
00169 else {
00170 if (!interlockedexchange(&sl->l, 1)) {
00171 assert(!sl->threadid);
00172 sl->threadid = CURRENT_THREAD;
00173 sl->c = 1;
00174 return 0;
00175 }
00176 }
00177 if ((++spins & SPINS_PER_YIELD) == 0)
00178 SleepEx(0, FALSE);
00179 }
00180 }
00181
00182 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
00183 assert(sl->threadid == CURRENT_THREAD);
00184 assert(sl->l != 0);
00185 if (--sl->c == 0) {
00186 sl->threadid = 0;
00187 interlockedexchange (&sl->l, 0);
00188 }
00189 }
00190
00191 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
00192 if (sl->l != 0) {
00193 if (sl->threadid == CURRENT_THREAD) {
00194 ++sl->c;
00195 return 1;
00196 }
00197 }
00198 else {
00199 if (!interlockedexchange(&sl->l, 1)){
00200 assert(!sl->threadid);
00201 sl->threadid = CURRENT_THREAD;
00202 sl->c = 1;
00203 return 1;
00204 }
00205 }
00206 return 0;
00207 }
00208
00209 #endif
00210 #else
00211
00212 #ifndef DL_PLATFORM_WIN32
00213
00214
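/* Without spin locks, fall back to a recursive pthread mutex per mspace. */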
00215 #define MLOCK_T pthread_mutex_t
00216 #define CURRENT_THREAD pthread_self()
00217 #define INITIAL_LOCK(sl) pthread_init_lock(sl)
00218 #define ACQUIRE_LOCK(sl) pthread_mutex_lock(sl)
00219 #define RELEASE_LOCK(sl) pthread_mutex_unlock(sl)
00220 #define TRY_LOCK(sl) (!pthread_mutex_trylock(sl))
00221
00222 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
00223
00224
00225
00226 #ifdef linux
00227 #ifndef PTHREAD_MUTEX_RECURSIVE
00228 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
00229 int __kind));
00230 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
00231 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
00232 #endif
00233 #endif
00234
00235 static int pthread_init_lock (MLOCK_T *sl) {
00236 pthread_mutexattr_t attr;
00237 if (pthread_mutexattr_init(&attr)) return 1;
00238 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
00239 if (pthread_mutex_init(sl, &attr)) return 1;
00240 if (pthread_mutexattr_destroy(&attr)) return 1;
00241 return 0;
00242 }
00243
00244 #else
00245
00246 #define MLOCK_T CRITICAL_SECTION
00247 #define CURRENT_THREAD GetCurrentThreadId()
00248 #define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
#define ACQUIRE_LOCK(s) (EnterCriticalSection((s)), 0)
#define RELEASE_LOCK(s) LeaveCriticalSection((s))
#define TRY_LOCK(s) TryEnterCriticalSection((s))
00252 #define NEED_GLOBAL_LOCK_INIT
00253
00254 static MLOCK_T malloc_global_mutex;
00255 static volatile long malloc_global_mutex_status;
00256
00257
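/*
  Lazily initialize the global CRITICAL_SECTION exactly once:
  status 0 = untouched, -1 = initialization in progress, 1 = ready.
  Losers of the compare-exchange race sleep until the winner finishes.
*/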
00258 static void init_malloc_global_mutex() {
00259 for (;;) {
00260 long stat = malloc_global_mutex_status;
00261 if (stat > 0)
00262 return;
00263
00264 if (stat == 0 &&
00265 interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
00266 InitializeCriticalSection(&malloc_global_mutex);
00267 interlockedexchange(&malloc_global_mutex_status,1);
00268 return;
00269 }
00270 SleepEx(0, FALSE);
00271 }
00272 }
00273
00274 #endif
00275 #endif
00276 #endif
00277
00278
00279
00280 #if USE_LOCKS > 1
00281
00282
00283
00284
00285
00286
00287 #endif
00288
00289
00290
00291 #if USE_LOCKS
00292 #define USE_LOCK_BIT (2U)
00293 #else
00294 #define USE_LOCK_BIT (0U)
00295 #define INITIAL_LOCK(l)
00296 #endif
00297
00298 #if USE_LOCKS
00299 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
00300 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
00301 #endif
00302 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
00303 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
00304 #endif
00305 #else
00306 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
00307 #define RELEASE_MALLOC_GLOBAL_LOCK()
00308 #endif
00309
00447
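/*
  Chunk representation (boundary-tag scheme, as in stock dlmalloc): each chunk
  starts with a `head` word holding its size plus status bits, and `prev_foot`
  replicates the size of the preceding chunk while that chunk is free.  The
  fd/bk links exist only while a chunk sits in a bin; in-use chunks hand that
  space to the user, so user memory starts TWO_SIZE_T_SIZES past the chunk
  base (see chunk2mem/mem2chunk below).

  Rough example, assuming a 32-bit build without FOOTERS (so CHUNK_OVERHEAD
  is 4 and MIN_CHUNK_SIZE is 16):
    request2size(8)  == 16   -- tiny requests round up to MIN_CHUNK_SIZE
    request2size(13) == 24   -- 13 + 4 bytes overhead, rounded up to 8 bytes
*/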
struct malloc_chunk {
  size_t               prev_foot;  /* size of previous chunk (if free) */
  size_t               head;       /* size and inuse bits */
  struct malloc_chunk* fd;         /* double links -- used only when free */
  struct malloc_chunk* bk;
};
00454
00455 typedef struct malloc_chunk mchunk;
00456 typedef struct malloc_chunk* mchunkptr;
00457 typedef struct malloc_chunk* sbinptr;
00458 typedef unsigned int bindex_t;
00459 typedef unsigned int binmap_t;
00460 typedef unsigned int flag_t;
00461
00462
00463
00464 #define MCHUNK_SIZE (sizeof(mchunk))
00465
00466 #if FOOTERS
00467 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
00468 #else
00469 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
00470 #endif
00471
00472
00473 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
00474
00475 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
00476
00477
00478 #define MIN_CHUNK_SIZE\
00479 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
00480
00481
00482 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
00483 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
00484
00485 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
00486
00487
00488 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
00489 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
00490
00491
00492 #define pad_request(req) \
00493 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
00494
00495
00496 #define request2size(req) \
00497 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
00498
00499
00500
00501
00502
00503
00504
00505
00506
00507
00508
00509
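/*
  Status bits stored in the low bits of `head`:
    PINUSE_BIT - the previous physical chunk is in use,
    CINUSE_BIT - this chunk is in use,
    FLAG4_BIT  - spare bit, unused here.
  A chunk with both inuse bits clear was allocated directly via mmap, and
  FENCEPOST_HEAD marks the sentinel headers at the end of a segment.
*/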
00510 #define PINUSE_BIT (SIZE_T_ONE)
00511 #define CINUSE_BIT (SIZE_T_TWO)
00512 #define FLAG4_BIT (SIZE_T_FOUR)
00513 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
00514 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
00515
00516
00517 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
00518
00519
00520 #define cinuse(p) ((p)->head & CINUSE_BIT)
00521 #define pinuse(p) ((p)->head & PINUSE_BIT)
00522 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
00523 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
00524
00525 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
00526
00527 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
00528
00529
00530 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
00531 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
00532
00533
00534 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
00535 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
00536
00537
00538 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
00539
00540
00541 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
00542 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
00543
00544
00545 #define set_size_and_pinuse_of_free_chunk(p, s)\
00546 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
00547
00548
00549 #define set_free_with_pinuse(p, s, n)\
00550 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
00551
00552
00553 #define overhead_for(p)\
00554 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
00555
00556
00557 #if MMAP_CLEARS
00558 #define calloc_must_clear(p) (!is_mmapped(p))
00559 #else
00560 #define calloc_must_clear(p) (1)
00561 #endif
00562
00653
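/*
  Free chunks of at least MIN_LARGE_SIZE (256) bytes are kept in per-bin
  bitwise tries ("treebins") keyed on chunk size, with same-sized chunks
  chained through fd/bk off a single tree node.
*/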
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];  /* left/right subtries */
  struct malloc_tree_chunk* parent;    /* parent node, or the treebin head */
  bindex_t                  index;     /* index of the treebin holding this chunk */
};
00665
00666 typedef struct malloc_tree_chunk tchunk;
00667 typedef struct malloc_tree_chunk* tchunkptr;
00668 typedef struct malloc_tree_chunk* tbinptr;
00669
00670
00671 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
00672
00729
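/*
  Each contiguous region obtained from the system (via MORECORE or mmap) is
  described by a malloc_segment; segments form a singly linked list headed by
  the `seg` field embedded in malloc_state.
*/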
struct malloc_segment {
  char*                  base;    /* base address of the segment */
  size_t                 size;    /* allocated size */
  struct malloc_segment* next;    /* ptr to next segment */
  flag_t                 sflags;  /* mmap and extern flags */
};
00736
00737 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
00738 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
00739
00740 typedef struct malloc_segment msegment;
00741 typedef struct malloc_segment* msegmentptr;
00742
00827
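/*
  Bin layout: 32 smallbins hold free chunks of exact sizes below 256 bytes,
  spaced 8 bytes apart (bin i holds chunks of size i << 3), while 32 treebins
  hold ranges of larger sizes.  smallmap/treemap record which bins are
  non-empty so searches can skip straight to a usable bin.
*/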
00828 #define NSMALLBINS (32U)
00829 #define NTREEBINS (32U)
00830 #define SMALLBIN_SHIFT (3U)
00831 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
00832 #define TREEBIN_SHIFT (8U)
00833 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
00834 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
00835 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
00836
struct malloc_state {
  binmap_t   smallmap;            /* bitmap of non-empty smallbins */
  binmap_t   treemap;             /* bitmap of non-empty treebins */
  size_t     dvsize;              /* size of the designated victim chunk */
  size_t     topsize;             /* size of the top chunk */
  char*      least_addr;          /* least address ever obtained from the system */
  mchunkptr  dv;                  /* designated victim: most recent split remainder */
  mchunkptr  top;                 /* topmost chunk of the current segment */
  size_t     trim_check;          /* threshold above which trimming is attempted */
  size_t     release_checks;      /* countdown to scanning for unused segments */
  size_t     magic;               /* must match mparams.magic (integrity check) */
  mchunkptr  smallbins[(NSMALLBINS+1)*2];  /* heads of the small-chunk lists */
  tbinptr    treebins[NTREEBINS]; /* roots of the large-chunk tries */
  size_t     footprint;           /* bytes currently obtained from the system */
  size_t     max_footprint;       /* high-water mark of footprint */
  flag_t     mflags;              /* lock/mmap/contiguity option bits */
#if USE_LOCKS
  MLOCK_T    mutex;               /* lock protecting all operations on this state */
#endif
  msegment   seg;                 /* head of the segment list (embedded) */
  void*      extp;                /* unused but available for extensions */
  size_t     exts;
};
00860
00861 typedef struct malloc_state* mstate;
00862
00863
00864
00865
00866
00867
00868
00869
00870
00871
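/* Global, run-time tunable parameters, filled in once by init_mparams(). */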
struct malloc_params {
  volatile size_t magic;        /* nonzero once init_mparams has run */
  size_t page_size;             /* system page size */
  size_t granularity;           /* allocation unit for system requests */
  size_t mmap_threshold;        /* requests at or above this go straight to mmap */
  size_t trim_threshold;        /* top size above which trimming is considered */
  flag_t default_mflags;        /* initial mflags for new malloc_states */
};
00880
00881 static struct malloc_params mparams;
00882
00883
00884 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
00885
00886 #if !ONLY_MSPACES
00887
00888
00889 static struct malloc_state _gm_;
00890 #define gm (&_gm_)
00891 #define is_global(M) ((M) == &_gm_)
00892
00893 #endif
00894
00895 #define is_initialized(M) ((M)->top != 0)
00896
00897
00898
00899
00900
00901 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
00902 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
00903 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
00904
00905 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
00906 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
00907 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
00908
00909 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
00910 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
00911
00912 #define set_lock(M,L)\
00913 ((M)->mflags = (L)?\
00914 ((M)->mflags | USE_LOCK_BIT) :\
00915 ((M)->mflags & ~USE_LOCK_BIT))
00916
00917
00918 #define page_align(S)\
00919 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
00920
00921
00922 #define granularity_align(S)\
00923 (((S) + (mparams.granularity - SIZE_T_ONE))\
00924 & ~(mparams.granularity - SIZE_T_ONE))
00925
00926
00927
00928 #ifdef DL_PLATFORM_WIN32
00929 #define mmap_align(S) granularity_align(S)
00930 #else
00931 #define mmap_align(S) page_align(S)
00932 #endif
00933
00934
00935 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
00936
00937 #define is_page_aligned(S)\
00938 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
00939 #define is_granularity_aligned(S)\
00940 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
00941
00942
00943 #define segment_holds(S, A)\
00944 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
00945
00946
00947 static msegmentptr segment_holding(mstate m, char* addr) {
00948 msegmentptr sp = &m->seg;
00949 for (;;) {
00950 if (addr >= sp->base && addr < sp->base + sp->size)
00951 return sp;
00952 if ((sp = sp->next) == 0)
00953 return 0;
00954 }
00955 }
00956
00957
00958 static int has_segment_link(mstate m, msegmentptr ss) {
00959 msegmentptr sp = &m->seg;
00960 for (;;) {
00961 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
00962 return 1;
00963 if ((sp = sp->next) == 0)
00964 return 0;
00965 }
00966 }
00967
00968 #ifndef MORECORE_CANNOT_TRIM
00969 #define should_trim(M,s) ((s) > (M)->trim_check)
00970 #else
00971 #define should_trim(M,s) (0)
00972 #endif
00973
00974
00975
00976
00977
00978
00979 #define TOP_FOOT_SIZE\
00980 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
00981
00982
00983
00984
00985
00986
00987
00988
00989
00990
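/*
  PREACTION/POSTACTION bracket every public operation: take the mstate lock
  if locking is enabled for that space, and release it afterwards.
*/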
00991 #if USE_LOCKS
00992
00993 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
00994 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
00995 #else
00996
00997 #ifndef PREACTION
00998 #define PREACTION(M) (0)
00999 #endif
01000
01001 #ifndef POSTACTION
01002 #define POSTACTION(M)
01003 #endif
01004
01005 #endif
01006
01007
01008
01009
01010
01011
01012
01013
01014
01015 #if PROCEED_ON_ERROR
01016
01017
01018 int malloc_corruption_error_count;
01019
01020
01021 static void reset_on_error(mstate m);
01022
01023 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
01024 #define USAGE_ERROR_ACTION(m, p)
01025
01026 #else
01027
01028 #ifndef CORRUPTION_ERROR_ACTION
01029 #define CORRUPTION_ERROR_ACTION(m) ABORT
01030 #endif
01031
01032 #ifndef USAGE_ERROR_ACTION
01033 #define USAGE_ERROR_ACTION(m,p) ABORT
01034 #endif
01035
01036 #endif
01037
01038
01039
01040 #if ! DEBUG
01041
01042 #define check_free_chunk(M,P)
01043 #define check_inuse_chunk(M,P)
01044 #define check_malloced_chunk(M,P,N)
01045 #define check_mmapped_chunk(M,P)
01046 #define check_malloc_state(M)
01047 #define check_top_chunk(M,P)
01048
01049 #else
01050 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
01051 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
01052 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
01053 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
01054 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
01055 #define check_malloc_state(M) do_check_malloc_state(M)
01056
01057 static void do_check_any_chunk(mstate m, mchunkptr p);
01058 static void do_check_top_chunk(mstate m, mchunkptr p);
01059 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
01060 static void do_check_inuse_chunk(mstate m, mchunkptr p);
01061 static void do_check_free_chunk(mstate m, mchunkptr p);
01062 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
01063 static void do_check_tree(mstate m, tchunkptr t);
01064 static void do_check_treebin(mstate m, bindex_t i);
01065 static void do_check_smallbin(mstate m, bindex_t i);
01066 static void do_check_malloc_state(mstate m);
01067 static int bin_find(mstate m, mchunkptr x);
01068 static size_t traverse_and_check(mstate m);
01069 #endif
01070
01071
01072
01073 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
01074 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
01075 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
01076 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
01077
01078
01079 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
01080 #define treebin_at(M,i) (&((M)->treebins[i]))
01081
01082
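/*
  compute_tree_index maps a chunk size to one of the 32 treebins: two bins per
  power of two above TREEBIN_SHIFT, selected by the position of the top set
  bit and the bit just below it.  Fast paths use BSR (or a compiler intrinsic)
  where available; the portable fallback computes the same index with shifts.
*/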
01083 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
01084 #define compute_tree_index(S, I)\
01085 {\
01086 unsigned int X = S >> TREEBIN_SHIFT;\
01087 if (X == 0)\
01088 I = 0;\
01089 else if (X > 0xFFFF)\
01090 I = NTREEBINS-1;\
01091 else {\
01092 unsigned int K;\
01093 __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
01094 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01095 }\
01096 }
01097
01098 #elif defined (__INTEL_COMPILER)
01099 #define compute_tree_index(S, I)\
01100 {\
01101 size_t X = S >> TREEBIN_SHIFT;\
01102 if (X == 0)\
01103 I = 0;\
01104 else if (X > 0xFFFF)\
01105 I = NTREEBINS-1;\
01106 else {\
01107 unsigned int K = _bit_scan_reverse (X); \
01108 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01109 }\
01110 }
01111
01112 #elif defined(_MSC_VER) && _MSC_VER>=1300 && defined(DL_PLATFORM_WIN32)
01113 #define compute_tree_index(S, I)\
01114 {\
01115 size_t X = S >> TREEBIN_SHIFT;\
01116 if (X == 0)\
01117 I = 0;\
01118 else if (X > 0xFFFF)\
01119 I = NTREEBINS-1;\
01120 else {\
01121 unsigned int K;\
01122 _BitScanReverse((DWORD *) &K, X);\
01123 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
01124 }\
01125 }
01126
01127 #else
01128 #define compute_tree_index(S, I)\
01129 {\
01130 size_t X = S >> TREEBIN_SHIFT;\
01131 if (X == 0)\
01132 I = 0;\
01133 else if (X > 0xFFFF)\
01134 I = NTREEBINS-1;\
01135 else {\
01136 unsigned int Y = (unsigned int)X;\
01137 unsigned int N = ((Y - 0x100) >> 16) & 8;\
01138 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
01139 N += K;\
01140 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
01141 K = 14 - N + ((Y <<= K) >> 15);\
01142 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
01143 }\
01144 }
01145 #endif
01146
01147
01148 #define bit_for_tree_index(i) \
01149 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
01150
01151
01152 #define leftshift_for_tree_index(i) \
01153 ((i == NTREEBINS-1)? 0 : \
01154 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
01155
01156
01157 #define minsize_for_tree_index(i) \
01158 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
01159 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
01160
01161
01162
01163
01164
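/* Bitmap helpers for smallmap/treemap: mark, clear and test per-bin bits,
   and isolate the lowest set bit when searching for the next usable bin. */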
01165 #define idx2bit(i) ((binmap_t)(1) << (i))
01166
01167
01168 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
01169 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
01170 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
01171
01172 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
01173 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
01174 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
01175
01176
01177 #define least_bit(x) ((x) & -(x))
01178
01179
01180 #define left_bits(x) ((x<<1) | -(x<<1))
01181
01182
01183 #define same_or_left_bits(x) ((x) | -(x))
01184
01185
01186
01187 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
01188 #define compute_bit2idx(X, I)\
01189 {\
01190 unsigned int J;\
01191 __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
01192 I = (bindex_t)J;\
01193 }
01194
01195 #elif defined (__INTEL_COMPILER)
01196 #define compute_bit2idx(X, I)\
01197 {\
01198 unsigned int J;\
01199 J = _bit_scan_forward (X); \
01200 I = (bindex_t)J;\
01201 }
01202
01203 #elif defined(_MSC_VER) && _MSC_VER>=1300 && defined(DL_PLATFORM_WIN32)
01204 #define compute_bit2idx(X, I)\
01205 {\
01206 unsigned int J;\
01207 _BitScanForward((DWORD *) &J, X);\
01208 I = (bindex_t)J;\
01209 }
01210
01211 #elif USE_BUILTIN_FFS
01212 #define compute_bit2idx(X, I) I = ffs(X)-1
01213
01214 #else
01215 #define compute_bit2idx(X, I)\
01216 {\
01217 unsigned int Y = X - 1;\
01218 unsigned int K = Y >> (16-4) & 16;\
01219 unsigned int N = K; Y >>= K;\
01220 N += K = Y >> (8-3) & 8; Y >>= K;\
01221 N += K = Y >> (4-2) & 4; Y >>= K;\
01222 N += K = Y >> (2-1) & 2; Y >>= K;\
01223 N += K = Y >> (1-0) & 1; Y >>= K;\
01224 I = (bindex_t)(N + Y);\
01225 }
01226 #endif
01227
01228
01256
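/*
  Cheap runtime sanity checks applied to free-list pointers unless INSECURE is
  set: addresses must not precede the least address ever obtained from the
  system, chunks must follow their predecessors, and (with FOOTERS) the mstate
  magic must match.  RTCHECK wraps these in a branch-prediction hint.
*/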
01257 #if !INSECURE
01258
01259 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
01260
01261 #define ok_next(p, n) ((char*)(p) < (char*)(n))
01262
01263 #define ok_inuse(p) is_inuse(p)
01264
01265 #define ok_pinuse(p) pinuse(p)
01266
01267 #else
01268 #define ok_address(M, a) (1)
01269 #define ok_next(b, n) (1)
01270 #define ok_inuse(p) (1)
01271 #define ok_pinuse(p) (1)
01272 #endif
01273
01274 #if (FOOTERS && !INSECURE)
01275
01276 #define ok_magic(M) ((M)->magic == mparams.magic)
01277 #else
01278 #define ok_magic(M) (1)
01279 #endif
01280
01281
01282
01283 #if !INSECURE
01284 #if defined(__GNUC__) && __GNUC__ >= 3
01285 #define RTCHECK(e) __builtin_expect(e, 1)
01286 #else
01287 #define RTCHECK(e) (e)
01288 #endif
01289 #else
01290 #define RTCHECK(e) (1)
01291 #endif
01292
01293
01294
01295 #if !FOOTERS
01296
01297 #define mark_inuse_foot(M,p,s)
01298
01299
01300
01301
01302 #define set_inuse(M,p,s)\
01303 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01304 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01305
01306
01307 #define set_inuse_and_pinuse(M,p,s)\
01308 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01309 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
01310
01311
01312 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01313 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
01314
01315 #else
01316
01317
01318 #define mark_inuse_foot(M,p,s)\
01319 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
01320
01321 #define get_mstate_for(p)\
01322 ((mstate)(((mchunkptr)((char*)(p) +\
01323 (chunksize(p))))->prev_foot ^ mparams.magic))
01324
01325 #define set_inuse(M,p,s)\
01326 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
01327 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
01328 mark_inuse_foot(M,p,s))
01329
01330 #define set_inuse_and_pinuse(M,p,s)\
01331 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01332 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
01333 mark_inuse_foot(M,p,s))
01334
01335 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
01336 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
01337 mark_inuse_foot(M, p, s))
01338
01339 #endif
01340
01341
01342
01343
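/*
  One-time global initialization, performed under the global lock: query the
  page size and allocation granularity, sanity-check the build-time size and
  alignment assumptions, set the default flags, and derive the `magic` value
  (from /dev/urandom, the tick count, or time(), depending on platform).
*/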
01344 static int init_mparams(void) {
01345 #ifdef NEED_GLOBAL_LOCK_INIT
01346 if (malloc_global_mutex_status <= 0)
01347 init_malloc_global_mutex();
01348 #endif
01349
01350 ACQUIRE_MALLOC_GLOBAL_LOCK();
01351 if (mparams.magic == 0) {
01352 size_t magic;
01353 size_t psize;
01354 size_t gsize;
01355
01356 #ifndef DL_PLATFORM_WIN32
01357 psize = malloc_getpagesize;
01358 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
01359 #else
01360 {
01361 SYSTEM_INFO system_info;
01362 GetSystemInfo(&system_info);
01363 psize = system_info.dwPageSize;
01364 gsize = ((DEFAULT_GRANULARITY != 0)?
01365 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
01366 }
01367 #endif
01368
01369
01370
01371
01372
01373
01374
01375 if ((sizeof(size_t) != sizeof(char*)) ||
01376 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
01377 (sizeof(int) < 4) ||
01378 (MALLOC_ALIGNMENT < (size_t)8U) ||
01379 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
01380 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
01381 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
01382 ((psize & (psize-SIZE_T_ONE)) != 0))
01383 ABORT;
01384
01385 mparams.granularity = gsize;
01386 mparams.page_size = psize;
01387 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
01388 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
01389 #if MORECORE_CONTIGUOUS
01390 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
01391 #else
01392 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
01393 #endif
01394
01395 #if !ONLY_MSPACES
01396
01397 gm->mflags = mparams.default_mflags;
01398 INITIAL_LOCK(&gm->mutex);
01399 #endif
01400
01401 {
01402 #if USE_DEV_RANDOM
01403 int fd;
01404 unsigned char buf[sizeof(size_t)];
01405
01406 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
01407 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
01408 magic = *((size_t *) buf);
01409 close(fd);
01410 }
01411 else
01412 #endif
01413
01414 #if defined(_XBOX) || defined(X360)
01415
01416 #elif defined(DL_PLATFORM_WIN32)
01417 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
01418 #else
01419 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
01420 #endif
01421 magic |= (size_t)8U;
01422 magic &= ~(size_t)7U;
01423 mparams.magic = magic;
01424 }
01425 }
01426
01427 RELEASE_MALLOC_GLOBAL_LOCK();
01428 return 1;
01429 }
01430
01431
01432 static int change_mparam(int param_number, int value) {
01433 size_t val;
01434 ensure_initialization();
01435 val = (value == -1)? MAX_SIZE_T : (size_t)value;
01436 switch(param_number) {
01437 case M_TRIM_THRESHOLD:
01438 mparams.trim_threshold = val;
01439 return 1;
01440 case M_GRANULARITY:
01441 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
01442 mparams.granularity = val;
01443 return 1;
01444 }
01445 else
01446 return 0;
01447 case M_MMAP_THRESHOLD:
01448 mparams.mmap_threshold = val;
01449 return 1;
01450 default:
01451 return 0;
01452 }
01453 }
01454
01455 #if DEBUG
01456
01457
01458
01459 static void do_check_any_chunk(mstate m, mchunkptr p) {
01460 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01461 assert(ok_address(m, p));
01462 }
01463
01464
01465 static void do_check_top_chunk(mstate m, mchunkptr p) {
01466 msegmentptr sp = segment_holding(m, (char*)p);
01467 size_t sz = p->head & ~INUSE_BITS;
01468 assert(sp != 0);
01469 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01470 assert(ok_address(m, p));
01471 assert(sz == m->topsize);
01472 assert(sz > 0);
01473 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
01474 assert(pinuse(p));
01475 assert(!pinuse(chunk_plus_offset(p, sz)));
01476 }
01477
01478
01479 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
01480 size_t sz = chunksize(p);
01481 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
01482 assert(is_mmapped(p));
01483 assert(use_mmap(m));
01484 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
01485 assert(ok_address(m, p));
01486 assert(!is_small(sz));
01487 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
01488 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
01489 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
01490 }
01491
01492
01493 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
01494 do_check_any_chunk(m, p);
01495 assert(is_inuse(p));
01496 assert(next_pinuse(p));
01497
01498 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
01499 if (is_mmapped(p))
01500 do_check_mmapped_chunk(m, p);
01501 }
01502
01503
01504 static void do_check_free_chunk(mstate m, mchunkptr p) {
01505 size_t sz = chunksize(p);
01506 mchunkptr next = chunk_plus_offset(p, sz);
01507 do_check_any_chunk(m, p);
01508 assert(!is_inuse(p));
01509 assert(!next_pinuse(p));
01510 assert (!is_mmapped(p));
01511 if (p != m->dv && p != m->top) {
01512 if (sz >= MIN_CHUNK_SIZE) {
01513 assert((sz & CHUNK_ALIGN_MASK) == 0);
01514 assert(is_aligned(chunk2mem(p)));
01515 assert(next->prev_foot == sz);
01516 assert(pinuse(p));
01517 assert (next == m->top || is_inuse(next));
01518 assert(p->fd->bk == p);
01519 assert(p->bk->fd == p);
01520 }
01521 else
01522 assert(sz == SIZE_T_SIZE);
01523 }
01524 }
01525
01526
01527 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
01528 if (mem != 0) {
01529 mchunkptr p = mem2chunk(mem);
01530 size_t sz = p->head & ~INUSE_BITS;
01531 do_check_inuse_chunk(m, p);
01532 assert((sz & CHUNK_ALIGN_MASK) == 0);
01533 assert(sz >= MIN_CHUNK_SIZE);
01534 assert(sz >= s);
01535
01536 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
01537 }
01538 }
01539
01540
01541 static void do_check_tree(mstate m, tchunkptr t) {
01542 tchunkptr head = 0;
01543 tchunkptr u = t;
01544 bindex_t tindex = t->index;
01545 size_t tsize = chunksize(t);
01546 bindex_t idx;
01547 compute_tree_index(tsize, idx);
01548 assert(tindex == idx);
01549 assert(tsize >= MIN_LARGE_SIZE);
01550 assert(tsize >= minsize_for_tree_index(idx));
01551 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
01552
01553 do {
01554 do_check_any_chunk(m, ((mchunkptr)u));
01555 assert(u->index == tindex);
01556 assert(chunksize(u) == tsize);
01557 assert(!is_inuse(u));
01558 assert(!next_pinuse(u));
01559 assert(u->fd->bk == u);
01560 assert(u->bk->fd == u);
01561 if (u->parent == 0) {
01562 assert(u->child[0] == 0);
01563 assert(u->child[1] == 0);
01564 }
01565 else {
01566 assert(head == 0);
01567 head = u;
01568 assert(u->parent != u);
01569 assert (u->parent->child[0] == u ||
01570 u->parent->child[1] == u ||
01571 *((tbinptr*)(u->parent)) == u);
01572 if (u->child[0] != 0) {
01573 assert(u->child[0]->parent == u);
01574 assert(u->child[0] != u);
01575 do_check_tree(m, u->child[0]);
01576 }
01577 if (u->child[1] != 0) {
01578 assert(u->child[1]->parent == u);
01579 assert(u->child[1] != u);
01580 do_check_tree(m, u->child[1]);
01581 }
01582 if (u->child[0] != 0 && u->child[1] != 0) {
01583 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
01584 }
01585 }
01586 u = u->fd;
01587 } while (u != t);
01588 assert(head != 0);
01589 }
01590
01591
01592 static void do_check_treebin(mstate m, bindex_t i) {
01593 tbinptr* tb = treebin_at(m, i);
01594 tchunkptr t = *tb;
01595 int empty = (m->treemap & (1U << i)) == 0;
01596 if (t == 0)
01597 assert(empty);
01598 if (!empty)
01599 do_check_tree(m, t);
01600 }
01601
01602
01603 static void do_check_smallbin(mstate m, bindex_t i) {
01604 sbinptr b = smallbin_at(m, i);
01605 mchunkptr p = b->bk;
01606 unsigned int empty = (m->smallmap & (1U << i)) == 0;
01607 if (p == b)
01608 assert(empty);
01609 if (!empty) {
01610 for (; p != b; p = p->bk) {
01611 size_t size = chunksize(p);
01612 mchunkptr q;
01613
01614 do_check_free_chunk(m, p);
01615
01616 assert(small_index(size) == i);
01617 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
01618
01619 q = next_chunk(p);
01620 if (q->head != FENCEPOST_HEAD)
01621 do_check_inuse_chunk(m, q);
01622 }
01623 }
01624 }
01625
01626
01627 static int bin_find(mstate m, mchunkptr x) {
01628 size_t size = chunksize(x);
01629 if (is_small(size)) {
01630 bindex_t sidx = small_index(size);
01631 sbinptr b = smallbin_at(m, sidx);
01632 if (smallmap_is_marked(m, sidx)) {
01633 mchunkptr p = b;
01634 do {
01635 if (p == x)
01636 return 1;
01637 } while ((p = p->fd) != b);
01638 }
01639 }
01640 else {
01641 bindex_t tidx;
01642 compute_tree_index(size, tidx);
01643 if (treemap_is_marked(m, tidx)) {
01644 tchunkptr t = *treebin_at(m, tidx);
01645 size_t sizebits = size << leftshift_for_tree_index(tidx);
01646 while (t != 0 && chunksize(t) != size) {
01647 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
01648 sizebits <<= 1;
01649 }
01650 if (t != 0) {
01651 tchunkptr u = t;
01652 do {
01653 if (u == (tchunkptr)x)
01654 return 1;
01655 } while ((u = u->fd) != t);
01656 }
01657 }
01658 }
01659 return 0;
01660 }
01661
01662
01663 static size_t traverse_and_check(mstate m) {
01664 size_t sum = 0;
01665 if (is_initialized(m)) {
01666 msegmentptr s = &m->seg;
01667 sum += m->topsize + TOP_FOOT_SIZE;
01668 while (s != 0) {
01669 mchunkptr q = align_as_chunk(s->base);
01670 mchunkptr lastq = 0;
01671 assert(pinuse(q));
01672 while (segment_holds(s, q) &&
01673 q != m->top && q->head != FENCEPOST_HEAD) {
01674 sum += chunksize(q);
01675 if (is_inuse(q)) {
01676 assert(!bin_find(m, q));
01677 do_check_inuse_chunk(m, q);
01678 }
01679 else {
01680 assert(q == m->dv || bin_find(m, q));
01681 assert(lastq == 0 || is_inuse(lastq));
01682 do_check_free_chunk(m, q);
01683 }
01684 lastq = q;
01685 q = next_chunk(q);
01686 }
01687 s = s->next;
01688 }
01689 }
01690 return sum;
01691 }
01692
01693
01694 static void do_check_malloc_state(mstate m) {
01695 bindex_t i;
01696 size_t total;
01697
01698 for (i = 0; i < NSMALLBINS; ++i)
01699 do_check_smallbin(m, i);
01700 for (i = 0; i < NTREEBINS; ++i)
01701 do_check_treebin(m, i);
01702
01703 if (m->dvsize != 0) {
01704 do_check_any_chunk(m, m->dv);
01705 assert(m->dvsize == chunksize(m->dv));
01706 assert(m->dvsize >= MIN_CHUNK_SIZE);
01707 assert(bin_find(m, m->dv) == 0);
01708 }
01709
01710 if (m->top != 0) {
01711 do_check_top_chunk(m, m->top);
01712
01713 assert(m->topsize > 0);
01714 assert(bin_find(m, m->top) == 0);
01715 }
01716
01717 total = traverse_and_check(m);
01718 assert(total <= m->footprint);
01719 assert(m->footprint <= m->max_footprint);
01720 }
01721 #endif
01722
01723
01724
01725 #if !NO_MALLINFO
01726 static struct mallinfo internal_mallinfo(mstate m) {
01727 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
01728 ensure_initialization();
01729 if (!PREACTION(m)) {
01730 check_malloc_state(m);
01731 if (is_initialized(m)) {
01732 size_t nfree = SIZE_T_ONE;
01733 size_t mfree = m->topsize + TOP_FOOT_SIZE;
01734 size_t sum = mfree;
01735 msegmentptr s = &m->seg;
01736 while (s != 0) {
01737 mchunkptr q = align_as_chunk(s->base);
01738 while (segment_holds(s, q) &&
01739 q != m->top && q->head != FENCEPOST_HEAD) {
01740 size_t sz = chunksize(q);
01741 sum += sz;
01742 if (!is_inuse(q)) {
01743 mfree += sz;
01744 ++nfree;
01745 }
01746 q = next_chunk(q);
01747 }
01748 s = s->next;
01749 }
01750
01751 nm.arena = sum;
01752 nm.ordblks = nfree;
01753 nm.hblkhd = m->footprint - sum;
01754 nm.usmblks = m->max_footprint;
01755 nm.uordblks = m->footprint - mfree;
01756 nm.fordblks = mfree;
01757 nm.keepcost = m->topsize;
01758 }
01759
01760 POSTACTION(m);
01761 }
01762 return nm;
01763 }
01764 #endif
01765
01766 static void internal_malloc_stats(mstate m) {
01767 ensure_initialization();
01768 if (!PREACTION(m)) {
01769 size_t maxfp = 0;
01770 size_t fp = 0;
01771 size_t used = 0;
01772 check_malloc_state(m);
01773 if (is_initialized(m)) {
01774 msegmentptr s = &m->seg;
01775 maxfp = m->max_footprint;
01776 fp = m->footprint;
01777 used = fp - (m->topsize + TOP_FOOT_SIZE);
01778
01779 while (s != 0) {
01780 mchunkptr q = align_as_chunk(s->base);
01781 while (segment_holds(s, q) &&
01782 q != m->top && q->head != FENCEPOST_HEAD) {
01783 if (!is_inuse(q))
01784 used -= chunksize(q);
01785 q = next_chunk(q);
01786 }
01787 s = s->next;
01788 }
01789 }
01790
01791 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
01792 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
01793 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
01794
01795 POSTACTION(m);
01796 }
01797 }
01798
01799
01800
01801
01802
01803
01804
01805
01806
01807
01808
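/*
  Linking and unlinking of chunks in smallbins.  The bins are circular doubly
  linked lists headed inside malloc_state; the corresponding smallmap bit is
  kept in sync, and suspicious pointers trigger CORRUPTION_ERROR_ACTION
  instead of being followed.
*/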
01809 #define insert_small_chunk(M, P, S) {\
01810 bindex_t I = small_index(S);\
01811 mchunkptr B = smallbin_at(M, I);\
01812 mchunkptr F = B;\
01813 assert(S >= MIN_CHUNK_SIZE);\
01814 if (!smallmap_is_marked(M, I))\
01815 mark_smallmap(M, I);\
01816 else if (RTCHECK(ok_address(M, B->fd)))\
01817 F = B->fd;\
01818 else {\
01819 CORRUPTION_ERROR_ACTION(M);\
01820 }\
01821 B->fd = P;\
01822 F->bk = P;\
01823 P->fd = F;\
01824 P->bk = B;\
01825 }
01826
01827
01828 #define unlink_small_chunk(M, P, S) {\
01829 mchunkptr F = P->fd;\
01830 mchunkptr B = P->bk;\
01831 bindex_t I = small_index(S);\
01832 assert(P != B);\
01833 assert(P != F);\
01834 assert(chunksize(P) == small_index2size(I));\
01835 if (F == B)\
01836 clear_smallmap(M, I);\
01837 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
01838 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
01839 F->bk = B;\
01840 B->fd = F;\
01841 }\
01842 else {\
01843 CORRUPTION_ERROR_ACTION(M);\
01844 }\
01845 }
01846
01847
01848 #define unlink_first_small_chunk(M, B, P, I) {\
01849 mchunkptr F = P->fd;\
01850 assert(P != B);\
01851 assert(P != F);\
01852 assert(chunksize(P) == small_index2size(I));\
01853 if (B == F)\
01854 clear_smallmap(M, I);\
01855 else if (RTCHECK(ok_address(M, F))) {\
01856 B->fd = F;\
01857 F->bk = B;\
01858 }\
01859 else {\
01860 CORRUPTION_ERROR_ACTION(M);\
01861 }\
01862 }
01863
01864
01865
01866
01867
01868 #define replace_dv(M, P, S) {\
01869 size_t DVS = M->dvsize;\
01870 if (DVS != 0) {\
01871 mchunkptr DV = M->dv;\
01872 assert(is_small(DVS));\
01873 insert_small_chunk(M, DV, DVS);\
01874 }\
01875 M->dvsize = S;\
01876 M->dv = P;\
01877 }
01878
01879
01880
01881
01882 #define insert_large_chunk(M, X, S) {\
01883 tbinptr* H;\
01884 bindex_t I;\
01885 compute_tree_index(S, I);\
01886 H = treebin_at(M, I);\
01887 X->index = I;\
01888 X->child[0] = X->child[1] = 0;\
01889 if (!treemap_is_marked(M, I)) {\
01890 mark_treemap(M, I);\
01891 *H = X;\
01892 X->parent = (tchunkptr)H;\
01893 X->fd = X->bk = X;\
01894 }\
01895 else {\
01896 tchunkptr T = *H;\
01897 size_t K = S << leftshift_for_tree_index(I);\
01898 for (;;) {\
01899 if (chunksize(T) != S) {\
01900 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
01901 K <<= 1;\
01902 if (*C != 0)\
01903 T = *C;\
01904 else if (RTCHECK(ok_address(M, C))) {\
01905 *C = X;\
01906 X->parent = T;\
01907 X->fd = X->bk = X;\
01908 break;\
01909 }\
01910 else {\
01911 CORRUPTION_ERROR_ACTION(M);\
01912 break;\
01913 }\
01914 }\
01915 else {\
01916 tchunkptr F = T->fd;\
01917 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
01918 T->fd = F->bk = X;\
01919 X->fd = F;\
01920 X->bk = T;\
01921 X->parent = 0;\
01922 break;\
01923 }\
01924 else {\
01925 CORRUPTION_ERROR_ACTION(M);\
01926 break;\
01927 }\
01928 }\
01929 }\
01930 }\
01931 }
01932
01933
01949
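/*
  Remove chunk X from its treebin.  If X has same-sized neighbors it is simply
  spliced out of the fd/bk ring; otherwise its place in the trie is taken by
  its rightmost descendant (or the bin is emptied and the treemap bit cleared).
*/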
01950 #define unlink_large_chunk(M, X) {\
01951 tchunkptr XP = X->parent;\
01952 tchunkptr R;\
01953 if (X->bk != X) {\
01954 tchunkptr F = X->fd;\
01955 R = X->bk;\
01956 if (RTCHECK(ok_address(M, F))) {\
01957 F->bk = R;\
01958 R->fd = F;\
01959 }\
01960 else {\
01961 CORRUPTION_ERROR_ACTION(M);\
01962 }\
01963 }\
01964 else {\
01965 tchunkptr* RP;\
01966 if (((R = *(RP = &(X->child[1]))) != 0) ||\
01967 ((R = *(RP = &(X->child[0]))) != 0)) {\
01968 tchunkptr* CP;\
01969 while ((*(CP = &(R->child[1])) != 0) ||\
01970 (*(CP = &(R->child[0])) != 0)) {\
01971 R = *(RP = CP);\
01972 }\
01973 if (RTCHECK(ok_address(M, RP)))\
01974 *RP = 0;\
01975 else {\
01976 CORRUPTION_ERROR_ACTION(M);\
01977 }\
01978 }\
01979 }\
01980 if (XP != 0) {\
01981 tbinptr* H = treebin_at(M, X->index);\
01982 if (X == *H) {\
01983 if ((*H = R) == 0) \
01984 clear_treemap(M, X->index);\
01985 }\
01986 else if (RTCHECK(ok_address(M, XP))) {\
01987 if (XP->child[0] == X) \
01988 XP->child[0] = R;\
01989 else \
01990 XP->child[1] = R;\
01991 }\
01992 else\
01993 CORRUPTION_ERROR_ACTION(M);\
01994 if (R != 0) {\
01995 if (RTCHECK(ok_address(M, R))) {\
01996 tchunkptr C0, C1;\
01997 R->parent = XP;\
01998 if ((C0 = X->child[0]) != 0) {\
01999 if (RTCHECK(ok_address(M, C0))) {\
02000 R->child[0] = C0;\
02001 C0->parent = R;\
02002 }\
02003 else\
02004 CORRUPTION_ERROR_ACTION(M);\
02005 }\
02006 if ((C1 = X->child[1]) != 0) {\
02007 if (RTCHECK(ok_address(M, C1))) {\
02008 R->child[1] = C1;\
02009 C1->parent = R;\
02010 }\
02011 else\
02012 CORRUPTION_ERROR_ACTION(M);\
02013 }\
02014 }\
02015 else\
02016 CORRUPTION_ERROR_ACTION(M);\
02017 }\
02018 }\
02019 }
02020
02021
02022
02023 #define insert_chunk(M, P, S)\
02024 if (is_small(S)) insert_small_chunk(M, P, S)\
02025 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
02026
02027 #define unlink_chunk(M, P, S)\
02028 if (is_small(S)) unlink_small_chunk(M, P, S)\
02029 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
02030
02031
02032
02033
02034 #if ONLY_MSPACES
02035 #define internal_malloc(m, b) rak_mspace_malloc(m, b)
02036 #define internal_free(m, mem) rak_mspace_free(m,mem);
02037 #else
02038 #if MSPACES
02039 #define internal_malloc(m, b)\
02040 (m == gm)? rdlmalloc(b) : rak_mspace_malloc(m, b)
02041 #define internal_free(m, mem)\
02042 if (m == gm) rdlfree(mem); else rak_mspace_free(m,mem);
02043 #else
02044 #define internal_malloc(m, b) rdlmalloc(b)
02045 #define internal_free(m, mem) rdlfree(mem)
02046 #endif
02047 #endif
02048
02049
02050
02051
02052
02053
02054
02055
02056
02057
02058
02059
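/*
  Directly mmap a chunk for requests at or above the mmap threshold.  The
  mapping keeps its alignment offset in prev_foot and ends with fencepost
  headers so the chunk can later be resized or unmapped independently of the
  rest of the heap.
*/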
02060 static void* mmap_alloc(mstate m, size_t nb) {
02061 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02062 if (mmsize > nb) {
02063 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
02064 if (mm != CMFAIL) {
02065 size_t offset = align_offset(chunk2mem(mm));
02066 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
02067 mchunkptr p = (mchunkptr)(mm + offset);
02068 p->prev_foot = offset;
02069 p->head = psize;
02070 mark_inuse_foot(m, p, psize);
02071 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
02072 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
02073
02074 if (m->least_addr == 0 || mm < m->least_addr)
02075 m->least_addr = mm;
02076 if ((m->footprint += mmsize) > m->max_footprint)
02077 m->max_footprint = m->footprint;
02078 assert(is_aligned(chunk2mem(p)));
02079 check_mmapped_chunk(m, p);
02080 return chunk2mem(p);
02081 }
02082 }
02083 return 0;
02084 }
02085
02086
02087 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
02088 size_t oldsize = chunksize(oldp);
02089 if (is_small(nb))
02090 return 0;
02091
02092 if (oldsize >= nb + SIZE_T_SIZE &&
02093 (oldsize - nb) <= (mparams.granularity << 1))
02094 return oldp;
02095 else {
02096 size_t offset = oldp->prev_foot;
02097 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
02098 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02099 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
02100 oldmmsize, newmmsize, 1);
02101 if (cp != CMFAIL) {
02102 mchunkptr newp = (mchunkptr)(cp + offset);
02103 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
02104 newp->head = psize;
02105 mark_inuse_foot(m, newp, psize);
02106 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
02107 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
02108
02109 if (cp < m->least_addr)
02110 m->least_addr = cp;
02111 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
02112 m->max_footprint = m->footprint;
02113 check_mmapped_chunk(m, newp);
02114 return newp;
02115 }
02116 }
02117 return 0;
02118 }
02119
02120
02121
02122
02123 static void init_top(mstate m, mchunkptr p, size_t psize) {
02124
02125 size_t offset = align_offset(chunk2mem(p));
02126 p = (mchunkptr)((char*)p + offset);
02127 psize -= offset;
02128
02129 m->top = p;
02130 m->topsize = psize;
02131 p->head = psize | PINUSE_BIT;
02132
02133 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
02134 m->trim_check = mparams.trim_threshold;
02135 }
02136
02137
02138 static void init_bins(mstate m) {
02139
02140 bindex_t i;
02141 for (i = 0; i < NSMALLBINS; ++i) {
02142 sbinptr bin = smallbin_at(m,i);
02143 bin->fd = bin->bk = bin;
02144 }
02145 }
02146
02147 #if PROCEED_ON_ERROR
02148
02149
02150 static void reset_on_error(mstate m) {
02151 int i;
02152 ++malloc_corruption_error_count;
02153
m->smallmap = m->treemap = 0;   /* forget all bin contents */
02155 m->dvsize = m->topsize = 0;
02156 m->seg.base = 0;
02157 m->seg.size = 0;
02158 m->seg.next = 0;
02159 m->top = m->dv = 0;
02160 for (i = 0; i < NTREEBINS; ++i)
02161 *treebin_at(m, i) = 0;
02162 init_bins(m);
02163 }
02164 #endif
02165
02166
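/*
  prepend_alloc: carve an nb-byte chunk from the front of a newly obtained
  region that sits directly below an existing segment, and fold the remainder
  into top, dv, or the bins as appropriate.
*/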
02167 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
02168 size_t nb) {
02169 mchunkptr p = align_as_chunk(newbase);
02170 mchunkptr oldfirst = align_as_chunk(oldbase);
02171 size_t psize = (char*)oldfirst - (char*)p;
02172 mchunkptr q = chunk_plus_offset(p, nb);
02173 size_t qsize = psize - nb;
02174 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02175
02176 assert((char*)oldfirst > (char*)q);
02177 assert(pinuse(oldfirst));
02178 assert(qsize >= MIN_CHUNK_SIZE);
02179
02180
02181 if (oldfirst == m->top) {
02182 size_t tsize = m->topsize += qsize;
02183 m->top = q;
02184 q->head = tsize | PINUSE_BIT;
02185 check_top_chunk(m, q);
02186 }
02187 else if (oldfirst == m->dv) {
02188 size_t dsize = m->dvsize += qsize;
02189 m->dv = q;
02190 set_size_and_pinuse_of_free_chunk(q, dsize);
02191 }
02192 else {
02193 if (!is_inuse(oldfirst)) {
02194 size_t nsize = chunksize(oldfirst);
02195 unlink_chunk(m, oldfirst, nsize);
02196 oldfirst = chunk_plus_offset(oldfirst, nsize);
02197 qsize += nsize;
02198 }
02199 set_free_with_pinuse(q, qsize, oldfirst);
02200 insert_chunk(m, q, qsize);
02201 check_free_chunk(m, q);
02202 }
02203
02204 check_malloced_chunk(m, chunk2mem(p), nb);
02205 return chunk2mem(p);
02206 }
02207
02208
02209 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
02210
02211 char* old_top = (char*)m->top;
02212 msegmentptr oldsp = segment_holding(m, old_top);
02213 char* old_end = oldsp->base + oldsp->size;
02214 size_t ssize = pad_request(sizeof(struct malloc_segment));
02215 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
02216 size_t offset = align_offset(chunk2mem(rawsp));
02217 char* asp = rawsp + offset;
02218 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
02219 mchunkptr sp = (mchunkptr)csp;
02220 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
02221 mchunkptr tnext = chunk_plus_offset(sp, ssize);
02222 mchunkptr p = tnext;
02223 int nfences = 0;
02224
02225
02226 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02227
02228
02229 assert(is_aligned(ss));
02230 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
02231 *ss = m->seg;
02232 m->seg.base = tbase;
02233 m->seg.size = tsize;
02234 m->seg.sflags = mmapped;
02235 m->seg.next = ss;
02236
02237
02238 for (;;) {
02239 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
02240 p->head = FENCEPOST_HEAD;
02241 ++nfences;
02242 if ((char*)(&(nextp->head)) < old_end)
02243 p = nextp;
02244 else
02245 break;
02246 }
02247 assert(nfences >= 2);
02248
02249
02250 if (csp != old_top) {
02251 mchunkptr q = (mchunkptr)old_top;
02252 size_t psize = csp - old_top;
02253 mchunkptr tn = chunk_plus_offset(q, psize);
02254 set_free_with_pinuse(q, psize, tn);
02255 insert_chunk(m, q, psize);
02256 }
02257
02258 check_top_chunk(m, m->top);
02259 }
02260
02261
02262
02263
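/*
  sys_alloc: obtain more system memory.  Order of preference: direct mmap for
  large requests, extending the current segment contiguously via MORECORE, a
  fresh mmap region, and finally a non-contiguous MORECORE allocation.  New
  space is merged into an adjacent segment when possible, otherwise recorded
  via add_segment, and the request is then carved from top.
*/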
02264 static void* sys_alloc(mstate m, size_t nb) {
02265 char* tbase = CMFAIL;
02266 size_t tsize = 0;
02267 flag_t mmap_flag = 0;
02268
02269 ensure_initialization();
02270
02271
02272 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
02273 void* mem = mmap_alloc(m, nb);
02274 if (mem != 0)
02275 return mem;
02276 }
02277
02299
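/* Try to extend the existing top segment in place with MORECORE (sbrk). */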
02300 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
02301 char* br = CMFAIL;
02302 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
02303 size_t asize = 0;
02304 ACQUIRE_MALLOC_GLOBAL_LOCK();
02305
02306 if (ss == 0) {
02307 char* base = (char*)CALL_MORECORE(0);
02308 if (base != CMFAIL) {
02309 asize = granularity_align(nb + SYS_ALLOC_PADDING);
02310
02311 if (!is_page_aligned(base))
02312 asize += (page_align((size_t)base) - (size_t)base);
02313
02314 if (asize < HALF_MAX_SIZE_T &&
02315 (br = (char*)(CALL_MORECORE(asize))) == base) {
02316 tbase = base;
02317 tsize = asize;
02318 }
02319 }
02320 }
02321 else {
02322
02323 asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
02324
02325 if (asize < HALF_MAX_SIZE_T &&
02326 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
02327 tbase = br;
02328 tsize = asize;
02329 }
02330 }
02331
02332 if (tbase == CMFAIL) {
02333 if (br != CMFAIL) {
02334 if (asize < HALF_MAX_SIZE_T &&
02335 asize < nb + SYS_ALLOC_PADDING) {
02336 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize);
02337 if (esize < HALF_MAX_SIZE_T) {
02338 char* end = (char*)CALL_MORECORE(esize);
02339 if (end != CMFAIL)
02340 asize += esize;
02341 else {
02342 (void) CALL_MORECORE(-asize);
02343 br = CMFAIL;
02344 }
02345 }
02346 }
02347 }
02348 if (br != CMFAIL) {
02349 tbase = br;
02350 tsize = asize;
02351 }
02352 else
02353 disable_contiguous(m);
02354 }
02355
02356 RELEASE_MALLOC_GLOBAL_LOCK();
02357 }
02358
02359 if (HAVE_MMAP && tbase == CMFAIL) {
02360 size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
02361 if (rsize > nb) {
02362 char* mp = (char*)(CALL_MMAP(rsize));
02363 if (mp != CMFAIL) {
02364 tbase = mp;
02365 tsize = rsize;
02366 mmap_flag = USE_MMAP_BIT;
02367 }
02368 }
02369 }
02370
02371 if (HAVE_MORECORE && tbase == CMFAIL) {
02372 size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
02373 if (asize < HALF_MAX_SIZE_T) {
02374 char* br = CMFAIL;
02375 char* end = CMFAIL;
02376 ACQUIRE_MALLOC_GLOBAL_LOCK();
02377 br = (char*)(CALL_MORECORE(asize));
02378 end = (char*)(CALL_MORECORE(0));
02379 RELEASE_MALLOC_GLOBAL_LOCK();
02380 if (br != CMFAIL && end != CMFAIL && br < end) {
02381 size_t ssize = end - br;
02382 if (ssize > nb + TOP_FOOT_SIZE) {
02383 tbase = br;
02384 tsize = ssize;
02385 }
02386 }
02387 }
02388 }
02389
02390 if (tbase != CMFAIL) {
02391
02392 if ((m->footprint += tsize) > m->max_footprint)
02393 m->max_footprint = m->footprint;
02394
02395 if (!is_initialized(m)) {
02396 if (m->least_addr == 0 || tbase < m->least_addr)
02397 m->least_addr = tbase;
02398 m->seg.base = tbase;
02399 m->seg.size = tsize;
02400 m->seg.sflags = mmap_flag;
02401 m->magic = mparams.magic;
02402 m->release_checks = MAX_RELEASE_CHECK_RATE;
02403 init_bins(m);
02404 #if !ONLY_MSPACES
02405 if (is_global(m))
02406 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
02407 else
02408 #endif
02409 {
02410
02411 mchunkptr mn = next_chunk(mem2chunk(m));
02412 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
02413 }
02414 }
02415
02416 else {
02417
02418 msegmentptr sp = &m->seg;
02419
02420 while (sp != 0 && tbase != sp->base + sp->size)
02421 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
02422 if (sp != 0 &&
02423 !is_extern_segment(sp) &&
02424 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
02425 segment_holds(sp, m->top)) {
02426 sp->size += tsize;
02427 init_top(m, m->top, m->topsize + tsize);
02428 }
02429 else {
02430 if (tbase < m->least_addr)
02431 m->least_addr = tbase;
02432 sp = &m->seg;
02433 while (sp != 0 && sp->base != tbase + tsize)
02434 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
02435 if (sp != 0 &&
02436 !is_extern_segment(sp) &&
02437 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
02438 char* oldbase = sp->base;
02439 sp->base = tbase;
02440 sp->size += tsize;
02441 return prepend_alloc(m, tbase, oldbase, nb);
02442 }
02443 else
02444 add_segment(m, tbase, tsize, mmap_flag);
02445 }
02446 }
02447
02448 if (nb < m->topsize) {
02449 size_t rsize = m->topsize -= nb;
02450 mchunkptr p = m->top;
02451 mchunkptr r = m->top = chunk_plus_offset(p, nb);
02452 r->head = rsize | PINUSE_BIT;
02453 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
02454 check_top_chunk(m, m->top);
02455 check_malloced_chunk(m, chunk2mem(p), nb);
02456 return chunk2mem(p);
02457 }
02458 }
02459
02460 MALLOC_FAILURE_ACTION;
02461 return 0;
02462 }
02463
02464
02465
02466
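/*
  Memory release: release_unused_segments unmaps mmap-obtained segments whose
  single chunk is entirely free, and sys_trim shrinks the top segment (via
  munmap/mremap or a negative MORECORE) when topsize exceeds the trim
  threshold by at least one granularity unit.
*/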
02467 static size_t release_unused_segments(mstate m) {
02468 size_t released = 0;
02469 int nsegs = 0;
02470 msegmentptr pred = &m->seg;
02471 msegmentptr sp = pred->next;
02472 while (sp != 0) {
02473 char* base = sp->base;
02474 size_t size = sp->size;
02475 msegmentptr next = sp->next;
02476 ++nsegs;
02477 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
02478 mchunkptr p = align_as_chunk(base);
02479 size_t psize = chunksize(p);
02480
02481 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
02482 tchunkptr tp = (tchunkptr)p;
02483 assert(segment_holds(sp, (char*)sp));
02484 if (p == m->dv) {
02485 m->dv = 0;
02486 m->dvsize = 0;
02487 }
02488 else {
02489 unlink_large_chunk(m, tp);
02490 }
02491 if (CALL_MUNMAP(base, size) == 0) {
02492 released += size;
02493 m->footprint -= size;
02494
02495 sp = pred;
02496 sp->next = next;
02497 }
02498 else {
02499 insert_large_chunk(m, tp, psize);
02500 }
02501 }
02502 }
02503 if (NO_SEGMENT_TRAVERSAL)
02504 break;
02505 pred = sp;
02506 sp = next;
02507 }
02508
02509 m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
02510 nsegs : MAX_RELEASE_CHECK_RATE);
02511 return released;
02512 }
02513
02514 static int sys_trim(mstate m, size_t pad) {
02515 size_t released = 0;
02516 ensure_initialization();
02517 if (pad < MAX_REQUEST && is_initialized(m)) {
02518 pad += TOP_FOOT_SIZE;
02519
02520 if (m->topsize > pad) {
02521 /* Shrink top space in granularity-size units, keeping at least one */
02522 size_t unit = mparams.granularity;
02523 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
02524 SIZE_T_ONE) * unit;
02525 msegmentptr sp = segment_holding(m, (char*)m->top);
02526
02527 if (!is_extern_segment(sp)) {
02528 if (is_mmapped_segment(sp)) {
02529 if (HAVE_MMAP &&
02530 sp->size >= extra &&
02531 !has_segment_link(m, sp)) {
02532 size_t newsize = sp->size - extra;
02533
02534 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
02535 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
02536 released = extra;
02537 }
02538 }
02539 }
02540 else if (HAVE_MORECORE) {
02541 if (extra >= HALF_MAX_SIZE_T)
02542 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
02543 ACQUIRE_MALLOC_GLOBAL_LOCK();
02544 {
02545
02546 char* old_br = (char*)(CALL_MORECORE(0));
02547 if (old_br == sp->base + sp->size) {
02548 char* rel_br = (char*)(CALL_MORECORE(-extra));
02549 char* new_br = (char*)(CALL_MORECORE(0));
02550 if (rel_br != CMFAIL && new_br < old_br)
02551 released = old_br - new_br;
02552 }
02553 }
02554 RELEASE_MALLOC_GLOBAL_LOCK();
02555 }
02556 }
02557
02558 if (released != 0) {
02559 sp->size -= released;
02560 m->footprint -= released;
02561 init_top(m, m->top, m->topsize - released);
02562 check_top_chunk(m, m->top);
02563 }
02564 }
02565
02566 /* Unmap any unused mmapped segments */
02567 if (HAVE_MMAP)
02568 released += release_unused_segments(m);
02569
02570 /* On failure, disable autotrim to avoid repeated failed future calls */
02571 if (released == 0 && m->topsize > m->trim_check)
02572 m->trim_check = MAX_SIZE_T;
02573 }
02574
02575 return (released != 0)? 1 : 0;
02576 }
02577
02578 
02579 /* ---------------------------- malloc support --------------------------- */
02580 
02581 /* allocate a large request from the best fitting chunk in a treebin */
02582 static void* tmalloc_large(mstate m, size_t nb) {
02583 tchunkptr v = 0;
02584 size_t rsize = -nb;
02585 tchunkptr t;
02586 bindex_t idx;
02587 compute_tree_index(nb, idx);
02588 if ((t = *treebin_at(m, idx)) != 0) {
02589 /* Traverse tree for this bin looking for node with size == nb */
02590 size_t sizebits = nb << leftshift_for_tree_index(idx);
02591 tchunkptr rst = 0;
02592 for (;;) {
02593 tchunkptr rt;
02594 size_t trem = chunksize(t) - nb;
02595 if (trem < rsize) {
02596 v = t;
02597 if ((rsize = trem) == 0)
02598 break;
02599 }
02600 rt = t->child[1];
02601 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
02602 if (rt != 0 && rt != t)
02603 rst = rt;
02604 if (t == 0) {
02605 t = rst;
02606 break;
02607 }
02608 sizebits <<= 1;
02609 }
02610 }
02611 if (t == 0 && v == 0) {
02612 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
02613 if (leftbits != 0) {
02614 bindex_t i;
02615 binmap_t leastbit = least_bit(leftbits);
02616 compute_bit2idx(leastbit, i);
02617 t = *treebin_at(m, i);
02618 }
02619 }
02620
02621 while (t != 0) {
02622 size_t trem = chunksize(t) - nb;
02623 if (trem < rsize) {
02624 rsize = trem;
02625 v = t;
02626 }
02627 t = leftmost_child(t);
02628 }
02629
02630 /* If dv is a better fit, return 0 so malloc will use it */
02631 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
02632 if (RTCHECK(ok_address(m, v))) {
02633 mchunkptr r = chunk_plus_offset(v, nb);
02634 assert(chunksize(v) == rsize + nb);
02635 if (RTCHECK(ok_next(v, r))) {
02636 unlink_large_chunk(m, v);
02637 if (rsize < MIN_CHUNK_SIZE)
02638 set_inuse_and_pinuse(m, v, (rsize + nb));
02639 else {
02640 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
02641 set_size_and_pinuse_of_free_chunk(r, rsize);
02642 insert_chunk(m, r, rsize);
02643 }
02644 return chunk2mem(v);
02645 }
02646 }
02647 CORRUPTION_ERROR_ACTION(m);
02648 }
02649 return 0;
02650 }
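/*
   Note on tmalloc_large above: compute_tree_index picks the treebin whose
   size range covers nb, and the loop walks that bin as a bitwise trie,
   branching left or right on successive bits of nb (tracked in sizebits)
   while remembering the smallest chunk seen that still fits (v/rsize).
   If the subtree runs out, the search falls back to the leftmost chunk of
   the nearest nonempty larger bin. The chosen chunk is only used when it
   fits better than the current dv chunk; otherwise 0 is returned so the
   caller can split dv instead.
*/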
02651
02652 /* allocate a small request from the best fitting chunk in a treebin */
02653 static void* tmalloc_small(mstate m, size_t nb) {
02654 tchunkptr t, v;
02655 size_t rsize;
02656 bindex_t i;
02657 binmap_t leastbit = least_bit(m->treemap);
02658 compute_bit2idx(leastbit, i);
02659 v = t = *treebin_at(m, i);
02660 rsize = chunksize(t) - nb;
02661
02662 while ((t = leftmost_child(t)) != 0) {
02663 size_t trem = chunksize(t) - nb;
02664 if (trem < rsize) {
02665 rsize = trem;
02666 v = t;
02667 }
02668 }
02669
02670 if (RTCHECK(ok_address(m, v))) {
02671 mchunkptr r = chunk_plus_offset(v, nb);
02672 assert(chunksize(v) == rsize + nb);
02673 if (RTCHECK(ok_next(v, r))) {
02674 unlink_large_chunk(m, v);
02675 if (rsize < MIN_CHUNK_SIZE)
02676 set_inuse_and_pinuse(m, v, (rsize + nb));
02677 else {
02678 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
02679 set_size_and_pinuse_of_free_chunk(r, rsize);
02680 replace_dv(m, r, rsize);
02681 }
02682 return chunk2mem(v);
02683 }
02684 }
02685
02686 CORRUPTION_ERROR_ACTION(m);
02687 return 0;
02688 }
02689
02690 
02691 /* Resize a chunk in place when possible; otherwise fall back to malloc-copy-free. */
02692 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
02693 if (bytes >= MAX_REQUEST) {
02694 MALLOC_FAILURE_ACTION;
02695 return 0;
02696 }
02697 if (!PREACTION(m)) {
02698 mchunkptr oldp = mem2chunk(oldmem);
02699 size_t oldsize = chunksize(oldp);
02700 mchunkptr next = chunk_plus_offset(oldp, oldsize);
02701 mchunkptr newp = 0;
02702 void* extra = 0;
02703
02704 /* Try to either shrink or extend into top. Else malloc-copy-free */
02705
02706 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
02707 ok_next(oldp, next) && ok_pinuse(next))) {
02708 size_t nb = request2size(bytes);
02709 if (is_mmapped(oldp))
02710 newp = mmap_resize(m, oldp, nb);
02711 else if (oldsize >= nb) {
02712 size_t rsize = oldsize - nb;
02713 newp = oldp;
02714 if (rsize >= MIN_CHUNK_SIZE) {
02715 mchunkptr remainder = chunk_plus_offset(newp, nb);
02716 set_inuse(m, newp, nb);
02717 set_inuse_and_pinuse(m, remainder, rsize);
02718 extra = chunk2mem(remainder);
02719 }
02720 }
02721 else if (next == m->top && oldsize + m->topsize > nb) {
02722 /* Expand into top */
02723 size_t newsize = oldsize + m->topsize;
02724 size_t newtopsize = newsize - nb;
02725 mchunkptr newtop = chunk_plus_offset(oldp, nb);
02726 set_inuse(m, oldp, nb);
02727 newtop->head = newtopsize |PINUSE_BIT;
02728 m->top = newtop;
02729 m->topsize = newtopsize;
02730 newp = oldp;
02731 }
02732 }
02733 else {
02734 USAGE_ERROR_ACTION(m, oldmem);
02735 POSTACTION(m);
02736 return 0;
02737 }
02738 #if DEBUG
02739 if (newp != 0) {
02740 check_inuse_chunk(m, newp);
02741 }
02742 #endif
02743
02744 POSTACTION(m);
02745
02746 if (newp != 0) {
02747 if (extra != 0) {
02748 internal_free(m, extra);
02749 }
02750 return chunk2mem(newp);
02751 }
02752 else {
02753 void* newmem = internal_malloc(m, bytes);
02754 if (newmem != 0) {
02755 size_t oc = oldsize - overhead_for(oldp);
02756 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
02757 internal_free(m, oldmem);
02758 }
02759 return newmem;
02760 }
02761 }
02762 return 0;
02763 }
02764
02765 
02766 /* --------------------------- memalign support -------------------------- */
02767 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
02768 if (alignment <= MALLOC_ALIGNMENT)
02769 return internal_malloc(m, bytes);
02770 if (alignment < MIN_CHUNK_SIZE)
02771 alignment = MIN_CHUNK_SIZE;
02772 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
02773 size_t a = MALLOC_ALIGNMENT << 1;
02774 while (a < alignment) a <<= 1;
02775 alignment = a;
02776 }
02777
02778 if (bytes >= MAX_REQUEST - alignment) {
02779 if (m != 0) {
02780 MALLOC_FAILURE_ACTION;
02781 }
02782 }
02783 else {
02784 size_t nb = request2size(bytes);
02785 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
02786 char* mem = (char*)internal_malloc(m, req);
02787 if (mem != 0) {
02788 void* leader = 0;
02789 void* trailer = 0;
02790 mchunkptr p = mem2chunk(mem);
02791
02792 if (PREACTION(m)) return 0;
02793 if ((((size_t)(mem)) % alignment) != 0) {
02794 /*
02795   Find an aligned spot inside chunk. Since we need to give
02796   back leading space in a chunk of at least MIN_CHUNK_SIZE, if
02797   the first calculation places us at a spot with less than
02798   MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
02799   We've allocated enough total room so that this is always
02800   possible.
02801 */
02802 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
02803 alignment -
02804 SIZE_T_ONE)) &
02805 -alignment));
02806 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
02807 br : br+alignment;
02808 mchunkptr newp = (mchunkptr)pos;
02809 size_t leadsize = pos - (char*)(p);
02810 size_t newsize = chunksize(p) - leadsize;
02811
02812 if (is_mmapped(p)) {
02813 newp->prev_foot = p->prev_foot + leadsize;
02814 newp->head = newsize;
02815 }
02816 else {
02817 set_inuse(m, newp, newsize);
02818 set_inuse(m, p, leadsize);
02819 leader = chunk2mem(p);
02820 }
02821 p = newp;
02822 }
02823
02824
02825 if (!is_mmapped(p)) {
02826 size_t size = chunksize(p);
02827 if (size > nb + MIN_CHUNK_SIZE) {
02828 size_t remainder_size = size - nb;
02829 mchunkptr remainder = chunk_plus_offset(p, nb);
02830 set_inuse(m, p, nb);
02831 set_inuse(m, remainder, remainder_size);
02832 trailer = chunk2mem(remainder);
02833 }
02834 }
02835
02836 assert (chunksize(p) >= nb);
02837 assert((((size_t)(chunk2mem(p))) % alignment) == 0);
02838 check_inuse_chunk(m, p);
02839 POSTACTION(m);
02840 if (leader != 0) {
02841 internal_free(m, leader);
02842 }
02843 if (trailer != 0) {
02844 internal_free(m, trailer);
02845 }
02846 return chunk2mem(p);
02847 }
02848 }
02849 return 0;
02850 }
02851
02852 
02853 /* ------------------------ comalloc/coalloc support --------------------- */
02854 static void** ialloc(mstate m,
02855 size_t n_elements,
02856 size_t* sizes,
02857 int opts,
02858 void* chunks[]) {
02859 /*
02860   ialloc provides common support for the independent_X routines,
02861   handling all of the combinations that can result.
02862 
02863   The opts arg has:
02864   bit 0 set if all elements are same size (using sizes[0])
02865   bit 1 set if elements should be zeroed
02866 */
02867 
02868 size_t element_size;
02869 size_t contents_size;
02870 size_t array_size;
02871 void* mem;
02872 mchunkptr p;
02873 size_t remainder_size;
02874 void** marray;
02875 mchunkptr array_chunk;
02876 flag_t was_enabled;
02877 size_t size;
02878 size_t i;
02879
02880 ensure_initialization();
02881
02882 if (chunks != 0) {
02883 if (n_elements == 0)
02884 return chunks;
02885 marray = chunks;
02886 array_size = 0;
02887 }
02888 else {
02889 /* if empty req, must still return chunk representing empty array */
02890 if (n_elements == 0)
02891 return (void**)internal_malloc(m, 0);
02892 marray = 0;
02893 array_size = request2size(n_elements * (sizeof(void*)));
02894 }
02895
02896 /* compute total element size */
02897 if (opts & 0x1) {
02898 element_size = request2size(*sizes);
02899 contents_size = n_elements * element_size;
02900 }
02901 else {
02902 element_size = 0;
02903 contents_size = 0;
02904 for (i = 0; i != n_elements; ++i)
02905 contents_size += request2size(sizes[i]);
02906 }
02907
02908 size = contents_size + array_size;
02909
02910 /*
02911   Allocate the aggregate chunk. First disable direct-mmapping so
02912   malloc won't use it, since we would not be able to later
02913   free/realloc space internal to a segregated mmap region.
02914 */
02915 was_enabled = use_mmap(m);
02916 disable_mmap(m);
02917 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
02918 if (was_enabled)
02919 enable_mmap(m);
02920 if (mem == 0)
02921 return 0;
02922
02923 if (PREACTION(m)) return 0;
02924 p = mem2chunk(mem);
02925 remainder_size = chunksize(p);
02926
02927 assert(!is_mmapped(p));
02928
02929 if (opts & 0x2) {
02930 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
02931 }
02932
02933 /* If not provided, allocate the pointer array as final part of chunk */
02934 if (marray == 0) {
02935 size_t array_chunk_size;
02936 array_chunk = chunk_plus_offset(p, contents_size);
02937 array_chunk_size = remainder_size - contents_size;
02938 marray = (void**) (chunk2mem(array_chunk));
02939 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
02940 remainder_size = contents_size;
02941 }
02942
02943 /* split out elements */
02944 for (i = 0; ; ++i) {
02945 marray[i] = chunk2mem(p);
02946 if (i != n_elements-1) {
02947 if (element_size != 0)
02948 size = element_size;
02949 else
02950 size = request2size(sizes[i]);
02951 remainder_size -= size;
02952 set_size_and_pinuse_of_inuse_chunk(m, p, size);
02953 p = chunk_plus_offset(p, size);
02954 }
02955 else {
02956 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
02957 break;
02958 }
02959 }
02960
02961 #if DEBUG
02962 if (marray != chunks) {
02963
02964 if (element_size != 0) {
02965 assert(remainder_size == element_size);
02966 }
02967 else {
02968 assert(remainder_size == request2size(sizes[i]));
02969 }
02970 check_inuse_chunk(m, mem2chunk(marray));
02971 }
02972 for (i = 0; i != n_elements; ++i)
02973 check_inuse_chunk(m, mem2chunk(marray[i]));
02974
02975 #endif
02976
02977 POSTACTION(m);
02978 return marray;
02979 }
02980
02981
02982 
02983 /* -------------------------- public routines ---------------------------- */
02984 #if !ONLY_MSPACES
02985
02986 void* rdlmalloc(size_t bytes) {
02987 /*
02988   Basic algorithm:
02989   If a small request (< 256 bytes minus per-chunk overhead):
02990     1. If one exists, use a remainderless chunk in associated smallbin.
02991        (Remainderless means that there are too few excess bytes to
02992        represent as a chunk.)
02993     2. If it is big enough, use the dv chunk, which is normally the
02994        chunk adjacent to the one used for the most recent small request.
02995     3. If one exists, split the smallest available chunk in a bin,
02996        becoming the new dv chunk.
02997     4. If it is big enough, use the top chunk.
02998     5. If available, get memory from the system and use it.
02999   Otherwise, for a large request:
03000     1. Find the smallest available binned chunk that fits, and use it
03001        if it is better fitting than the dv chunk, splitting if necessary.
03002     2. If better fitting than any binned chunk, use the dv chunk.
03003     3. If it is big enough, use the top chunk.
03004     4. If the request size >= mmap threshold, try to directly mmap it.
03005     5. If available, get memory from the system and use it.
03006 
03007   The gotos here ensure that POSTACTION occurs along all paths.
03008 */
03009 
03010 #if USE_LOCKS
03011 ensure_initialization();
03012 #endif
03013
03014 if (!PREACTION(gm)) {
03015 void* mem;
03016 size_t nb;
03017 if (bytes <= MAX_SMALL_REQUEST) {
03018 bindex_t idx;
03019 binmap_t smallbits;
03020 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03021 idx = small_index(nb);
03022 smallbits = gm->smallmap >> idx;
03023
03024 if ((smallbits & 0x3U) != 0) {
03025 mchunkptr b, p;
03026 idx += ~smallbits & 1;
03027 b = smallbin_at(gm, idx);
03028 p = b->fd;
03029 assert(chunksize(p) == small_index2size(idx));
03030 unlink_first_small_chunk(gm, b, p, idx);
03031 set_inuse_and_pinuse(gm, p, small_index2size(idx));
03032 mem = chunk2mem(p);
03033 check_malloced_chunk(gm, mem, nb);
03034 goto postaction;
03035 }
03036
03037 else if (nb > gm->dvsize) {
03038 if (smallbits != 0) {
03039 mchunkptr b, p, r;
03040 size_t rsize;
03041 bindex_t i;
03042 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03043 binmap_t leastbit = least_bit(leftbits);
03044 compute_bit2idx(leastbit, i);
03045 b = smallbin_at(gm, i);
03046 p = b->fd;
03047 assert(chunksize(p) == small_index2size(i));
03048 unlink_first_small_chunk(gm, b, p, i);
03049 rsize = small_index2size(i) - nb;
03050
03051 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
03052 set_inuse_and_pinuse(gm, p, small_index2size(i));
03053 else {
03054 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03055 r = chunk_plus_offset(p, nb);
03056 set_size_and_pinuse_of_free_chunk(r, rsize);
03057 replace_dv(gm, r, rsize);
03058 }
03059 mem = chunk2mem(p);
03060 check_malloced_chunk(gm, mem, nb);
03061 goto postaction;
03062 }
03063
03064 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
03065 check_malloced_chunk(gm, mem, nb);
03066 goto postaction;
03067 }
03068 }
03069 }
03070 else if (bytes >= MAX_REQUEST)
03071 nb = MAX_SIZE_T;
03072 else {
03073 nb = pad_request(bytes);
03074 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
03075 check_malloced_chunk(gm, mem, nb);
03076 goto postaction;
03077 }
03078 }
03079
03080 if (nb <= gm->dvsize) {
03081 size_t rsize = gm->dvsize - nb;
03082 mchunkptr p = gm->dv;
03083 if (rsize >= MIN_CHUNK_SIZE) {
03084 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
03085 gm->dvsize = rsize;
03086 set_size_and_pinuse_of_free_chunk(r, rsize);
03087 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03088 }
03089 else {
03090 size_t dvs = gm->dvsize;
03091 gm->dvsize = 0;
03092 gm->dv = 0;
03093 set_inuse_and_pinuse(gm, p, dvs);
03094 }
03095 mem = chunk2mem(p);
03096 check_malloced_chunk(gm, mem, nb);
03097 goto postaction;
03098 }
03099
03100 else if (nb < gm->topsize) {
03101 size_t rsize = gm->topsize -= nb;
03102 mchunkptr p = gm->top;
03103 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
03104 r->head = rsize | PINUSE_BIT;
03105 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
03106 mem = chunk2mem(p);
03107 check_top_chunk(gm, gm->top);
03108 check_malloced_chunk(gm, mem, nb);
03109 goto postaction;
03110 }
03111
03112 mem = sys_alloc(gm, nb);
03113
03114 postaction:
03115 POSTACTION(gm);
03116 return mem;
03117 }
03118
03119 return 0;
03120 }
03121
03122 void rdlfree(void* mem) {
03123 /*
03124   Consolidate freed chunks with preceding or succeeding bordering
03125   free chunks, if they exist, and then place in a bin. Intermixed
03126   with special cases for top, dv, mmapped chunks, and usage errors.
03127 */
03128 
03129 if (mem != 0) {
03130 mchunkptr p = mem2chunk(mem);
03131 #if FOOTERS
03132 mstate fm = get_mstate_for(p);
03133 if (!ok_magic(fm)) {
03134 USAGE_ERROR_ACTION(fm, p);
03135 return;
03136 }
03137 #else
03138 #define fm gm
03139 #endif
03140 if (!PREACTION(fm)) {
03141 check_inuse_chunk(fm, p);
03142 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
03143 size_t psize = chunksize(p);
03144 mchunkptr next = chunk_plus_offset(p, psize);
03145 if (!pinuse(p)) {
03146 size_t prevsize = p->prev_foot;
03147 if (is_mmapped(p)) {
03148 psize += prevsize + MMAP_FOOT_PAD;
03149 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03150 fm->footprint -= psize;
03151 goto postaction;
03152 }
03153 else {
03154 mchunkptr prev = chunk_minus_offset(p, prevsize);
03155 psize += prevsize;
03156 p = prev;
03157 if (RTCHECK(ok_address(fm, prev))) {
03158 if (p != fm->dv) {
03159 unlink_chunk(fm, p, prevsize);
03160 }
03161 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03162 fm->dvsize = psize;
03163 set_free_with_pinuse(p, psize, next);
03164 goto postaction;
03165 }
03166 }
03167 else
03168 goto erroraction;
03169 }
03170 }
03171
03172 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03173 if (!cinuse(next)) {
03174 if (next == fm->top) {
03175 size_t tsize = fm->topsize += psize;
03176 fm->top = p;
03177 p->head = tsize | PINUSE_BIT;
03178 if (p == fm->dv) {
03179 fm->dv = 0;
03180 fm->dvsize = 0;
03181 }
03182 if (should_trim(fm, tsize))
03183 sys_trim(fm, 0);
03184 goto postaction;
03185 }
03186 else if (next == fm->dv) {
03187 size_t dsize = fm->dvsize += psize;
03188 fm->dv = p;
03189 set_size_and_pinuse_of_free_chunk(p, dsize);
03190 goto postaction;
03191 }
03192 else {
03193 size_t nsize = chunksize(next);
03194 psize += nsize;
03195 unlink_chunk(fm, next, nsize);
03196 set_size_and_pinuse_of_free_chunk(p, psize);
03197 if (p == fm->dv) {
03198 fm->dvsize = psize;
03199 goto postaction;
03200 }
03201 }
03202 }
03203 else
03204 set_free_with_pinuse(p, psize, next);
03205
03206 if (is_small(psize)) {
03207 insert_small_chunk(fm, p, psize);
03208 check_free_chunk(fm, p);
03209 }
03210 else {
03211 tchunkptr tp = (tchunkptr)p;
03212 insert_large_chunk(fm, tp, psize);
03213 check_free_chunk(fm, p);
03214 if (--fm->release_checks == 0)
03215 release_unused_segments(fm);
03216 }
03217 goto postaction;
03218 }
03219 }
03220 erroraction:
03221 USAGE_ERROR_ACTION(fm, p);
03222 postaction:
03223 POSTACTION(fm);
03224 }
03225 }
03226 #if !FOOTERS
03227 #undef fm
03228 #endif
03229 }
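/*
   Illustrative usage sketch (not compiled here; assumes the declarations
   from rdlmalloc.h are visible and _RAKNET_SUPPORT_DL_MALLOC is defined):

     #include <string.h>
     #include "rdlmalloc.h"

     void example_basic(void) {
       char* buf = (char*) rdlmalloc(128);        // request 128 bytes
       if (buf != 0) {
         memset(buf, 0, 128);
         // the usable size may exceed the request due to chunk granularity
         size_t usable = rdlmalloc_usable_size(buf);
         (void) usable;
         rdlfree(buf);                            // return the chunk
       }
     }
*/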
03230
03231 void* rdlcalloc(size_t n_elements, size_t elem_size) {
03232 void* mem;
03233 size_t req = 0;
03234 if (n_elements != 0) {
03235 req = n_elements * elem_size;
03236 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
03237 (req / n_elements != elem_size))
03238 req = MAX_SIZE_T;
03239 }
03240 mem = rdlmalloc(req);
03241 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
03242 memset(mem, 0, req);
03243 return mem;
03244 }
03245
03246 void* rdlrealloc(void* oldmem, size_t bytes) {
03247 if (oldmem == 0)
03248 return rdlmalloc(bytes);
03249 #ifdef REALLOC_ZERO_BYTES_FREES
03250 if (bytes == 0) {
03251 rdlfree(oldmem);
03252 return 0;
03253 }
03254 #endif
03255 else {
03256 #if ! FOOTERS
03257 mstate m = gm;
03258 #else
03259 mstate m = get_mstate_for(mem2chunk(oldmem));
03260 if (!ok_magic(m)) {
03261 USAGE_ERROR_ACTION(m, oldmem);
03262 return 0;
03263 }
03264 #endif
03265 return internal_realloc(m, oldmem, bytes);
03266 }
03267 }
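/*
   Illustrative sketch of rdlcalloc/rdlrealloc (not compiled here; assumes
   rdlmalloc.h):

     int* v = (int*) rdlcalloc(16, sizeof(int));      // zero-filled array
     if (v != 0) {
       int* grown = (int*) rdlrealloc(v, 64 * sizeof(int));
       if (grown != 0)
         v = grown;        // the first 16 ints keep their (zero) values
       rdlfree(v);         // on realloc failure the original block survives
     }
*/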
03268
03269 void* rdlmemalign(size_t alignment, size_t bytes) {
03270 return internal_memalign(gm, alignment, bytes);
03271 }
03272
03273 void** rdlindependent_calloc(size_t n_elements, size_t elem_size,
03274 void* chunks[]) {
03275 size_t sz = elem_size;
03276 return ialloc(gm, n_elements, &sz, 3, chunks);
03277 }
03278
03279 void** rdlindependent_comalloc(size_t n_elements, size_t sizes[],
03280 void* chunks[]) {
03281 return ialloc(gm, n_elements, sizes, 0, chunks);
03282 }
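/*
   Illustrative sketch of the independent_* helpers (not compiled here;
   assumes rdlmalloc.h). rdlindependent_calloc hands back n_elements
   zero-filled blocks of one size; rdlindependent_comalloc hands back
   blocks of per-element sizes (not cleared). Both carve the blocks out
   of a single contiguous chunk, and each block may later be passed to
   rdlfree individually:

     void*  blocks[3];
     size_t sizes3[3] = { 24, 40, 8 };
     void** elems = rdlindependent_comalloc(3, sizes3, blocks);
     if (elems != 0) {   // elems == blocks because a chunks[] was supplied
       rdlfree(elems[0]);
       rdlfree(elems[1]);
       rdlfree(elems[2]);
     }
*/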
03283
03284 void* rdlvalloc(size_t bytes) {
03285 size_t pagesz;
03286 ensure_initialization();
03287 pagesz = mparams.page_size;
03288 return rdlmemalign(pagesz, bytes);
03289 }
03290
03291 void* rdlpvalloc(size_t bytes) {
03292 size_t pagesz;
03293 ensure_initialization();
03294 pagesz = mparams.page_size;
03295 return rdlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
03296 }
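/*
   Illustrative sketch of the alignment helpers (not compiled here; assumes
   rdlmalloc.h; 64 is just an example alignment):

     void* p = rdlmemalign(64, 200);     // 64-byte aligned block
     if (p != 0) {
       // ((size_t) p % 64) == 0 holds on success
       rdlfree(p);
     }
     // rdlvalloc(n) returns a page-aligned block; rdlpvalloc(n) additionally
     // rounds n up to a whole number of pages.
*/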
03297
03298 int rdlmalloc_trim(size_t pad) {
03299 int result = 0;
03300 ensure_initialization();
03301 if (!PREACTION(gm)) {
03302 result = sys_trim(gm, pad);
03303 POSTACTION(gm);
03304 }
03305 return result;
03306 }
03307
03308 size_t rdlmalloc_footprint(void) {
03309 return gm->footprint;
03310 }
03311
03312 size_t dlmalloc_max_footprint(void) {
03313 return gm->max_footprint;
03314 }
03315
03316 #if !NO_MALLINFO
03317 struct mallinfo rdlmallinfo(void) {
03318 return internal_mallinfo(gm);
03319 }
03320 #endif
03321
03322 void rdlmalloc_stats() {
03323 internal_malloc_stats(gm);
03324 }
03325
03326 int rdlmallopt(int param_number, int value) {
03327 return change_mparam(param_number, value);
03328 }
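/*
   Illustrative sketch of the bookkeeping entry points (not compiled here;
   assumes rdlmalloc.h):

     size_t before = rdlmalloc_footprint();   // bytes currently obtained from
                                              // the system for the global heap
     rdlmalloc_trim(0);                       // try to give unused memory back
     size_t after = rdlmalloc_footprint();    // may be smaller than before
     (void) before; (void) after;
*/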
03329
03330 #endif
03331
03332 size_t rdlmalloc_usable_size(void* mem) {
03333 if (mem != 0) {
03334 mchunkptr p = mem2chunk(mem);
03335 if (is_inuse(p))
03336 return chunksize(p) - overhead_for(p);
03337 }
03338 return 0;
03339 }
03340
03341 
03342 /* ----------------------------- user mspaces ---------------------------- */
03343 #if MSPACES
03344
03345 static mstate init_user_mstate(char* tbase, size_t tsize) {
03346 size_t msize = pad_request(sizeof(struct malloc_state));
03347 mchunkptr mn;
03348 mchunkptr msp = align_as_chunk(tbase);
03349 mstate m = (mstate)(chunk2mem(msp));
03350 memset(m, 0, msize);
03351 INITIAL_LOCK(&m->mutex);
03352 msp->head = (msize|INUSE_BITS);
03353 m->seg.base = m->least_addr = tbase;
03354 m->seg.size = m->footprint = m->max_footprint = tsize;
03355 m->magic = mparams.magic;
03356 m->release_checks = MAX_RELEASE_CHECK_RATE;
03357 m->mflags = mparams.default_mflags;
03358 m->extp = 0;
03359 m->exts = 0;
03360 disable_contiguous(m);
03361 init_bins(m);
03362 mn = next_chunk(mem2chunk(m));
03363 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
03364 check_top_chunk(m, m->top);
03365 return m;
03366 }
03367
03368 mspace rak_create_mspace(size_t capacity, int locked) {
03369 mstate m = 0;
03370 size_t msize;
03371 ensure_initialization();
03372 msize = pad_request(sizeof(struct malloc_state));
03373 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03374 size_t rs = ((capacity == 0)? mparams.granularity :
03375 (capacity + TOP_FOOT_SIZE + msize));
03376 size_t tsize = granularity_align(rs);
03377 char* tbase = (char*)(CALL_MMAP(tsize));
03378 if (tbase != CMFAIL) {
03379 m = init_user_mstate(tbase, tsize);
03380 m->seg.sflags = USE_MMAP_BIT;
03381 set_lock(m, locked);
03382 }
03383 }
03384 return (mspace)m;
03385 }
03386
03387 mspace rak_create_mspace_with_base(void* base, size_t capacity, int locked) {
03388 mstate m = 0;
03389 size_t msize;
03390 ensure_initialization();
03391 msize = pad_request(sizeof(struct malloc_state));
03392 if (capacity > msize + TOP_FOOT_SIZE &&
03393 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
03394 m = init_user_mstate((char*)base, capacity);
03395 m->seg.sflags = EXTERN_BIT;
03396 set_lock(m, locked);
03397 }
03398 return (mspace)m;
03399 }
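/*
   Illustrative sketch of an mspace placed in caller-owned storage (not
   compiled here; assumes rdlmalloc.h; the arena size is an example):

     static char arena[64 * 1024];
     mspace ms = rak_create_mspace_with_base(arena, sizeof(arena), 0);
     if (ms != 0) {
       void* p = rak_mspace_malloc(ms, 256);
       rak_mspace_free(ms, p);
       rak_destroy_mspace(ms);   // the extern buffer itself is not unmapped
     }
*/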
03400
03401 int rak_mspace_track_large_chunks(mspace msp, int enable) {
03402 int ret = 0;
03403 mstate ms = (mstate)msp;
03404 if (!PREACTION(ms)) {
03405 if (!use_mmap(ms))
03406 ret = 1;
03407 if (!enable)
03408 enable_mmap(ms);
03409 else
03410 disable_mmap(ms);
03411 POSTACTION(ms);
03412 }
03413 return ret;
03414 }
03415
03416 size_t rak_destroy_mspace(mspace msp) {
03417 size_t freed = 0;
03418 mstate ms = (mstate)msp;
03419 if (ok_magic(ms)) {
03420 msegmentptr sp = &ms->seg;
03421 while (sp != 0) {
03422 char* base = sp->base;
03423 size_t size = sp->size;
03424 flag_t flag = sp->sflags;
03425 sp = sp->next;
03426 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
03427 CALL_MUNMAP(base, size) == 0)
03428 freed += size;
03429 }
03430 }
03431 else {
03432 USAGE_ERROR_ACTION(ms,ms);
03433 }
03434 return freed;
03435 }
03436
03437 
03438 /*
03439   mspace versions of routines are near-clones of the global
03440   versions. This is not so nice but better than the alternatives.
03441 */
03442 
03443 void* rak_mspace_malloc(mspace msp, size_t bytes) {
03444 mstate ms = (mstate)msp;
03445 if (!ok_magic(ms)) {
03446 USAGE_ERROR_ACTION(ms,ms);
03447 return 0;
03448 }
03449 if (!PREACTION(ms)) {
03450 void* mem;
03451 size_t nb;
03452 if (bytes <= MAX_SMALL_REQUEST) {
03453 bindex_t idx;
03454 binmap_t smallbits;
03455 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
03456 idx = small_index(nb);
03457 smallbits = ms->smallmap >> idx;
03458
03459 if ((smallbits & 0x3U) != 0) {
03460 mchunkptr b, p;
03461 idx += ~smallbits & 1;
03462 b = smallbin_at(ms, idx);
03463 p = b->fd;
03464 assert(chunksize(p) == small_index2size(idx));
03465 unlink_first_small_chunk(ms, b, p, idx);
03466 set_inuse_and_pinuse(ms, p, small_index2size(idx));
03467 mem = chunk2mem(p);
03468 check_malloced_chunk(ms, mem, nb);
03469 goto postaction;
03470 }
03471
03472 else if (nb > ms->dvsize) {
03473 if (smallbits != 0) {
03474 mchunkptr b, p, r;
03475 size_t rsize;
03476 bindex_t i;
03477 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
03478 binmap_t leastbit = least_bit(leftbits);
03479 compute_bit2idx(leastbit, i);
03480 b = smallbin_at(ms, i);
03481 p = b->fd;
03482 assert(chunksize(p) == small_index2size(i));
03483 unlink_first_small_chunk(ms, b, p, i);
03484 rsize = small_index2size(i) - nb;
03485
03486 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
03487 set_inuse_and_pinuse(ms, p, small_index2size(i));
03488 else {
03489 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03490 r = chunk_plus_offset(p, nb);
03491 set_size_and_pinuse_of_free_chunk(r, rsize);
03492 replace_dv(ms, r, rsize);
03493 }
03494 mem = chunk2mem(p);
03495 check_malloced_chunk(ms, mem, nb);
03496 goto postaction;
03497 }
03498
03499 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
03500 check_malloced_chunk(ms, mem, nb);
03501 goto postaction;
03502 }
03503 }
03504 }
03505 else if (bytes >= MAX_REQUEST)
03506 nb = MAX_SIZE_T;
03507 else {
03508 nb = pad_request(bytes);
03509 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
03510 check_malloced_chunk(ms, mem, nb);
03511 goto postaction;
03512 }
03513 }
03514
03515 if (nb <= ms->dvsize) {
03516 size_t rsize = ms->dvsize - nb;
03517 mchunkptr p = ms->dv;
03518 if (rsize >= MIN_CHUNK_SIZE) {
03519 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
03520 ms->dvsize = rsize;
03521 set_size_and_pinuse_of_free_chunk(r, rsize);
03522 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03523 }
03524 else {
03525 size_t dvs = ms->dvsize;
03526 ms->dvsize = 0;
03527 ms->dv = 0;
03528 set_inuse_and_pinuse(ms, p, dvs);
03529 }
03530 mem = chunk2mem(p);
03531 check_malloced_chunk(ms, mem, nb);
03532 goto postaction;
03533 }
03534
03535 else if (nb < ms->topsize) {
03536 size_t rsize = ms->topsize -= nb;
03537 mchunkptr p = ms->top;
03538 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
03539 r->head = rsize | PINUSE_BIT;
03540 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
03541 mem = chunk2mem(p);
03542 check_top_chunk(ms, ms->top);
03543 check_malloced_chunk(ms, mem, nb);
03544 goto postaction;
03545 }
03546
03547 mem = sys_alloc(ms, nb);
03548
03549 postaction:
03550 POSTACTION(ms);
03551 return mem;
03552 }
03553
03554 return 0;
03555 }
03556
03557 void rak_mspace_free(mspace msp, void* mem) {
03558 if (mem != 0) {
03559 mchunkptr p = mem2chunk(mem);
03560 #if FOOTERS
03561 mstate fm = get_mstate_for(p);
03562 msp = msp; /* placate people compiling with -Wunused */
03563 #else
03564 mstate fm = (mstate)msp;
03565 #endif
03566 if (!ok_magic(fm)) {
03567 USAGE_ERROR_ACTION(fm, p);
03568 return;
03569 }
03570 if (!PREACTION(fm)) {
03571 check_inuse_chunk(fm, p);
03572 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
03573 size_t psize = chunksize(p);
03574 mchunkptr next = chunk_plus_offset(p, psize);
03575 if (!pinuse(p)) {
03576 size_t prevsize = p->prev_foot;
03577 if (is_mmapped(p)) {
03578 psize += prevsize + MMAP_FOOT_PAD;
03579 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
03580 fm->footprint -= psize;
03581 goto postaction;
03582 }
03583 else {
03584 mchunkptr prev = chunk_minus_offset(p, prevsize);
03585 psize += prevsize;
03586 p = prev;
03587 if (RTCHECK(ok_address(fm, prev))) {
03588 if (p != fm->dv) {
03589 unlink_chunk(fm, p, prevsize);
03590 }
03591 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
03592 fm->dvsize = psize;
03593 set_free_with_pinuse(p, psize, next);
03594 goto postaction;
03595 }
03596 }
03597 else
03598 goto erroraction;
03599 }
03600 }
03601
03602 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
03603 if (!cinuse(next)) {
03604 if (next == fm->top) {
03605 size_t tsize = fm->topsize += psize;
03606 fm->top = p;
03607 p->head = tsize | PINUSE_BIT;
03608 if (p == fm->dv) {
03609 fm->dv = 0;
03610 fm->dvsize = 0;
03611 }
03612 if (should_trim(fm, tsize))
03613 sys_trim(fm, 0);
03614 goto postaction;
03615 }
03616 else if (next == fm->dv) {
03617 size_t dsize = fm->dvsize += psize;
03618 fm->dv = p;
03619 set_size_and_pinuse_of_free_chunk(p, dsize);
03620 goto postaction;
03621 }
03622 else {
03623 size_t nsize = chunksize(next);
03624 psize += nsize;
03625 unlink_chunk(fm, next, nsize);
03626 set_size_and_pinuse_of_free_chunk(p, psize);
03627 if (p == fm->dv) {
03628 fm->dvsize = psize;
03629 goto postaction;
03630 }
03631 }
03632 }
03633 else
03634 set_free_with_pinuse(p, psize, next);
03635
03636 if (is_small(psize)) {
03637 insert_small_chunk(fm, p, psize);
03638 check_free_chunk(fm, p);
03639 }
03640 else {
03641 tchunkptr tp = (tchunkptr)p;
03642 insert_large_chunk(fm, tp, psize);
03643 check_free_chunk(fm, p);
03644 if (--fm->release_checks == 0)
03645 release_unused_segments(fm);
03646 }
03647 goto postaction;
03648 }
03649 }
03650 erroraction:
03651 USAGE_ERROR_ACTION(fm, p);
03652 postaction:
03653 POSTACTION(fm);
03654 }
03655 }
03656 }
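/*
   Illustrative sketch of an isolated, mmap-backed mspace (not compiled
   here; assumes rdlmalloc.h; capacity 0 means "use the default granularity"):

     mspace pool = rak_create_mspace(0, 1);   // locked == 1: thread-safe space
     if (pool != 0) {
       void* a = rak_mspace_malloc(pool, 100);
       void* b = rak_mspace_calloc(pool, 10, sizeof(int));
       rak_mspace_free(pool, a);
       rak_mspace_free(pool, b);
       rak_destroy_mspace(pool);              // releases the space's segments
     }
*/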
03657
03658 void* rak_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
03659 void* mem;
03660 size_t req = 0;
03661 mstate ms = (mstate)msp;
03662 if (!ok_magic(ms)) {
03663 USAGE_ERROR_ACTION(ms,ms);
03664 return 0;
03665 }
03666 if (n_elements != 0) {
03667 req = n_elements * elem_size;
03668 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
03669 (req / n_elements != elem_size))
03670 req = MAX_SIZE_T;
03671 }
03672 mem = internal_malloc(ms, req);
03673 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
03674 memset(mem, 0, req);
03675 return mem;
03676 }
03677
03678 void* rak_mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
03679 if (oldmem == 0)
03680 return rak_mspace_malloc(msp, bytes);
03681 #ifdef REALLOC_ZERO_BYTES_FREES
03682 if (bytes == 0) {
03683 rak_mspace_free(msp, oldmem);
03684 return 0;
03685 }
03686 #endif
03687 else {
03688 #if FOOTERS
03689 mchunkptr p = mem2chunk(oldmem);
03690 mstate ms = get_mstate_for(p);
03691 #else
03692 mstate ms = (mstate)msp;
03693 #endif
03694 if (!ok_magic(ms)) {
03695 USAGE_ERROR_ACTION(ms,ms);
03696 return 0;
03697 }
03698 return internal_realloc(ms, oldmem, bytes);
03699 }
03700 }
03701
03702 void* rak_mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
03703 mstate ms = (mstate)msp;
03704 if (!ok_magic(ms)) {
03705 USAGE_ERROR_ACTION(ms,ms);
03706 return 0;
03707 }
03708 return internal_memalign(ms, alignment, bytes);
03709 }
03710
03711 void** rak_mspace_independent_calloc(mspace msp, size_t n_elements,
03712 size_t elem_size, void* chunks[]) {
03713 size_t sz = elem_size;
03714 mstate ms = (mstate)msp;
03715 if (!ok_magic(ms)) {
03716 USAGE_ERROR_ACTION(ms,ms);
03717 return 0;
03718 }
03719 return ialloc(ms, n_elements, &sz, 3, chunks);
03720 }
03721
03722 void** rak_mspace_independent_comalloc(mspace msp, size_t n_elements,
03723 size_t sizes[], void* chunks[]) {
03724 mstate ms = (mstate)msp;
03725 if (!ok_magic(ms)) {
03726 USAGE_ERROR_ACTION(ms,ms);
03727 return 0;
03728 }
03729 return ialloc(ms, n_elements, sizes, 0, chunks);
03730 }
03731
03732 int rak_mspace_trim(mspace msp, size_t pad) {
03733 int result = 0;
03734 mstate ms = (mstate)msp;
03735 if (ok_magic(ms)) {
03736 if (!PREACTION(ms)) {
03737 result = sys_trim(ms, pad);
03738 POSTACTION(ms);
03739 }
03740 }
03741 else {
03742 USAGE_ERROR_ACTION(ms,ms);
03743 }
03744 return result;
03745 }
03746
03747 void rak_mspace_malloc_stats(mspace msp) {
03748 mstate ms = (mstate)msp;
03749 if (ok_magic(ms)) {
03750 internal_malloc_stats(ms);
03751 }
03752 else {
03753 USAGE_ERROR_ACTION(ms,ms);
03754 }
03755 }
03756
03757 size_t rak_mspace_footprint(mspace msp) {
03758 size_t result = 0;
03759 mstate ms = (mstate)msp;
03760 if (ok_magic(ms)) {
03761 result = ms->footprint;
03762 }
03763 else {
03764 USAGE_ERROR_ACTION(ms,ms);
03765 }
03766 return result;
03767 }
03768
03769
03770 size_t mspace_max_footprint(mspace msp) {
03771 size_t result = 0;
03772 mstate ms = (mstate)msp;
03773 if (ok_magic(ms)) {
03774 result = ms->max_footprint;
03775 }
03776 else {
03777 USAGE_ERROR_ACTION(ms,ms);
03778 }
03779 return result;
03780 }
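/*
   Illustrative sketch of per-mspace accounting (not compiled here; assumes
   rdlmalloc.h; the capacity is an example value):

     mspace pool = rak_create_mspace(256 * 1024, 0);
     if (pool != 0) {
       void* p = rak_mspace_malloc(pool, 4096);
       size_t now  = rak_mspace_footprint(pool);   // bytes held by this space
       size_t peak = mspace_max_footprint(pool);   // high-water mark
       (void) now; (void) peak;
       rak_mspace_free(pool, p);
       rak_mspace_trim(pool, 0);                   // ask to return unused memory
       rak_destroy_mspace(pool);
     }
*/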
03781
03782
03783 #if !NO_MALLINFO
03784 struct mallinfo rak_mspace_mallinfo(mspace msp) {
03785 mstate ms = (mstate)msp;
03786 if (!ok_magic(ms)) {
03787 USAGE_ERROR_ACTION(ms,ms);
03788 }
03789 return internal_mallinfo(ms);
03790 }
03791 #endif
03792
03793 size_t rak_mspace_usable_size(void* mem) {
03794 if (mem != 0) {
03795 mchunkptr p = mem2chunk(mem);
03796 if (is_inuse(p))
03797 return chunksize(p) - overhead_for(p);
03798 }
03799 return 0;
03800 }
03801
03802 int rak_mspace_mallopt(int param_number, int value) {
03803 return change_mparam(param_number, value);
03804 }
03805
03806 #endif
03807
03808
03809
04097 #endif // _RAKNET_SUPPORT_DL_MALLOC