4
* This is a very fast storage allocator. It allocates blocks of a small
5
* number of different sizes, and keeps free lists of each size. Blocks
6
* that don't exactly fit are passed up to the next larger size. Blocks
7
* over a certain size are directly allocated from the system.
9
* Copyright (c) 1983 Regents of the University of California.
10
* Copyright (c) 1996-1997 Sun Microsystems, Inc.
11
* Copyright (c) 1998-1999 by Scriptics Corporation.
13
* Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
15
* See the file "license.terms" for information on usage and redistribution of
16
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
18
* RCS: @(#) $Id: tclAlloc.c,v 1.25 2007/06/29 03:17:05 das Exp $
22
* Windows and Unix use an alternative allocator when building with threads
23
* that has significantly reduced lock contention.
27
#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC)
38
* We should really make use of AC_CHECK_TYPE(caddr_t) here, but it can wait
39
* until Tcl uses config.h properly.
42
#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
43
typedef unsigned long caddr_t;
47
* Alignment for allocated memory.
50
#if defined(__APPLE__)
57
* The overhead on a block is at least 8 bytes. When free, this space contains
58
* a pointer to the next free block, and the bottom two bits must be zero.
59
* When in use, the first byte is set to MAGIC, and the second byte is the
60
* size index. The remaining bytes are for alignment. If range checking is
61
* enabled then a second word holds the size of the requested block, less 1,
62
* rounded up to a multiple of sizeof(RMAGIC). The order of elements is
63
* critical: ov.magic must overlay the low order bits of ov.next, and ov.magic
64
* can not be a valid ov.next bit pattern.
68
union overhead *next; /* when free */
69
unsigned char padding[ALLOCALIGN]; /* align struct to ALLOCALIGN bytes */
71
unsigned char magic0; /* magic number */
72
unsigned char index; /* bucket # */
73
unsigned char unused; /* unused */
74
unsigned char magic1; /* other magic number */
76
unsigned short rmagic; /* range magic number */
77
unsigned long size; /* actual block size */
78
unsigned short unused2; /* padding to 8-byte align */
81
#define overMagic0 ovu.magic0
82
#define overMagic1 ovu.magic1
83
#define bucketIndex ovu.index
84
#define rangeCheckMagic ovu.rmagic
85
#define realBlockSize ovu.size
89
#define MAGIC 0xef /* magic # on accounting info */
90
#define RMAGIC 0x5555 /* magic # on range info */
93
#define RSLOP sizeof (unsigned short)
98
#define OVERHEAD (sizeof(union overhead) + RSLOP)
101
* Macro to make it easier to refer to the end-of-block guard magic.
104
#define BLOCK_END(overPtr) \
105
(*(unsigned short *)((caddr_t)((overPtr) + 1) + (overPtr)->realBlockSize))
108
* nextf[i] is the pointer to the next free block of size 2^(i+3). The
109
* smallest allocatable block is MINBLOCK bytes. The overhead information
110
* precedes the data area returned to the user.
113
#define MINBLOCK ((sizeof(union overhead) + (ALLOCALIGN-1)) & ~(ALLOCALIGN-1))
114
#define NBUCKETS (13 - (MINBLOCK >> 4))
115
#define MAXMALLOC (1<<(NBUCKETS+2))
116
static union overhead *nextf[NBUCKETS];
119
* The following structure is used to keep track of all system memory
120
* currently owned by Tcl. When finalizing, all this memory will be returned
125
struct block *nextPtr; /* Linked list. */
126
struct block *prevPtr; /* Linked list for big blocks, ensures 8-byte
127
* alignment for suballocated blocks. */
130
static struct block *blockList; /* Tracks the suballocated blocks. */
131
static struct block bigBlocks={ /* Big blocks aren't suballocated. */
132
&bigBlocks, &bigBlocks
136
* The allocator is protected by a special mutex that must be explicitly
137
* initialized. Furthermore, because Tcl_Alloc may be used before anything else
138
* in Tcl, we make this module self-initializing after all with the allocInit
143
static Tcl_Mutex *allocMutexPtr;
145
static int allocInit = 0;
150
* numMallocs[i] is the difference between the number of mallocs and frees for
151
* a given block size.
154
static unsigned int numMallocs[NBUCKETS+1];
158
#if defined(DEBUG) || defined(RCHECK)
159
#define ASSERT(p) if (!(p)) Tcl_Panic(# p)
160
#define RANGE_ASSERT(p) if (!(p)) Tcl_Panic(# p)
163
#define RANGE_ASSERT(p)
167
* Prototypes for functions used only in this file.
170
static void MoreCore(int bucket);
173
*-------------------------------------------------------------------------
177
* Initialize the memory system.
183
* Initialize the mutex used to serialize allocations.
185
*-------------------------------------------------------------------------
194
allocMutexPtr = Tcl_GetAllocMutex();
200
*-------------------------------------------------------------------------
202
* TclFinalizeAllocSubsystem --
204
* Release all resources being used by this subsystem, including
205
* aggressively freeing all memory allocated by TclpAlloc() that has not
206
* yet been released with TclpFree().
208
* After this function is called, all memory allocated with TclpAlloc()
209
* should be considered unusable.
215
* This subsystem is self-initializing, since memory can be allocated
216
* before Tcl is formally initialized. After this call, this subsystem
217
* has been reset to its initial state and is usable again.
219
*-------------------------------------------------------------------------
223
TclFinalizeAllocSubsystem(void)
226
struct block *blockPtr, *nextPtr;
228
Tcl_MutexLock(allocMutexPtr);
229
for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
230
nextPtr = blockPtr->nextPtr;
231
TclpSysFree(blockPtr);
235
for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
236
nextPtr = blockPtr->nextPtr;
237
TclpSysFree(blockPtr);
240
bigBlocks.nextPtr = &bigBlocks;
241
bigBlocks.prevPtr = &bigBlocks;
243
for (i=0 ; i<NBUCKETS ; i++) {
252
Tcl_MutexUnlock(allocMutexPtr);
256
*----------------------------------------------------------------------
260
* Allocate more memory.
268
*----------------------------------------------------------------------
273
unsigned int numBytes) /* Number of bytes to allocate. */
275
register union overhead *overPtr;
276
register long bucket;
277
register unsigned amount;
278
struct block *bigBlockPtr;
282
* We have to make the "self initializing" because Tcl_Alloc may be
283
* used before any other part of Tcl. E.g., see main() for tclsh!
288
Tcl_MutexLock(allocMutexPtr);
291
* First the simple case: we simply allocate big blocks directly.
294
if (numBytes + OVERHEAD >= MAXMALLOC) {
295
bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
296
(sizeof(struct block) + OVERHEAD + numBytes), 0);
297
if (bigBlockPtr == NULL) {
298
Tcl_MutexUnlock(allocMutexPtr);
301
bigBlockPtr->nextPtr = bigBlocks.nextPtr;
302
bigBlocks.nextPtr = bigBlockPtr;
303
bigBlockPtr->prevPtr = &bigBlocks;
304
bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;
306
overPtr = (union overhead *) (bigBlockPtr + 1);
307
overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
308
overPtr->bucketIndex = 0xff;
310
numMallocs[NBUCKETS]++;
315
* Record allocated size of block and bound space with magic numbers.
318
overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
319
overPtr->rangeCheckMagic = RMAGIC;
320
BLOCK_END(overPtr) = RMAGIC;
323
Tcl_MutexUnlock(allocMutexPtr);
324
return (void *)(overPtr+1);
328
* Convert amount of memory requested into closest block size stored in
329
* hash buckets which satisfies request. Account for space used per block
333
amount = MINBLOCK; /* size of first bucket */
334
bucket = MINBLOCK >> 4;
336
while (numBytes + OVERHEAD > amount) {
339
Tcl_MutexUnlock(allocMutexPtr);
344
ASSERT(bucket < NBUCKETS);
347
* If nothing in hash bucket right now, request more memory from the
351
if ((overPtr = nextf[bucket]) == NULL) {
353
if ((overPtr = nextf[bucket]) == NULL) {
354
Tcl_MutexUnlock(allocMutexPtr);
360
* Remove from linked list
363
nextf[bucket] = overPtr->next;
364
overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
365
overPtr->bucketIndex = (unsigned char) bucket;
368
numMallocs[bucket]++;
373
* Record allocated size of block and bound space with magic numbers.
376
overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
377
overPtr->rangeCheckMagic = RMAGIC;
378
BLOCK_END(overPtr) = RMAGIC;
381
Tcl_MutexUnlock(allocMutexPtr);
382
return ((char *)(overPtr + 1));
386
*----------------------------------------------------------------------
390
* Allocate more memory to the indicated bucket.
392
* Assumes Mutex is already held.
398
* Attempts to get more memory from the system.
400
*----------------------------------------------------------------------
405
int bucket) /* What bucket to allocat to. */
407
register union overhead *overPtr;
408
register long size; /* size of desired block */
409
long amount; /* amount to allocate */
410
int numBlocks; /* how many blocks we get */
411
struct block *blockPtr;
414
* sbrk_size <= 0 only for big, FLUFFY, requests (about 2^30 bytes on a
415
* VAX, I think) or for a negative arg.
418
size = 1 << (bucket + 3);
422
numBlocks = amount / size;
423
ASSERT(numBlocks*size == amount);
425
blockPtr = (struct block *) TclpSysAlloc((unsigned)
426
(sizeof(struct block) + amount), 1);
428
if (blockPtr == NULL) {
431
blockPtr->nextPtr = blockList;
432
blockList = blockPtr;
434
overPtr = (union overhead *) (blockPtr + 1);
437
* Add new memory allocated to that on free list for this hash bucket.
440
nextf[bucket] = overPtr;
441
while (--numBlocks > 0) {
442
overPtr->next = (union overhead *)((caddr_t)overPtr + size);
443
overPtr = (union overhead *)((caddr_t)overPtr + size);
445
overPtr->next = NULL;
449
*----------------------------------------------------------------------
461
*----------------------------------------------------------------------
466
char *oldPtr) /* Pointer to memory to free. */
469
register union overhead *overPtr;
470
struct block *bigBlockPtr;
472
if (oldPtr == NULL) {
476
Tcl_MutexLock(allocMutexPtr);
477
overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));
479
ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */
480
ASSERT(overPtr->overMagic1 == MAGIC);
481
if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
482
Tcl_MutexUnlock(allocMutexPtr);
486
RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
487
RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
488
size = overPtr->bucketIndex;
491
numMallocs[NBUCKETS]--;
494
bigBlockPtr = (struct block *) overPtr - 1;
495
bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
496
bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
497
TclpSysFree(bigBlockPtr);
499
Tcl_MutexUnlock(allocMutexPtr);
502
ASSERT(size < NBUCKETS);
503
overPtr->next = nextf[size]; /* also clobbers overMagic */
504
nextf[size] = overPtr;
510
Tcl_MutexUnlock(allocMutexPtr);
514
*----------------------------------------------------------------------
526
*----------------------------------------------------------------------
531
char *oldPtr, /* Pointer to alloced block. */
532
unsigned int numBytes) /* New size of memory. */
535
union overhead *overPtr;
536
struct block *bigBlockPtr;
538
unsigned long maxSize;
540
if (oldPtr == NULL) {
541
return TclpAlloc(numBytes);
544
Tcl_MutexLock(allocMutexPtr);
546
overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));
548
ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */
549
ASSERT(overPtr->overMagic1 == MAGIC);
550
if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
551
Tcl_MutexUnlock(allocMutexPtr);
555
RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
556
RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
557
i = overPtr->bucketIndex;
560
* If the block isn't in a bin, just realloc it.
564
struct block *prevPtr, *nextPtr;
565
bigBlockPtr = (struct block *) overPtr - 1;
566
prevPtr = bigBlockPtr->prevPtr;
567
nextPtr = bigBlockPtr->nextPtr;
568
bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
569
sizeof(struct block) + OVERHEAD + numBytes);
570
if (bigBlockPtr == NULL) {
571
Tcl_MutexUnlock(allocMutexPtr);
575
if (prevPtr->nextPtr != bigBlockPtr) {
577
* If the block has moved, splice the new block into the list
578
* where the old block used to be.
581
prevPtr->nextPtr = bigBlockPtr;
582
nextPtr->prevPtr = bigBlockPtr;
585
overPtr = (union overhead *) (bigBlockPtr + 1);
588
numMallocs[NBUCKETS]++;
593
* Record allocated size of block and update magic number bounds.
596
overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
597
BLOCK_END(overPtr) = RMAGIC;
600
Tcl_MutexUnlock(allocMutexPtr);
601
return (char *)(overPtr+1);
603
maxSize = 1 << (i+3);
605
if (numBytes+OVERHEAD > maxSize) {
607
} else if (i>0 && numBytes+OVERHEAD < maxSize/2) {
614
Tcl_MutexUnlock(allocMutexPtr);
616
newPtr = TclpAlloc(numBytes);
617
if (newPtr == NULL) {
621
if (maxSize < numBytes) {
624
memcpy(newPtr, oldPtr, (size_t) numBytes);
630
* Ok, we don't have to copy, it fits as-is
634
overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
635
BLOCK_END(overPtr) = RMAGIC;
638
Tcl_MutexUnlock(allocMutexPtr);
643
*----------------------------------------------------------------------
647
* Prints two lines of numbers, one showing the length of the free list
648
* for each size category, the second showing the number of mallocs -
649
* frees for each size category.
657
*----------------------------------------------------------------------
663
char *s) /* Where to write info. */
666
register union overhead *overPtr;
667
int totalFree = 0, totalUsed = 0;
669
Tcl_MutexLock(allocMutexPtr);
671
fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
672
for (i = 0; i < NBUCKETS; i++) {
673
for (j=0, overPtr=nextf[i]; overPtr; overPtr=overPtr->next, j++) {
674
fprintf(stderr, " %d", j);
676
totalFree += j * (1 << (i + 3));
679
fprintf(stderr, "\nused:\t");
680
for (i = 0; i < NBUCKETS; i++) {
681
fprintf(stderr, " %d", numMallocs[i]);
682
totalUsed += numMallocs[i] * (1 << (i + 3));
685
fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
686
totalUsed, totalFree);
687
fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %d\n",
688
MAXMALLOC, numMallocs[NBUCKETS]);
690
Tcl_MutexUnlock(allocMutexPtr);
694
#else /* !USE_TCLALLOC */
697
*----------------------------------------------------------------------
701
* Allocate more memory.
709
*----------------------------------------------------------------------
714
unsigned int numBytes) /* Number of bytes to allocate. */
716
return (char*) malloc(numBytes);
720
*----------------------------------------------------------------------
732
*----------------------------------------------------------------------
737
char *oldPtr) /* Pointer to memory to free. */
744
*----------------------------------------------------------------------
756
*----------------------------------------------------------------------
761
char *oldPtr, /* Pointer to alloced block. */
762
unsigned int numBytes) /* New size of memory. */
764
return (char*) realloc(oldPtr, numBytes);
767
#endif /* !USE_TCLALLOC */
768
#endif /* !TCL_THREADS */