 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator. It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size. Blocks that
 * don't exactly fit are passed up to the next larger size. In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 *
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
49
static char *memtop = NULL; /* PWP: top of current memory */
50
static char *membot = NULL; /* PWP: bottom of allocatable memory */
57
#if defined(_VMS_POSIX) || defined(_AMIGA_MEMORY)
62
# define malloc fmalloc
64
# define calloc fcalloc
65
# define realloc frealloc
68
#if defined(AIX51) || defined(AIX43)
81
* Lots of os routines are busted and try to free invalid pointers.
82
* Although our free routine is smart enough and it will pick bad
83
* pointers most of the time, in cases where we know we are going to get
84
* a bad pointer, we'd rather leak.
91
typedef unsigned char U_char; /* we don't really have signed chars */
92
typedef unsigned int U_int;
93
typedef unsigned short U_short;
94
typedef unsigned long U_long;
98
* The overhead on a block is at least 4 bytes. When free, this space
99
* contains a pointer to the next free block, and the bottom two bits must
100
* be zero. When in use, the first byte is set to MAGIC, and the second
101
* byte is the size index. The remaining bytes are for alignment.
102
* If range checking is enabled and the size of the block fits
103
* in two bytes, then the top two bytes hold the size of the requested block
104
* plus the range checking words, and the header word MINUS ONE.
108
#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
111
union overhead *ov_next; /* when free */
113
U_char ovu_magic; /* magic number */
114
U_char ovu_index; /* bucket # */
116
U_short ovu_size; /* actual block size */
117
U_int ovu_rmagic; /* range magic number */
120
#define ov_magic ovu.ovu_magic
121
#define ov_index ovu.ovu_index
122
#define ov_size ovu.ovu_size
123
#define ov_rmagic ovu.ovu_rmagic
126
#define MAGIC 0xfd /* magic # on accounting info */
127
#define RMAGIC 0x55555555 /* magic # on range info */
129
#define RSLOP sizeof (U_int)
138
* nextf[i] is the pointer to the next free block of size 2^(i+3). The
139
* smallest allocatable block is 8 bytes. The overhead information
140
* precedes the data area returned to the user.
142
#define NBUCKETS ((sizeof(long) << 3) - 3)
143
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
146
* nmalloc[i] is the difference between the number of mallocs and frees
147
* for a given block size.
149
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;
152
static int findbucket __P((union overhead *, int));
153
static void morecore __P((int));
158
# define CHECK(a, str, p) \
161
xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot); \
165
# define CHECK(a, str, p) \
168
xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot); \
175
register size_t nbytes;
178
register union overhead *p;
179
register int bucket = 0;
180
register unsigned shiftr;
183
* Convert amount of memory requested into closest block size stored in
184
* hash buckets which satisfies request. Account for space used per block
189
* SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
190
* so we get one more...
191
* From Michael Schroeder: This is not true. It depends on the
192
* timezone string. In Europe it can overwrite the 13th byte on a
194
* So we punt and we always allocate an extra byte.
199
nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
200
shiftr = (nbytes - 1) >> 2;
202
/* apart from this loop, this is O(1) */
203
while ((shiftr >>= 1) != 0)
206
* If nothing in hash bucket right now, request more memory from the
209
if (nextf[bucket] == NULL)
211
if ((p = (union overhead *) nextf[bucket]) == NULL) {
217
xprintf(CGETS(19, 1, "nbytes=%d: Out of memory\n"), nbytes);
221
return ((memalign_t) 0);
223
/* remove from linked list */
224
nextf[bucket] = nextf[bucket]->ov_next;
226
p->ov_index = bucket;
230
* Record allocated size of block and bound space with magic numbers.
232
p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
233
p->ov_rmagic = RMAGIC;
234
*((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
236
return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
239
return ((memalign_t) 0);
241
return ((memalign_t) 0);
246
* Allocate more memory to the indicated bucket.
252
register union overhead *op;
253
register int rnu; /* 2^rnu bytes will be requested */
254
register int nblks; /* become nblks blocks of the desired size */
260
* Insure memory is allocated on a page boundary. Should make getpageize
263
op = (union overhead *) sbrk(0);
264
memtop = (char *) op;
267
if ((long) op & 0x3ff) {
268
memtop = (char *) sbrk((int) (1024 - ((long) op & 0x3ff)));
269
memtop += (long) (1024 - ((long) op & 0x3ff));
272
/* take 2k unless the block is bigger than that */
273
rnu = (bucket <= 8) ? 11 : bucket + 3;
274
nblks = 1 << (rnu - (bucket + 3)); /* how many blocks to get */
275
memtop = (char *) sbrk(1 << rnu); /* PWP */
276
op = (union overhead *) memtop;
280
memtop += (long) (1 << rnu);
282
* Round up to minimum allocation size boundary and deduct from block count
285
if (((U_long) op) & ROUNDUP) {
286
op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
290
* Add new memory allocated to that on free list for this hash bucket.
293
siz = 1 << (bucket + 3);
294
while (--nblks > 0) {
295
op->ov_next = (union overhead *) (((caddr_t) op) + siz);
296
op = (union overhead *) (((caddr_t) op) + siz);
309
register union overhead *op;
312
* the don't free flag is there so that we avoid os bugs in routines
313
* that free invalid pointers!
315
if (cp == NULL || dont_free)
317
CHECK(!memtop || !membot,
318
CGETS(19, 2, "free(%lx) called before any allocations."), cp);
319
CHECK(cp > (ptr_t) memtop,
320
CGETS(19, 3, "free(%lx) above top of memory."), cp);
321
CHECK(cp < (ptr_t) membot,
322
CGETS(19, 4, "free(%lx) below bottom of memory."), cp);
323
op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
324
CHECK(op->ov_magic != MAGIC,
325
CGETS(19, 5, "free(%lx) bad block."), cp);
328
if (op->ov_index <= 13)
329
CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
330
CGETS(19, 6, "free(%lx) bad range check."), cp);
332
CHECK(op->ov_index >= NBUCKETS,
333
CGETS(19, 7, "free(%lx) bad block index."), cp);
335
op->ov_next = nextf[size];
351
register char *cp, *scp;
354
scp = cp = (char *) xmalloc((size_t) i);
360
return ((memalign_t) scp);
363
return ((memalign_t) 0);
365
return ((memalign_t) 0);
370
* When a program attempts "storage compaction" as mentioned in the
371
* old malloc man page, it realloc's an already freed block. Usually
372
* this is the last block it freed; occasionally it might be farther
373
* back. We have to search all the free lists for the block in order
374
* to determine its bucket: 1st we make one pass thru the lists
375
* checking only the first block in each; if that fails we search
376
* ``realloc_srchlen'' blocks in each list for a match (the variable
377
* is extern so the caller can modify it). If that fails we just copy
378
* however many bytes was given to realloc() and hope it's not huge.
381
/* 4 should be plenty, -1 =>'s whole list */
382
static int realloc_srchlen = 4;
398
return (malloc(nbytes));
399
op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
400
if (op->ov_magic == MAGIC) {
406
* Already free, doing "compaction".
408
* Search for the old block of memory on the free list. First, check the
409
* most common case (last element free'd), then (this failing) the last
410
* ``realloc_srchlen'' items free'd. If all lookups fail, then assume
411
* the size of the memory block being realloc'd is the smallest
414
if ((i = findbucket(op, 1)) < 0 &&
415
(i = findbucket(op, realloc_srchlen)) < 0)
418
onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);
420
/* avoid the copy if same size block */
421
if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
422
(onb > (U_int) (1 << (i + 2)))) {
424
/* JMR: formerly this wasn't updated ! */
425
nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
426
*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
427
op->ov_rmagic = RMAGIC;
428
op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
430
return ((memalign_t) cp);
432
if ((res = malloc(nbytes)) == NULL)
433
return ((memalign_t) NULL);
434
if (cp != res) { /* common optimization */
436
* christos: this used to copy nbytes! It should copy the
437
* smaller of the old and new size
439
onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
440
(void) memmove((ptr_t) res, (ptr_t) cp,
441
(size_t) (onb < nbytes ? onb : nbytes));
445
return ((memalign_t) res);
448
return ((memalign_t) 0);
450
return ((memalign_t) 0);
458
* Search ``srchlen'' elements of each free list for a block whose
459
* header starts at ``freep''. If srchlen is -1 search the whole list.
460
* Return bucket number, or -1 if not found.
463
findbucket(freep, srchlen)
464
union overhead *freep;
467
register union overhead *p;
470
for (i = 0; i < NBUCKETS; i++) {
472
for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
484
#else /* SYSMALLOC */
487
** ``Protected versions'' of malloc, realloc, calloc, and free
491
** 1. malloc(0) is bad
493
** 3. realloc(0, n) is bad
494
** 4. realloc(n, 0) is bad
496
** Also we call our error routine if we run out of memory.
508
membot = (char*) sbrk(0);
509
#endif /* !NO_SBRK */
511
if ((ptr = malloc(n)) == (ptr_t) 0) {
516
if (memtop < ((char *) ptr) + n)
517
memtop = ((char *) ptr) + n;
519
membot = (char*) ptr;
521
return ((memalign_t) ptr);
535
membot = (char*) sbrk(0);
538
if ((ptr = (p ? realloc(p, n) : malloc(n))) == (ptr_t) 0) {
543
if (memtop < ((char *) ptr) + n)
544
memtop = ((char *) ptr) + n;
546
membot = (char*) ptr;
548
return ((memalign_t) ptr);
563
membot = (char*) sbrk(0);
566
if ((ptr = malloc(n)) == (ptr_t) 0) {
578
if (memtop < ((char *) ptr) + n)
579
memtop = ((char *) ptr) + n;
581
membot = (char*) ptr;
584
return ((memalign_t) ptr);
595
#endif /* SYSMALLOC */
598
* mstats - print out statistics about malloc
600
* Prints two lines of numbers, one showing the length of the free list
601
* for each size category, the second showing the number of mallocs -
602
* frees for each size category.
612
register union overhead *p;
613
int totfree = 0, totused = 0;
615
xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
616
for (i = 0; i < NBUCKETS; i++) {
617
for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
620
totfree += j * (1 << (i + 3));
622
xprintf(CGETS(19, 9, "\nused:\t"));
623
for (i = 0; i < NBUCKETS; i++) {
624
xprintf(" %4u", nmalloc[i]);
625
totused += nmalloc[i] * (1 << (i + 3));
627
xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
629
xprintf(CGETS(19, 11,
630
"\tAllocated memory from 0x%lx to 0x%lx. Real top at 0x%lx\n"),
631
(unsigned long) membot, (unsigned long) memtop,
632
(unsigned long) sbrk(0));
635
memtop = (char *) sbrk(0);
636
#endif /* !NO_SBRK */
637
xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
638
(unsigned long) membot, (unsigned long) memtop,
639
(unsigned long) (memtop - membot));
640
#endif /* SYSMALLOC */