/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */
50
#include "jsarena.h" /* Added by JSIFY */
51
#include "jsutil.h" /* Added by JSIFY */
54
#ifdef JS_ARENAMETER
/* Global list of per-pool stats records, for JS_DumpArenaStats. */
static JSArenaStats *arena_stats_list;

#define COUNT(pool,what)  (pool)->stats.what++
#else
#define COUNT(pool,what)  /* nothing */
#endif

/* Default allocation alignment when JS_InitArenaPool is passed align == 0. */
#define JS_ARENA_DEFAULT_ALIGN  sizeof(double)
64
JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size, size_t align)
67
align = JS_ARENA_DEFAULT_ALIGN;
68
pool->mask = JS_BITMASK(JS_CeilingLog2(align));
69
pool->first.next = NULL;
70
pool->first.base = pool->first.avail = pool->first.limit =
71
JS_ARENA_ALIGN(pool, &pool->first + 1);
72
pool->current = &pool->first;
73
pool->arenasize = size;
75
memset(&pool->stats, 0, sizeof pool->stats);
76
pool->stats.name = strdup(name);
77
pool->stats.next = arena_stats_list;
78
arena_stats_list = &pool->stats;
/*
 * An allocation that consumes more than pool->arenasize also has a header
 * pointing back to its previous arena's next member.  This header is not
 * included in [a->base, a->limit), so its space can't be wrongly claimed.
 *
 * As the header is a pointer, it must be well-aligned.  If pool->mask is
 * greater than or equal to POINTER_MASK, the header just preceding a->base
 * for an oversized arena a is well-aligned, because a->base is well-aligned.
 * However, we may need to add more space to pad the JSArena ** back-pointer
 * so that it lies just behind a->base, because a might not be aligned such
 * that (jsuword)(a + 1) is on a pointer boundary.
 *
 * By how much must we pad?  Let M be the alignment modulus for pool and P
 * the modulus for a pointer.  Given M >= P, the base of an oversized arena
 * that satisfies M is well-aligned for P.
 *
 * On the other hand, if M < P, we must include enough space in the header
 * size to align the back-pointer on a P boundary so that it can be found by
 * subtracting P from a->base.  This means a->base must be on a P boundary,
 * even though subsequent allocations from a may be aligned on a lesser (M)
 * boundary.  Given powers of two M and P as above, the extra space needed
 * when M < P is P-M or POINTER_MASK - pool->mask.
 *
 * The size of a header including padding is given by the HEADER_SIZE macro,
 * below, for any pool (for any value of M).
 *
 * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
 * HEADER_BASE_MASK(pool).
 *
 * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
 * allocation at p.  By definition, p must be a->base for the arena a that
 * contains p.  GET_HEADER and SET_HEADER operate on an oversized arena a, in
 * the case of SET_HEADER with back-pointer ap.
 */
#define POINTER_MASK            ((jsuword)(JS_ALIGN_OF_POINTER - 1))
#define HEADER_SIZE(pool)       (sizeof(JSArena **)                           \
                                 + (((pool)->mask < POINTER_MASK)             \
                                    ? POINTER_MASK - (pool)->mask             \
                                    : 0))
#define HEADER_BASE_MASK(pool)  ((pool)->mask | POINTER_MASK)
#define PTR_TO_HEADER(pool,p)   (JS_ASSERT(((jsuword)(p)                      \
                                            & HEADER_BASE_MASK(pool))         \
                                           == 0),                             \
                                 (JSArena ***)(p) - 1)
#define GET_HEADER(pool,a)      (*PTR_TO_HEADER(pool, (a)->base))
#define SET_HEADER(pool,a,ap)   (*PTR_TO_HEADER(pool, (a)->base) = (ap))
129
JS_PUBLIC_API(void *)
130
JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
132
JSArena **ap, *a, *b;
133
jsuword extra, hdrsz, gross;
137
* Search pool from current forward till we find or make enough space.
139
* NB: subtract nb from a->limit in the loop condition, instead of adding
140
* nb to a->avail, to avoid overflowing a 32-bit address space (possible
141
* when running a 32-bit program on a 64-bit system where the kernel maps
142
* the heap up against the top of the 32-bit address space).
144
* Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
145
* https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
147
JS_ASSERT((nb & pool->mask) == 0);
148
for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
152
/* Not enough space in pool, so we must malloc. */
153
extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
154
hdrsz = sizeof *a + extra + pool->mask;
155
gross = hdrsz + JS_MAX(nb, pool->arenasize);
158
b = (JSArena *) malloc(gross);
162
b->limit = (jsuword)b + gross;
163
JS_COUNT_ARENA(pool,++);
164
COUNT(pool, nmallocs);
166
/* If oversized, store ap in the header, just before a->base. */
168
JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
171
((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
172
SET_HEADER(pool, a, ap);
174
a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
178
a = *ap; /* move to next arena */
181
p = (void *)a->avail;
183
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
187
JS_PUBLIC_API(void *)
188
JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
190
JSArena **ap, *a, *b;
191
jsuword boff, aoff, extra, hdrsz, gross;
194
* Use the oversized-single-allocation header to avoid searching for ap.
195
* See JS_ArenaAllocate, the SET_HEADER call.
197
if (size > pool->arenasize) {
198
ap = *PTR_TO_HEADER(pool, p);
201
ap = &pool->first.next;
202
while ((a = *ap) != pool->current)
206
JS_ASSERT(a->base == (jsuword)p);
207
boff = JS_UPTRDIFF(a->base, a);
208
aoff = JS_ARENA_ALIGN(pool, size + incr);
209
JS_ASSERT(aoff > pool->arenasize);
210
extra = HEADER_SIZE(pool); /* oversized header holds ap */
211
hdrsz = sizeof *a + extra + pool->mask; /* header and alignment slop */
212
gross = hdrsz + aoff;
213
JS_ASSERT(gross > aoff);
214
a = (JSArena *) realloc(a, gross);
218
pool->stats.nreallocs++;
222
/* Oops, realloc moved the allocation: update other pointers to a. */
223
if (pool->current == *ap)
226
if (b && b->avail - b->base > pool->arenasize) {
227
JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
228
SET_HEADER(pool, b, &a->next);
231
/* Now update *ap, the next link of the arena before a. */
235
a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
236
a->limit = (jsuword)a + gross;
237
a->avail = a->base + aoff;
238
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
240
/* Check whether realloc aligned differently, and copy if necessary. */
241
if (boff != JS_UPTRDIFF(a->base, a))
242
memmove((void *)a->base, (char *)a + boff, size);
244
/* Store ap in the oversized-load arena header. */
245
SET_HEADER(pool, a, ap);
246
return (void *)a->base;
249
JS_PUBLIC_API(void *)
250
JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
255
* If p points to an oversized allocation, it owns an entire arena, so we
256
* can simply realloc the arena.
258
if (size > pool->arenasize)
259
return JS_ArenaRealloc(pool, p, size, incr);
261
JS_ARENA_ALLOCATE(newp, pool, size + incr);
263
memcpy(newp, p, size);
268
* Free tail arenas linked after head, which may not be the true list head.
269
* Reset pool->current to point to head in case it pointed at a tail arena.
272
FreeArenaList(JSArenaPool *pool, JSArena *head)
283
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
286
} while ((a = a->next) != NULL);
293
JS_COUNT_ARENA(pool,--);
295
} while ((a = *ap) != NULL);
297
pool->current = head;
301
JS_ArenaRelease(JSArenaPool *pool, char *mark)
305
for (a = &pool->first; a; a = a->next) {
306
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
308
if (JS_UPTRDIFF(mark, a->base) <= JS_UPTRDIFF(a->avail, a->base)) {
309
a->avail = JS_ARENA_ALIGN(pool, mark);
310
JS_ASSERT(a->avail <= a->limit);
311
FreeArenaList(pool, a);
318
JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size)
320
JSArena **ap, *a, *b;
324
* If the allocation is oversized, it consumes an entire arena, and it has
325
* a header just before the allocation pointing back to its predecessor's
326
* next member. Otherwise, we have to search pool for a.
328
if (size > pool->arenasize) {
329
ap = *PTR_TO_HEADER(pool, p);
332
q = (jsuword)p + size;
333
q = JS_ARENA_ALIGN(pool, q);
334
ap = &pool->first.next;
335
while ((a = *ap) != NULL) {
336
JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
340
* If a is consumed by the allocation at p, we can free it to
343
if (a->base == (jsuword)p)
347
* We can't free a, but we can "retract" its avail cursor --
348
* whether there are others after it in pool.
350
a->avail = (jsuword)p;
358
* At this point, a is doomed, so ensure that pool->current doesn't point
359
* at it. We must preserve LIFO order of mark/release cursors, so we use
360
* the oversized-allocation arena's back pointer (or if not oversized, we
361
* use the result of searching the entire pool) to compute the address of
362
* the arena that precedes a.
364
if (pool->current == a)
365
pool->current = (JSArena *) ((char *)ap - offsetof(JSArena, next));
368
* This is a non-LIFO deallocation, so take care to fix up a->next's back
369
* pointer in its header, if a->next is oversized.
372
if (b && b->avail - b->base > pool->arenasize) {
373
JS_ASSERT(GET_HEADER(pool, b) == &a->next);
374
SET_HEADER(pool, b, ap);
377
JS_COUNT_ARENA(pool,--);
382
JS_FreeArenaPool(JSArenaPool *pool)
384
FreeArenaList(pool, &pool->first);
385
COUNT(pool, ndeallocs);
389
JS_FinishArenaPool(JSArenaPool *pool)
391
FreeArenaList(pool, &pool->first);
394
JSArenaStats *stats, **statsp;
396
if (pool->stats.name)
397
free(pool->stats.name);
398
for (statsp = &arena_stats_list; (stats = *statsp) != 0;
399
statsp = &stats->next) {
400
if (stats == &pool->stats) {
401
*statsp = stats->next;
415
JS_ArenaShutDown(void)
421
JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
423
pool->stats.nallocs++;
424
pool->stats.nbytes += nb;
425
if (nb > pool->stats.maxalloc)
426
pool->stats.maxalloc = nb;
427
pool->stats.variance += nb * nb;
431
JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
433
pool->stats.ninplace++;
437
JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
439
pool->stats.ngrows++;
440
pool->stats.nbytes += incr;
441
pool->stats.variance -= size * size;
443
if (size > pool->stats.maxalloc)
444
pool->stats.maxalloc = size;
445
pool->stats.variance += size * size;
449
JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
451
pool->stats.nreleases++;
455
JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
457
pool->stats.nfastrels++;
464
JS_DumpArenaStats(FILE *fp)
467
uint32 nallocs, nbytes;
468
double mean, variance, sigma;
470
for (stats = arena_stats_list; stats; stats = stats->next) {
471
nallocs = stats->nallocs;
473
nbytes = stats->nbytes;
474
mean = (double)nbytes / nallocs;
475
variance = stats->variance * nallocs - nbytes * nbytes;
476
if (variance < 0 || nallocs == 1)
479
variance /= nallocs * (nallocs - 1);
480
sigma = sqrt(variance);
482
mean = variance = sigma = 0;
485
fprintf(fp, "\n%s allocation statistics:\n", stats->name);
486
fprintf(fp, " number of arenas: %u\n", stats->narenas);
487
fprintf(fp, " number of allocations: %u\n", stats->nallocs);
488
fprintf(fp, " number of free arena reclaims: %u\n", stats->nreclaims);
489
fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
490
fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
491
fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
492
fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
493
fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
494
fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
495
fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
496
fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
497
fprintf(fp, " mean allocation size: %g\n", mean);
498
fprintf(fp, " standard deviation: %g\n", sigma);
499
fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
502
#endif /* JS_ARENAMETER */