/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
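/* Illustrative sketch only, not collector code: a gc_inline.h-style     */
/* client allocator can pop objects straight off these exported lists.   */
/* The helper name, the granule index lg, and the use of obj_link() are  */
/* assumptions about the free-list layout described above.               */
#if 0
  static void * example_inline_alloc(size_t lg)  /* hypothetical helper */
  {
      void **flh = &GC_objfreelist_ptr[lg]; /* per-size free-list head  */
      void *op = *flh;
      if (op != 0) {
          *flh = obj_link(op);  /* unlink the first object */
          obj_link(op) = 0;
          return op;
      }
      /* Empty list: fall back to the general allocator. */
      return GC_generic_malloc(GRANULES_TO_BYTES(lg), NORMAL);
  }
#endif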
void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
/* Change the size of the block pointed to by p to contain at least   */
/* lb bytes.  The object may be (and quite likely will be) moved.     */
/* The kind (e.g. atomic) is the same as that of the old.             */
/* Shrinking of large blocks is not implemented well.                 */
void * GC_realloc(void * p, size_t lb)
    struct hblk * h;
    hdr * hhdr;
    size_t sz;      /* Current size in bytes  */
    size_t orig_sz; /* Original sz in bytes   */
    int obj_kind;
    if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    orig_sz = sz;
    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
        hhdr -> hb_sz = sz;
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
#       ifdef MARK_BIT_PER_OBJ
          GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#       else
          GC_ASSERT(hhdr -> hb_large_block &&
                    hhdr -> hb_map[ANY_INDEX] == 1);
#       endif
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
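/* A minimal client-side usage sketch, not collector code: grow a      */
/* GC-managed buffer; the old contents are carried over, but the       */
/* object may move, so the returned pointer must be used.              */
#if 0
  char *buf = (char *)GC_malloc(100);
  buf = (char *)GC_realloc(buf, 200);  /* first 100 bytes preserved */
  if (buf == 0) { /* handle out-of-memory */ }
#endif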
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the        */
/* beginning of the object are considered.                    */
/* We avoid holding allocation lock while we clear memory.    */
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    void *result;
    size_t lw;
    size_t lb_rounded;
    word n_blocks;
    GC_bool init;
    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    lb_rounded = WORDS_TO_BYTES(lw);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
void * GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

void * GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                       */
void GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed. */
void GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}

#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_bytes_found; /* Protected by GC lock. */
#ifdef PARALLEL_MARK
volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                  */
#endif /* PARALLEL_MARK */
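/* The deferred-update idiom described above, in sketch form (mirrors   */
/* the code in GC_generic_malloc_many below): a thread that cannot      */
/* cheaply take the GC lock publishes its count with an atomic add,     */
/* and a later lock-holder folds it into the real counter.              */
#if 0
  /* Without the GC lock: publish my_bytes_allocd. */
  (void)AO_fetch_and_add((volatile AO_t *)(&GC_bytes_allocd_tmp),
                         (AO_t)(my_bytes_allocd));
  /* Later, while holding the GC lock: drain the temporary counter. */
  {
    signed_word tmp = GC_bytes_allocd_tmp;        /* read once */
    if (tmp != 0) {
      (void)AO_fetch_and_add((volatile AO_t *)(&GC_bytes_allocd_tmp),
                             (AO_t)(-tmp));       /* retract published amount */
      GC_bytes_allocd += tmp;                     /* fold into real count */
    }
  }
#endif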
/* Return a list of 1 or more objects of the indicated size, linked    */
/* through the first word in the object.  This has the advantage that  */
/* it acquires the allocation lock only once, and may greatly reduce   */
/* GC_malloc_many or friends to replenish it.  (We do not round up     */
/* object sizes, since a call indicates the intention to consume many  */
/* objects of exactly this size.)                                      */
/* We assume that the size is a multiple of GRANULE_BYTES.             */
/* We return the free-list by assigning it to *result, since it is     */
/* not safe to return, e.g. a linked list of pointer-free objects,     */
/* since the collector would not retain the entire list if it were     */
/* invoked just as we were returning.                                  */
/* Note that the client should usually clear the link field.           */
void GC_generic_malloc_many(size_t lb, int k, void **result)
{
    void *op;
    void *p;
    void **opp;
    size_t lw;      /* Length in words.    */
    size_t lg;      /* Length in granules. */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);

    GC_ASSERT((lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
        struct hblk * hbp;
        hdr * hhdr;

        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              if (GC_parallel) {
                signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */
                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(
                                (volatile AO_t *)(&GC_bytes_allocd_tmp),
                                (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
                GC_acquire_mark_lock();
                ++ GC_fl_builder_count;
                UNLOCK();
                GC_release_mark_lock();
              }
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            if (op != 0) {
              /* We also reclaimed memory, so we need to adjust */
              /* that count.                                    */
              /* This should be atomic, so the results may be   */
              /* inaccurate.                                    */
              GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                if (GC_parallel) {
                  (void)AO_fetch_and_add(
                                (volatile AO_t *)(&GC_bytes_allocd_tmp),
                                (AO_t)(my_bytes_allocd));
                  GC_acquire_mark_lock();
                  -- GC_fl_builder_count;
                  if (GC_fl_builder_count == 0) GC_notify_all_builder();
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if (my_bytes_allocd >= HBLKSIZE) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        *result = op;
        GC_bytes_allocd += my_bytes_allocd;
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              GC_release_mark_lock();
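/* Client-side usage sketch for the batch interface: the public        */
/* GC_malloc_many(lb) wrapper feeds this function, and gc.h's          */
/* GC_NEXT() macro reads the link stored in the first word of each     */
/* object.  The 32-byte request size is an arbitrary example.          */
#if 0
  void *batch = GC_malloc_many(32);  /* rounded to a granule multiple */
  while (batch != 0) {
      void *next = GC_NEXT(batch);
      GC_NEXT(batch) = 0;            /* clients should clear the link */
      /* ... use the object at batch ... */
      batch = next;
  }
#endif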
/* Allocate lb bytes of pointerful, traced, but not collectable data */
void * GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be */
            /* cleared only temporarily during a collection, as a */
            /* result of the normal free list mark bit clearing.  */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR((struct hblk *)op);
        /* We don't need the lock here, since we have an undisguised */
        /* pointer.  We do need to hold the lock while we adjust     */
        /* mark bits.                                                */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
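/* Usage sketch (client code): uncollectable objects are traced, so    */
/* they keep whatever they point to live, but the collector never      */
/* reclaims them; they must be released explicitly with GC_free.       */
#if 0
  void **root = (void **)GC_malloc_uncollectable(sizeof(void *));
  *root = GC_malloc(64);   /* referent stays live via *root */
  /* ... */
  GC_free(root);           /* explicit release required     */
#endif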
/* Not well tested nor integrated.                 */
/* Debug version is tricky and currently missing.  */
#include <limits.h>

void * GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
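/* Usage sketch: request 64-byte alignment; the result is a normal      */
/* collectable object apart from its alignment.                         */
#if 0
  void *p = GC_memalign(64, 100);
  GC_ASSERT((word)p % 64 == 0);
#endif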
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data  */
/* This is normally roughly equivalent to the system malloc.       */
/* But it may be useful if malloc is redefined.                    */
void * GC_malloc_atomic_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
            /* We don't need the extra byte, since this won't be */
            /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR((struct hblk *)op);
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
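/* Usage sketch: a pointer-free, manually managed buffer, e.g. for     */
/* I/O; it is neither traced nor collected, so free it with GC_free.   */
#if 0
  unsigned char *iobuf =
      (unsigned char *)GC_malloc_atomic_uncollectable(4096);
  /* ... fill and use iobuf; the collector never scans it ... */
  GC_free(iobuf);
#endif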