/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */
/* Request that HIDE_POINTER/REVEAL_POINTER be made available by the   */
/* private headers.                                                    */
# define I_HIDE_POINTERS
# include "private/gc_pmark.h"

/* If nonzero, finalizers are only run when the client explicitly      */
/* calls GC_invoke_finalizers(); otherwise the collector runs them     */
/* implicitly.  Default is selected at build time.                     */
# ifdef FINALIZE_ON_DEMAND
    int GC_finalize_on_demand = 1;
# else
    int GC_finalize_on_demand = 0;
# endif

/* If nonzero, use Java-style (unordered, with resurrection support)   */
/* finalization semantics.                                             */
# ifdef JAVA_FINALIZATION
    int GC_java_finalization = 1;
# else
    int GC_java_finalization = 0;
# endif
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants, so that ordered finalization can detect when the       */
/* object itself remains reachable from other finalizable objects.     */
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);

/* Hash an address into a table of (size) buckets, where size is       */
/* 1 << log_size.  The low 3 address bits are dropped since objects    */
/* are at least 8-byte aligned.                                        */
# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
     & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)
42
struct hash_chain_entry {
44
struct hash_chain_entry * next;
47
unsigned GC_finalization_failures = 0;
48
/* Number of finalization requests that failed for lack of memory. */
50
static struct disappearing_link {
51
struct hash_chain_entry prolog;
52
# define dl_hidden_link prolog.hidden_key
53
/* Field to be cleared. */
54
# define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
55
# define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
57
word dl_hidden_obj; /* Pointer to object base */
60
static signed_word log_dl_table_size = -1;
62
/* current size of array pointed to by dl_head. */
63
/* -1 ==> size is 0. */
65
word GC_dl_entries = 0; /* Number of entries currently in disappearing */
68
static struct finalizable_object {
69
struct hash_chain_entry prolog;
70
# define fo_hidden_base prolog.hidden_key
71
/* Pointer to object base. */
72
/* No longer hidden once object */
73
/* is on finalize_now queue. */
74
# define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
75
# define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
76
GC_finalization_proc fo_fn; /* Finalizer. */
78
word fo_object_size; /* In bytes. */
79
finalization_mark_proc * fo_mark_proc; /* Mark-through procedure */
82
struct finalizable_object * GC_finalize_now = 0;
83
/* LIst of objects that should be finalized now. */
85
static signed_word log_fo_table_size = -1;
87
word GC_fo_entries = 0;
89
void GC_push_finalizer_structures(void)
91
GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
92
GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
93
GC_push_all((ptr_t)(&GC_finalize_now),
94
(ptr_t)(&GC_finalize_now) + sizeof(word));
97
/* Double the size of a hash table. *size_ptr is the log of its current */
98
/* size. May be a noop. */
99
/* *table is a pointer to an array of hash headers. If we succeed, we */
100
/* update both *table and *log_size_ptr. */
101
/* Lock is held. Signals are disabled. */
102
void GC_grow_table(struct hash_chain_entry ***table,
103
signed_word *log_size_ptr)
106
register struct hash_chain_entry *p;
107
signed_word log_old_size = *log_size_ptr;
108
signed_word log_new_size = log_old_size + 1;
109
word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
110
word new_size = (word)1 << log_new_size;
111
/* FIXME: Power of 2 size often gets rounded up to one more page. */
112
struct hash_chain_entry **new_table = (struct hash_chain_entry **)
113
GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
114
(size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
116
if (new_table == 0) {
118
ABORT("Insufficient space for initial table allocation");
123
for (i = 0; i < old_size; i++) {
126
ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
127
struct hash_chain_entry *next = p -> next;
128
size_t new_hash = HASH3(real_key, new_size, log_new_size);
130
p -> next = new_table[new_hash];
131
new_table[new_hash] = p;
135
*log_size_ptr = log_new_size;
139
int GC_register_disappearing_link(void * * link)
143
base = (ptr_t)GC_base((void *)link);
145
ABORT("Bad arg to GC_register_disappearing_link");
146
return(GC_general_register_disappearing_link(link, base));
149
int GC_general_register_disappearing_link(void * * link, void * obj)
151
struct disappearing_link *curr_dl;
153
struct disappearing_link * new_dl;
156
if ((word)link & (ALIGNMENT-1))
157
ABORT("Bad arg to GC_general_register_disappearing_link");
161
if (log_dl_table_size == -1
162
|| GC_dl_entries > ((word)1 << log_dl_table_size)) {
163
GC_grow_table((struct hash_chain_entry ***)(&dl_head),
165
if (GC_print_stats) {
166
GC_log_printf("Grew dl table to %u entries\n",
167
(1 << log_dl_table_size));
170
index = HASH2(link, log_dl_table_size);
171
curr_dl = dl_head[index];
172
for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
173
if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
174
curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
181
new_dl = (struct disappearing_link *)
182
GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
187
new_dl = (struct disappearing_link *)
188
GC_oom_fn(sizeof(struct disappearing_link));
190
GC_finalization_failures++;
193
/* It's not likely we'll make it here, but ... */
198
new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
199
new_dl -> dl_hidden_link = HIDE_POINTER(link);
200
dl_set_next(new_dl, dl_head[index]);
201
dl_head[index] = new_dl;
209
int GC_unregister_disappearing_link(void * * link)
211
struct disappearing_link *curr_dl, *prev_dl;
216
index = HASH2(link, log_dl_table_size);
217
if (((word)link & (ALIGNMENT-1))) goto out;
218
prev_dl = 0; curr_dl = dl_head[index];
219
while (curr_dl != 0) {
220
if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
222
dl_head[index] = dl_next(curr_dl);
224
dl_set_next(prev_dl, dl_next(curr_dl));
229
dl_set_next(curr_dl, 0);
231
GC_free((void *)curr_dl);
236
curr_dl = dl_next(curr_dl);
243
/* Possible finalization_marker procedures. Note that mark stack */
244
/* overflow is handled by the caller, and is not a disaster. */
245
GC_API void GC_normal_finalize_mark_proc(ptr_t p)
249
PUSH_OBJ(p, hhdr, GC_mark_stack_top,
250
&(GC_mark_stack[GC_mark_stack_size]));
253
/* This only pays very partial attention to the mark descriptor. */
254
/* It does the right thing for normal and atomic objects, and treats */
255
/* most others as normal. */
256
GC_API void GC_ignore_self_finalize_mark_proc(ptr_t p)
259
word descr = hhdr -> hb_descr;
262
ptr_t target_limit = p + hhdr -> hb_sz - 1;
264
if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
265
scan_limit = p + descr - sizeof(word);
267
scan_limit = target_limit + 1 - sizeof(word);
269
for (q = p; q <= scan_limit; q += ALIGNMENT) {
271
if (r < p || r > target_limit) {
272
GC_PUSH_ONE_HEAP(r, q);
278
GC_API void GC_null_finalize_mark_proc(ptr_t p)
282
/* Possible finalization_marker procedures. Note that mark stack */
283
/* overflow is handled by the caller, and is not a disaster. */
285
/* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
286
/* but it is explicitly tested for, and triggers different */
287
/* behavior. Objects registered in this way are not finalized */
288
/* if they are reachable by other finalizable objects, eve if those */
289
/* other objects specify no ordering. */
290
GC_API void GC_unreachable_finalize_mark_proc(ptr_t p)
292
GC_normal_finalize_mark_proc(p);
297
/* Register a finalization function. See gc.h for details. */
298
/* in the nonthreads case, we try to avoid disabling signals, */
299
/* since it can be expensive. Threads packages typically */
300
/* make it cheaper. */
301
/* The last parameter is a procedure that determines */
302
/* marking for finalization ordering. Any objects marked */
303
/* by that procedure will be guaranteed to not have been */
304
/* finalized when this finalizer is invoked. */
305
GC_API void GC_register_finalizer_inner(void * obj,
306
GC_finalization_proc fn, void *cd,
307
GC_finalization_proc *ofn, void **ocd,
308
finalization_mark_proc mp)
311
struct finalizable_object * curr_fo, * prev_fo;
313
struct finalizable_object *new_fo;
320
if (log_fo_table_size == -1
321
|| GC_fo_entries > ((word)1 << log_fo_table_size)) {
322
GC_grow_table((struct hash_chain_entry ***)(&fo_head),
324
if (GC_print_stats) {
325
GC_log_printf("Grew fo table to %u entries\n",
326
(1 << log_fo_table_size));
329
/* in the THREADS case signals are disabled and we hold allocation */
330
/* lock; otherwise neither is true. Proceed carefully. */
332
index = HASH2(base, log_fo_table_size);
333
prev_fo = 0; curr_fo = fo_head[index];
334
while (curr_fo != 0) {
335
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
336
if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
337
/* Interruption by a signal in the middle of this */
338
/* should be safe. The client may see only *ocd */
339
/* updated, but we'll declare that to be his */
341
if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
342
if (ofn) *ofn = curr_fo -> fo_fn;
343
/* Delete the structure for base. */
345
fo_head[index] = fo_next(curr_fo);
347
fo_set_next(prev_fo, fo_next(curr_fo));
351
/* May not happen if we get a signal. But a high */
352
/* estimate will only make the table larger than */
354
# if !defined(THREADS) && !defined(DBG_HDRS_ALL)
355
GC_free((void *)curr_fo);
358
curr_fo -> fo_fn = fn;
359
curr_fo -> fo_client_data = (ptr_t)cd;
360
curr_fo -> fo_mark_proc = mp;
361
/* Reinsert it. We deleted it first to maintain */
362
/* consistency in the event of a signal. */
364
fo_head[index] = curr_fo;
366
fo_set_next(prev_fo, curr_fo);
375
curr_fo = fo_next(curr_fo);
387
/* We won't collect it, hence finalizer wouldn't be run. */
393
new_fo = (struct finalizable_object *)
394
GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
395
GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
396
if (EXPECT(0 == new_fo, FALSE)) {
400
new_fo = (struct finalizable_object *)
401
GC_oom_fn(sizeof(struct finalizable_object));
403
GC_finalization_failures++;
406
/* It's not likely we'll make it here, but ... */
411
new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
412
new_fo -> fo_fn = fn;
413
new_fo -> fo_client_data = (ptr_t)cd;
414
new_fo -> fo_object_size = hhdr -> hb_sz;
415
new_fo -> fo_mark_proc = mp;
416
fo_set_next(new_fo, fo_head[index]);
418
fo_head[index] = new_fo;
424
void GC_register_finalizer(void * obj,
425
GC_finalization_proc fn, void * cd,
426
GC_finalization_proc *ofn, void ** ocd)
428
GC_register_finalizer_inner(obj, fn, cd, ofn,
429
ocd, GC_normal_finalize_mark_proc);
432
void GC_register_finalizer_ignore_self(void * obj,
433
GC_finalization_proc fn, void * cd,
434
GC_finalization_proc *ofn, void ** ocd)
436
GC_register_finalizer_inner(obj, fn, cd, ofn,
437
ocd, GC_ignore_self_finalize_mark_proc);
440
void GC_register_finalizer_no_order(void * obj,
441
GC_finalization_proc fn, void * cd,
442
GC_finalization_proc *ofn, void ** ocd)
444
GC_register_finalizer_inner(obj, fn, cd, ofn,
445
ocd, GC_null_finalize_mark_proc);
448
static GC_bool need_unreachable_finalization = FALSE;
449
/* Avoid the work if this isn't used. */
451
void GC_register_finalizer_unreachable(void * obj,
452
GC_finalization_proc fn, void * cd,
453
GC_finalization_proc *ofn, void ** ocd)
455
need_unreachable_finalization = TRUE;
456
GC_ASSERT(GC_java_finalization);
457
GC_register_finalizer_inner(obj, fn, cd, ofn,
458
ocd, GC_unreachable_finalize_mark_proc);
462
void GC_dump_finalization(void)
464
struct disappearing_link * curr_dl;
465
struct finalizable_object * curr_fo;
466
ptr_t real_ptr, real_link;
467
int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
468
int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
471
GC_printf("Disappearing links:\n");
472
for (i = 0; i < dl_size; i++) {
473
for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
474
real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
475
real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
476
GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
479
GC_printf("Finalizers:\n");
480
for (i = 0; i < fo_size; i++) {
481
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
482
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
483
GC_printf("Finalizable object: %p\n", real_ptr);
489
/* Called with world stopped. Cause disappearing links to disappear, */
490
/* and invoke finalizers. */
491
void GC_finalize(void)
493
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
494
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
495
ptr_t real_ptr, real_link;
497
size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
498
size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
500
/* Make disappearing links disappear */
501
for (i = 0; i < dl_size; i++) {
502
curr_dl = dl_head[i];
504
while (curr_dl != 0) {
505
real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
506
real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
507
if (!GC_is_marked(real_ptr)) {
508
*(word *)real_link = 0;
509
next_dl = dl_next(curr_dl);
511
dl_head[i] = next_dl;
513
dl_set_next(prev_dl, next_dl);
515
GC_clear_mark_bit((ptr_t)curr_dl);
520
curr_dl = dl_next(curr_dl);
524
/* Mark all objects reachable via chains of 1 or more pointers */
525
/* from finalizable objects. */
526
GC_ASSERT(GC_mark_state == MS_NONE);
527
for (i = 0; i < fo_size; i++) {
528
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
529
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
530
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
531
if (!GC_is_marked(real_ptr)) {
532
GC_MARKED_FOR_FINALIZATION(real_ptr);
533
GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
534
if (GC_is_marked(real_ptr)) {
535
WARN("Finalization cycle involving %lx\n", real_ptr);
540
/* Enqueue for finalization all objects that are still */
542
GC_bytes_finalized = 0;
543
for (i = 0; i < fo_size; i++) {
544
curr_fo = fo_head[i];
546
while (curr_fo != 0) {
547
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
548
if (!GC_is_marked(real_ptr)) {
549
if (!GC_java_finalization) {
550
GC_set_mark_bit(real_ptr);
552
/* Delete from hash table */
553
next_fo = fo_next(curr_fo);
555
fo_head[i] = next_fo;
557
fo_set_next(prev_fo, next_fo);
560
/* Add to list of objects awaiting finalization. */
561
fo_set_next(curr_fo, GC_finalize_now);
562
GC_finalize_now = curr_fo;
563
/* unhide object pointer so any future collections will */
565
curr_fo -> fo_hidden_base =
566
(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
567
GC_bytes_finalized +=
568
curr_fo -> fo_object_size
569
+ sizeof(struct finalizable_object);
570
GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
574
curr_fo = fo_next(curr_fo);
579
if (GC_java_finalization) {
580
/* make sure we mark everything reachable from objects finalized
581
using the no_order mark_proc */
582
for (curr_fo = GC_finalize_now;
583
curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
584
real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
585
if (!GC_is_marked(real_ptr)) {
586
if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
587
GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
589
if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
590
GC_set_mark_bit(real_ptr);
595
/* now revive finalize-when-unreachable objects reachable from
596
other finalizable objects */
597
if (need_unreachable_finalization) {
598
curr_fo = GC_finalize_now;
600
while (curr_fo != 0) {
601
next_fo = fo_next(curr_fo);
602
if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
603
real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
604
if (!GC_is_marked(real_ptr)) {
605
GC_set_mark_bit(real_ptr);
608
GC_finalize_now = next_fo;
610
fo_set_next(prev_fo, next_fo);
612
curr_fo -> fo_hidden_base =
613
(word) HIDE_POINTER(curr_fo -> fo_hidden_base);
614
GC_bytes_finalized -=
615
curr_fo -> fo_object_size + sizeof(struct finalizable_object);
617
i = HASH2(real_ptr, log_fo_table_size);
618
fo_set_next (curr_fo, fo_head[i]);
620
fo_head[i] = curr_fo;
630
/* Remove dangling disappearing links. */
631
for (i = 0; i < dl_size; i++) {
632
curr_dl = dl_head[i];
634
while (curr_dl != 0) {
635
real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
636
if (real_link != 0 && !GC_is_marked(real_link)) {
637
next_dl = dl_next(curr_dl);
639
dl_head[i] = next_dl;
641
dl_set_next(prev_dl, next_dl);
643
GC_clear_mark_bit((ptr_t)curr_dl);
648
curr_dl = dl_next(curr_dl);
654
#ifndef JAVA_FINALIZATION_NOT_NEEDED
656
/* Enqueue all remaining finalizers to be run - Assumes lock is
657
* held, and signals are disabled */
658
void GC_enqueue_all_finalizers(void)
660
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
665
fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
666
GC_bytes_finalized = 0;
667
for (i = 0; i < fo_size; i++) {
668
curr_fo = fo_head[i];
670
while (curr_fo != 0) {
671
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
672
GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
673
GC_set_mark_bit(real_ptr);
675
/* Delete from hash table */
676
next_fo = fo_next(curr_fo);
678
fo_head[i] = next_fo;
680
fo_set_next(prev_fo, next_fo);
684
/* Add to list of objects awaiting finalization. */
685
fo_set_next(curr_fo, GC_finalize_now);
686
GC_finalize_now = curr_fo;
688
/* unhide object pointer so any future collections will */
690
curr_fo -> fo_hidden_base =
691
(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
693
GC_bytes_finalized +=
694
curr_fo -> fo_object_size + sizeof(struct finalizable_object);
702
/* Invoke all remaining finalizers that haven't yet been run.
703
* This is needed for strict compliance with the Java standard,
704
* which can make the runtime guarantee that all finalizers are run.
705
* Unfortunately, the Java standard implies we have to keep running
706
* finalizers until there are no more left, a potential infinite loop.
708
* Note that this is even more dangerous than the usual Java
709
* finalizers, in that objects reachable from static variables
710
* may have been finalized when these finalizers are run.
711
* Finalizers run at this point must be prepared to deal with a
712
* mostly broken world.
713
* This routine is externally callable, so is called without
714
* the allocation lock.
716
GC_API void GC_finalize_all(void)
721
while (GC_fo_entries > 0) {
722
GC_enqueue_all_finalizers();
724
GC_INVOKE_FINALIZERS();
731
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
732
/* finalizers can only be called from some kind of `safe state' and */
733
/* getting into that safe state is expensive.) */
734
int GC_should_invoke_finalizers(void)
736
return GC_finalize_now != 0;
739
/* Invoke finalizers for all objects that are ready to be finalized. */
740
/* Should be called without allocation lock. */
741
int GC_invoke_finalizers(void)
743
struct finalizable_object * curr_fo;
745
word bytes_freed_before;
748
while (GC_finalize_now != 0) {
753
bytes_freed_before = GC_bytes_freed;
754
/* Don't do this outside, since we need the lock. */
756
curr_fo = GC_finalize_now;
758
if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
760
if (curr_fo == 0) break;
762
GC_finalize_now = fo_next(curr_fo);
764
fo_set_next(curr_fo, 0);
765
(*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
766
curr_fo -> fo_client_data);
767
curr_fo -> fo_client_data = 0;
770
/* This is probably a bad idea. It throws off accounting if */
771
/* nearly all objects are finalizable. O.w. it shouldn't */
773
GC_free((void *)curr_fo);
776
/* bytes_freed_before is initialized whenever count != 0 */
777
if (count != 0 && bytes_freed_before != GC_bytes_freed) {
779
GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
785
void (* GC_finalizer_notifier)() = (void (*) (void))0;
787
static GC_word last_finalizer_notification = 0;
789
void GC_notify_or_invoke_finalizers(void)
791
/* This is a convenient place to generate backtraces if appropriate, */
792
/* since that code is not callable with the allocation lock. */
793
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
794
static word last_back_trace_gc_no = 1; /* Skip first one. */
796
if (GC_gc_no > last_back_trace_gc_no) {
799
# ifdef KEEP_BACK_PTRS
801
/* Stops when GC_gc_no wraps; that's OK. */
802
last_back_trace_gc_no = (word)(-1); /* disable others. */
803
for (i = 0; i < GC_backtraces; ++i) {
804
/* FIXME: This tolerates concurrent heap mutation, */
805
/* which may cause occasional mysterious results. */
806
/* We need to release the GC lock, since GC_print_callers */
807
/* acquires it. It probably shouldn't. */
809
GC_generate_random_backtrace_no_gc();
812
last_back_trace_gc_no = GC_gc_no;
815
# ifdef MAKE_BACK_GRAPH
816
if (GC_print_back_height)
817
GC_print_back_graph_stats();
821
if (GC_finalize_now == 0) return;
822
if (!GC_finalize_on_demand) {
823
(void) GC_invoke_finalizers();
825
GC_ASSERT(GC_finalize_now == 0);
826
# endif /* Otherwise GC can run concurrently and add more */
829
if (GC_finalizer_notifier != (void (*) (void))0
830
&& last_finalizer_notification != GC_gc_no) {
831
last_finalizer_notification = GC_gc_no;
832
GC_finalizer_notifier();
836
void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data)
843
/* FIXME - This looks wrong!! */
846
result = (*fn)(client_data);
848
# ifndef GC_ASSERTIONS
850
# endif /* o.w. UNLOCK() does it implicitly */
856
#if !defined(NO_DEBUGGING)
858
void GC_print_finalization_stats(void)
860
struct finalizable_object *fo = GC_finalize_now;
863
GC_printf("%u finalization table entries; %u disappearing links\n",
864
GC_fo_entries, GC_dl_entries);
865
for (; 0 != fo; fo = fo_next(fo)) ++ready;
866
GC_printf("%u objects are eligible for immediate finalization\n", ready);
869
#endif /* NO_DEBUGGING */