/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's work.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */

/*#define DEBUG_THREADS 1*/
/*#define GC_ASSERTIONS*/
# include "private/pthread_support.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_COMPILER_TLS)
#   ifdef __GNUC__
#     define USE_PTHREAD_SPECIFIC
      /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
#   else
#     define USE_COMPILER_TLS
#   endif
# endif

# if defined USE_HPUX_TLS
    --> Macro replaced by USE_COMPILER_TLS
# endif
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
     && !defined(USE_PTHREAD_SPECIFIC)
#   define USE_PTHREAD_SPECIFIC
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_COMPILER_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif /* THREAD_LOCAL_ALLOC */
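
/* Illustrative note (editorial, not from the original source): under    */
/* USE_COMPILER_TLS the "key" is just a compiler-supported thread-local  */
/* pointer variable, so the macros above reduce to plain loads/stores:   */
/*                                                                        */
/*   __thread void * key;                                                 */
/*   GC_setspecific(key, v)   expands to   ((key) = (v), 0)               */
/*   GC_getspecific(key)      expands to   (key)                          */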
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# include <signal.h>

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is a uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
#   undef pthread_create
#   if !defined(GC_DARWIN_THREADS)
#     undef pthread_sigmask
#   endif
#   undef pthread_join
#   undef pthread_detach
#   if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
       && !defined(_PTHREAD_USE_PTDNAM_)
      /* Restore the original mangled names on Tru64 UNIX. */
#     define pthread_create __pthread_create
#     define pthread_join __pthread_join
#     define pthread_detach __pthread_detach
#   endif
#endif
static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL. */

#ifdef USE_COMPILER_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                             */
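/* Worked example (illustrative numbers): if GRANULARITY is 16 and a      */
/* word is 4 bytes, then fl[2] holds objects of 2*(16/4) = 8 words,       */
/* so its contents are returned to gfl[8].                                 */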
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
            if (gfl[nwords] == 0) {
                gfl[nwords] = q;
            } else {
                /* Concatenate onto the existing global list. */
                for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
                *qptr = gfl[nwords];
                gfl[nwords] = fl[i];
            }
        }
        /* Clear fl[i], since the thread structure may hang around. */
        /* Do it in a way that is likely to trap if we access it.   */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object. It is linked to    */
/* itself, and is thus repeatedly reused for all size 0 allocation     */
/* requests.  (Size 0 gcj allocation requests are incorrect, and       */
/* we arrange for those to fault asap.)                                */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
/* Caller holds allocation lock.                */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists. */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
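
/* Editorial note: (ptr_t)(-1) is deliberately not a valid free-list      */
/* pointer, so the first size 0 gcj allocation through this list faults   */
/* immediately: the "fault asap" behavior promised above.                 */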
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        if (EXPECT(0 == k, 0)) {
            /* This can happen if we get called when the world is   */
            /* being initialized.  Whether we can actually complete */
            /* the initialization then is unclear.                  */
            GC_init_parallel();
            k = GC_thread_key;
        }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
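
/* Usage sketch (illustrative only; assumes the collector is initialized  */
/* and the calling thread is registered):                                  */
/*                                                                         */
/*   void *p = GC_local_malloc(24);      small request, served from the   */
/*                                       thread's normal_freelists        */
/*   void *q = GC_local_malloc(1 << 20); !SMALL_ENOUGH, falls through     */
/*                                       to GC_malloc()                   */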
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;  */
            /* the free list is always visible to the collector as */
            /* such.                                               */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

extern GC_bool GC_gcj_malloc_initialized;

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.     */
            /* Thus it is impossible for a mark procedure to see the  */
            /* allocation of the next object, but to see this object  */
            /* still containing a free list pointer.  Otherwise the   */
            /* marker might find a random "mark descriptor".          */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.              */
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
 * To make sure that we're using LinuxThreads and not some other thread
 * package, we generate a dummy reference to `pthread_kill_other_threads_np'
 * (was `__pthread_initial_thread_bos' but that disappeared),
 * which is a symbol defined in LinuxThreads, but (hopefully) not in other
 * thread packages.
 *
 * We no longer do this, since this code is now portable enough that it might
 * actually work for something else.
 */
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
long GC_nprocs = 1;     /* Number of processors.  We may not have      */
                        /* access to all of them, but this is as good  */
                        /* a guess as any ...                          */

#ifdef PARALLEL_MARK

# define MAX_MARKERS 16

static ptr_t marker_sp[MAX_MARKERS] = {0};
void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no */
        /* wrapped.                                                 */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;         /* Number of mark threads we would */
                                /* like to have.  Includes the     */
                                /* initiating thread.              */

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)
static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of    */
      /* space.                                           */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
    if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
    }
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
                  (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free  */
/* list links wouldn't otherwise be found.  We also set the mark bits  */
/* in the normal free lists, since that involves touching less memory  */
/* than if we scanned them normally.                                   */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    if (p != &first_thread) GC_INTERNAL_FREE(p);
}
/* If a thread has been joined, but we have not yet          */
/* been notified, then there may be more than one thread     */
/* in the table with the same pthread id.                    */
/* This is OK, but we need a way to delete a specific one.   */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    if (p != &first_thread) GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given pthread_t.   */
/* Returns 0 if it's not there.                             */
/* Caller holds allocation lock or otherwise inhibits       */
/* thread table updates.                                    */
/* If there is more than one thread with the given id we    */
/* return the most recent one.                              */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the     */
/* one for the current thread.  We need to do this in the child */
/* process after a fork(), since only the current thread        */
/* survives in the child.                                       */
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (p -> id == self) {
          me = p;
          p -> next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              GC_destroy_thread_local(p);
            }
#         endif /* THREAD_LOCAL_ALLOC */
          if (p != &first_thread) GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
/* Does the address range [lo, hi) overlap a thread or marker stack? */
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#if defined(GC_LINUX_THREADS)
/* Return the number of processors, or -1 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
    /* appears to be buggy in many cases.                         */
    /* We look for lines "cpu<n>" in /proc/stat.                  */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call */
        /* the real one.                                             */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    int result = 1;
        /* Some old kernels only have a single "cpu nnnn ..." */
        /* entry in /proc/stat.  We identify those as         */
        /* uniprocessors.                                     */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
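
/* Worked example (illustrative): on a 4-processor machine /proc/stat     */
/* contains lines "cpu", "cpu0" .. "cpu3"; the scan sees cpu_no 0..3 and  */
/* returns 4.  On old kernels with only the aggregate "cpu" line, result  */
/* stays 1 and the box is treated as a uniprocessor.                      */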
/* We hold the GC lock.  Wait until an in-progress GC has finished.    */
/* Repeatedly RELEASES GC LOCK in order to wait.                       */
/* If wait_for_all is true, then we exit with the GC lock held and no  */
/* collection in progress; otherwise we just wait for the current GC   */
/* to finish.                                                          */
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that    */
/* this is attainable, since the single UNIX spec seems to imply that   */
/* one should only call async-signal-safe functions, and we probably    */
/* can't quite guarantee that.  But we give it our best shot.  (That    */
/* same spec also implies that it's not safe to call the system malloc  */
/* between fork() and exec().  Thus we're doing no worse than it.)      */
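
/* How these handlers get wired up (GC_thr_init below does this when      */
/* HANDLE_FORK is defined):                                                */
/*                                                                         */
/*   pthread_atfork(GC_fork_prepare_proc,    parent, before fork()         */
/*                  GC_fork_parent_proc,     parent, after fork()          */
/*                  GC_fork_child_proc);     child, after fork()           */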
/* Called before a fork() */
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks  */
    /* the child will see a consistent state in which monitor         */
    /* invariants hold.  Unfortunately, we can't acquire libc locks   */
    /* we might need, and there seems to be no guarantee that libc    */
    /* must install a suitable fork handler.                          */
    /* Wait for an ongoing GC to finish, since we can't finish it in  */
    /* the (one remaining thread in) the child.                       */
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in parent after a fork() */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in child after a fork() */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably  */
      /* just going to exec, and we would have to restart mark threads. */
        GC_markers = 1;
        GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */
#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or -1 if it can't be determined. */
/* <takis@XFree86.Org> */
int GC_get_nprocs()
{
    int status;
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0) {
        /* set -1 for error */
        numCpus = -1;
    } else {
        numCpus = pm_sysinfo.idle_vp_count;
    }

#   ifdef DEBUG_THREADS
      GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */
/* We hold the allocation lock. */
void GC_thr_init()
{
#   ifndef GC_DARWIN_THREADS
      int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork. */
        pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                       GC_fork_child_proc);
#   endif /* HANDLE_FORK */
    /* Add the initial thread, so we can stop it. */
      t = GC_new_thread(pthread_self());
#     ifdef GC_DARWIN_THREADS
         t -> stop_info.mach_thread = mach_thread_self();
#     else
         t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#     endif
      t -> flags = DETACHED | MAIN_THREAD;

    GC_stop_init();

    /* Set GC_nprocs. */
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS)
        GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
        if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_FREEBSD_THREADS) || defined(GC_IRIX_THREADS)
        GC_nprocs = 1;
        /* FIXME: For Irix, that's a ridiculous assumption. */
#     endif
#     if defined(GC_DARWIN_THREADS)
        int ncpus = 1;
        size_t len = sizeof(ncpus);
        sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
        GC_nprocs = ncpus;
#     endif
#     if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
    } else {
#       ifdef PARALLEL_MARK
          {
            char * markers_string = GETENV("GC_MARKERS");
            if (markers_string != NULL) {
              GC_markers = atoi(markers_string);
            } else {
              GC_markers = GC_nprocs;
            }
          }
#       endif
    }
#   ifdef PARALLEL_MARK
      if (GC_print_stats) {
        GC_printf2("Number of processors = %ld, "
                   "number of marker threads = %ld\n", GC_nprocs, GC_markers);
      }
      if (GC_markers == 1) {
        GC_parallel = FALSE;
        if (GC_print_stats) {
          GC_printf0("Single marker thread, turning off parallel marking\n");
        }
      } else {
        GC_parallel = TRUE;
        /* Disable true incremental collection, but generational is OK. */
        GC_time_limit = GC_TIME_UNLIMITED;
      }
    /* If we are using a parallel marker, actually start helper threads. */
      if (GC_parallel) start_mark_threads();
#   endif
}
/* Perform all initializations, including those that   */
/* may require allocation.                             */
/* Called without allocation lock.                     */
/* Must be called before a second thread is created.   */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first. */
    if (!GC_is_initialized) GC_init();
    /* Initialize thread local free lists if used. */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
#if !defined(GC_DARWIN_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS */
/* Wrappers for functions that are likely to block for an appreciable */
/* length of time.  Must be called in pairs, if at all.               */
/* Nothing much beyond the system call itself should be executed      */
/* between these.                                                     */

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
#     endif
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may */
    /* end up pushing more callee-save registers.                     */
#   ifndef GC_DARWIN_THREADS
#     ifdef STACK_GROWS_UP
        me -> stop_info.stack_ptr += SP_SLOP;
#     else
        me -> stop_info.stack_ptr -= SP_SLOP;
#     endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */

/* A wrapper for the standard C sleep function */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
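
/* The same pairing applies to any other wrapper for a call that may      */
/* block for a while (illustrative sketch, not in the original source):   */
/*                                                                         */
/*   GC_start_blocking();                                                  */
/*   n = read(fd, buf, len);    the blocking system call itself            */
/*   GC_end_blocking();                                                    */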
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but */
                                /* parent hasn't yet noticed.     */
};
/* Called at thread exit.                              */
/* Never called for main thread.  That's OK, since it  */
/* results in at most a tiny one-time leak.  And       */
/* linuxthreads doesn't reclaim the main thread's      */
/* resources or id anyway.                             */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    /* The following may run the GC from "nonexistent" thread. */
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages, so I have taken the liberty of catching this one
       spurious return value, properly conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
        LOCK();
        thread_gc_id -> flags |= DETACHED;
        /* Here the pthread thread id may have been recycled. */
        if (thread_gc_id -> flags & FINISHED) {
            GC_delete_gc_thread(thread, thread_gc_id);
        }
        UNLOCK();
    }
    return result;
}
GC_bool GC_in_thread_creation = FALSE;

void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    GC_in_thread_creation = TRUE;
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stop_info.stack_ptr = 0;
#endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#     endif
        /* Needs to be plausible, since an asynchronous stack mark */
        /* should not crash.                                       */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this */
      /* from /proc, but the hook to do so isn't there yet.        */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        LOCK();
        GC_init_thread_local(me);
        UNLOCK();
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit           */
    /* while a collection that thinks we're alive is trying to stop */
    /* us.                                                          */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,      */
    /* even if the default is unreasonably small.  That's the client's */
    /* responsibility.                                                 */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size;
        if (NULL == attr) {
            pthread_attr_t my_attr;
            pthread_attr_init(&my_attr);
            pthread_attr_getstacksize(&my_attr, &stack_size);
        } else {
            pthread_attr_getstacksize(attr, &stack_size);
        }
#       ifdef PARALLEL_MARK
          GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */
          /* HBLKSIZE=8192, sizeof(word)=8                    */
          GC_ASSERT(stack_size >= 65536);
#       endif
        /* Our threads may need to do some work for the GC. */
        /* Ridiculously small threads won't work, and they  */
        /* probably wouldn't work anyway.                   */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    if (0 == result) {
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
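
/* Usage sketch (illustrative, not from the original source): other       */
/* atomic operations can be built from the primitive, e.g. an increment   */
/* via the usual compare-and-swap retry loop:                             */
/*                                                                         */
/*   GC_word cur;                                                          */
/*   do { cur = *addr; }                                                   */
/*   while (!GC_compare_and_exchange(addr, cur, cur + 1));                 */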
/* Spend a few cycles in a way that can't introduce contention with */
/* other threads.                                                   */
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}
#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before */
                        /* we give up.                                */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and  */
                        /* holding the allocation lock for an      */
                        /* extended period.                        */
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)

/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */
/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */
/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know */
/* whether that's better or worse than the above.  We eventually  */
/* yield by calling pthread_mutex_lock(); it never makes sense to */
/* explicitly sleep.                                              */
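
/* Concretely: with SPIN_MAX = 128, successive pthread_mutex_trylock()    */
/* attempts in GC_generic_lock below are separated by pause sequences of  */
/* 1, 2, 4, ..., 128 GC_pause() calls, after which we give up spinning    */
/* and block in pthread_mutex_lock().                                     */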
unsigned long GC_spin_count = 0;
unsigned long GC_block_count = 0;
unsigned long GC_unlocked_count = 0;

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
        ++GC_unlocked_count;
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                ++GC_spin_count;
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
    ++GC_block_count;
    pthread_mutex_lock(lock);
}
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor   */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* Under Linux very short sleeps tend to wait until   */
                /* the current time quantum expires.  On old Linux    */
                /* kernels nanosleep(<= 2ms) just spins under Linux.  */
                /* (Under 2.4, this happens only for real-time        */
                /* processes.)  We want to minimize both behaviors    */
                /* here.                                              */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15msecs, even */
                /* under extreme contention.                    */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
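
/* Editorial note on the cap above: with i clamped to 24, the longest     */
/* nanosleep() is 1 << 24 ns, about 16.8 ms, which matches the "about     */
/* 15msecs" comment.                                                      */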
#else  /* !USE_SPIN_LOCK */

void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPIN_LOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions     */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner          */
  /* field even when it fails to acquire the mutex.  This causes       */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                   */
  /* According to the man page, we should use                          */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually  */
  /* defined.                                                          */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for freelist builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without lock.           */
/* 2) Partial free lists referenced only by locals may not be scanned */
/*    correctly, e.g. if they contain "pointer-free" objects, since   */
/*    the free-list link may be ignored.                              */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}
#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */