2
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4
* Copyright (c) 1998 by Fergus Henderson. All rights reserved.
6
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9
* Permission is hereby granted to use or copy this program
10
* for any purpose, provided the above notices are retained on all copies.
11
* Permission to modify the code and to distribute modified code is granted,
12
* provided the above notices are retained, and a notice that the code was
13
* modified is included with the above copyright notice.
16
* Support code for LinuxThreads, the clone()-based kernel
17
* thread package for Linux which is included in libc6.
19
* This code relies on implementation details of LinuxThreads,
20
* (i.e. properties not guaranteed by the Pthread standard):
22
* - the function GC_linux_thread_top_of_stack(void)
23
* relies on the way LinuxThreads lays out thread stacks
24
* in the address space.
26
* Note that there is a lot of code duplication between linux_threads.c
27
* and irix_threads.c; any changes made here may need to be reflected
31
/* #define DEBUG_THREADS 1 */
33
/* ANSI C requires that a compilation unit contains something */
36
# if defined(LINUX_THREADS)
42
# include <sys/mman.h>
43
# include <sys/time.h>
44
# include <semaphore.h>
47
#undef pthread_sigmask
53
void GC_print_sig_mask()
58
if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
59
ABORT("pthread_sigmask");
60
GC_printf0("Blocked: ");
61
for (i = 1; i <= MAXSIG; i++) {
62
if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
68
/* We use the allocation lock to protect thread-related data structures. */
70
/* The set of all known threads. We intercept thread creation and */
71
/* joins. We never actually create detached threads. We allocate all */
72
/* new thread stacks ourselves. These allow us to maintain this */
74
/* Protected by GC_thr_lock. */
75
 * Some of this should be declared volatile, but that's inconsistent
76
/* with some library routine declarations. */
77
typedef struct GC_Thread_Rep {
78
struct GC_Thread_Rep * next; /* More recently allocated threads */
79
/* with a given pthread id come */
80
/* first. (All but the first are */
81
/* guaranteed to be dead, but we may */
82
/* not yet have registered the join.) */
85
# define FINISHED 1 /* Thread has exited. */
86
# define DETACHED 2 /* Thread is intended to be detached. */
87
# define MAIN_THREAD 4 /* True for the original thread only. */
90
ptr_t stack_ptr; /* Valid only when stopped. */
92
void * status; /* The value returned from the thread. */
93
/* Used only to avoid premature */
94
/* reclamation of any data it might */
98
GC_thread GC_lookup_thread(pthread_t id);
101
* The only way to suspend threads given the pthread interface is to send
102
* signals. We can't use SIGSTOP directly, because we need to get the
103
* thread to save its stack pointer in the GC thread table before
104
* suspending. So we have to reserve a signal of our own for this.
105
* This means we have to intercept client calls to change the signal mask.
106
* The linuxthreads package already uses SIGUSR1 and SIGUSR2,
107
* so we need to reuse something else. I chose SIGPWR.
108
* (Perhaps SIGUNUSED would be a better choice.)
110
#define SIG_SUSPEND SIGPWR
112
#define SIG_RESTART SIGXCPU
114
sem_t GC_suspend_ack_sem;
117
GC_linux_thread_top_of_stack() relies on implementation details of
118
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
119
and grow to no more than 2M.
120
To make sure that we're using LinuxThreads and not some other thread
121
package, we generate a dummy reference to `__pthread_initial_thread_bos',
122
which is a symbol defined in LinuxThreads, but (hopefully) not in other
126
extern char * __pthread_initial_thread_bos;
127
char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
130
#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
132
static inline ptr_t GC_linux_thread_top_of_stack(void)
134
char *sp = GC_approx_sp();
135
ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
137
GC_printf1("SP = %lx\n", (unsigned long)sp);
138
GC_printf1("TOS = %lx\n", (unsigned long)tos);
143
void GC_suspend_handler(int sig)
146
pthread_t my_thread = pthread_self();
153
if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
156
GC_printf1("Suspending 0x%x\n", my_thread);
159
me = GC_lookup_thread(my_thread);
160
/* The lookup here is safe, since I'm doing this on behalf */
161
/* of a thread which holds the allocation lock in order */
162
/* to stop the world. Thus concurrent modification of the */
163
/* data structure is impossible. */
164
me -> stack_ptr = (ptr_t)(&dummy);
165
me -> stack_end = GC_linux_thread_top_of_stack();
167
/* Tell the thread that wants to stop the world that this */
168
/* thread has been stopped. Note that sem_post() is */
169
/* the only async-signal-safe primitive in LinuxThreads. */
170
sem_post(&GC_suspend_ack_sem);
172
/* Wait until that thread tells us to restart by sending */
173
/* this thread a SIG_RESTART signal. */
174
/* SIG_RESTART should be masked at this point. Thus there */
176
if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
177
if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
180
sigsuspend(&mask); /* Wait for signal */
181
} while (me->signal != SIG_RESTART);
184
GC_printf1("Continuing 0x%x\n", my_thread);
188
void GC_restart_handler(int sig)
192
if (sig != SIG_RESTART) ABORT("Bad signal in suspend_handler");
194
/* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
195
/* The lookup here is safe, since I'm doing this on behalf */
196
/* of a thread which holds the allocation lock in order */
197
/* to stop the world. Thus concurrent modification of the */
198
/* data structure is impossible. */
199
me = GC_lookup_thread(pthread_self());
200
me->signal = SIG_RESTART;
203
** Note: even if we didn't do anything useful here,
204
** it would still be necessary to have a signal handler,
205
** rather than ignoring the signals, otherwise
206
** the signals will not be delivered at all, and
207
** will thus not interrupt the sigsuspend() above.
211
GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
215
GC_bool GC_thr_initialized = FALSE;
217
# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
218
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
220
/* Add a thread to GC_threads. We assume it wasn't already there. */
221
/* Caller holds allocation lock. */
222
GC_thread GC_new_thread(pthread_t id)
224
int hv = ((word)id) % THREAD_TABLE_SZ;
226
static struct GC_Thread_Rep first_thread;
227
static GC_bool first_thread_used = FALSE;
229
if (!first_thread_used) {
230
result = &first_thread;
231
first_thread_used = TRUE;
232
/* Dont acquire allocation lock, since we may already hold it. */
234
result = (struct GC_Thread_Rep *)
235
GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
237
if (result == 0) return(0);
239
result -> next = GC_threads[hv];
240
GC_threads[hv] = result;
241
/* result -> flags = 0; */
245
/* Delete a thread from GC_threads. We assume it is there. */
246
/* (The code intentionally traps if it wasn't.) */
247
/* Caller holds allocation lock. */
248
void GC_delete_thread(pthread_t id)
250
int hv = ((word)id) % THREAD_TABLE_SZ;
251
register GC_thread p = GC_threads[hv];
252
register GC_thread prev = 0;
254
while (!pthread_equal(p -> id, id)) {
259
GC_threads[hv] = p -> next;
261
prev -> next = p -> next;
265
/* If a thread has been joined, but we have not yet */
266
/* been notified, then there may be more than one thread */
267
/* in the table with the same pthread id. */
268
/* This is OK, but we need a way to delete a specific one. */
269
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
/* Matches by table-entry pointer, not by (recyclable) id.	*/
/* Caller holds allocation lock.				*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
286
/* Return a GC_thread corresponding to a given thread_t. */
287
/* Returns 0 if it's not there. */
288
/* Caller holds allocation lock or otherwise inhibits */
290
/* If there is more than one thread with the given id we */
291
/* return the most recent one. */
292
GC_thread GC_lookup_thread(pthread_t id)
294
int hv = ((word)id) % THREAD_TABLE_SZ;
295
register GC_thread p = GC_threads[hv];
297
while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
301
/* Caller holds allocation lock. */
304
pthread_t my_thread = pthread_self();
306
register GC_thread p;
307
register int n_live_threads = 0;
310
for (i = 0; i < THREAD_TABLE_SZ; i++) {
311
for (p = GC_threads[i]; p != 0; p = p -> next) {
312
if (p -> id != my_thread) {
313
if (p -> flags & FINISHED) continue;
316
GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
318
result = pthread_kill(p -> id, SIG_SUSPEND);
321
/* Not really there anymore. Possible? */
327
ABORT("pthread_kill failed");
332
for (i = 0; i < n_live_threads; i++) {
333
sem_wait(&GC_suspend_ack_sem);
336
GC_printf1("World stopped 0x%x\n", pthread_self());
340
/* Caller holds allocation lock. */
341
void GC_start_world()
343
pthread_t my_thread = pthread_self();
345
register GC_thread p;
346
register int n_live_threads = 0;
350
GC_printf0("World starting\n");
353
for (i = 0; i < THREAD_TABLE_SZ; i++) {
354
for (p = GC_threads[i]; p != 0; p = p -> next) {
355
if (p -> id != my_thread) {
356
if (p -> flags & FINISHED) continue;
359
GC_printf1("Sending restart signal to 0x%x\n", p -> id);
361
result = pthread_kill(p -> id, SIG_RESTART);
364
/* Not really there anymore. Possible? */
370
ABORT("pthread_kill failed");
376
GC_printf0("World started\n");
380
/* We hold allocation lock. We assume the world is stopped. */
381
void GC_push_all_stacks()
384
register GC_thread p;
385
register ptr_t sp = GC_approx_sp();
386
register ptr_t lo, hi;
387
pthread_t me = pthread_self();
389
if (!GC_thr_initialized) GC_thr_init();
391
GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
393
for (i = 0; i < THREAD_TABLE_SZ; i++) {
394
for (p = GC_threads[i]; p != 0; p = p -> next) {
395
if (p -> flags & FINISHED) continue;
396
if (pthread_equal(p -> id, me)) {
401
if ((p -> flags & MAIN_THREAD) == 0) {
402
if (pthread_equal(p -> id, me)) {
403
hi = GC_linux_thread_top_of_stack();
408
/* The original stack. */
412
GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
413
(unsigned long) p -> id,
414
(unsigned long) lo, (unsigned long) hi);
416
GC_push_all_stack(lo, hi);
422
/* We hold the allocation lock. */
426
struct sigaction act;
428
if (GC_thr_initialized) return;
429
GC_thr_initialized = TRUE;
431
if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
432
ABORT("sem_init failed");
434
act.sa_flags = SA_RESTART;
435
if (sigfillset(&act.sa_mask) != 0) {
436
ABORT("sigfillset() failed");
438
/* SIG_RESTART is unmasked by the handler when necessary. */
439
act.sa_handler = GC_suspend_handler;
440
if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
441
ABORT("Cannot set SIG_SUSPEND handler");
444
act.sa_handler = GC_restart_handler;
445
if (sigaction(SIG_RESTART, &act, NULL) != 0) {
446
ABORT("Cannot set SIG_SUSPEND handler");
449
/* Add the initial thread, so we can stop it. */
450
t = GC_new_thread(pthread_self());
452
t -> flags = DETACHED | MAIN_THREAD;
455
/* Wrapper for client calls to pthread_sigmask.  Prevents the client	*/
/* from blocking SIG_SUSPEND, which would make it impossible for the	*/
/* collector to stop this thread.					*/
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
468
void *(*start_routine)(void *);
471
sem_t registered; /* 1 ==> in our thread table, but */
472
/* parent hasn't yet noticed. */
476
void GC_thread_exit_proc(void *arg)
479
struct start_info * si = arg;
482
me = GC_lookup_thread(pthread_self());
483
if (me -> flags & DETACHED) {
484
GC_delete_thread(pthread_self());
486
me -> flags |= FINISHED;
491
int GC_pthread_join(pthread_t thread, void **retval)
494
GC_thread thread_gc_id;
497
thread_gc_id = GC_lookup_thread(thread);
498
/* This is guaranteed to be the intended one, since the thread id */
499
/* cant have been recycled by pthreads. */
501
result = pthread_join(thread, retval);
503
/* Here the pthread thread id may have been recycled. */
504
GC_delete_gc_thread(thread, thread_gc_id);
509
void * GC_start_routine(void * arg)
511
struct start_info * si = arg;
514
pthread_t my_pthread;
515
void *(*start)(void *);
518
my_pthread = pthread_self();
520
me = GC_new_thread(my_pthread);
521
me -> flags = si -> flags;
525
start = si -> start_routine;
526
start_arg = si -> arg;
527
sem_post(&(si -> registered));
528
pthread_cleanup_push(GC_thread_exit_proc, si);
529
# ifdef DEBUG_THREADS
530
GC_printf1("Starting thread 0x%lx\n", pthread_self());
531
GC_printf1("pid = %ld\n", (long) getpid());
532
GC_printf1("sp = 0x%lx\n", (long) &arg);
533
GC_printf1("start_routine = 0x%lx\n", start);
535
result = (*start)(start_arg);
537
GC_printf1("Finishing thread 0x%x\n", pthread_self());
539
me -> status = result;
540
me -> flags |= FINISHED;
541
pthread_cleanup_pop(1);
542
/* Cleanup acquires lock, ensuring that we can't exit */
543
/* while a collection that thinks we're alive is trying to stop */
549
GC_pthread_create(pthread_t *new_thread,
550
const pthread_attr_t *attr,
551
void *(*start_routine)(void *), void *arg)
555
pthread_t my_new_thread;
558
pthread_attr_t new_attr;
561
struct start_info * si = GC_malloc(sizeof(struct start_info));
562
/* This is otherwise saved only in an area mmapped by the thread */
563
/* library, which isn't visible to the collector. */
565
if (0 == si) return(ENOMEM);
566
sem_init(&(si -> registered), 0, 0);
567
si -> start_routine = start_routine;
570
if (!GC_thr_initialized) GC_thr_init();
573
(void) pthread_attr_init(&new_attr);
577
pthread_attr_getdetachstate(&new_attr, &detachstate);
578
if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
579
si -> flags = my_flags;
581
result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
582
/* Wait until child has been added to the thread table. */
583
/* This also ensures that we hold onto si until the child is done */
584
/* with it. Thus it doesn't matter whether it is otherwise */
585
/* visible to the collector. */
586
if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
587
sem_destroy(&(si -> registered));
588
/* pthread_attr_destroy(&new_attr); */
589
/* pthread_attr_destroy(&new_attr); */
593
GC_bool GC_collecting = 0;
594
/* A hint that we're in the collector and */
595
/* holding the allocation lock for an */
596
/* extended period. */
598
/* Reasonably fast spin locks. Basically the same implementation */
599
/* as STL alloc.h. This isn't really the right way to do this. */
600
/* but until the POSIX scheduling mess gets straightened out ... */
602
volatile unsigned int GC_allocate_lock = 0;
607
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
608
# define high_spin_max 1000 /* spin cycles for multiprocessor */
609
static unsigned spin_max = low_spin_max;
610
unsigned my_spin_max;
611
static unsigned last_spins = 0;
612
unsigned my_last_spins;
613
volatile unsigned junk;
614
# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
617
if (!GC_test_and_set(&GC_allocate_lock)) {
621
my_spin_max = spin_max;
622
my_last_spins = last_spins;
623
for (i = 0; i < my_spin_max; i++) {
624
if (GC_collecting) goto yield;
625
if (i < my_last_spins/2 || GC_allocate_lock) {
629
if (!GC_test_and_set(&GC_allocate_lock)) {
632
* Spinning worked. Thus we're probably not being scheduled
633
* against the other process with which we were contending.
634
* Thus it makes sense to spin longer the next time.
637
spin_max = high_spin_max;
641
/* We are probably being scheduled against the other process. Sleep. */
642
spin_max = low_spin_max;
645
if (!GC_test_and_set(&GC_allocate_lock)) {
648
# define SLEEP_THRESHOLD 12
649
/* nanosleep(<= 2ms) just spins under Linux. We */
650
/* want to be careful to avoid that behavior. */
651
if (i < SLEEP_THRESHOLD) {
657
/* Don't wait for more than about 60msecs, even */
658
/* under extreme contention. */
666
# endif /* LINUX_THREADS */