/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Solaris threads.  Provides functionality we wish Sun
 * had provided.  Relies on some information we probably shouldn't rely on.
 */
/* Boehm, September 14, 1994 4:44 pm PDT */
# include "private/gc_priv.h"

# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS)
# include "private/solaris_threads.h"
# include <thread.h>
# include <synch.h>
# include <signal.h>
# include <fcntl.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/syscall.h>
# include <sys/procfs.h>
# include <sys/lwp.h>
# include <sys/reg.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# include <errno.h>
# ifdef HANDLE_FORK
  --> Not yet supported.  Try porting the code from linux_threads.c.
# endif
/*
 * This is the default size of the LWP arrays. If there are more LWPs
 * than this when a stop-the-world GC happens, set_max_lwps will be
 * called to increase the size of the arrays.
 * This must be higher than the number of LWPs at startup time.
 * The threads library creates a thread early on, so the minimum is 3.
 */
# define DEFAULT_MAX_LWPS 4
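/* When this limit is exceeded during a collection, stop_all_lwps grows	*/
/* the arrays by calling set_max_lwps(status.pr_nlwp*2 + 10); see below. */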
cond_t GC_prom_join_cv;		/* Broadcast when any thread terminates	*/
cond_t GC_create_cv;		/* Signalled when a new undetached	*/
				/* thread starts.			*/

#ifdef MMAP_STACKS
static int GC_zfd;		/* /dev/zero descriptor, set in GC_thr_init. */
#endif /* MMAP_STACKS */
/* We use the allocation lock to protect thread-related data structures. */

/* We stop the world using /proc primitives.  This makes some	*/
/* minimal assumptions about the threads implementation.	*/
/* We don't play by the rules, since the rules make this	*/
/* impossible (as of Solaris 2.3).  Also note that as of	*/
/* Solaris 2.3 the various thread and lwp suspension		*/
/* primitives failed to stop threads by the time the request	*/
/* is completed.						*/

static sigset_t old_mask;
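/* Outline of the stop-the-world sequence implemented below: block	*/
/* signals to disable preemption, enumerate LWPs via PIOCLWPIDS,	*/
/* _lwp_suspend each LWP other than our own, then poll PIOCSTATUS	*/
/* until PR_STOPPED is seen, and save each LWP's registers as roots.	*/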
/* Sleep for n milliseconds, n < 1000	*/
void GC_msec_sleep(int n)
{
    struct timespec ts;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000000*n;
    if (syscall(SYS_nanosleep, &ts, 0) < 0) {
        ABORT("nanosleep failed");
    }
}
/* Turn off preemption; gross but effective. */
92
/* Caller has allocation lock. */
93
/* Actually this is not needed under Solaris 2.3 and */
94
/* 2.4, but hopefully that'll change. */
99
(void)sigfillset(&set);
100
sigdelset(&set, SIGABRT);
101
syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
106
syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
int GC_main_proc_fd = -1;

struct lwp_cache_entry {
    lwpid_t lc_id;
    int lc_descr;	/* /proc file descriptor.	*/
} GC_lwp_cache_default[DEFAULT_MAX_LWPS];

static int max_lwps = DEFAULT_MAX_LWPS;
static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;

static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
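/* Note that GC_lwp_registers also serves as a root set:		*/
/* GC_push_all_stacks (below) pushes the saved register contents, so	*/
/* pointers held only in the registers of stopped LWPs are traced.	*/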
/* Return a file descriptor for the /proc entry corresponding	*/
/* to the given lwp.  The file descriptor may be stale if the	*/
/* lwp exited and a new one was forked.				*/
static int open_lwp(lwpid_t id)
{
    int result;
    static int next_victim = 0;
    register int i;

    for (i = 0; i < max_lwps; i++) {
        if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
    }
    result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
    /*
     * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
     */
    if (result < 0 && errno == EMFILE) {
        for (i = 0; i < max_lwps; i++) {
            if (GC_lwp_cache[i].lc_id != 0) {
                (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
                /* Mark the entry free so the closed descriptor	*/
                /* is not handed out again.			*/
                GC_lwp_cache[i].lc_id = 0;
                result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
                if (result >= 0 || (result < 0 && errno != EMFILE))
                    break;
            }
        }
    }
    if (result < 0) {
        if (errno == EMFILE) {
            ABORT("Too many open files");
        }
        return(-1) /* exited? */;
    }
    if (GC_lwp_cache[next_victim].lc_id != 0)
        (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
    GC_lwp_cache[next_victim].lc_id = id;
    GC_lwp_cache[next_victim].lc_descr = result;
    if (++next_victim >= max_lwps)
        next_victim = 0;
    return(result);
}
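/* The cache above is intentionally simple: a linear-scan table with	*/
/* round-robin eviction through next_victim, which is adequate for	*/
/* the small LWP counts this file expects.				*/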
static void uncache_lwp(lwpid_t id)
{
    register int i;

    for (i = 0; i < max_lwps; i++) {
        if (GC_lwp_cache[i].lc_id == id) {
            (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
            GC_lwp_cache[i].lc_id = 0;
            break;
        }
    }
}
	/* Sequence of current lwp ids	*/
static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *GC_current_ids = GC_current_ids_default;

	/* Temporary used below (can be big if there are many LWPs) */
static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *last_ids = last_ids_default;
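/* ROUNDUP pads a byte count to a whole number of words, so that the	*/
/* scratch block carved up in set_max_lwps stays word-aligned.		*/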
#define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
static void set_max_lwps(GC_word n)
{
    char *mem;
    char *oldmem;
    int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
        + ROUNDUP(n * sizeof(prgregset_t))
        + ROUNDUP((n + 1) * sizeof(lwpid_t))
        + ROUNDUP((n + 1) * sizeof(lwpid_t));

    GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
    oldmem = mem = GC_scratch_alloc(required_bytes);
    if (0 == mem) ABORT("No space for lwp data structures");

    /*
     * We can either flush the old lwp cache or copy it over. Do the latter.
     */
    memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
    GC_lwp_cache = (struct lwp_cache_entry*)mem;
    mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));

    BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
    GC_lwp_registers = (prgregset_t *)mem;
    mem += ROUNDUP(n * sizeof(prgregset_t));

    GC_current_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    last_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    if (mem > oldmem + required_bytes)
        ABORT("set_max_lwps buffer overflow");

    max_lwps = n;
}
/* Stop all lwps in process.  Assumes preemption is off.	*/
/* Caller has allocation lock (and any other locks he may	*/
/* need).							*/
static void stop_all_lwps()
{
    int lwp_fd;
    char buf[30];
    prstatus_t status;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();

    if (GC_main_proc_fd == -1) {
        sprintf(buf, "/proc/%d", getpid());
        GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
        if (GC_main_proc_fd < 0) {
            if (errno == EMFILE)
                ABORT("/proc open failed: too many open files");
            GC_printf1("/proc open failed: errno %d", errno);
            abort();
        }
    }
    BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
    for (i = 0; i < max_lwps; i++)
        last_ids[i] = 0;
    for (;;) {
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
            ABORT("Main PIOCSTATUS failed");
        if (status.pr_nlwp < 1)
            ABORT("Invalid number of lwps returned by PIOCSTATUS");
        if (status.pr_nlwp >= max_lwps) {
            set_max_lwps(status.pr_nlwp*2 + 10);
            /*
             * The data in the old GC_current_ids and
             * GC_lwp_registers has been trashed. Cleaning out last_ids
             * will make sure every LWP gets re-examined.
             */
            for (i = 0; i < max_lwps; i++)
                last_ids[i] = 0;
            continue;
        }
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
            ABORT("PIOCLWPIDS failed");
        changed = FALSE;
        for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
            if (GC_current_ids[i] != last_ids[i]) {
                changed = TRUE;
                if (GC_current_ids[i] != me) {
                    /* PIOCSTOP doesn't work without a writable	*/
                    /* descriptor.  And that makes the process	*/
                    /* undebuggable.				*/
                    if (_lwp_suspend(GC_current_ids[i]) < 0) {
                        /* Could happen if the lwp exited */
                        uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* ignore */
                    }
                }
            }
        }
        /*
         * In the unlikely event something does a fork between the
         * PIOCSTATUS and the PIOCLWPIDS, start over.
         */
        if (i >= max_lwps) continue;
        /* All lwps in GC_current_ids != me have been suspended.  Note	*/
        /* that _lwp_suspend is idempotent.				*/
        for (i = 0; GC_current_ids[i] != 0; i++) {
            if (GC_current_ids[i] != last_ids[i]
                && GC_current_ids[i] != me) {
                lwp_fd = open_lwp(GC_current_ids[i]);
                if (lwp_fd == -1) {
                    GC_current_ids[i] = me;
                    continue;
                }
                /* LWP should be stopped.  Empirically it sometimes	*/
                /* isn't, and more frequently the PR_STOPPED flag	*/
                /* is not set.  Wait for PR_STOPPED.			*/
                if (syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &status) < 0) {
                    /* Possible if the descriptor was stale, or */
                    /* we encountered the 2.3 _lwp_suspend bug. */
                    uncache_lwp(GC_current_ids[i]);
                    GC_current_ids[i] = me; /* handle next time. */
                } else {
                    while (!(status.pr_flags & PR_STOPPED)) {
                        GC_msec_sleep(1);
                        if (syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &status) < 0)
                            ABORT("Repeated PIOCSTATUS failed");
                        if (status.pr_flags & PR_STOPPED) break;
                        GC_msec_sleep(20);
                        if (syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &status) < 0)
                            ABORT("Repeated PIOCSTATUS failed");
                    }
                    if (status.pr_who != GC_current_ids[i]) {
                        /* can happen if thread was on death row */
                        uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* handle next time. */
                        continue;
                    }
                    /* Save registers where collector can find them. */
                    BCOPY(status.pr_reg, GC_lwp_registers[i],
                          sizeof (prgregset_t));
                }
            }
        }
        if (!changed) break;
        for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
    }
}
/* Restart all lwps in process.  Assumes preemption is off.	*/
#   define PARANOID	/* Check that the world really was stopped. */
static void restart_all_lwps()
{
    register int i;
    lwpid_t me = _lwp_self();

    for (i = 0; GC_current_ids[i] != 0; i++) {
#	ifdef PARANOID
        if (GC_current_ids[i] != me) {
            int lwp_fd = open_lwp(GC_current_ids[i]);
            prstatus_t status;

            if (lwp_fd < 0) ABORT("open_lwp failed");
            if (syscall(SYS_ioctl, lwp_fd, PIOCSTATUS, &status) < 0) {
                ABORT("PIOCSTATUS failed in restart_all_lwps");
            }
            if (memcmp(status.pr_reg, GC_lwp_registers[i],
                       sizeof (prgregset_t)) != 0) {
                int j;

                for (j = 0; j < NPRGREG; j++) {
                    GC_printf3("%i: %x -> %x\n", j,
                               GC_lwp_registers[i][j],
                               status.pr_reg[j]);
                }
                ABORT("Register contents changed");
            }
            if (!(status.pr_flags & PR_STOPPED)) {
                ABORT("lwp no longer stopped");
            }
#ifdef SPARC
            {
                gwindows_t windows;

                if (syscall(SYS_ioctl, lwp_fd, PIOCGWIN, &windows) < 0) {
                    ABORT("PIOCGWIN failed in restart_all_lwps");
                }
                if (windows.wbcnt > 0) ABORT("unsaved register windows");
            }
#endif
        }
#	endif /* PARANOID */
        if (GC_current_ids[i] == me) continue;
        if (_lwp_continue(GC_current_ids[i]) < 0) {
            ABORT("Failed to restart lwp");
        }
    }
    if (i >= max_lwps) ABORT("Too many lwps");
}
GC_bool GC_multithreaded = 0;

void GC_stop_world()
{
    preempt_off();
    if (GC_multithreaded)
        stop_all_lwps();
}

void GC_start_world()
{
    if (GC_multithreaded)
        restart_all_lwps();
    preempt_on();
}

void GC_thr_init(void);

GC_bool GC_thr_initialized = FALSE;
size_t GC_min_stack_sz;

/*
 * stack_head is stored at the top of free stacks.
 */
struct stack_head {
    struct stack_head *next;
    ptr_t base;
};

# define N_FREE_LISTS 25
struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
		/* GC_stack_free_lists[i] is free list for stacks of	*/
		/* size GC_min_stack_sz*2**i.				*/
		/* Free lists are linked through stack_head stored	*/
		/* at top of stack.					*/
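/* Worked example (illustrative numbers only; the actual minimum is	*/
/* computed in GC_thr_init): if GC_min_stack_sz came out to 64K,	*/
/* list 0 holds 64K stacks, list 1 128K, list 2 256K, and so on; a	*/
/* 200K request is rounded up to the 256K class (index 2).		*/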
/* Return a stack of size at least *stack_size.  *stack_size is	*/
/* replaced by the actual stack size.					*/
/* Caller holds allocation lock.					*/
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;	/* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t base;
    register struct stack_head *result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        base = GC_stack_free_lists[index]->base;
        GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
    } else {
#ifdef MMAP_STACKS
        base = (ptr_t)mmap(0, search_sz + GC_page_size,
                           PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE,
                           GC_zfd, 0);
        if (base == (ptr_t)-1) {
            *stack_size = 0;
            return NULL;
        }
        mprotect(base, GC_page_size, PROT_NONE);
        /* Should this use divHBLKSZ(search_sz + GC_page_size) ? -- cf */
        GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
        base += GC_page_size;
#else
        base = (ptr_t)GC_scratch_alloc(search_sz + 2*GC_page_size);
        if (base == NULL) {
            *stack_size = 0;
            return NULL;
        }
        base = (ptr_t)(((word)base + GC_page_size) & ~(GC_page_size - 1));
        /* Protect hottest page to detect overflow. */
#	ifdef SOLARIS23_MPROTECT_BUG_FIXED
            mprotect(base, GC_page_size, PROT_NONE);
#	endif
        GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
        base += GC_page_size;
#endif
    }
    *stack_size = search_sz;
    return(base);
}
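/* The PROT_NONE page placed below the usable region acts as a guard	*/
/* page: a stack overflow faults immediately instead of silently	*/
/* corrupting adjacent memory.						*/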
/* Caller holds allocation lock.					*/
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;
    register struct stack_head *head;

#ifdef MMAP_STACKS
    /* Remap to zero out old pointers. */
    mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
         GC_zfd, 0);
#endif
    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");
    head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
    head->next = GC_stack_free_lists[index];
    head->base = stack;
    GC_stack_free_lists[index] = head;
}
void GC_my_stack_limits();

/* Notify virtual dirty bit implementation that known empty parts of	*/
/* stacks do not contain useful data.					*/
/* Caller holds allocation lock.					*/
void GC_old_stacks_are_fresh()
{
/* No point in doing this for MMAP stacks - and pointers are zero'd out */
/* by the mmap in GC_stack_free.					 */
#ifndef MMAP_STACKS
    register int i;
    register struct stack_head *s;
    register ptr_t p;
    register size_t sz;
    register struct hblk * h;

    for (i = 0, sz = GC_min_stack_sz; i < N_FREE_LISTS;
         i++, sz *= 2) {
        for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
            p = s->base;
            h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
            if ((ptr_t)h == p) {
                GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
            } else {
                GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
                BZERO(p, (ptr_t)h - p);
            }
        }
    }
#endif /* MMAP_STACKS */
    GC_my_stack_limits();
}
/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/

# define THREAD_TABLE_SZ 128	/* Must be power of 2	*/
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
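/* Threads hash to bucket (word)id % THREAD_TABLE_SZ and chain through	*/
/* the next field; keeping the table size a power of 2 lets the	*/
/* modulus compile down to a mask.					*/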
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> finished = 0; */
    (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p -> id != id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return the GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock.				*/
GC_thread GC_lookup_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && p -> id != id) p = p -> next;
    return(p);
}
/* Solaris 2/Intel uses an initial stack size limit slightly bigger than
   the SPARC default of 8 MB.  Account for this to warn only if the user
   has raised the limit beyond the default.

   This is identical to DFLSSIZ defined in <sys/vm_machparam.h>.  That file
   is installed in /usr/platform/`uname -m`/include, which is not in the
   default include directory list, so copy the definition here.  */
#ifdef I386
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024 + ((USRSTACK) & 0x3FFFFF))
#else
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
#endif

word GC_get_orig_stack_size() {
    struct rlimit rl;
    static int warned = 0;
    word result;

    if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
    result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
    if (result > MAX_ORIG_STACK_SIZE) {
        if (!warned) {
            WARN("Large stack limit(%ld): only scanning 8 MB\n", result);
            warned = 1;
        }
        result = MAX_ORIG_STACK_SIZE;
    }
    return result;
}
/* Notify dirty bit implementation of unused parts of my stack. */
/* Caller holds allocation lock.				 */
void GC_my_stack_limits()
{
    int dummy;
    register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
    register GC_thread me = GC_lookup_thread(thr_self());
    register size_t stack_size = me -> stack_size;
    register ptr_t stack;

    if (stack_size == 0) {
        /* original thread */
        /* Empirically, what should be the stack page with lowest	*/
        /* address is actually inaccessible.				*/
        stack_size = GC_get_orig_stack_size() - GC_page_size;
        stack = GC_stackbottom - stack_size + GC_page_size;
    } else {
        stack = me -> stack;
    }
    if (stack > hottest || stack + stack_size < hottest) {
        ABORT("sp out of bounds");
    }
    GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
}
/* We hold allocation lock.  Should do exactly the right thing if the	*/
/* world is stopped.  Should not fail if it isn't.			*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t bottom, top;

#   define PUSH(bottom,top) \
      if (GC_dirty_maintained) { \
        GC_push_selected((bottom), (top), GC_page_was_ever_dirty, \
                         GC_push_all_stack); \
      } else { \
        GC_push_all_stack((bottom), (top)); \
      }
    GC_push_all_stack((ptr_t)GC_lwp_registers,
                      (ptr_t)GC_lwp_registers
                      + max_lwps * sizeof(GC_lwp_registers[0]));
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> stack_size != 0) {
                bottom = p -> stack;
                top = p -> stack + p -> stack_size;
            } else {
                /* The original stack. */
                bottom = GC_stackbottom - GC_get_orig_stack_size()
                         + GC_page_size;
                top = GC_stackbottom;
            }
            if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
            PUSH(bottom, top);
        }
    }
}
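/* The bottom = sp adjustment above presumably keeps us from scanning	*/
/* the dead portion of the collector's own stack below its current	*/
/* stack pointer.							*/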
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> stack_size != 0
                && p -> stack <= addr
                && addr < p -> stack + p -> stack_size)
                return 1;
        }
    }
    return 0;
}
/* The only thread that ever really performs a thr_join.	*/
void * GC_thr_daemon(void * dummy)
{
    void *status;
    thread_t departed;
    register GC_thread t;
    register int i;
    register int result;

    for (;;) {
      start:
        result = thr_join((thread_t)0, &departed, &status);
        LOCK();
        if (result != 0) {
            /* No more threads; wait for create. */
            for (i = 0; i < THREAD_TABLE_SZ; i++) {
                for (t = GC_threads[i]; t != 0; t = t -> next) {
                    if (!(t -> flags & (DETACHED | FINISHED))) {
                        UNLOCK();
                        goto start;	/* Thread started just before we */
                                        /* acquired the lock.		 */
                    }
                }
            }
            cond_wait(&GC_create_cv, &GC_allocate_ml);
            UNLOCK();
        } else {
            t = GC_lookup_thread(departed);
            GC_multithreaded--;
            if (!(t -> flags & CLIENT_OWNS_STACK))
                GC_stack_free(t -> stack, t -> stack_size);
            if (t -> flags & DETACHED) {
                GC_delete_thread(departed);
            } else {
                t -> status = status;
                t -> flags |= FINISHED;
                cond_signal(&(t -> join_cv));
                cond_broadcast(&GC_prom_join_cv);
            }
            UNLOCK();
        }
    }
}
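/* Funneling every join through this daemon means the GC learns of	*/
/* each thread's exit, so it can reclaim GC-allocated stacks and mark	*/
/* the thread FINISHED; client joins are then simulated via join_cv	*/
/* and GC_prom_join_cv in GC_thr_join below.				*/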
/* We hold the allocation lock, or caller ensures that 2 instances	*/
/* cannot be invoked concurrently.					*/
void GC_thr_init(void)
{
    GC_thread t;
    thread_t tid;
    int ret;

    if (GC_thr_initialized)
        return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
                       & ~(HBLKSIZE - 1));
#ifdef MMAP_STACKS
    GC_zfd = open("/dev/zero", O_RDONLY);
    if (GC_zfd == -1)
        ABORT("Can't open /dev/zero");
#endif /* MMAP_STACKS */
    cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
    cond_init(&GC_create_cv, USYNC_THREAD, 0);
    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(thr_self());
    t -> stack_size = 0;
    t -> flags = DETACHED | CLIENT_OWNS_STACK;
    ret = thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
                     0 /* arg */, THR_DETACHED | THR_DAEMON,
                     &tid /* thread_id */);
    if (ret != 0) {
        GC_err_printf1("Thr_create returned %ld\n", ret);
        ABORT("Can't fork daemon");
    }
    thr_setprio(tid, 126);
}
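/* The priority 126 above is near the top of the range; presumably	*/
/* this is so the daemon reaps terminating threads promptly.		*/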
/* We acquire the allocation lock to prevent races with	*/
/* stopping/starting the world.					*/
/* This is no more correct than the underlying Solaris 2.X	*/
/* implementation.  Under 2.3 THIS IS BROKEN.			*/
int GC_thr_suspend(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_suspend(target_thread);
    if (result == 0) {
        t = GC_lookup_thread(target_thread);
        if (t == 0) ABORT("thread unknown to GC");
        t -> flags |= SUSPNDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_continue(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_continue(target_thread);
    if (result == 0) {
        t = GC_lookup_thread(target_thread);
        if (t == 0) ABORT("thread unknown to GC");
        t -> flags &= ~SUSPNDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
{
    register GC_thread t;
    int result = 0;

    LOCK();
    if (wait_for == 0) {
        register int i;
        register GC_bool thread_exists;

        for (;;) {
            thread_exists = FALSE;
            for (i = 0; i < THREAD_TABLE_SZ; i++) {
                for (t = GC_threads[i]; t != 0; t = t -> next) {
                    if (!(t -> flags & DETACHED)) {
                        if (t -> flags & FINISHED) goto found;
                        thread_exists = TRUE;
                    }
                }
            }
            if (!thread_exists) { result = ESRCH; goto out; }
            cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
        }
    } else {
        t = GC_lookup_thread(wait_for);
        if (t == 0 || t -> flags & DETACHED) { result = ESRCH; goto out; }
        if (wait_for == thr_self()) { result = EDEADLK; goto out; }
        while (!(t -> flags & FINISHED)) {
            cond_wait(&(t -> join_cv), &GC_allocate_ml);
        }
    }
  found:
    if (status) *status = t -> status;
    if (departed) *departed = t -> id;
    cond_destroy(&(t -> join_cv));
    GC_delete_thread(t -> id);
  out:
    UNLOCK();
    return(result);
}
int
GC_thr_create(void *stack_base, size_t stack_size,
              void *(*start_routine)(void *), void *arg, long flags,
              thread_t *new_thread)
{
    int result;
    GC_thread t;
    thread_t my_new_thread;
    word my_flags = 0;
    void * stack = stack_base;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    GC_multithreaded++;
    if (stack == 0) {
        if (stack_size == 0) stack_size = 1024*1024;
        stack = (void *)GC_stack_alloc(&stack_size);
        if (stack == 0) { GC_multithreaded--; UNLOCK(); return(ENOMEM); }
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    if (flags & THR_DETACHED) my_flags |= DETACHED;
    if (flags & THR_SUSPENDED) my_flags |= SUSPNDED;
    result = thr_create(stack, stack_size, start_routine,
                        arg, flags & ~THR_DETACHED, &my_new_thread);
    if (result == 0) {
        t = GC_new_thread(my_new_thread);
        t -> flags = my_flags;
        if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
        t -> stack = stack;
        t -> stack_size = stack_size;
        if (new_thread != 0) *new_thread = my_new_thread;
        cond_signal(&GC_create_cv);
    } else {
        GC_multithreaded--;
        if (!(my_flags & CLIENT_OWNS_STACK)) GC_stack_free(stack, stack_size);
        UNLOCK();
        return(result);
    }
    UNLOCK();
    return(0);
}
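/* Note that THR_DETACHED is stripped before the underlying thr_create	*/
/* call: threads are always created joinable so GC_thr_daemon can	*/
/* reap them, and detachment is tracked only in my_flags.		*/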
# else /* !GC_SOLARIS_THREADS */

int GC_no_sunOS_threads;

# endif /* GC_SOLARIS_THREADS */