 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 * Note that I_HOLD_LOCK and I_DONT_HOLD_LOCK are used only positively
 * in assertions, and may return TRUE in the "don't know" case.
 */
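/* The FASTLOCK protocol is easy to get wrong, since FASTUNLOCK() must */
/* run on both the success and the failure path.  The sketch below is  */
/* purely illustrative (GC_example_critical_section is not part of the */
/* collector); it assumes the platform defines the macros above.       */
#if 0
  void GC_example_critical_section(void)
  {
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        FASTUNLOCK();           /* required even on failure    */
        LOCK();                 /* fall back to the full lock  */
        /* ... a few protected instructions ... */
        UNLOCK();
        return;
    }
    /* ... a few protected instructions, no signals if unsafe ... */
    FASTUNLOCK();
  }
#endif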
    void GC_noop1 GC_PROTO((word));
#   ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's */
#     include "th/PCR_Th.h"
#     include "th/PCR_ThCrSec.h"
      extern struct PCR_Th_MLRep GC_allocate_ml;
#     define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
#     define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#     define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#     define FASTLOCK() PCR_ThCrSec_EnterSys()
      /* Here we cheat (a lot): */
#     define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
        /* TRUE if nobody currently holds the lock */
#     define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#   endif
#   ifdef PCR
#     include <base/PCR_Base.h>
#     include <th/PCR_Th.h>
      extern PCR_Th_ML GC_allocate_ml;
#     define DCL_LOCK_STATE \
          PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#     define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#     define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#     define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#     define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#     define FASTUNLOCK() {\
          if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#   endif
#   ifdef SRC_M3
      extern GC_word RT0u__inCritical;
#     define LOCK() RT0u__inCritical++
#     define UNLOCK() RT0u__inCritical--
#   endif
#   ifdef GC_SOLARIS_THREADS
#     include <thread.h>
      extern mutex_t GC_allocate_ml;
#     define LOCK() mutex_lock(&GC_allocate_ml);
#     define UNLOCK() mutex_unlock(&GC_allocate_ml);
#   endif
/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release.  We need this for correct operation of the */
/* parallel marker.                                                    */
#   if defined(__GNUC__)
#     if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* Note: the "xchg" instruction does not need a "lock" prefix */
         __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* I386 */
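      /* For readers unfamiliar with the xchg idiom: the semantics that   */
      /* GC_test_and_set and GC_clear must provide match C11's            */
      /* atomic_flag, as the purely illustrative C11 sketch below shows   */
      /* (it is not part of the collector).  An acquire barrier on the    */
      /* set and a release barrier on the clear are the minimum ordering  */
      /* a lock needs.                                                    */
#     if 0
#       include <stdatomic.h>

        static atomic_flag example_lock = ATOMIC_FLAG_INIT;

        static int example_test_and_set(void) {
          /* Nonzero iff the lock was already held, like GC_test_and_set. */
          return atomic_flag_test_and_set_explicit(&example_lock,
                                                   memory_order_acquire);
        }

        static void example_clear(void) {
          /* Releases the lock, like GC_clear. */
          atomic_flag_clear_explicit(&example_lock, memory_order_release);
        }
#     endif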
#     if defined(IA64)
#      if defined(__INTEL_COMPILER)
#        include <ia64intrin.h>
#      endif
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval, n = 1;
#        ifndef __INTEL_COMPILER
         __asm__ __volatile__("xchg4 %0=%1,%2"
                : "=r"(oldval), "=m"(*addr)
                : "r"(n), "1"(*addr) : "memory");
#        else
         oldval = _InterlockedExchange(addr, n);
#        endif
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
       /* Should this handle post-increment addressing?? */
       inline static void GC_clear(volatile unsigned int *addr) {
#        ifndef __INTEL_COMPILER
         __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
#        else
         /* There is no st4 intrinsic; an xchg serves as a release store. */
         _InterlockedExchange(addr, 0);
#        endif
       }
#      define GC_CLEAR_DEFINED
#     endif /* IA64 */
#     ifdef SPARC
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;

         __asm__ __volatile__("ldstub %1,%0"
                : "=r"(oldval), "=m"(*addr)
                : "m"(*addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* SPARC */
#     ifdef M68K
       /* Contributed by Tony Mantler.  I'm not sure how well it was */
       /* tested.                                                    */
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         char oldval; /* this must be no longer than 8 bits */

         /* The return value is semi-phony.   */
         /* 'tas' sets bit 7 while the return */
         /* value pretends bit 0 was set      */
         __asm__ __volatile__(
                "tas %1@; sne %0; negb %0"
                : "=d" (oldval)
                : "a" (addr) : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* M68K */
157
# if defined(POWERPC)
158
inline static int GC_test_and_set(volatile unsigned int *addr) {
160
int temp = 1; /* locked value */
162
__asm__ __volatile__(
163
"1:\tlwarx %0,0,%3\n" /* load and reserve */
164
"\tcmpwi %0, 0\n" /* if load is */
165
"\tbne 2f\n" /* non-zero, return already set */
166
"\tstwcx. %2,0,%1\n" /* else store conditional */
167
"\tbne- 1b\n" /* retry if lost reservation */
168
"\tsync\n" /* import barrier */
169
"2:\t\n" /* oldval is zero if we set */
170
: "=&r"(oldval), "=p"(addr)
171
: "r"(temp), "1"(addr)
175
# define GC_TEST_AND_SET_DEFINED
176
inline static void GC_clear(volatile unsigned int *addr) {
177
__asm__ __volatile__("lwsync" : : : "memory");
180
# define GC_CLEAR_DEFINED
#     if defined(ALPHA)
       inline static int GC_test_and_set(volatile unsigned int * addr)
       {
         unsigned long oldvalue;
         unsigned long temp;

         __asm__ __volatile__(
                "1:     ldl_l %0,%1\n"
                "       and %0,%3,%2\n"
                "       bne %2,2f\n"
                "       xor %0,%3,%0\n"
                "       stl_c %0,%1\n"
#       ifdef __ELF__
                "       beq %0,3f\n"
#       else
                "       beq %0,1b\n"
#       endif
                "       mb\n"
                "2:\n"
#       ifdef __ELF__
                ".section .text2,\"ax\"\n"
                "3:     br 1b\n"
                ".previous"
#       endif
                :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                :"Ir" (1), "m" (*addr)
                :"memory");

         return oldvalue;
       }
#      define GC_TEST_AND_SET_DEFINED
       inline static void GC_clear(volatile unsigned int *addr) {
         __asm__ __volatile__("mb" : : : "memory");
         *(addr) = 0;
       }
#      define GC_CLEAR_DEFINED
#     endif /* ALPHA */
#     ifdef ARM32
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int oldval;
         /* SWP on ARM is very similar to XCHG on x86.                 */
         /* The first operand is the result, the second the value      */
         /* to be stored.  Both registers must be different from addr. */
         /* Make the address operand an early clobber output so it     */
         /* doesn't overlap with the other operands.  The early clobber*/
         /* on oldval is necessary to prevent the compiler allocating  */
         /* them to the same register if they are both unused.         */
         __asm__ __volatile__("swp %0, %2, [%3]"
                : "=&r"(oldval), "=&r"(addr)
                : "r"(1), "1"(addr)
                : "memory");
         return oldval;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* ARM32 */
#     ifdef CRIS
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h. */
         /* Included with Hans-Peter Nilsson's permission.      */
         register unsigned long int ret;

         /* Note the use of a dummy output of *addr to expose the write.
          * The memory barrier is to stop *other* writes being moved past
          * this code.
          */
         __asm__ __volatile__("clearf\n"
                "0:\n\t"
                "movu.b [%2],%0\n\t"
                "ax\n\t"
                "move.b %3,[%2]\n\t"
                "bwf 0b\n\t"
                "clearf"
                : "=&r" (ret), "=m" (*addr)
                : "r" (addr), "r" ((int) 1), "m" (*addr)
                : "memory");
         return ret;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* CRIS */
#     ifdef S390
       inline static int GC_test_and_set(volatile unsigned int *addr) {
         int ret;
         __asm__ __volatile__ (
                "   lhi  %0,0\n"
                "0: cs %0,%1,0(%2)\n"
                "   jl   0b\n"
                : "=&d" (ret)
                : "d" (1), "a" (addr)
                : "cc", "memory");
         return ret;
       }
#      define GC_TEST_AND_SET_DEFINED
#     endif /* S390 */
#   endif /* __GNUC__ */
#   if (defined(ALPHA) && !defined(__GNUC__))
#     ifndef OSF1
        --> We currently assume that if gcc is not used, we are
        --> running under Tru64.
#     endif
#     include <machine/builtins.h>
#     include <c_asm.h>
#     define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#     define GC_TEST_AND_SET_DEFINED
#     define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#     define GC_CLEAR_DEFINED
#   endif
#   if defined(MSWIN32)
#     define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#     define GC_TEST_AND_SET_DEFINED
#   endif
#   ifdef MIPS
#     ifdef LINUX
#       include <sys/tas.h>
#       define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#       define GC_TEST_AND_SET_DEFINED
#     elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
           || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#       ifdef __GNUC__
#         define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#       else
#         define GC_test_and_set(addr) test_and_set((void *)addr,1)
#       endif
#     else
#       include <sgidefs.h>
#       include <mutex.h>
#       define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#       define GC_clear(addr) __lock_release(addr);
#       define GC_CLEAR_DEFINED
#     endif
#     define GC_TEST_AND_SET_DEFINED
#   endif /* MIPS */
#   if defined(_AIX)
#     include <sys/atomic_op.h>
#     if (defined(_POWER) || defined(_POWERPC))
#       if defined(__GNUC__)
          inline static void GC_memsync() {
            __asm__ __volatile__ ("sync" : : : "memory");
          }
#       else
#         ifndef inline
#           define inline __inline
#         endif
#         pragma mc_func GC_memsync { \
            "7c0004ac" /* sync (same opcode used for dcs) */ \
          }
#       endif
#     else
#       error dont know how to memsync
#     endif
      inline static int GC_test_and_set(volatile unsigned int * addr) {
        int oldvalue = 0;
        if (compare_and_swap((void *)addr, &oldvalue, 1)) {
          GC_memsync();
          return 0;
        } else return 1;
      }
#     define GC_TEST_AND_SET_DEFINED
      inline static void GC_clear(volatile unsigned int *addr) {
        GC_memsync();
        *(addr) = 0;
      }
#     define GC_CLEAR_DEFINED
#   endif /* _AIX */
#   if 0 /* defined(HP_PA) */
      /* The official recommendation seems to be to not use ldcw from   */
      /* user mode.  Since multithreaded incremental collection doesn't */
      /* work anyway on HP_PA, this shouldn't be a major loss.          */

      /* "set" means 0 and "clear" means 1 here. */
#     define GC_test_and_set(addr) !GC_test_and_clear(addr);
#     define GC_TEST_AND_SET_DEFINED
#     define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#     define GC_CLEAR_DEFINED
#   endif
#   if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#     ifdef __GNUC__
        inline static void GC_clear(volatile unsigned int *addr) {
          /* Try to discourage gcc from moving anything past this. */
          __asm__ __volatile__(" " : : : "memory");
          *(addr) = 0;
        }
#     else
        /* The function call in the following should prevent the */
        /* compiler from moving assignments to below the UNLOCK. */
#       define GC_clear(addr) GC_noop1((word)(addr)); \
                              *((volatile unsigned int *)(addr)) = 0;
#     endif
#     define GC_CLEAR_DEFINED
#   endif /* !GC_CLEAR_DEFINED */

#   if !defined(GC_TEST_AND_SET_DEFINED)
#     define USE_PTHREAD_LOCKS
#   endif
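    /* How the GC_test_and_set/GC_clear pair is meant to be used,       */
    /* sketched with a trivial spin loop.  Purely illustrative and not  */
    /* part of the collector: the real slow path lives in GC_lock().    */
#   if 0
      static volatile unsigned int example_spin_lock = 0;

      static void example_lock(void) {
        while (GC_test_and_set(&example_spin_lock)) {
          /* A real implementation would back off here instead of       */
          /* hammering the cache line: spin reading until the lock      */
          /* looks clear, then retry the atomic operation.              */
          while (example_spin_lock) {}
        }
      }

      static void example_unlock(void) {
        GC_clear(&example_spin_lock);  /* release barrier + store of 0 */
      }
#   endif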
#   if defined(GC_WIN32_THREADS) && defined(GC_PTHREADS)
#     define USE_PTHREAD_LOCKS
#   endif
#   if defined(GC_WIN32_THREADS) && !defined(USE_PTHREAD_LOCKS)
#     include <windows.h>
#     define NO_THREAD (DWORD)(-1)
      extern DWORD GC_lock_holder;
      GC_API CRITICAL_SECTION GC_allocate_ml;
#     ifdef GC_ASSERTIONS
#       define UNCOND_LOCK() \
                { EnterCriticalSection(&GC_allocate_ml); \
                  SET_LOCK_HOLDER(); }
#       define UNCOND_UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  LeaveCriticalSection(&GC_allocate_ml); }
#     else
#       define UNCOND_LOCK() EnterCriticalSection(&GC_allocate_ml);
#       define UNCOND_UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#     endif /* !GC_ASSERTIONS */
#     define SET_LOCK_HOLDER() GC_lock_holder = GetCurrentThreadId()
#     define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#     define I_HOLD_LOCK() (!GC_need_to_lock \
                            || GC_lock_holder == GetCurrentThreadId())
#     define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
                            || GC_lock_holder != GetCurrentThreadId())
#   elif defined(GC_PTHREADS)
#     include <pthread.h>
      /* Posix allows pthread_t to be a struct, though it rarely is.  */
      /* Unfortunately, we need to use a pthread_t to index a data    */
      /* structure.  It also helps if comparisons don't involve a     */
      /* function call.  Hence we introduce platform-dependent macros */
      /* to compare pthread_t ids and to map them to integers.        */
      /* The mapping to integers does not need to result in different */
      /* integers for each thread, though that should be true as much */
      /* as possible.                                                 */
      /* Refine to exclude platforms on which pthread_t is struct.    */
#     if !defined(GC_WIN32_PTHREADS)
#       define NUMERIC_THREAD_ID(id) ((unsigned long)(id))
#       define THREAD_EQUAL(id1, id2) ((id1) == (id2))
#       define NUMERIC_THREAD_ID_UNIQUE
#     else
#       define NUMERIC_THREAD_ID(id) ((unsigned long)(id.p))
          /* Using documented internal details of win32_pthread library. */
          /* Faster than pthread_equal().  Should not change with        */
          /* future versions of win32_pthread library.                   */
#       define THREAD_EQUAL(id1, id2) ((id1.p == id2.p) && (id1.x == id2.x))
#       undef NUMERIC_THREAD_ID_UNIQUE
#     endif
#     if 0
        /* Generic definitions that always work, but will result in */
        /* poor performance and weak assertion checking.            */
#       define NUMERIC_THREAD_ID(id) 1l
#       define THREAD_EQUAL(id1, id2) pthread_equal(id1, id2)
#       undef NUMERIC_THREAD_ID_UNIQUE
#     endif
#     define NO_THREAD ((unsigned long)(-1l))
                /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
#     if defined(PARALLEL_MARK)
        /* We need compare-and-swap to update mark bits, where it's     */
        /* performance critical.  If USE_MARK_BYTES is defined, it is   */
        /* no longer needed for this purpose.  However we use it in     */
        /* either case to implement atomic fetch-and-add, though that's */
        /* less performance critical, and could perhaps be done with    */
        /* a lock.                                                      */
#       if defined(GENERIC_COMPARE_AND_SWAP)
          /* Probably not useful, except for debugging.            */
          /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
          /* minimize its use.                                     */
          extern pthread_mutex_t GC_compare_and_swap_lock;

          /* Note that if GC_word updates are not atomic, a concurrent */
          /* reader should acquire GC_compare_and_swap_lock.  On       */
          /* currently supported platforms, such updates are atomic.   */
          extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                 GC_word old, GC_word new_val);
#       endif /* GENERIC_COMPARE_AND_SWAP */
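        /* The generic version is implemented elsewhere in the collector;  */
        /* the sketch below (with a hypothetical body, not the collector's */
        /* actual code) shows the idea: serialize every compare-and-swap   */
        /* through GC_compare_and_swap_lock.                               */
#       if 0
          GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                          GC_word old, GC_word new_val)
          {
            GC_bool result;

            pthread_mutex_lock(&GC_compare_and_swap_lock);
            if (*addr == old) {
                *addr = new_val;   /* comparison succeeded; do the swap */
                result = TRUE;
            } else {
                result = FALSE;
            }
            pthread_mutex_unlock(&GC_compare_and_swap_lock);
            return result;
          }
#       endif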
#       if defined(I386)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
          /* Returns TRUE if the comparison succeeded. */
          inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                        GC_word old,
                                                        GC_word new_val)
          {
            char result;
            __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                : "+m"(*(addr)), "=r"(result)
                : "r" (new_val), "a"(old) : "memory");
            return (GC_bool) result;
          }
#        endif /* !GENERIC_COMPARE_AND_SWAP */
          inline static void GC_memory_barrier()
          {
            /* We believe the processor ensures at least processor */
            /* consistent ordering.  Thus a compiler barrier       */
            /* should suffice.                                     */
            __asm__ __volatile__("" : : : "memory");
          }
#       endif /* I386 */
#       if defined(POWERPC)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
#         if CPP_WORDSZ == 64
           /* Returns TRUE if the comparison succeeded. */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
               GC_word old, GC_word new_val)
           {
             unsigned long result, dummy;
             __asm__ __volatile__(
                 "1:\tldarx %0,0,%5\n"
                   "\tcmpd %0,%4\n"
                   "\tbne  2f\n"
                   "\tstdcx. %3,0,%2\n"
                   "\tbne- 1b\n"
                   "\tsync\n"
                   "\tli %1, 1\n"
                   "\tb 3f\n"
                 "2:\tli %1, 0\n"
                 "3:\t\n"
                 : "=&r" (dummy), "=r" (result), "=p" (addr)
                 : "r" (new_val), "r" (old), "2"(addr)
                 : "cr0","memory");
             return (GC_bool) result;
           }
#         else
           /* Returns TRUE if the comparison succeeded. */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
               GC_word old, GC_word new_val)
           {
             int result, dummy;
             __asm__ __volatile__(
                 "1:\tlwarx %0,0,%5\n"
                   "\tcmpw %0,%4\n"
                   "\tbne  2f\n"
                   "\tstwcx. %3,0,%2\n"
                   "\tbne- 1b\n"
                   "\tsync\n"
                   "\tli %1, 1\n"
                   "\tb 3f\n"
                 "2:\tli %1, 0\n"
                 "3:\t\n"
                 : "=&r" (dummy), "=r" (result), "=p" (addr)
                 : "r" (new_val), "r" (old), "2"(addr)
                 : "cr0","memory");
             return (GC_bool) result;
           }
#         endif
#        endif /* !GENERIC_COMPARE_AND_SWAP */
         inline static void GC_memory_barrier()
         {
           __asm__ __volatile__("sync" : : : "memory");
         }
#       endif /* POWERPC */
#       if defined(IA64)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
          inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                        GC_word old,
                                                        GC_word new_val)
          {
            unsigned long oldval;
#           if CPP_WORDSZ == 32
              __asm__ __volatile__(
                    "addp4 %0=0,%1\n"
                    "mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%0],%2,ar.ccv"
                    : "=&r"(oldval)
                    : "r"(addr), "r"(new_val), "r"(old) : "memory");
#           else
              __asm__ __volatile__(
                    "mov ar.ccv=%3 ;; cmpxchg8.rel %0=[%1],%2,ar.ccv"
                    : "=r"(oldval)
                    : "r"(addr), "r"(new_val), "r"(old) : "memory");
#           endif
            return (oldval == old);
          }
#        endif /* !GENERIC_COMPARE_AND_SWAP */
          /* Shouldn't be needed; we use volatile stores instead. */
          inline static void GC_memory_barrier()
          {
            __asm__ __volatile__("mf" : : : "memory");
          }
#       endif /* IA64 */
#       if defined(ALPHA)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
#         if defined(__GNUC__)
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old,
                                                         GC_word new_val)
           {
             unsigned long was_equal;
             unsigned long temp;

             __asm__ __volatile__(
                 "1:     ldq_l %0,%1\n"
                 "       cmpeq %0,%4,%2\n"
                 "       mov %3,%0\n"
                 "       beq %2,2f\n"
                 "       stq_c %0,%1\n"
                 "       beq %0,1b\n"
                 "2:\n"
                 "       mb\n"
                 :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
                 : "r" (new_val), "Ir" (old)
                 :"memory");
             return was_equal;
           }
#         else /* !__GNUC__ */
           /* Use the Tru64 compiler's <machine/builtins.h> primitive. */
           inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                         GC_word old,
                                                         GC_word new_val)
           {
             return __CMP_STORE_QUAD(addr, old, new_val, addr);
           }
#         endif /* !__GNUC__ */
#        endif /* !GENERIC_COMPARE_AND_SWAP */
#        ifdef __GNUC__
          inline static void GC_memory_barrier()
          {
            __asm__ __volatile__("mb" : : : "memory");
          }
#        else
#          define GC_memory_barrier() asm("mb")
#        endif /* !__GNUC__ */
#       endif /* ALPHA */
#       if defined(S390)
#        if !defined(GENERIC_COMPARE_AND_SWAP)
          inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                        GC_word old,
                                                        GC_word new_val)
          {
            int retval;
            __asm__ __volatile__ (
#             ifndef __s390x__
                "     cs  %1,%2,0(%3)\n"
#             else
                "     csg %1,%2,0(%3)\n"
#             endif
              "     ipm  %0\n"
              "     srl  %0,28\n"
              : "=&d" (retval), "+d" (old)
              : "d" (new_val), "a" (addr)
              : "cc", "memory");
            return retval == 0;
          }
#        endif /* !GENERIC_COMPARE_AND_SWAP */
#       endif /* S390 */
#       if !defined(GENERIC_COMPARE_AND_SWAP)
          /* Returns the original value of *addr. */
          inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                              GC_word how_much)
          {
            GC_word old;
            do {
              old = *addr;
            } while (!GC_compare_and_exchange(addr, old, old+how_much));
            return old;
          }
#       else /* GENERIC_COMPARE_AND_SWAP */
          /* So long as a GC_word can be atomically updated, it should */
          /* be OK to read *addr without a lock.                       */
          extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#       endif /* GENERIC_COMPARE_AND_SWAP */
#     endif /* PARALLEL_MARK */
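      /* Typical use of the fetch-and-add primitive, e.g. for handing */
      /* out distinct indices to concurrent markers.  This sketch is  */
      /* hypothetical; GC_example_counter is not a collector variable:*/
#     if 0
        volatile GC_word GC_example_counter = 0;

        void GC_example_claim(void)
        {
          /* my_index gets the pre-increment value, so concurrent */
          /* callers each receive a distinct index.               */
          GC_word my_index = GC_atomic_add(&GC_example_counter, 1);
          GC_noop1(my_index);  /* silence "unused" warnings in the sketch */
        }
#     endif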
#     if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
        /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to   */
        /* be held for long periods, if it is held at all.  Thus spinning */
        /* and sleeping for fixed periods are likely to result in         */
        /* significant wasted time.  We thus rely mostly on queued locks. */
#       define USE_SPIN_LOCK
        extern volatile unsigned int GC_allocate_lock;
        extern void GC_lock(void);
          /* Allocation lock holder.  Only set if acquired by client */
          /* through GC_call_with_alloc_lock.                        */
#       ifdef GC_ASSERTIONS
#         define UNCOND_LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#         define UNCOND_UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#       else
#         define UNCOND_LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#         define UNCOND_UNLOCK() GC_clear(&GC_allocate_lock)
#       endif /* !GC_ASSERTIONS */
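        /* GC_lock() itself is defined elsewhere in the collector.  A    */
        /* hypothetical sketch of the usual slow path, to show why the   */
        /* fast path above only tries the test-and-set once:             */
#       if 0
#         include <sched.h>

          void GC_example_lock(void)
          {
            unsigned spin_count = 0;

            for (;;) {
              if (!GC_test_and_set(&GC_allocate_lock)) return;
              if (++spin_count < 10) {
                continue;         /* spin briefly in case the holder exits */
              }
              sched_yield();      /* then stop burning the CPU */
            }
          }
#       endif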
        /* Another alternative for OSF1 might be: */
#       if 0
#         include <sys/mman.h>
          extern msemaphore GC_allocate_semaphore;
#         define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                              != 0) GC_lock(); else GC_allocate_lock = 1; }
          /* The following is INCORRECT, since the memory model is too weak. */
          /* Is this true?  Presumably msem_unlock has the right semantics?  */
#         define UNLOCK() { GC_allocate_lock = 0; \
                            msem_unlock(&GC_allocate_semaphore, 0); }
#       endif
#     else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#       ifndef USE_PTHREAD_LOCKS
#         define USE_PTHREAD_LOCKS
#       endif
#     endif /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#     ifdef USE_PTHREAD_LOCKS
#       include <pthread.h>
        extern pthread_mutex_t GC_allocate_ml;
#       ifdef GC_ASSERTIONS
#         define UNCOND_LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#         define UNCOND_UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#       else /* !GC_ASSERTIONS */
#         if defined(NO_PTHREAD_TRYLOCK)
#           define UNCOND_LOCK() GC_lock();
#         else /* !defined(NO_PTHREAD_TRYLOCK) */
#           define UNCOND_LOCK() \
              { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#         endif
#         define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#       endif /* !GC_ASSERTIONS */
#     endif /* USE_PTHREAD_LOCKS */
#     define SET_LOCK_HOLDER() \
                GC_lock_holder = NUMERIC_THREAD_ID(pthread_self())
#     define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#     define I_HOLD_LOCK() \
                (!GC_need_to_lock || \
                 GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
#     ifndef NUMERIC_THREAD_ID_UNIQUE
#       define I_DONT_HOLD_LOCK() 1  /* Conservatively say yes */
#     else
#       define I_DONT_HOLD_LOCK() \
                (!GC_need_to_lock \
                 || GC_lock_holder != NUMERIC_THREAD_ID(pthread_self()))
#     endif
      extern volatile GC_bool GC_collecting;
#     define ENTER_GC() GC_collecting = 1;
#     define EXIT_GC() GC_collecting = 0;
      extern void GC_lock(void);
      extern unsigned long GC_lock_holder;
#     ifdef GC_ASSERTIONS
        extern unsigned long GC_mark_lock_holder;
#     endif
#   endif /* GC_PTHREADS with linux_threads.c implementation */
#   ifndef SET_LOCK_HOLDER
#     define SET_LOCK_HOLDER()
#     define UNSET_LOCK_HOLDER()
#     define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired, */
                /* so it doesn't matter if we lie.                  */
#   endif
# else /* !THREADS */
#   define LOCK()
#   define UNLOCK()
#   define I_HOLD_LOCK() TRUE
#   define I_DONT_HOLD_LOCK() TRUE
                /* Used only in positive assertions or to test whether */
                /* we still need to acquire the lock.  TRUE works in   */
                /* either case.                                        */
# endif /* !THREADS */
#if defined(UNCOND_LOCK) && !defined(LOCK)
  GC_API GC_bool GC_need_to_lock;
                /* At least two threads are running; need to lock. */
# define LOCK() if (GC_need_to_lock) { UNCOND_LOCK(); }
# define UNLOCK() if (GC_need_to_lock) { UNCOND_UNLOCK(); }
#endif
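/* GC_need_to_lock starts out FALSE (it is zero-initialized), so a     */
/* single-threaded client pays almost nothing for LOCK()/UNLOCK().     */
/* The thread-registration code elsewhere in the collector flips it;   */
/* a hypothetical sketch:                                              */
#if 0
  void GC_example_register_thread(void)
  {
    /* ... add the thread to the thread table ... */
    GC_need_to_lock = TRUE;  /* from now on, LOCK() really locks */
  }
#endif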
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif