129
129
typedef pthread_t ethr_tid;
131
typedef struct ethr_mutex_ ethr_mutex;
133
pthread_mutex_t pt_mtx;
142
typedef struct ethr_cond_ ethr_cond;
144
pthread_cond_t pt_cnd;
150
#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
151
#define ETHR_USE_RWMTX_FALLBACK
153
typedef struct ethr_rwmutex_ ethr_rwmutex;
154
struct ethr_rwmutex_ {
155
pthread_rwlock_t pt_rwlock;
162
/* Static initializers */
164
#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
165
#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
167
#define ETHR_MUTEX_XCHK_INITER
168
#define ETHR_COND_XCHK_INITER
171
#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
172
#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
174
#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
175
|| defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
176
# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
177
# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
178
# define ETHR_REC_MUTEX_INITER \
179
{PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
182
# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
185
#ifndef ETHR_HAVE_PTHREAD_ATFORK
186
# define ETHR_NO_FORKSAFETY 1
189
131
typedef pthread_key_t ethr_tsd_key;
191
133
#define ETHR_HAVE_ETHR_SIG_FUNCS 1
193
#ifdef ETHR_TRY_INLINE_FUNCS
195
static ETHR_INLINE int
196
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
198
return pthread_mutex_trylock(&mtx->pt_mtx);
201
static ETHR_INLINE int
202
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
204
return pthread_mutex_lock(&mtx->pt_mtx);
207
static ETHR_INLINE int
208
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
210
return pthread_mutex_unlock(&mtx->pt_mtx);
213
#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
215
static ETHR_INLINE int
216
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
218
return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
221
static ETHR_INLINE int
222
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
224
return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
227
static ETHR_INLINE int
228
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
230
return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
233
static ETHR_INLINE int
234
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
236
return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
239
static ETHR_INLINE int
240
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
242
return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
245
static ETHR_INLINE int
246
ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
248
return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
251
#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
253
#endif /* ETHR_TRY_INLINE_FUNCS */
135
#if defined(PURIFY) || defined(VALGRIND)
136
# define ETHR_FORCE_PTHREAD_RWLOCK
137
# define ETHR_FORCE_PTHREAD_MUTEX
140
#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
141
# define ETHR_USE_OWN_RWMTX_IMPL__
144
#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
145
# define ETHR_USE_OWN_MTX_IMPL__
255
148
#elif defined(ETHR_WIN32_THREADS)
256
149
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
273
166
# undef WIN32_LEAN_AND_MEAN
169
#if defined(_MSC_VER)
171
#if ETHR_SIZEOF_LONG == 4
172
#define ETHR_HAVE_INT32_T 1
173
typedef long ethr_sint32_t;
174
typedef unsigned long ethr_uint32_t;
177
#if ETHR_SIZEOF___INT64 == 8
178
#define ETHR_HAVE_INT64_T 1
179
typedef __int64 ethr_sint64_t;
180
typedef unsigned __int64 ethr_uint64_t;
185
struct ethr_join_data_;
277
typedef long ethr_tid; /* thread id type */
279
volatile int initialized;
286
typedef struct cnd_wait_event__ cnd_wait_event_;
289
volatile int initialized;
291
cnd_wait_event_ *queue;
292
cnd_wait_event_ *queue_end;
295
#define ETHR_USE_RWMTX_FALLBACK
297
/* Static initializers */
299
#define ETHR_MUTEX_INITER {0}
300
#define ETHR_COND_INITER {0}
302
#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
304
#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
190
struct ethr_join_data_ *jdata;
191
} ethr_tid; /* thread id type */
306
193
typedef DWORD ethr_tsd_key;
308
195
#undef ETHR_HAVE_ETHR_SIG_FUNCS
310
#ifdef ETHR_TRY_INLINE_FUNCS
311
int ethr_fake_static_mutex_init(ethr_mutex *mtx);
313
static ETHR_INLINE int
314
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
316
if (!mtx->initialized) {
317
int res = ethr_fake_static_mutex_init(mtx);
321
return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
324
static ETHR_INLINE int
325
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
327
if (!mtx->initialized) {
328
int res = ethr_fake_static_mutex_init(mtx);
332
EnterCriticalSection(&mtx->cs);
336
static ETHR_INLINE int
337
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
339
LeaveCriticalSection(&mtx->cs);
343
#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
345
#ifdef ERTS_MIXED_CYGWIN_VC
351
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
353
# if defined(_M_IX86)
354
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
355
# else /* I.e. IA64 */
356
# if _MSC_VER >= 1400
357
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
359
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
363
# if _MSC_VER >= 1400
365
# undef ETHR_COMPILER_BARRIER
366
# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
368
#pragma intrinsic(_ReadWriteBarrier)
369
#pragma intrinsic(_InterlockedAnd)
370
#pragma intrinsic(_InterlockedOr)
372
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
375
#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
376
#define ETHR_HAVE_OPTIMIZED_LOCKS 1
383
volatile LONG locked;
387
volatile LONG counter;
389
#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
391
#ifdef ETHR_TRY_INLINE_FUNCS
393
static ETHR_INLINE int
394
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
396
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
397
var->value = (LONG) i;
399
(void) InterlockedExchange(&var->value, (LONG) i);
404
static ETHR_INLINE int
405
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
407
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
408
var->value = (LONG) i;
410
(void) InterlockedExchange(&var->value, (LONG) i);
415
static ETHR_INLINE int
416
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
418
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
421
*i = InterlockedExchangeAdd(&var->value, (LONG) 0);
426
static ETHR_INLINE int
427
ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
429
(void) InterlockedExchangeAdd(&var->value, (LONG) incr);
433
static ETHR_INLINE int
434
ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
438
*testp = InterlockedExchangeAdd(&var->value, (LONG) i);
443
static ETHR_INLINE int
444
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
446
(void) InterlockedIncrement(&var->value);
450
static ETHR_INLINE int
451
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
453
(void) InterlockedDecrement(&var->value);
457
static ETHR_INLINE int
458
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
460
*testp = (long) InterlockedIncrement(&var->value);
464
static ETHR_INLINE int
465
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
467
*testp = (long) InterlockedDecrement(&var->value);
471
static ETHR_INLINE int
472
ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
477
* See "Extra memory barrier requirements" note at the top
480
* According to msdn _InterlockedAnd() provides a full
483
*old = (long) _InterlockedAnd(&var->value, mask);
487
static ETHR_INLINE int
488
ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
493
* See "Extra memory barrier requirements" note at the top
496
* According to msdn _InterlockedOr() provides a full
499
*old = (long) _InterlockedOr(&var->value, mask);
503
static ETHR_INLINE int
504
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
510
* See "Extra memory barrier requirements" note at the top
513
* According to msdn _InterlockedCompareExchange() provides a full
516
*old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
520
static ETHR_INLINE int
521
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
525
*old = (long) InterlockedExchange(&var->value, (LONG) new);
530
* According to msdn InterlockedExchange() provides a full
534
static ETHR_INLINE int
535
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
537
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
538
lock->locked = (LONG) 0;
540
(void) InterlockedExchange(&lock->locked, (LONG) 0);
545
static ETHR_INLINE int
546
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
552
static ETHR_INLINE int
553
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
555
ETHR_COMPILER_BARRIER;
560
InterlockedExchange(&lock->locked, (LONG) 0);
562
ETHR_ASSERT(old == 1);
568
static ETHR_INLINE int
569
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
573
old = InterlockedExchange(&lock->locked, (LONG) 1);
574
} while (old != (LONG) 0);
575
ETHR_COMPILER_BARRIER;
580
* According to msdn InterlockedIncrement, InterlockedDecrement,
581
* and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
582
* provides full memory barriers.
584
static ETHR_INLINE int
585
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
587
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
588
lock->counter = (LONG) 0;
590
(void) InterlockedExchange(&lock->counter, (LONG) 0);
595
static ETHR_INLINE int
596
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
601
static ETHR_INLINE int
602
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
604
ETHR_COMPILER_BARRIER;
609
InterlockedDecrement(&lock->counter);
610
ETHR_ASSERT(old != 0);
615
static ETHR_INLINE int
616
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
619
LONG old = InterlockedIncrement(&lock->counter);
620
if ((old & ETHR_WLOCK_FLAG__) == 0)
621
break; /* Got read lock */
622
/* Restore and wait for writers to unlock */
623
old = InterlockedDecrement(&lock->counter);
624
while (old & ETHR_WLOCK_FLAG__) {
625
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
628
old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
632
ETHR_COMPILER_BARRIER;
636
static ETHR_INLINE int
637
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
639
ETHR_COMPILER_BARRIER;
644
_InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
645
ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
650
static ETHR_INLINE int
651
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
655
old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
656
} while (old & ETHR_WLOCK_FLAG__);
657
/* We got the write part of the lock; wait for readers to unlock */
658
while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
659
#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
662
old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
664
ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
666
ETHR_COMPILER_BARRIER;
670
#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
672
#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
197
#define ETHR_USE_OWN_RWMTX_IMPL__
198
#define ETHR_USE_OWN_MTX_IMPL__
200
#define ETHR_YIELD() (Sleep(0), 0)
674
202
#else /* No supported thread lib found */
690
260
/* For CPU-optimised atomics, spinlocks, and rwlocks. */
691
#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__)
692
# if ETHR_SIZEOF_PTR == 4
693
# if defined(__i386__)
694
# include "i386/ethread.h"
695
# elif (defined(__powerpc__) || defined(__ppc__)) && !defined(__powerpc64__)
696
# include "ppc32/ethread.h"
261
#if !defined(ETHR_DISABLE_NATIVE_IMPLS)
262
# if defined(__GNUC__)
263
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
264
# include "gcc/ethread.h"
265
# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
266
# include "libatomic_ops/ethread.h"
268
# ifndef ETHR_HAVE_NATIVE_ATOMICS
269
# if ETHR_SIZEOF_PTR == 4
270
# if defined(__i386__)
271
# include "i386/ethread.h"
272
# elif (defined(__powerpc__)||defined(__ppc__))&&!defined(__powerpc64__)
273
# include "ppc32/ethread.h"
274
# elif defined(__sparc__)
275
# include "sparc32/ethread.h"
276
# elif defined(__tile__)
277
# include "tile/ethread.h"
279
# elif ETHR_SIZEOF_PTR == 8
280
# if defined(__x86_64__)
281
# include "x86_64/ethread.h"
282
# elif defined(__sparc__) && defined(__arch64__)
283
# include "sparc64/ethread.h"
286
# include "gcc/ethread.h"
287
# include "libatomic_ops/ethread.h"
289
# elif defined(ETHR_HAVE_LIBATOMIC_OPS)
290
# include "libatomic_ops/ethread.h"
291
# elif defined(ETHR_WIN32_THREADS)
292
# include "win/ethread.h"
294
#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
296
#if defined(__GNUC__)
297
# ifndef ETHR_COMPILER_BARRIER
298
# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
300
# ifndef ETHR_SPIN_BODY
301
# if defined(__i386__) || defined(__x86_64__)
302
# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
303
# elif defined(__ia64__)
304
# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
697
305
# elif defined(__sparc__)
698
# include "sparc32/ethread.h"
699
# elif defined(__tile__)
700
# include "tile/ethread.h"
702
# elif ETHR_SIZEOF_PTR == 8
703
# if defined(__x86_64__)
704
# include "x86_64/ethread.h"
705
# elif defined(__sparc__) && defined(__arch64__)
706
# include "sparc64/ethread.h"
709
#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__) */
711
#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
712
# undef ETHR_HAVE_NATIVE_ATOMICS
714
#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
715
# undef ETHR_HAVE_NATIVE_LOCKS
718
#ifdef ETHR_HAVE_NATIVE_ATOMICS
719
#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
721
#ifdef ETHR_HAVE_NATIVE_LOCKS
722
#define ETHR_HAVE_OPTIMIZED_LOCKS 1
731
#ifdef ETHR_HAVE_NATIVE_ATOMICS
733
* Map ethread native atomics to ethread API atomics.
735
typedef ethr_native_atomic_t ethr_atomic_t;
738
#ifdef ETHR_HAVE_NATIVE_LOCKS
740
* Map ethread native spinlocks to ethread API spinlocks.
742
typedef ethr_native_spinlock_t ethr_spinlock_t;
744
* Map ethread native rwlocks to ethread API rwlocks.
746
typedef ethr_native_rwlock_t ethr_rwlock_t;
749
#ifdef ETHR_USE_RWMTX_FALLBACK
755
unsigned waiting_readers;
756
unsigned waiting_writers;
763
#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
764
typedef long ethr_atomic_t;
767
#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
769
#if defined(ETHR_WIN32_THREADS)
778
int ethr_do_spinlock_init(ethr_spinlock_t *lock);
779
int ethr_do_rwlock_init(ethr_rwlock_t *lock);
781
#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
783
#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
785
pthread_spinlock_t spnlck;
788
pthread_spinlock_t spnlck;
791
#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
793
#else /* ethr mutex/rwmutex */
803
#endif /* end mutex/rwmutex */
804
#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
807
void *(*alloc)(size_t);
808
void *(*realloc)(void *, size_t);
809
void (*free)(void *);
306
# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
308
# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
311
#elif defined(ETHR_WIN32_THREADS)
312
# ifndef ETHR_COMPILER_BARRIER
314
# pragma intrinsic(_ReadWriteBarrier)
315
# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
317
# ifndef ETHR_SPIN_BODY
318
# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
322
#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
324
#ifndef ETHR_HAVE_NATIVE_ATOMICS
326
* ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
327
* i.e. when our lock based atomic fallback is used, a noop is sufficient.
329
#define ETHR_MEMORY_BARRIER do { } while (0)
330
#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
331
#define ETHR_READ_MEMORY_BARRIER do { } while (0)
332
#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
335
#ifndef ETHR_WRITE_MEMORY_BARRIER
336
# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
337
# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
339
#ifndef ETHR_READ_MEMORY_BARRIER
340
# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
341
# define ETHR_READ_MEMORY_BARRIER_IS_FULL
343
#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
344
# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
345
# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
348
#define ETHR_FATAL_ERROR__(ERR) \
349
ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
351
ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
356
void ethr_compiler_barrier_fallback(void);
357
#ifndef ETHR_COMPILER_BARRIER
358
# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
361
#ifndef ETHR_SPIN_BODY
362
# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
366
# if defined(ETHR_HAVE_SCHED_YIELD)
367
# ifdef ETHR_HAVE_SCHED_H
371
# if defined(ETHR_SCHED_YIELD_RET_INT)
372
# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
374
# define ETHR_YIELD() (sched_yield(), 0)
376
# elif defined(ETHR_HAVE_PTHREAD_YIELD)
377
# if defined(ETHR_PTHREAD_YIELD_RET_INT)
378
# define ETHR_YIELD() pthread_yield()
380
# define ETHR_YIELD() (pthread_yield(), 0)
383
# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
387
#include "ethr_optimized_fallbacks.h"
810
390
void *(*thread_create_prepare_func)(void);
811
391
void (*thread_create_parent_func)(void *);
812
392
void (*thread_create_child_func)(void *);
813
393
} ethr_init_data;
815
#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
395
#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
398
void *(*alloc)(size_t);
399
void *(*realloc)(void *, size_t);
400
void (*free)(void *);
401
} ethr_memory_allocator;
403
#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
406
ethr_memory_allocator std;
407
ethr_memory_allocator sl;
408
ethr_memory_allocator ll;
409
} ethr_memory_allocators;
411
#define ETHR_MEM_ALLOCS_DEF_INITER__ \
412
{ETHR_MEM_ALLOC_DEF_INITER__, \
413
ETHR_MEM_ALLOC_DEF_INITER__, \
414
ETHR_MEM_ALLOC_DEF_INITER__}
417
ethr_memory_allocators mem;
420
} ethr_late_init_data;
422
#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
423
{ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
818
426
int detached; /* boolean (default false) */
840
444
void ethr_thr_exit(void *);
841
445
ethr_tid ethr_self(void);
842
446
int ethr_equal_tids(ethr_tid, ethr_tid);
843
int ethr_mutex_init(ethr_mutex *);
844
#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
845
int ethr_rec_mutex_init(ethr_mutex *);
847
int ethr_mutex_destroy(ethr_mutex *);
848
int ethr_mutex_set_forksafe(ethr_mutex *);
849
int ethr_mutex_unset_forksafe(ethr_mutex *);
850
#ifdef ETHR_NEED_MTX_PROTOTYPES__
851
int ethr_mutex_trylock(ethr_mutex *);
852
int ethr_mutex_lock(ethr_mutex *);
853
int ethr_mutex_unlock(ethr_mutex *);
855
int ethr_cond_init(ethr_cond *);
856
int ethr_cond_destroy(ethr_cond *);
857
int ethr_cond_signal(ethr_cond *);
858
int ethr_cond_broadcast(ethr_cond *);
859
int ethr_cond_wait(ethr_cond *, ethr_mutex *);
860
int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
862
int ethr_rwmutex_init(ethr_rwmutex *);
863
int ethr_rwmutex_destroy(ethr_rwmutex *);
864
#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
865
int ethr_rwmutex_tryrlock(ethr_rwmutex *);
866
int ethr_rwmutex_rlock(ethr_rwmutex *);
867
int ethr_rwmutex_runlock(ethr_rwmutex *);
868
int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
869
int ethr_rwmutex_rwlock(ethr_rwmutex *);
870
int ethr_rwmutex_rwunlock(ethr_rwmutex *);
873
#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
874
int ethr_atomic_init(ethr_atomic_t *, long);
875
int ethr_atomic_set(ethr_atomic_t *, long);
876
int ethr_atomic_read(ethr_atomic_t *, long *);
877
int ethr_atomic_inctest(ethr_atomic_t *, long *);
878
int ethr_atomic_dectest(ethr_atomic_t *, long *);
879
int ethr_atomic_inc(ethr_atomic_t *);
880
int ethr_atomic_dec(ethr_atomic_t *);
881
int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
882
int ethr_atomic_add(ethr_atomic_t *, long);
883
int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
884
int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
885
int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
886
int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
889
#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
890
int ethr_spinlock_init(ethr_spinlock_t *);
891
int ethr_spinlock_destroy(ethr_spinlock_t *);
892
int ethr_spin_unlock(ethr_spinlock_t *);
893
int ethr_spin_lock(ethr_spinlock_t *);
895
int ethr_rwlock_init(ethr_rwlock_t *);
896
int ethr_rwlock_destroy(ethr_rwlock_t *);
897
int ethr_read_unlock(ethr_rwlock_t *);
898
int ethr_read_lock(ethr_rwlock_t *);
899
int ethr_write_unlock(ethr_rwlock_t *);
900
int ethr_write_lock(ethr_rwlock_t *);
903
int ethr_time_now(ethr_timeval *);
904
448
int ethr_tsd_key_create(ethr_tsd_key *);
905
449
int ethr_tsd_key_delete(ethr_tsd_key);
906
450
int ethr_tsd_set(ethr_tsd_key, void *);
907
451
void *ethr_tsd_get(ethr_tsd_key);
909
int ethr_gate_init(ethr_gate *);
910
int ethr_gate_destroy(ethr_gate *);
911
int ethr_gate_close(ethr_gate *);
912
int ethr_gate_let_through(ethr_gate *, unsigned);
913
int ethr_gate_wait(ethr_gate *);
914
int ethr_gate_swait(ethr_gate *, int);
916
453
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
917
454
#include <signal.h>
918
455
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
922
459
void ethr_compiler_barrier(void);
924
#ifdef ETHR_TRY_INLINE_FUNCS
926
#ifdef ETHR_HAVE_NATIVE_ATOMICS
928
static ETHR_INLINE int
929
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
931
ethr_native_atomic_init(var, i);
935
static ETHR_INLINE int
936
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
938
ethr_native_atomic_set(var, i);
942
static ETHR_INLINE int
943
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
945
*i = ethr_native_atomic_read(var);
949
static ETHR_INLINE int
950
ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
952
ethr_native_atomic_add(var, incr);
956
static ETHR_INLINE int
957
ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
961
*testp = ethr_native_atomic_add_return(var, i);
965
static ETHR_INLINE int
966
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
968
ethr_native_atomic_inc(var);
972
static ETHR_INLINE int
973
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
975
ethr_native_atomic_dec(var);
979
static ETHR_INLINE int
980
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
982
*testp = ethr_native_atomic_inc_return(var);
986
static ETHR_INLINE int
987
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
989
*testp = ethr_native_atomic_dec_return(var);
993
static ETHR_INLINE int
994
ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
999
* See "Extra memory barrier requirements" note at the top
1002
*old = ethr_native_atomic_and_retold(var, mask);
1006
static ETHR_INLINE int
1007
ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
1012
* See "Extra memory barrier requirements" note at the top
1015
*old = ethr_native_atomic_or_retold(var, mask);
1019
static ETHR_INLINE int
1020
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
1024
*old = ethr_native_atomic_xchg(var, new);
1029
* If *var == *old, replace *old with new, else do nothing.
1030
* In any case return the original value of *var in *old.
1032
static ETHR_INLINE int
1033
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
1039
* See "Extra memory barrier requirements" note at the top
1042
*old = ethr_native_atomic_cmpxchg(var, new, expected);
1046
#endif /* ETHR_HAVE_NATIVE_ATOMICS */
1048
#ifdef ETHR_HAVE_NATIVE_LOCKS
461
#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
462
typedef ethr_native_spinlock_t ethr_spinlock_t;
463
#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
464
typedef ethr_opt_spinlock_t ethr_spinlock_t;
465
#elif defined(__WIN32__)
466
typedef CRITICAL_SECTION ethr_spinlock_t;
468
typedef pthread_mutex_t ethr_spinlock_t;
471
#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
472
int ethr_spinlock_init(ethr_spinlock_t *);
473
int ethr_spinlock_destroy(ethr_spinlock_t *);
474
void ethr_spin_unlock(ethr_spinlock_t *);
475
void ethr_spin_lock(ethr_spinlock_t *);
478
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
1050
480
static ETHR_INLINE int
1051
481
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
483
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
1053
484
ethr_native_spinlock_init(lock);
486
#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
487
return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
488
#elif defined(__WIN32__)
489
if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
490
return ethr_win_get_errno__();
493
return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
1057
497
static ETHR_INLINE int
1058
498
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
500
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
502
#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
503
return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
504
#elif defined(__WIN32__)
505
DeleteCriticalSection((CRITICAL_SECTION *) lock);
508
return pthread_mutex_destroy((pthread_mutex_t *) lock);
1063
static ETHR_INLINE int
512
static ETHR_INLINE void
1064
513
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
515
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
1066
516
ethr_native_spin_unlock(lock);
517
#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
518
int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
520
ETHR_FATAL_ERROR__(err);
521
#elif defined(__WIN32__)
522
LeaveCriticalSection((CRITICAL_SECTION *) lock);
524
int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
526
ETHR_FATAL_ERROR__(err);
1070
static ETHR_INLINE int
530
static ETHR_INLINE void
1071
531
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
533
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
1073
534
ethr_native_spin_lock(lock);
535
#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
536
int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
538
ETHR_FATAL_ERROR__(err);
539
#elif defined(__WIN32__)
540
EnterCriticalSection((CRITICAL_SECTION *) lock);
542
int err = pthread_mutex_lock((pthread_mutex_t *) lock);
544
ETHR_FATAL_ERROR__(err);
548
#endif /* ETHR_TRY_INLINE_FUNCS */
550
#include "ethr_atomics.h"
552
typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
554
#if defined(ETHR_WIN32_THREADS)
555
# include "win/ethr_event.h"
557
# include "pthread/ethr_event.h"
560
int ethr_set_main_thr_status(int, int);
561
int ethr_get_main_thr_status(int *);
563
struct ethr_ts_event_ {
568
ethr_atomic32_t uaflgs;
570
unsigned iflgs; /* for ethr lib only */
571
short rgix; /* for ethr lib only */
572
short mtix; /* for ethr lib only */
575
#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
576
#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
577
#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
578
#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
580
int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
581
int ethr_free_ts_event__(ethr_ts_event *tsep);
582
int ethr_make_ts_event__(ethr_ts_event **tsepp);
584
#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
585
ethr_ts_event *ethr_get_ts_event(void);
586
void ethr_leave_ts_event(ethr_ts_event *);
589
#if defined(ETHR_PTHREADS)
591
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
593
extern pthread_key_t ethr_ts_event_key__;
595
static ETHR_INLINE ethr_ts_event *
596
ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
598
ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
600
int res = ethr_make_ts_event__(&tsep);
602
ETHR_FATAL_ERROR__(res);
608
static ETHR_INLINE void
609
ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
616
#elif defined(ETHR_WIN32_THREADS)
618
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
620
extern DWORD ethr_ts_event_key__;
622
static ETHR_INLINE ethr_ts_event *
623
ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
625
ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
627
int res = ethr_get_tmp_ts_event__(&tsep);
629
ETHR_FATAL_ERROR__(res);
635
static ETHR_INLINE void
636
ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
638
if (tsep->iflgs & ETHR_TS_EV_TMP) {
639
int res = ethr_free_ts_event__(tsep);
641
ETHR_FATAL_ERROR__(res);
649
#include "ethr_mutex.h" /* Need atomic declarations and tse */
651
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
652
typedef ethr_native_rwlock_t ethr_rwlock_t;
654
typedef ethr_rwmutex ethr_rwlock_t;
657
#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
658
int ethr_rwlock_init(ethr_rwlock_t *);
659
int ethr_rwlock_destroy(ethr_rwlock_t *);
660
void ethr_read_unlock(ethr_rwlock_t *);
661
void ethr_read_lock(ethr_rwlock_t *);
662
void ethr_write_unlock(ethr_rwlock_t *);
663
void ethr_write_lock(ethr_rwlock_t *);
666
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
1077
668
static ETHR_INLINE int
1078
669
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
671
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
1080
672
ethr_native_rwlock_init(lock);
675
return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
1084
679
static ETHR_INLINE int
1085
680
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
682
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
685
return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
1090
static ETHR_INLINE int
689
static ETHR_INLINE void
1091
690
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
692
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
1093
693
ethr_native_read_unlock(lock);
695
ethr_rwmutex_runlock((ethr_rwmutex *) lock);
1097
static ETHR_INLINE int
699
static ETHR_INLINE void
1098
700
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
702
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
1100
703
ethr_native_read_lock(lock);
705
ethr_rwmutex_rlock((ethr_rwmutex *) lock);
1104
static ETHR_INLINE int
709
static ETHR_INLINE void
1105
710
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
712
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
1107
713
ethr_native_write_unlock(lock);
715
ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
1111
static ETHR_INLINE int
719
static ETHR_INLINE void
1112
720
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
722
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
1114
723
ethr_native_write_lock(lock);
725
ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
1118
#endif /* ETHR_HAVE_NATIVE_LOCKS */
1120
729
#endif /* ETHR_TRY_INLINE_FUNCS */
1123
* Fallbacks for atomics used in absence of optimized implementation.
1125
#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
1127
#define ETHR_ATOMIC_ADDR_BITS 4
1128
#define ETHR_ATOMIC_ADDR_SHIFT 3
1132
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1133
pthread_spinlock_t spnlck;
1137
char buf[ETHR_CACHE_LINE_SIZE];
1139
} ethr_atomic_protection_t;
1141
extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
1144
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1146
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
1147
(ðr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
1148
& ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
1151
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
1153
pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
1154
int res__ = pthread_spin_lock(slp__); \
1158
return pthread_spin_unlock(slp__); \
1161
#else /* ethread mutex */
1163
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
1164
(ðr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
1165
& ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
1167
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
1169
ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
1170
int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
1174
return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
1177
#endif /* end ethread mutex */
1179
#ifdef ETHR_TRY_INLINE_FUNCS

/* Initialize *var to i. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
}

/* Set *var to i. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
}

/* Read *var into *i. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
}

/* Increment *incp; return the incremented value in *testp. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
}

/* Decrement *decp; return the decremented value in *testp. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
}

/* Add incr to *var. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
}

/* Add i to *incp; return the resulting value in *testp. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
                                            long i,
                                            long *testp)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
}

/* Increment *incp. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
}

/* Decrement *decp. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
}

/* AND mask into *var; return the previous value in *old. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
                                            long mask,
                                            long *old)
{
    /*
     * See "Extra memory barrier requirements" note at the top
     * of the file.
     */
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
}

/* OR mask into *var; return the previous value in *old. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
                                           long mask,
                                           long *old)
{
    /*
     * See "Extra memory barrier requirements" note at the top
     * of the file.
     */
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
}

/* Exchange: set *var to new; return the previous value in *old. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
                                         long new,
                                         long *old)
{
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
}

/*
 * Compare-and-exchange: if *var == expected, set *var to new, else do
 * nothing.  In any case return the original value of *var in *old.
 * NOTE(review): parameter list reconstructed from the comment and body
 * fragments — confirm against the non-inline prototype elsewhere in
 * this header.
 */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
                                            long new,
                                            long expected,
                                            long *old)
{
    /*
     * See "Extra memory barrier requirements" note at the top
     * of the file.
     */
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(
        var,
        long old_val = *var;
        *old = old_val;
        /* __builtin_expect is GCC-specific; success is the likely path. */
        if (__builtin_expect(old_val == expected, 1))
            *var = new;
        );
}

#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */

#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
1297
/*
 * Fallbacks for spin locks, and rw spin locks used in absence of
 * optimized implementation.
 */

#ifndef ETHR_HAVE_OPTIMIZED_LOCKS

#ifdef ETHR_TRY_INLINE_FUNCS

/* Initialize a spin lock; falls back on an ethread mutex when pthread
 * spin locks are unavailable. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
    return pthread_spin_init(&lock->spnlck, 0);
#else
    return ethr_mutex_init(&lock->mtx);
#endif
}

/* Destroy a spin lock. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
    return pthread_spin_destroy(&lock->spnlck);
#else
    return ethr_mutex_destroy(&lock->mtx);
#endif
}

/* Release a spin lock. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
    return pthread_spin_unlock(&lock->spnlck);
#else
    return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
#endif
}

/* Acquire a spin lock. */
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
    return pthread_spin_lock(&lock->spnlck);
#else
    return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
#endif
}
1345
#ifdef ETHR_USE_RWMTX_FALLBACK
1346
#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
1348
#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
1351
static ETHR_INLINE int
1352
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
1354
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1356
return pthread_spin_init(&lock->spnlck, 0);
1358
return ethr_rwmutex_init(&lock->rwmtx);
1362
static ETHR_INLINE int
1363
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
1365
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1366
return pthread_spin_destroy(&lock->spnlck);
1368
return ethr_rwmutex_destroy(&lock->rwmtx);
1372
static ETHR_INLINE int
1373
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
1375
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1376
int res = pthread_spin_lock(&lock->spnlck);
1380
return pthread_spin_unlock(&lock->spnlck);
1382
return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
1386
static ETHR_INLINE int
1387
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
1389
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1392
int res = pthread_spin_lock(&lock->spnlck);
1395
if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
1399
res = pthread_spin_unlock(&lock->spnlck);
1405
return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
1409
static ETHR_INLINE int
1410
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
1412
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1414
return pthread_spin_unlock(&lock->spnlck);
1416
return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
1420
static ETHR_INLINE int
1421
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
1423
#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
1425
int res = pthread_spin_lock(&lock->spnlck);
1428
lock->counter |= ETHR_RWLOCK_WRITERS;
1429
if (lock->counter == ETHR_RWLOCK_WRITERS)
1431
res = pthread_spin_unlock(&lock->spnlck);
1436
return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
1440
#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */

#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */

/* A real spin lock exists either natively or via pthread spin locks. */
#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
#  define ETHR_HAVE_OPTIMIZED_SPINLOCK
#endif

#endif /* #ifndef ETHREAD_H__ */