/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define NANOSEC ((uint64_t) 1e9)
36
/* Heap-allocated trampoline context that carries the user's entry point and
 * argument from uv_thread_create() to the newly created thread.
 */
struct thread_ctx {
  void (*entry)(void* arg);
  void* arg;
};


/* pthread entry-point adapter: copies the context onto the new thread's own
 * stack, frees the heap copy, then invokes the user callback.
 */
static void* uv__thread_start(void *arg)
{
  struct thread_ctx *ctx_p;
  struct thread_ctx ctx;

  ctx_p = arg;
  ctx = *ctx_p;     /* Copy before freeing so `arg` isn't used after free. */
  uv__free(ctx_p);
  ctx.entry(ctx.arg);

  return 0;
}
55
/* Starts a new thread running entry(arg).
 * Returns 0 on success or a negative errno value on failure.
 */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  struct thread_ctx* ctx;
  int err;

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return -ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  err = pthread_create(tid, NULL, uv__thread_start, ctx);

  if (err)
    uv__free(ctx);  /* Thread never started; reclaim the context. */

  return -err;
}
75
/* Returns the calling thread's identifier. */
uv_thread_t uv_thread_self(void) {
  return pthread_self();
}
79
/* Blocks until *tid terminates. Returns 0 or a negative errno value. */
int uv_thread_join(uv_thread_t *tid) {
  return -pthread_join(*tid, NULL);
}
84
/* Non-zero when t1 and t2 identify the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}
89
/* Initializes the mutex. In debug builds (when PTHREAD_MUTEX_ERRORCHECK is
 * available) an error-checking mutex is used so that misuse such as recursive
 * locking aborts instead of deadlocking silently.
 * Returns 0 or a negative errno value.
 */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return -pthread_mutex_init(mutex, NULL);
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return -err;
#endif
}
112
/* Destroys the mutex; aborts on error (destroying a locked mutex is a bug). */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}
118
/* Locks the mutex; aborts on error (e.g. deadlock detected by errorcheck). */
void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}
124
/* Attempts to lock the mutex without blocking.
 * Returns 0 on success, a negative errno value (-EBUSY/-EAGAIN) otherwise.
 */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  /* FIXME(bnoordhuis) EAGAIN means recursive lock limit reached. Arguably
   * a bug, should probably abort rather than return -EAGAIN.
   */
  err = pthread_mutex_trylock(mutex);
  if (err && err != EBUSY && err != EAGAIN)
    abort();

  return -err;
}
138
/* Unlocks the mutex; aborts on error (unlocking an unowned mutex is a bug). */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}
144
/* Initializes the rwlock. Returns 0 or a negative errno value. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return -pthread_rwlock_init(rwlock, NULL);
}
149
/* Destroys the rwlock; aborts on error. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}
155
/* Acquires the rwlock for reading; aborts on error. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}
161
/* Attempts a non-blocking read lock.
 * Returns 0 on success, -EBUSY/-EAGAIN when the lock is held; aborts on any
 * other error.
 */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);
  if (err && err != EBUSY && err != EAGAIN)
    abort();

  return -err;
}
172
/* Releases a read lock; aborts on error. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}
178
/* Acquires the rwlock for writing; aborts on error. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}
184
/* Attempts a non-blocking write lock.
 * Returns 0 on success, -EBUSY/-EAGAIN when the lock is held; aborts on any
 * other error.
 */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);
  if (err && err != EBUSY && err != EAGAIN)
    abort();

  return -err;
}
195
/* Releases a write lock; aborts on error. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}
201
/* Runs callback exactly once across all threads sharing guard; aborts on
 * error.
 */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}
206
#if defined(__APPLE__) && defined(__MACH__)
208
/* macOS: initializes a Mach semaphore with the given initial value.
 * Returns 0, -EINVAL or -ENOMEM; aborts on any other kernel error.
 */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return -EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return -ENOMEM;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}
224
/* macOS: destroys the Mach semaphore; aborts on error. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}
230
/* macOS: increments the semaphore; aborts on error. */
void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}
236
/* macOS: decrements the semaphore, blocking until it becomes available.
 * Retries when the wait is interrupted (KERN_ABORTED); aborts on any other
 * failure.
 */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}
248
/* macOS: non-blocking semaphore decrement, emulated with a zero-length
 * timed wait. Returns 0 on success, -EAGAIN when the semaphore is zero;
 * aborts on any other kernel error.
 */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return -EAGAIN;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}
265
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
267
/* POSIX: initializes an unnamed, process-private semaphore.
 * Returns 0 or a negative errno value.
 */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return -errno;
  return 0;
}
274
/* POSIX: destroys the semaphore; aborts on error. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}
280
/* POSIX: increments the semaphore; aborts on error. */
void uv_sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}
/* POSIX: decrements the semaphore, blocking until available.
 * Retries on EINTR (signal delivery); aborts on any other failure.
 */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}
298
/* POSIX: non-blocking semaphore decrement. Retries on EINTR.
 * Returns 0 on success, -EAGAIN when the semaphore is zero; aborts on any
 * other error.
 */
int uv_sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r) {
    if (errno == EAGAIN)
      return -EAGAIN;
    abort();
  }

  return 0;
}
314
#endif /* defined(__APPLE__) && defined(__MACH__) */
317
#if defined(__APPLE__) && defined(__MACH__)
319
/* macOS: plain condition variable init; the relative-timeout wait used on
 * this platform makes a monotonic clock attribute unnecessary.
 * Returns 0 or a negative errno value.
 */
int uv_cond_init(uv_cond_t* cond) {
  return -pthread_cond_init(cond, NULL);
}
323
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
325
/* Initializes a condition variable clocked on CLOCK_MONOTONIC (where
 * supported) so timed waits are immune to wall-clock adjustments.
 * Returns 0 or a negative errno value; cleans up the attr/cond on the
 * error paths.
 */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return -err;

#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
#endif

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return -err;
}
356
#endif /* defined(__APPLE__) && defined(__MACH__) */
358
/* Destroys the condition variable; aborts on error. */
void uv_cond_destroy(uv_cond_t* cond) {
  if (pthread_cond_destroy(cond))
    abort();
}
363
/* Wakes one waiter; aborts on error. */
void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}
368
/* Wakes all waiters; aborts on error. */
void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}
373
/* Waits on cond with mutex held (mutex is released while waiting and
 * reacquired before return); aborts on error.
 */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}
379
/* Waits on cond for at most `timeout` nanoseconds (relative).
 * Returns 0 on wakeup, -ETIMEDOUT on timeout; aborts on any other error.
 * macOS uses the native relative-timeout wait; elsewhere the relative
 * timeout is converted to an absolute CLOCK_MONOTONIC deadline first.
 */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID__ */
#endif

  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return -ETIMEDOUT;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}
414
#if defined(__APPLE__) && defined(__MACH__)
416
/* macOS lacks pthread barriers: build one from a mutex and two turnstile
 * semaphores. Returns 0 or a negative errno value; tears down anything
 * already initialized on the error paths.
 */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return err;

  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err)
    goto error2;

  /* turnstile2 starts open so the first rendezvous can drain through it. */
  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err)
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return err;
}
445
/* macOS: releases the semaphores and mutex backing the emulated barrier. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  uv_sem_destroy(&barrier->turnstile2);
  uv_sem_destroy(&barrier->turnstile1);
  uv_mutex_destroy(&barrier->mutex);
}
452
/* macOS: two-phase (double turnstile) barrier rendezvous. All threads block
 * until `n` have arrived; returns non-zero in exactly one thread (the last
 * to leave), mirroring PTHREAD_BARRIER_SERIAL_THREAD semantics.
 */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  /* Phase 1: last arriver opens turnstile1 for everyone. */
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);  /* close turnstile2 for phase 2 */
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);  /* pass the token to the next thread */

  /* Phase 2: last leaver closes turnstile1 and opens turnstile2,
   * resetting the barrier for reuse.
   */
  uv_mutex_lock(&barrier->mutex);
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);  /* pass the token to the next thread */
  return serial_thread;
}
478
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
480
/* Initializes a native pthread barrier for `count` threads.
 * Returns 0 or a negative errno value.
 */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  return -pthread_barrier_init(barrier, NULL, count);
}
485
/* Destroys the barrier; aborts on error. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  if (pthread_barrier_destroy(barrier))
    abort();
}
491
/* Blocks until all participating threads arrive. Returns non-zero in
 * exactly one thread (the "serial" thread); aborts on error.
 */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int r = pthread_barrier_wait(barrier);
  if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();
  return r == PTHREAD_BARRIER_SERIAL_THREAD;
}
498
#endif /* defined(__APPLE__) && defined(__MACH__) */
500
/* Creates a thread-local storage key (no destructor).
 * Returns 0 or a negative errno value.
 */
int uv_key_create(uv_key_t* key) {
  return -pthread_key_create(key, NULL);
}
505
/* Deletes the TLS key; aborts on error. */
void uv_key_delete(uv_key_t* key) {
  if (pthread_key_delete(*key))
    abort();
}
511
/* Returns the calling thread's value for the key (NULL if never set). */
void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}
516
/* Sets the calling thread's value for the key; aborts on error. */
void uv_key_set(uv_key_t* key, void* value) {
  if (pthread_setspecific(*key, value))
    abort();
}