/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <pthread.h>
#include <errno.h>

#if defined(__APPLE__) && defined(__MACH__)
#include <mach/mach.h>
#else
#include <semaphore.h>
#endif /* defined(__APPLE__) && defined(__MACH__) */
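
/* Nanoseconds per second.  Used below to split a 64-bit timeout expressed in
 * nanoseconds into the tv_sec/tv_nsec fields of a struct timespec. */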
#define NANOSEC ((uint64_t) 1e9)


int uv_thread_join(uv_thread_t *tid) {
  if (pthread_join(*tid, NULL))
    return -1;
  else
    return 0;
}

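
/* In release builds (or when PTHREAD_MUTEX_ERRORCHECK is unavailable) a
 * default mutex is used.  In debug builds an error-checking mutex is created
 * instead, so relocking a held mutex or unlocking an unowned one aborts
 * rather than misbehaving silently. */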
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  if (pthread_mutex_init(mutex, NULL))
    return -1;
  else
    return 0;
#else
  pthread_mutexattr_t attr;
  int r;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  r = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return r ? -1 : 0;
#endif
}

void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}

void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}

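
/* The trylock-style functions below treat EBUSY (and EAGAIN on some systems)
 * as ordinary contention and report it as -1; any other error indicates an
 * invalid or corrupted lock and aborts the process. */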
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int r;

  r = pthread_mutex_trylock(mutex);

  if (r && r != EBUSY && r != EAGAIN)
    abort();

  if (r)
    return -1;
  else
    return 0;
}

void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}

int uv_rwlock_init(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_init(rwlock, NULL))
    return -1;
  else
    return 0;
}

void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}

void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}

int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int r;

  r = pthread_rwlock_tryrdlock(rwlock);

  if (r && r != EBUSY && r != EAGAIN)
    abort();

  if (r)
    return -1;
  else
    return 0;
}

void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}

void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}

int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int r;

  r = pthread_rwlock_trywrlock(rwlock);

  if (r && r != EBUSY && r != EAGAIN)
    abort();

  if (r)
    return -1;
  else
    return 0;
}

void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}

void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}

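
/* On OS X, unnamed POSIX semaphores (sem_init) are not implemented, so the
 * semaphore API is backed by Mach semaphores there and by sem_t everywhere
 * else. */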
#if defined(__APPLE__) && defined(__MACH__)

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  if (semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value))
    return -1;
  else
    return 0;
}

void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}

void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}

void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}

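
/* A timed wait with a zero timeout is used as a non-blocking trywait:
 * it either takes the semaphore immediately or returns right away. */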
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  if (semaphore_timedwait(*sem, interval) == KERN_SUCCESS)
    return 0;
  else
    return -1;
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  return sem_init(sem, 0, value);
}

void uv_sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}

void uv_sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}

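
/* sem_wait() and sem_trywait() can fail with EINTR when interrupted by a
 * signal, so both are retried until they succeed or fail for another
 * reason. */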
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}

int uv_sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r && errno != EAGAIN)
    abort();

  if (r)
    return -1;
  else
    return 0;
}

#endif /* defined(__APPLE__) && defined(__MACH__) */

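
/* Condition variables: everywhere except OS X the condition variable is bound
 * to CLOCK_MONOTONIC so that uv_cond_timedwait() is not affected by wall-clock
 * adjustments.  OS X lacks pthread_condattr_setclock(); it uses the default
 * attributes here and a relative-timeout wait in uv_cond_timedwait(). */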
#if defined(__APPLE__) && defined(__MACH__)

int uv_cond_init(uv_cond_t* cond) {
  if (pthread_cond_init(cond, NULL))
    return -1;
  else
    return 0;
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;

  if (pthread_condattr_init(&attr))
    return -1;

  if (pthread_condattr_setclock(&attr, CLOCK_MONOTONIC))
    goto error2;

  if (pthread_cond_init(cond, &attr))
    goto error2;

  if (pthread_condattr_destroy(&attr))
    goto error;

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return -1;
}

#endif /* defined(__APPLE__) && defined(__MACH__) */

void uv_cond_destroy(uv_cond_t* cond) {
  if (pthread_cond_destroy(cond))
    abort();
}

void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}

void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}

void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}

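
/* The timeout is relative and expressed in nanoseconds.  OS X has a native
 * relative wait (pthread_cond_timedwait_relative_np); elsewhere the timeout is
 * turned into an absolute deadline on the monotonic clock via uv__hrtime().
 * Returns 0 when signalled, -1 on timeout; any other error aborts. */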
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  timeout += uv__hrtime();
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return -1;

  abort();
  return -1; /* Satisfy the compiler. */
}

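
/* OS X does not provide pthread barriers, so the barrier is emulated with a
 * mutex and two "turnstile" semaphores (a classic two-phase barrier); other
 * platforms use pthread_barrier_t directly. */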
#if defined(__APPLE__) && defined(__MACH__)

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  barrier->n = count;
  barrier->count = 0;

  if (uv_mutex_init(&barrier->mutex))
    return -1;

  if (uv_sem_init(&barrier->turnstile1, 0))
    goto error2;

  if (uv_sem_init(&barrier->turnstile2, 1))
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return -1;
}

void uv_barrier_destroy(uv_barrier_t* barrier) {
  uv_sem_destroy(&barrier->turnstile2);
  uv_sem_destroy(&barrier->turnstile1);
  uv_mutex_destroy(&barrier->mutex);
}

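
/* Phase 1: every thread blocks on turnstile1 until the last arrival locks
 * turnstile2 and opens turnstile1.  Phase 2 mirrors this on turnstile2, so no
 * thread can race ahead and re-enter the barrier before everyone has left. */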
void uv_barrier_wait(uv_barrier_t* barrier) {
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  if (--barrier->count == 0) {
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  if (pthread_barrier_init(barrier, NULL, count))
    return -1;
  else
    return 0;
}

void uv_barrier_destroy(uv_barrier_t* barrier) {
  if (pthread_barrier_destroy(barrier))
    abort();
}

void uv_barrier_wait(uv_barrier_t* barrier) {
  int r = pthread_barrier_wait(barrier);
  if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();
}

#endif /* defined(__APPLE__) && defined(__MACH__) */