 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
 static void sleep_forever(rb_thread_t *th, int nodeadlock);
 static double timeofday(void);
-struct timeval rb_time_interval(VALUE);
 static int rb_threadptr_dead(rb_thread_t *th);

 static void rb_check_deadlock(rb_vm_t *vm);

 int rb_signal_buff_size(void);
 void rb_signal_exec(rb_thread_t *th, int sig);
 void rb_disable_interrupt(void);
 void rb_thread_stop_timer_thread(void);
-#define eKillSignal INT2FIX(0)
-#define eTerminateSignal INT2FIX(1)
+static const VALUE eKillSignal = INT2FIX(0);
+static const VALUE eTerminateSignal = INT2FIX(1);

 static volatile int system_working = 1;

+#define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
 st_delete_wrap(st_table *table, st_data_t key)
 #define GVL_UNLOCK_BEGIN() do { \
     rb_thread_t *_th_stored = GET_THREAD(); \
     RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
-    native_mutex_unlock(&_th_stored->vm->global_vm_lock)
+    gvl_release(_th_stored->vm);

 #define GVL_UNLOCK_END() \
-    native_mutex_lock(&_th_stored->vm->global_vm_lock); \
+    gvl_acquire(_th_stored->vm, _th_stored); \
     rb_thread_set_current(_th_stored); \
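For orientation, a minimal usage sketch (not part of the diff): thread.c pairs these macros around a blocking call, dropping the GVL for its duration. `fd`, `buf`, and `len` are hypothetical stand-ins, and no Ruby API may be called while the GVL is released.

GVL_UNLOCK_BEGIN();
{
    (void)read(fd, buf, len);   /* blocking syscall runs without the GVL */
}
GVL_UNLOCK_END();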
 #define BLOCKING_REGION_CORE(exec) do { \
     GVL_UNLOCK_BEGIN(); {\

 #define blocking_region_begin(th, region, func, arg) \
     (region)->prev_status = (th)->status; \
     (th)->status = THREAD_STOPPED; \
     thread_debug("enter blocking region (%p)\n", (void *)(th)); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
-    native_mutex_unlock(&(th)->vm->global_vm_lock); \
+    gvl_release((th)->vm); \

 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
     rb_thread_t *__th = GET_THREAD(); \
     struct rb_blocking_region_buffer __region; \
-    blocking_region_begin(__th, &__region, ubf, ubfarg); \
+    blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
     blocking_region_end(__th, &__region); \
     RUBY_VM_CHECK_INTS(); \
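A hedged sketch of how a C extension reaches this machinery through the public 1.9 entry point rb_thread_blocking_region; nogvl_read and read_without_gvl are hypothetical helpers, and RUBY_UBF_IO is the stock unblocking function for I/O.

#include <ruby.h>
#include <unistd.h>

struct read_args { int fd; void *buf; size_t len; };

/* Runs with the GVL released: must not touch Ruby objects or raise. */
static VALUE
nogvl_read(void *p)
{
    struct read_args *a = p;
    return (VALUE)read(a->fd, a->buf, a->len);
}

static ssize_t
read_without_gvl(int fd, void *buf, size_t len)
{
    struct read_args a = { fd, buf, len };
    return (ssize_t)rb_thread_blocking_region(nogvl_read, &a, RUBY_UBF_IO, 0);
}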
     rb_thread_lock_t lock;
     rb_thread_cond_t cond;
     struct rb_thread_struct volatile *th;
-    volatile int cond_waiting, cond_notified;
+    int cond_waiting;
     struct rb_mutex_struct *next_mutex;

-static void rb_mutex_unlock_all(mutex_t *mutex, rb_thread_t *th);
-static void rb_mutex_abandon_all(mutex_t *mutexes);
+static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
+static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
+rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
+    rb_mutex_t *mutexes = th->keeping_mutexes;
+        /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
+        mutexes = mutex->next_mutex;
+        err = rb_mutex_unlock_th(mutex, th);
+        if (err) rb_bug("invalid keeping_mutexes: %s", err);

 rb_thread_terminate_all(void)
     return RUBY_VM_INTERRUPTED(th);

-struct timeval rb_time_timeval(VALUE);

 rb_thread_sleep(int sec)
     rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
-static void rb_threadptr_execute_interrupts_rec(rb_thread_t *, int);
+static void rb_threadptr_execute_interrupts_common(rb_thread_t *);

-rb_thread_schedule_rec(int sched_depth)
+rb_thread_schedule_limits(unsigned long limits_us)
     thread_debug("rb_thread_schedule\n");
     if (!rb_thread_alone()) {
         rb_thread_t *th = GET_THREAD();

-        thread_debug("rb_thread_schedule/switch start\n");
-        RB_GC_SAVE_MACHINE_CONTEXT(th);
-        native_mutex_unlock(&th->vm->global_vm_lock);
-        native_thread_yield();
+        if (th->running_time_us >= limits_us) {
+            thread_debug("rb_thread_schedule/switch start\n");
+            RB_GC_SAVE_MACHINE_CONTEXT(th);
+            gvl_yield(th->vm, th);
+            rb_thread_set_current(th);
+            thread_debug("rb_thread_schedule/switch done\n");
-        native_mutex_lock(&th->vm->global_vm_lock);
-        rb_thread_set_current(th);
-        thread_debug("rb_thread_schedule/switch done\n");
-        if (!sched_depth && UNLIKELY(GET_THREAD()->interrupt_flag)) {
-            rb_threadptr_execute_interrupts_rec(GET_THREAD(), sched_depth+1);

 rb_thread_schedule(void)
-    rb_thread_schedule_rec(0);
+    rb_thread_schedule_limits(0);
+    if (UNLIKELY(GET_THREAD()->interrupt_flag)) {
+        rb_threadptr_execute_interrupts_common(GET_THREAD());

 /* blocking region */
-rb_threadptr_execute_interrupts_rec(rb_thread_t *th, int sched_depth)
+rb_threadptr_execute_interrupts_common(rb_thread_t *th)
-    if (GET_VM()->main_thread == th) {
-        while (rb_signal_buff_size() && !th->exec_signal) native_thread_yield();
+    rb_atomic_t interrupt;

     if (th->raised_flag) return;

-    while (th->interrupt_flag) {
+    while ((interrupt = ATOMIC_EXCHANGE(th->interrupt_flag, 0)) != 0) {
         enum rb_thread_status status = th->status;
-        int timer_interrupt = th->interrupt_flag & 0x01;
-        int finalizer_interrupt = th->interrupt_flag & 0x04;
+        int timer_interrupt = interrupt & 0x01;
+        int finalizer_interrupt = interrupt & 0x04;

         th->status = THREAD_RUNNABLE;
-        th->interrupt_flag = 0;

         /* signal handling */
-        if (th->exec_signal) {
-            int sig = th->exec_signal;
-            th->exec_signal = 0;
-            rb_signal_exec(th, sig);
+        if (th == th->vm->main_thread) {
+            while ((sig = rb_get_next_signal()) != 0) {
+                rb_signal_exec(th, sig);

         /* exception from another thread */
         if (th->thrown_errinfo) {
             VALUE err = th->thrown_errinfo;
             th->thrown_errinfo = 0;
-            thread_debug("rb_thread_execute_interrupts: %ld\n", err);
+            thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

             if (err == eKillSignal || err == eTerminateSignal) {
                 th->errinfo = INT2FIX(TAG_FATAL);
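The point of the ATOMIC_EXCHANGE rewrite is that the flag word is grabbed and cleared in a single step, so an interrupt posted between a separate read and clear can no longer be lost. A minimal generic sketch, assuming GCC's __atomic builtins in place of Ruby's ATOMIC_EXCHANGE and a hypothetical pending_flags word:

#include <stdint.h>

static volatile uint32_t pending_flags;

static void
drain_flags(void)
{
    uint32_t flags;
    /* atomically take ownership of all currently pending bits */
    while ((flags = __atomic_exchange_n(&pending_flags, 0, __ATOMIC_SEQ_CST)) != 0) {
        if (flags & 0x01) { /* timer interrupt */ }
        if (flags & 0x04) { /* finalizer interrupt */ }
    }
}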
+#define THREAD_IO_WAITING_P(th) ( \
+    ((th)->status == THREAD_STOPPED || \
+     (th)->status == THREAD_STOPPED_FOREVER) && \
+    (th)->blocking_region_buffer && \
+    (th)->unblock.func == ubf_select && \

+thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
+    GetThreadPtr((VALUE)key, th);
+    if (THREAD_IO_WAITING_P(th)) {
+        native_mutex_lock(&th->interrupt_lock);
+        if (THREAD_IO_WAITING_P(th) && th->waiting_fd == fd) {
+            th->thrown_errinfo = th->vm->special_exceptions[ruby_error_closed_stream];
+            RUBY_VM_SET_INTERRUPT(th);
+            (th->unblock.func)(th->unblock.arg);
+        native_mutex_unlock(&th->interrupt_lock);

 rb_thread_fd_close(int fd)
+    st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
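thread_fd_close_i uses a check-lock-recheck idiom: the unlocked THREAD_IO_WAITING_P test is a cheap, racy filter over all living threads, and the same test is repeated under th->interrupt_lock before the waiter is actually poked. A generic pthreads sketch of the idiom (all names hypothetical):

#include <pthread.h>

struct waiter { pthread_mutex_t lock; int io_waiting; int fd; };

static void
poke_if_waiting_on(struct waiter *w, int fd)
{
    if (w->io_waiting && w->fd == fd) {         /* racy fast-path filter  */
        pthread_mutex_lock(&w->lock);
        if (w->io_waiting && w->fd == fd) {     /* authoritative re-check */
            /* deliver the closed-stream error and wake the waiter here */
        }
        pthread_mutex_unlock(&w->lock);
    }
}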
  *  either a symbol or a string name. If the specified variable does not exist,
  *  returns <code>nil</code>.
  *
- *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
- *     b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
- *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
- *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
+ *     [
+ *       Thread.new { Thread.current["name"] = "A" },
+ *       Thread.new { Thread.current[:name] = "B" },
+ *       Thread.new { Thread.current["name"] = "C" }
+ *     ].each do |th|
+ *       th.join
+ *       puts "#{th.inspect}: #{th[:name]}"
+ *     end
  *
  *  <em>produces:</em>
  *
- *     #<Thread:0x401b3b3c sleep>: C
- *     #<Thread:0x401b3bc8 sleep>: B
- *     #<Thread:0x401b3c68 sleep>: A
- *     #<Thread:0x401bdf4c run>:
+ *     #<Thread:0x00000002a54220 dead>: A
+ *     #<Thread:0x00000002a541a8 dead>: B
+ *     #<Thread:0x00000002a54130 dead>: C
     memcpy(dst->fdset, src, size);

+rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
+    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
+    if (size > sizeof(fd_set)) {
+        rb_raise(rb_eArgError, "too large fdsets");
+    memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));

+rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
+    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
+    if (size < sizeof(fd_set))
+        size = sizeof(fd_set);
+    dst->maxfd = src->maxfd;
+    dst->fdset = xrealloc(dst->fdset, size);
+    memcpy(dst->fdset, src->fdset, size);
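rb_fd_dup and rb_fd_rcopy round out the rb_fdset_t API that the rewritten select code relies on. A small sketch of the intended lifecycle, assuming the rb_fd_* functions shown in this file:

static int
fd_readable_p(int fd)
{
    rb_fdset_t set;
    int r;

    rb_fd_init(&set);                  /* allocate backing storage      */
    rb_fd_set(fd, &set);               /* grows to cover any descriptor */
    r = rb_fd_select(fd + 1, &set, NULL, NULL, NULL) > 0
        && rb_fd_isset(fd, &set);
    rb_fd_term(&set);                  /* release backing storage       */
    return r;
}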
 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
 #undef FD_ISSET

 #define FD_ZERO(f)      rb_fd_zero(f)
-#define FD_SET(i, f)    rb_fd_set(i, f)
-#define FD_CLR(i, f)    rb_fd_clr(i, f)
-#define FD_ISSET(i, f)  rb_fd_isset(i, f)
+#define FD_SET(i, f)    rb_fd_set((i), (f))
+#define FD_CLR(i, f)    rb_fd_clr((i), (f))
+#define FD_ISSET(i, f)  rb_fd_isset((i), (f))
 #elif defined(_WIN32)

-rb_fd_init(volatile rb_fdset_t *set)
+rb_fd_init(rb_fdset_t *set)
     set->capa = FD_SETSIZE;
     set->fdset = ALLOC(fd_set);
 #undef FD_ISSET

 #define FD_ZERO(f)      rb_fd_zero(f)
-#define FD_SET(i, f)    rb_fd_set(i, f)
-#define FD_CLR(i, f)    rb_fd_clr(i, f)
-#define FD_ISSET(i, f)  rb_fd_isset(i, f)
+#define FD_SET(i, f)    rb_fd_set((i), (f))
+#define FD_CLR(i, f)    rb_fd_clr((i), (f))
+#define FD_ISSET(i, f)  rb_fd_isset((i), (f))
+#define rb_fd_rcopy(d, s) (*(d) = *(s))
-#if defined(__CYGWIN__) || defined(_WIN32)
+#if defined(__CYGWIN__)

 cmp_tv(const struct timeval *a, const struct timeval *b)
-do_select(int n, fd_set *read, fd_set *write, fd_set *except,
+do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
           struct timeval *timeout)
     int result, lerrno;
-    fd_set UNINITIALIZED_VAR(orig_read);
-    fd_set UNINITIALIZED_VAR(orig_write);
-    fd_set UNINITIALIZED_VAR(orig_except);
+    rb_fdset_t UNINITIALIZED_VAR(orig_read);
+    rb_fdset_t UNINITIALIZED_VAR(orig_write);
+    rb_fdset_t UNINITIALIZED_VAR(orig_except);
     double limit = 0;
     struct timeval wait_rest;
-# if defined(__CYGWIN__) || defined(_WIN32)
+# if defined(__CYGWIN__)
     struct timeval start_time;

-# if defined(__CYGWIN__) || defined(_WIN32)
+# if defined(__CYGWIN__)
     gettimeofday(&start_time, NULL);
     limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;

     wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
     BLOCKING_REGION({
-        result = select(n, read, write, except, wait);
+        result = rb_fd_select(n, read, write, except, wait);
         if (result < 0) lerrno = errno;
     if (result != 0) break;

-    if (read) *read = orig_read;
-    if (write) *write = orig_write;
-    if (except) *except = orig_except;
+    rb_fd_dup(read, &orig_read);
+    rb_fd_dup(write, &orig_write);
+    rb_fd_dup(except, &orig_except);

     struct timeval elapsed;
     gettimeofday(&elapsed, NULL);
     } while (result == 0 && !finish);

+#elif defined(_WIN32)
+    rb_thread_t *th = GET_THREAD();
+    result = native_fd_select(n, read, write, except, timeout, th);
+    if (result < 0) lerrno = errno;

     BLOCKING_REGION({
-        result = select(n, read, write, except, timeout);
+        result = rb_fd_select(n, read, write, except, timeout);
         if (result < 0) lerrno = errno;
     }, ubf_select, GET_THREAD());
 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
                  struct timeval *timeout)
-    if (!read && !write && !except) {
-        rb_thread_sleep_forever();
-    rb_thread_wait_for(*timeout);
-    return do_select(max, read, write, except, timeout);
+    rb_fdset_t fdsets[3];
+    rb_fdset_t *rfds = NULL;
+    rb_fdset_t *wfds = NULL;
+    rb_fdset_t *efds = NULL;
+    rb_fd_copy(rfds, read, max);
+    rb_fd_copy(wfds, write, max);
+    rb_fd_copy(efds, except, max);
+    retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
+    rb_fd_rcopy(read, rfds);
+    rb_fd_rcopy(write, wfds);
+    rb_fd_rcopy(except, efds);
 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
                     struct timeval *timeout)
-    fd_set *r = NULL, *w = NULL, *e = NULL;
     if (!read && !write && !except) {
         if (!timeout) {
             rb_thread_sleep_forever();

-    rb_fd_resize(max - 1, read);
-    r = rb_fd_ptr(read);
+    rb_fd_resize(max - 1, read);
-    rb_fd_resize(max - 1, write);
-    w = rb_fd_ptr(write);
+    rb_fd_resize(max - 1, write);
-    rb_fd_resize(max - 1, except);
-    e = rb_fd_ptr(except);
-    return do_select(max, r, w, e, timeout);
+    rb_fd_resize(max - 1, except);
+    return do_select(max, read, write, except, timeout);
+ * poll() is supported by many OSes, but so far Linux is the only
+ * one we know of that supports using poll() in all places select()
+#if defined(HAVE_POLL) && defined(linux)

+/* The same with linux kernel. TODO: make platform independent definition. */
+#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
+#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
+#define POLLEX_SET (POLLPRI)

+#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
+#define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)

+/* TODO: don't ignore sigmask */
+int ppoll(struct pollfd *fds, nfds_t nfds,
+          const struct timespec *ts, const sigset_t *sigmask)
+        if (ts->tv_sec > TIMET_MAX/1000)
+        tmp = ts->tv_sec * 1000;
+        tmp2 = ts->tv_nsec / (1000 * 1000);
+        if (TIMET_MAX - tmp < tmp2)
+        timeout_ms = tmp + tmp2;
+    return poll(fds, nfds, timeout_ms);
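A worked example of the fallback's conversion (hypothetical values): ts = {1, 500000000}, i.e. 1.5 s, gives tmp = 1 * 1000 = 1000 and tmp2 = 500000000 / (1000 * 1000) = 500, so the call becomes poll(fds, nfds, 1500); a NULL ts instead maps to timeout_ms = -1, i.e. block indefinitely.

struct timespec half_sec = { 0, 500 * 1000 * 1000 };
struct pollfd pfd = { .fd = 0, .events = POLLIN };
int ready = ppoll(&pfd, 1, &half_sec, NULL);   /* waits at most 500 ms */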
+ * returns a mask of events
+rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
+    struct timespec *timeout = NULL;
+        ts.tv_sec = tv->tv_sec;
+        ts.tv_nsec = tv->tv_usec * 1000;
+        limit = timeofday();
+        limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
+    fds.events = (short)events;
+        result = ppoll(&fds, 1, timeout, NULL);
+        if (result < 0) lerrno = errno;
+    }, ubf_select, GET_THREAD());
+        double d = limit - timeofday();
+        ts.tv_sec = (long)d;
+        ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
+    if (fds.revents & POLLNVAL) {
+    /*
+     * POLLIN and POLLOUT have different meanings from select(2)'s
+     * read/write bits, so we need to fix them up.
+     */
+    if (fds.revents & POLLIN_SET)
+        result |= RB_WAITFD_IN;
+    if (fds.revents & POLLOUT_SET)
+        result |= RB_WAITFD_OUT;
+    if (fds.revents & POLLEX_SET)
+        result |= RB_WAITFD_PRI;
+#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
+static rb_fdset_t *init_set_fd(int fd, rb_fdset_t *fds)

+struct select_args {

+select_single(VALUE ptr)
+    struct select_args *args = (struct select_args *)ptr;
+    r = rb_thread_fd_select(args->as.fd + 1,
+                            args->read, args->write, args->except, args->tv);
+        args->as.error = errno;
+        if (args->read && rb_fd_isset(args->as.fd, args->read))
+        if (args->write && rb_fd_isset(args->as.fd, args->write))
+        if (args->except && rb_fd_isset(args->as.fd, args->except))

+select_single_cleanup(VALUE ptr)
+    struct select_args *args = (struct select_args *)ptr;
+    if (args->read) rb_fd_term(args->read);
+    if (args->write) rb_fd_term(args->write);
+    if (args->except) rb_fd_term(args->except);

+rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
+    rb_fdset_t rfds, wfds, efds;
+    struct select_args args;
+    VALUE ptr = (VALUE)&args;
+    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
+    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
+    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
+    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
+    errno = args.as.error;
+#endif /* ! USE_POLL */
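A usage sketch of the new C API (fd is assumed to be an open descriptor): wait up to 1.5 s for readability or urgent data, then decode the result, where 0 means timeout and -1 means error with errno set.

struct timeval tv = { 1, 500000 };
int revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN | RB_WAITFD_PRI, &tv);

if (revents < 0)
    rb_sys_fail("rb_wait_for_single_fd");
else if (revents == 0)
    ;   /* timed out */
else if (revents & RB_WAITFD_IN)
    ;   /* data is available for read(2) */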
 mutex_memsize(const void *ptr)
-    return ptr ? sizeof(mutex_t) : 0;
+    return ptr ? sizeof(rb_mutex_t) : 0;

 static const rb_data_type_t mutex_data_type = {
-    mutex_mark, mutex_free, mutex_memsize,
+    {mutex_mark, mutex_free, mutex_memsize,},

+rb_obj_is_mutex(VALUE obj)
+    if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {

 mutex_alloc(VALUE klass)
     VALUE volatile obj;
-    obj = TypedData_Make_Struct(klass, mutex_t, &mutex_data_type, mutex);
+    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
     native_mutex_initialize(&mutex->lock);
-    native_cond_initialize(&mutex->cond);
+    native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
-lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
+lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
     int interrupted = 0;
-#if 0 /* for debug */
-    native_thread_yield();
-    native_mutex_lock(&mutex->lock);
-    th->transition_for_lock = 0;
-    while (mutex->th || (mutex->th = th, 0)) {
+        mutex->cond_waiting++;
-        mutex->cond_waiting++;
-        native_cond_wait(&mutex->cond, &mutex->lock);
-        mutex->cond_notified--;
         if (RUBY_VM_INTERRUPTED(th)) {
             interrupted = 1;
+        if (err == ETIMEDOUT) {

+            struct timespec timeout_rel;
+            struct timespec timeout;
+            timeout_rel.tv_sec = 0;
+            timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
+            timeout = native_cond_timeout(&mutex->cond, timeout_rel);
+            err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
+            native_cond_wait(&mutex->cond, &mutex->lock);
-        th->transition_for_lock = 1;
-        native_mutex_unlock(&mutex->lock);
-        if (interrupted == 2) native_thread_yield();
-#if 0 /* for debug */
-        native_thread_yield();
+        mutex->cond_waiting--;
     return interrupted;
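lock_func now sleeps on the condition variable with a relative timeout converted to an absolute monotonic deadline (native_cond_timeout), so a wall-clock jump cannot stretch or shrink the wait. A plain-pthreads sketch of the same idiom, assuming the condvar was created with pthread_condattr_setclock(CLOCK_MONOTONIC), which is what RB_CONDATTR_CLOCK_MONOTONIC arranges:

#include <pthread.h>
#include <time.h>

static int
timed_wait_100ms(pthread_cond_t *cond, pthread_mutex_t *lock)
{
    struct timespec deadline;

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_nsec += 100 * 1000 * 1000;        /* now + 100 ms    */
    if (deadline.tv_nsec >= 1000000000L) {        /* carry the nsecs */
        deadline.tv_sec += 1;
        deadline.tv_nsec -= 1000000000L;
    }
    /* returns 0 when signaled, ETIMEDOUT when the deadline passes */
    return pthread_cond_timedwait(cond, lock, &deadline);
}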
     while (mutex->th != th) {
         int interrupted;
         enum rb_thread_status prev_status = th->status;
-        int last_thread = 0;
         struct rb_unblock_callback oldubf;

         set_unblock_function(th, lock_interrupt, mutex, &oldubf);
         th->status = THREAD_STOPPED_FOREVER;
         th->locking_mutex = self;

+        native_mutex_lock(&mutex->lock);
+        /*
+         * Carefully! while some contended threads are in lock_func(),
+         * vm->sleeper is an unstable value. we have to avoid both deadlock
+         */
         if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
+        interrupted = lock_func(th, mutex, timeout_ms);
+        native_mutex_unlock(&mutex->lock);
-        th->transition_for_lock = 1;
-        BLOCKING_REGION_CORE({
-            interrupted = lock_func(th, mutex, last_thread);
-        th->transition_for_lock = 0;
-        remove_signal_thread_list(th);

         reset_unblock_function(th, &oldubf);

         th->locking_mutex = Qfalse;
 rb_mutex_unlock(VALUE self)
     const char *err;
     GetMutexPtr(self, mutex);
-    err = mutex_unlock(mutex, GET_THREAD());
+    err = rb_mutex_unlock_th(mutex, GET_THREAD());
     if (err) rb_raise(rb_eThreadError, "%s", err);
-rb_mutex_unlock_all(mutex_t *mutexes, rb_thread_t *th)
-    /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
-    mutexes = mutex->next_mutex;
-    err = mutex_unlock(mutex, th);
-    if (err) rb_bug("invalid keeping_mutexes: %s", err);

-rb_mutex_abandon_all(mutex_t *mutexes)
+rb_mutex_abandon_all(rb_mutex_t *mutexes)
     while (mutexes) {
         mutex = mutexes;
     st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);

 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
     for (; hook; hook = hook->next) {
+        if (hook->flag & RUBY_EVENT_REMOVED) {
         if (flag & hook->flag) {
             (*hook->func)(flag, hook->data, self, id, klass);

-rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
+static int remove_defered_event_hook(rb_event_hook_t **root);

+thread_exec_event_hooks(VALUE args, int running)
-    const VALUE errinfo = th->errinfo;
+    struct event_call_args *argp = (struct event_call_args *)args;
+    rb_thread_t *th = argp->th;
+    rb_event_flag_t flag = argp->event;
+    VALUE self = argp->self;
+    VALUE klass = argp->klass;
     const rb_event_flag_t wait_event = th->event_flags;

-    if (self == rb_mRubyVMFrozenCore) return;
-    if (wait_event & flag) {
-        exec_event_hooks(th->event_hooks, flag, self, id, klass);
+    if (self == rb_mRubyVMFrozenCore) return 0;
+    if ((wait_event & flag) && !(running & EVENT_RUNNING_THREAD)) {
+        th->tracing |= EVENT_RUNNING_THREAD;
+        removed = exec_event_hooks(th->event_hooks, flag, self, id, klass);
+        th->tracing &= ~EVENT_RUNNING_THREAD;
+        remove_defered_event_hook(&th->event_hooks);

     if (wait_event & RUBY_EVENT_VM) {
         if (th->vm->event_hooks == NULL) {
             th->event_flags &= (~RUBY_EVENT_VM);
-        exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
+        else if (!(running & EVENT_RUNNING_VM)) {
+            th->tracing |= EVENT_RUNNING_VM;
+            removed = exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
+            th->tracing &= ~EVENT_RUNNING_VM;
+            remove_defered_event_hook(&th->vm->event_hooks);

+rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
+    const VALUE errinfo = th->errinfo;
+    struct event_call_args args;
+    thread_suppress_tracing(th, EVENT_RUNNING_EVENT_MASK, thread_exec_event_hooks, (VALUE)&args, FALSE);
     th->errinfo = errinfo;
+defer_remove_event_hook(rb_event_hook_t *hook, rb_event_hook_func_t func)
+    if (func == 0 || hook->func == func) {
+        hook->flag |= RUBY_EVENT_REMOVED;

 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
-    rb_event_hook_t *prev = NULL, *hook = *root, *next;
-    if (func == 0 || hook->func == func) {
-        prev->next = hook->next;

-rb_threadptr_revmove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
-    int ret = remove_event_hook(&th->event_hooks, func);

+    rb_event_hook_t *hook = *root, *next;
+    if (func == 0 || hook->func == func || (hook->flag & RUBY_EVENT_REMOVED)) {

+remove_defered_event_hook(rb_event_hook_t **root)
+    rb_event_hook_t *hook = *root, *next;
+    if (hook->flag & RUBY_EVENT_REMOVED) {

+rb_threadptr_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
+    if (th->tracing & EVENT_RUNNING_THREAD) {
+        ret = defer_remove_event_hook(th->event_hooks, func);
+        ret = remove_event_hook(&th->event_hooks, func);

     thread_reset_event_flags(th);

 rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
-    return rb_threadptr_revmove_event_hook(thval2thread_t(thval), func);
+    return rb_threadptr_remove_event_hook(thval2thread_t(thval), func);
+static rb_event_hook_t *
+search_live_hook(rb_event_hook_t *hook)
+    if (!(hook->flag & RUBY_EVENT_REMOVED))

+running_vm_event_hooks(st_data_t key, st_data_t val, st_data_t data)
+    rb_thread_t *th = thval2thread_t((VALUE)key);
+    if (!(th->tracing & EVENT_RUNNING_VM)) return ST_CONTINUE;
+    *(rb_thread_t **)data = th;

+static rb_thread_t *
+vm_event_hooks_running_thread(rb_vm_t *vm)
+    rb_thread_t *found = NULL;
+    st_foreach(vm->living_threads, running_vm_event_hooks, (st_data_t)&found);

 rb_remove_event_hook(rb_event_hook_func_t func)
     rb_vm_t *vm = GET_VM();
-    rb_event_hook_t *hook = vm->event_hooks;
-    int ret = remove_event_hook(&vm->event_hooks, func);
-    if (hook != NULL && vm->event_hooks == NULL) {
+    rb_event_hook_t *hook = search_live_hook(vm->event_hooks);
+    if (vm_event_hooks_running_thread(vm)) {
+        ret = defer_remove_event_hook(vm->event_hooks, func);
+        ret = remove_event_hook(&vm->event_hooks, func);
+    if (hook && !search_live_hook(vm->event_hooks)) {
         set_threads_event_flags(0);
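The removal path is split in two because a hook may be removed from inside a running hook: while any hook is executing, removal only marks the entry RUBY_EVENT_REMOVED, and the marked entries are unlinked later by remove_defered_event_hook. A generic mark-then-sweep sketch of the idiom (hypothetical node type):

struct node { int dead; struct node *next; };

static void
remove_or_defer(struct node **root, struct node *victim, int list_in_use)
{
    if (list_in_use) {
        victim->dead = 1;                 /* mark; traversal skips it */
        return;
    }
    while (*root) {                       /* sweep: unlink dead nodes */
        if (*root == victim || (*root)->dead)
            *root = (*root)->next;
        else
            root = &(*root)->next;
    }
}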
 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
     rb_thread_t *th = GET_THREAD();
+    return thread_suppress_tracing(th, EVENT_RUNNING_TRACE, func, arg, always);

+thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always)
+    int state, tracing = th->tracing, running = tracing & ev;
     volatile int raised;
     volatile int outer_state;
     VALUE result = Qnil;

-    if ((tracing = th->tracing) != 0 && !always) {
+    if (running == ev && !always) {

     raised = rb_threadptr_reset_raised(th);