Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
Uint erts_tot_proc_mem; /* in bytes */
erts_smp_atomic_t erts_tot_proc_mem; /* in bytes */
static erts_tsd_key_t sched_data_key;
static erts_smp_mtx_t schdlq_mtx;
static erts_smp_cnd_t schdlq_cnd;
static erts_smp_mtx_t proc_tab_mtx;
erts_proc_lock_t erts_proc_locks[ERTS_PROC_LOCKS_NO_OF];
static ErtsSchedulerData *schedulers;
static Uint last_scheduler_no;
static Uint no_schedulers;
static erts_smp_atomic_t atomic_no_schedulers;
static Uint schedulers_waiting_on_runq;
static Uint use_no_schedulers;
static int changing_no_schedulers;
static ProcessList *pending_exiters;
#ifdef ERTS_ENABLE_LOCK_CHECK
Sint16 proc_lock_main;
Sint16 proc_lock_link;
Sint16 proc_lock_msgq;
Sint16 proc_lock_status;
#else /* !ERTS_SMP */
ErtsSchedulerData erts_scheduler_data;
static void init_sched_thr_data(ErtsSchedulerData *esdp);
57
114
typedef struct schedule_q {
62
static ScheduleQ queue[NPRIORITY_LEVELS];
119
/* we use the same queue for low and normal prio processes */
120
static ScheduleQ queue[NPRIORITY_LEVELS-1];
64
121
static unsigned qmask;
123
static Uint queued_low;
124
static Uint queued_normal;
125
static Sint runq_len;
65
127
#ifndef BM_COUNTERS
66
128
static int processes_busy;
70
132
Process** process_tab;
71
Uint context_switches; /* no of context switches */
72
Uint reductions; /* total number of reductions */
73
Uint last_reds; /* used in process info */
133
static Uint context_switches; /* no of context switches */
134
static Uint reductions; /* total number of reductions */
135
static Uint last_reds;
136
static Uint last_exact_reds;
74
137
Uint erts_default_process_flags;
75
Eterm erts_default_tracer;
76
138
Eterm erts_system_monitor;
77
139
Eterm erts_system_monitor_long_gc;
78
140
Eterm erts_system_monitor_large_heap;
79
141
struct erts_system_monitor_flags_t erts_system_monitor_flags;
81
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0};
83
int erts_default_trace_pattern_is_on;
84
Binary *erts_default_match_spec;
85
Binary *erts_default_meta_match_spec;
86
struct trace_pattern_flags erts_default_trace_pattern_flags;
87
Eterm erts_default_meta_tracer_pid;
90
144
Uint erts_num_active_procs;
91
145
Process** erts_active_procs;
148
static erts_smp_atomic_t process_count;
95
151
* Local functions.
97
static void delete_process(Process* p);
98
static void print_function_from_pc(Eterm* x, CIO fd);
99
static int stack_element_dump(Process* p, Eterm* sp, int yreg, CIO fd);
153
static void print_function_from_pc(int to, void *to_arg, Eterm* x);
154
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
157
static void handle_pending_exiters(ProcessList *);
160
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
163
erts_proc_lc_lock(Process *p, Uint32 locks)
165
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
167
ERTS_LC_FLG_LT_PROCLOCK);
168
if (locks & ERTS_PROC_LOCK_MAIN) {
169
lck.id = lc_id.proc_lock_main;
172
if (locks & ERTS_PROC_LOCK_LINK) {
173
lck.id = lc_id.proc_lock_link;
176
if (locks & ERTS_PROC_LOCK_MSGQ) {
177
lck.id = lc_id.proc_lock_msgq;
180
if (locks & ERTS_PROC_LOCK_STATUS) {
181
lck.id = lc_id.proc_lock_status;
187
erts_proc_lc_trylock(Process *p, Uint32 locks, int locked)
189
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
191
ERTS_LC_FLG_LT_PROCLOCK);
192
if (locks & ERTS_PROC_LOCK_MAIN) {
193
lck.id = lc_id.proc_lock_main;
194
erts_lc_trylock(locked, &lck);
196
if (locks & ERTS_PROC_LOCK_LINK) {
197
lck.id = lc_id.proc_lock_link;
198
erts_lc_trylock(locked, &lck);
200
if (locks & ERTS_PROC_LOCK_MSGQ) {
201
lck.id = lc_id.proc_lock_msgq;
202
erts_lc_trylock(locked, &lck);
204
if (locks & ERTS_PROC_LOCK_STATUS) {
205
lck.id = lc_id.proc_lock_status;
206
erts_lc_trylock(locked, &lck);
211
erts_proc_lc_unlock(Process *p, Uint32 locks)
213
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
215
ERTS_LC_FLG_LT_PROCLOCK);
216
if (locks & ERTS_PROC_LOCK_STATUS) {
217
lck.id = lc_id.proc_lock_status;
218
erts_lc_unlock(&lck);
220
if (locks & ERTS_PROC_LOCK_MSGQ) {
221
lck.id = lc_id.proc_lock_msgq;
222
erts_lc_unlock(&lck);
224
if (locks & ERTS_PROC_LOCK_LINK) {
225
lck.id = lc_id.proc_lock_link;
226
erts_lc_unlock(&lck);
228
if (locks & ERTS_PROC_LOCK_MAIN) {
229
lck.id = lc_id.proc_lock_main;
230
erts_lc_unlock(&lck);
235
erts_proc_lc_trylock_force_busy(Process *p, Uint32 locks)
237
if (locks & ERTS_PROC_LOCKS_ALL) {
238
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
240
ERTS_LC_FLG_LT_PROCLOCK);
242
if (locks & ERTS_PROC_LOCK_MAIN)
243
lck.id = lc_id.proc_lock_main;
244
else if (locks & ERTS_PROC_LOCK_LINK)
245
lck.id = lc_id.proc_lock_link;
246
else if (locks & ERTS_PROC_LOCK_MSGQ)
247
lck.id = lc_id.proc_lock_msgq;
248
else if (locks & ERTS_PROC_LOCK_STATUS)
249
lck.id = lc_id.proc_lock_status;
251
erts_lc_fail("Unknown proc lock found");
253
return erts_lc_trylock_force_busy(&lck);
258
void erts_proc_lc_chk_only_proc_main(Process *p)
260
erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
262
ERTS_LC_FLG_LT_PROCLOCK);
263
erts_lc_check_exact(&proc_main, 1);
266
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
267
ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
270
erts_proc_lc_chk_have_proc_locks(Process *p, Uint32 locks)
272
int have_locks_len = 0;
273
erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
274
ERTS_PROC_LC_EMPTY_LOCK_INIT,
275
ERTS_PROC_LC_EMPTY_LOCK_INIT,
276
ERTS_PROC_LC_EMPTY_LOCK_INIT};
277
if (locks & ERTS_PROC_LOCK_MAIN) {
278
have_locks[have_locks_len].id = lc_id.proc_lock_main;
279
have_locks[have_locks_len++].extra = p->id;
281
if (locks & ERTS_PROC_LOCK_LINK) {
282
have_locks[have_locks_len].id = lc_id.proc_lock_link;
283
have_locks[have_locks_len++].extra = p->id;
285
if (locks & ERTS_PROC_LOCK_MSGQ) {
286
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
287
have_locks[have_locks_len++].extra = p->id;
289
if (locks & ERTS_PROC_LOCK_STATUS) {
290
have_locks[have_locks_len].id = lc_id.proc_lock_status;
291
have_locks[have_locks_len++].extra = p->id;
294
erts_lc_check(have_locks, have_locks_len, NULL, 0);
298
erts_proc_lc_chk_proc_locks(Process *p, Uint32 locks)
300
int have_locks_len = 0;
301
int have_not_locks_len = 0;
302
erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
303
ERTS_PROC_LC_EMPTY_LOCK_INIT,
304
ERTS_PROC_LC_EMPTY_LOCK_INIT,
305
ERTS_PROC_LC_EMPTY_LOCK_INIT};
306
erts_lc_lock_t have_not_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
307
ERTS_PROC_LC_EMPTY_LOCK_INIT,
308
ERTS_PROC_LC_EMPTY_LOCK_INIT,
309
ERTS_PROC_LC_EMPTY_LOCK_INIT};
311
if (locks & ERTS_PROC_LOCK_MAIN) {
312
have_locks[have_locks_len].id = lc_id.proc_lock_main;
313
have_locks[have_locks_len++].extra = p->id;
316
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
317
have_not_locks[have_not_locks_len++].extra = p->id;
319
if (locks & ERTS_PROC_LOCK_LINK) {
320
have_locks[have_locks_len].id = lc_id.proc_lock_link;
321
have_locks[have_locks_len++].extra = p->id;
324
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
325
have_not_locks[have_not_locks_len++].extra = p->id;
327
if (locks & ERTS_PROC_LOCK_MSGQ) {
328
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
329
have_locks[have_locks_len++].extra = p->id;
332
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
333
have_not_locks[have_not_locks_len++].extra = p->id;
335
if (locks & ERTS_PROC_LOCK_STATUS) {
336
have_locks[have_locks_len].id = lc_id.proc_lock_status;
337
have_locks[have_locks_len++].extra = p->id;
340
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
341
have_not_locks[have_not_locks_len++].extra = p->id;
344
erts_lc_check(have_locks, have_locks_len,
345
have_not_locks, have_not_locks_len);
349
erts_proc_lc_my_proc_locks(Process *p)
352
erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
354
ERTS_LC_FLG_LT_PROCLOCK),
355
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
357
ERTS_LC_FLG_LT_PROCLOCK),
358
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
360
ERTS_LC_FLG_LT_PROCLOCK),
361
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
363
ERTS_LC_FLG_LT_PROCLOCK)};
367
erts_lc_have_locks(resv, locks, 4);
369
res |= ERTS_PROC_LOCK_MAIN;
371
res |= ERTS_PROC_LOCK_LINK;
373
res |= ERTS_PROC_LOCK_MSGQ;
375
res |= ERTS_PROC_LOCK_STATUS;
381
erts_proc_lc_chk_no_proc_locks(char *file, int line)
384
int ids[4] = {lc_id.proc_lock_main,
385
lc_id.proc_lock_link,
386
lc_id.proc_lock_msgq,
387
lc_id.proc_lock_status};
388
erts_lc_have_lock_ids(resv, ids, 4);
389
if (resv[0] || resv[1] || resv[2] || resv[3]) {
390
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
391
"not to have any process locks locked",
399
erts_pre_init_process(void)
402
erts_tsd_key_create(&sched_data_key);
405
pending_exiters = NULL;
407
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
408
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
409
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
410
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
411
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
101
416
/* initialize the scheduler */
418
erts_init_process(void)
107
erts_tot_proc_mem = 0;
421
Uint proc_bits = ERTS_PROC_BITS;
423
ErtsSchedulerData *esdp;
426
erts_smp_atomic_init(&process_count, 0);
428
if (erts_use_r9_pids_ports) {
429
proc_bits = ERTS_R9_PROC_BITS;
430
ASSERT(erts_max_processes <= (1 << ERTS_R9_PROC_BITS));
433
erts_smp_atomic_init(&erts_tot_proc_mem, 0L);
109
435
process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE,
110
436
erts_max_processes*sizeof(Process*));
111
437
ERTS_PROC_MORE_MEM(erts_max_processes * sizeof(Process*));
112
438
sys_memzero(process_tab, erts_max_processes * sizeof(Process*));
114
440
erts_active_procs = (Process**)
115
erts_alloc(ERTS_ALC_T_ACTIVE_PROCS, erts_max_processes*sizeof(Process*));
441
erts_alloc(ERTS_ALC_T_ACTIVE_PROCS,
442
erts_max_processes * sizeof(Process*));
116
443
ERTS_PROC_MORE_MEM(erts_max_processes * sizeof(Process*));
117
444
erts_num_active_procs = 0;
448
erts_smp_mtx_init(&schdlq_mtx, "schdlq");
449
erts_smp_cnd_init(&schdlq_cnd);
451
use_no_schedulers = 1;
452
changing_no_schedulers = 0;
454
erts_smp_atomic_init(&atomic_no_schedulers, 0L);
455
last_scheduler_no = 0;
456
schedulers_waiting_on_runq = 0;
460
for (i = 0; i < ERTS_PROC_LOCKS_NO_OF; i++) {
461
erts_smp_mtx_init(&erts_proc_locks[i].mtx,
462
"proc_main" /* Con the lock checker */);
463
#ifdef ERTS_ENABLE_LOCK_CHECK
464
erts_proc_locks[i].mtx.lc.id = -1; /* Don't want lock checking on
467
erts_smp_cnd_init(&erts_proc_locks[i].cnd);
470
#else /* !ERTS_SMP */
472
esdp = &erts_scheduler_data;
475
erts_tsd_set(sched_data_key, (void *) esdp);
478
init_sched_thr_data(esdp);
482
erts_smp_mtx_init(&proc_tab_mtx, "proc_tab");
123
487
p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
124
p_serial_mask = ((~(~((Uint) 0) << ERTS_PROCESSES_BITS)) >> p_serial_shift);
488
p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
125
489
erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
127
491
/* mark the schedule queue as empty */
128
for(i = 0; i < NPRIORITY_LEVELS; i++)
492
for(i = 0; i < NPRIORITY_LEVELS - 1; i++)
129
493
queue[i].first = queue[i].last = (Process*) 0;
131
498
#ifndef BM_COUNTERS
132
499
processes_busy = 0;
135
501
context_switches = 0;
138
505
erts_default_process_flags = 0;
139
erts_default_tracer = NIL;
140
erts_system_monitor_clear();
511
prepare_for_block(void *c_p)
513
erts_smp_mtx_unlock(&schdlq_mtx);
515
erts_smp_proc_unlock((Process *) c_p, ERTS_PROC_LOCK_MAIN);
519
resume_after_block(void *c_p)
522
erts_smp_proc_lock((Process *) c_p, ERTS_PROC_LOCK_MAIN);
523
erts_smp_mtx_lock(&schdlq_mtx);
527
erts_start_schedulers(Uint wanted)
534
res = erts_set_no_schedulers(NULL, NULL, &actual, wanted,
538
"Failed to create any scheduler-threads: %s (%d)\n",
542
erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
544
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
545
ASSERT(actual != wanted);
546
erts_dsprintf(dsbufp,
547
"Failed to create %bpu scheduler-threads (%s:%d); "
548
"only %bpu scheduler-thread%s created.\n",
549
wanted, erl_errno_id(res), res,
550
actual, actual == 1 ? " was" : "s were");
551
erts_send_error_to_logger_nogl(dsbufp);
555
#endif /* #ifdef ERTS_SMP */
558
init_sched_thr_data(ErtsSchedulerData *esdp)
561
erts_bits_init_state(&esdp->erl_bits_state);
562
esdp->match_pseudo_process = NULL;
563
esdp->no = last_scheduler_no;
564
esdp->free_process = NULL;
567
esdp->current_process = NULL;
574
erts_get_scheduler_data(void)
576
return (ErtsSchedulerData *) erts_tsd_get(sched_data_key);
581
static int remove_proc_from_sched_q(Process *p);
583
static ERTS_INLINE void
584
suspend_process(Process *p)
586
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
587
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlq_mtx));
589
p->rcount++; /* count number of suspend */
591
ASSERT(!(p->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED)
592
|| p == erts_get_current_process());
593
ASSERT(p->status != P_RUNNING
594
|| p->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED);
595
if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ)
604
if (!ERTS_PROC_PENDING_EXIT(p))
606
remove_proc_from_sched_q(p);
608
* leave process in schedq so it will discover the pending exit
610
p->rstatus = P_RUNABLE; /* wakeup as runnable */
613
p->rstatus = P_RUNABLE; /* wakeup as runnable */
616
p->rstatus = P_WAITING; /* wakeup as waiting */
619
return; /* ignore this */
622
erl_exit(1, "bad state in suspend_process()\n");
624
p->status = P_SUSPENDED;
627
static ERTS_INLINE void
628
resume_process(Process *p)
630
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
631
/* We may get called from trace([suspend], false) */
632
if (p->status != P_SUSPENDED)
634
ASSERT(p->rcount > 0);
636
if (--p->rcount > 0) /* multiple suspend i.e trace and busy port */
640
p->status = P_WAITING; /* make add_to_schedule_q work */
641
add_to_schedule_q(p);
644
p->status = P_WAITING;
647
erl_exit(1, "bad state in resume_process()\n");
655
exit_sched_thr(ErtsSchedulerData *esdp, int schdlq_mtx_locked)
658
if (!schdlq_mtx_locked)
659
erts_smp_mtx_lock(&schdlq_mtx);
661
esdp->prev->next = esdp->next;
663
schedulers = esdp->next;
665
esdp->next->prev = esdp->prev;
667
erts_smp_atomic_dec(&atomic_no_schedulers);
668
erts_bits_destroy_state(&esdp->erl_bits_state);
669
erts_free(ERTS_ALC_T_SCHDLR_DATA, (void *) esdp);
670
erts_smp_cnd_broadcast(&schdlq_cnd);
671
erts_smp_mtx_unlock(&schdlq_mtx);
676
sched_thread_func(void *vesdp)
678
#ifdef ERTS_ENABLE_LOCK_CHECK
681
Uint no = ((ErtsSchedulerData *) vesdp)->no;
682
erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
683
erts_lc_set_thread_name(&buf[0]);
686
erts_tsd_set(sched_data_key, vesdp);
687
erts_register_blockable_thread();
689
hipe_thread_signal_init();
691
erts_thread_init_float();
693
exit_sched_thr((ErtsSchedulerData *) vesdp, 0);
698
add_to_proc_list(ProcessList** plpp, Eterm pid)
702
/* Add at the end of the list */
703
for (; *plpp; plpp = &(*plpp)->next) {
704
ASSERT((*plpp)->pid != pid);
707
plp = (ProcessList *) erts_alloc(ERTS_ALC_T_PROC_LIST, sizeof(ProcessList));
716
remove_from_proc_list(ProcessList** plpp, Eterm pid)
718
for (; *plpp; plpp = &(*plpp)->next) {
719
if ((*plpp)->pid == pid) {
720
ProcessList* plp = *plpp;
722
erts_free(ERTS_ALC_T_PROC_LIST, (void *) plp);
724
for (plp = *plpp; plp; plp = plp->next) {
725
ASSERT(plp->pid != pid);
736
handle_pending_suspend(Process *p, Uint32 p_locks)
742
ASSERT(p->pending_suspenders);
744
if (ERTS_PROC_IS_EXITING(p)) {
753
plp = p->pending_suspenders;
755
ProcessList *free_plp;
756
Process *rp = erts_pid2proc(p, p_locks,
757
plp->pid, ERTS_PROC_LOCK_STATUS);
759
ASSERT(is_nil(rp->suspendee));
760
rp->suspendee = suspendee;
762
erts_smp_mtx_lock(&schdlq_mtx);
764
erts_smp_mtx_unlock(&schdlq_mtx);
767
/* rp is suspended waiting for p to suspend: resume rp */
769
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
773
erts_free(ERTS_ALC_T_PROC_LIST, (void *) free_plp);
775
p->pending_suspenders = NULL;
778
static ERTS_INLINE void
779
cancel_suspend_of_suspendee(Process *p, Uint32 p_locks)
781
if (is_not_nil(p->suspendee)) {
783
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
784
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
785
rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
786
p->suspendee, ERTS_PROC_LOCK_STATUS);
788
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
789
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
790
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
796
erts_suspend_another_process(Process *c_p, Uint32 c_p_locks,
797
Eterm suspendee, Uint32 suspendee_locks)
800
int unlock_c_p_status;
802
ASSERT(c_p->id != suspendee);
804
ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
806
c_p->freason = EXC_NULL;
808
if (c_p_locks & ERTS_PROC_LOCK_STATUS)
809
unlock_c_p_status = 0;
811
unlock_c_p_status = 1;
812
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
815
if (c_p->suspendee == suspendee) {
817
if (unlock_c_p_status)
818
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
819
return erts_pid2proc(c_p, c_p_locks, suspendee, suspendee_locks);
142
erts_default_trace_pattern_is_on = 0;
143
erts_default_match_spec = NULL;
144
erts_default_meta_match_spec = NULL;
145
erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
146
erts_default_meta_tracer_pid = NIL;
822
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
823
suspendee, ERTS_PROC_LOCK_STATUS);
826
erts_smp_mtx_lock(&schdlq_mtx);
827
if (!(rp->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED)) {
828
Uint32 need_locks = suspendee_locks & ~ERTS_PROC_LOCK_STATUS;
830
erts_smp_mtx_unlock(&schdlq_mtx);
831
c_p->suspendee = suspendee;
832
if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
833
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
838
/* Mark rp pending for suspend by c_p */
839
add_to_proc_list(&rp->pending_suspenders, c_p->id);
840
ASSERT(is_nil(c_p->suspendee));
842
/* Suspend c_p (caller is assumed to return to process_main
843
immediately). When rp is suspended c_p will be resumed. */
844
suspend_process(c_p);
845
erts_smp_mtx_unlock(&schdlq_mtx);
846
c_p->freason = RESCHEDULE;
847
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
852
if (rp && !(suspendee_locks & ERTS_PROC_LOCK_STATUS))
853
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
854
if (unlock_c_p_status)
855
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
 * Like erts_pid2proc() but:
 *
 * * At least ERTS_PROC_LOCK_MAIN has to be held on c_p.
 * * At least ERTS_PROC_LOCK_MAIN has to be taken on pid.
 * * It also waits for proc to be in a state != running and garbing.
 * * If NULL is returned, the process might have to be rescheduled.
 *   Use ERTS_SMP_BIF_CHK_RESCHEDULE(P) to check this.
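 *
 * Illustrative call pattern only (the exact lock sets and error handling
 * here are assumptions, not taken from this file): a BIF that already
 * holds ERTS_PROC_LOCK_MAIN on c_p might do
 *
 *     rp = erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
 *                                    pid, ERTS_PROC_LOCK_MAIN);
 *     if (!rp)
 *         ERTS_SMP_BIF_CHK_RESCHEDULE(c_p);
 *
 * i.e. a NULL result is not necessarily an error; it may just mean
 * "try again after rescheduling".
 */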
erts_pid2proc_not_running(Process *c_p, Uint32 c_p_locks,
873
Eterm pid, Uint32 pid_locks)
876
int unlock_c_p_status;
878
ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
880
ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
882
c_p->freason = EXC_NULL;
885
return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
887
if (c_p_locks & ERTS_PROC_LOCK_STATUS)
888
unlock_c_p_status = 0;
890
unlock_c_p_status = 1;
891
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
894
if (c_p->suspendee == pid) {
895
/* Process previously suspended by c_p (below)... */
896
Uint32 rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS;
897
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks);
898
c_p->suspendee = NIL;
904
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
905
pid, ERTS_PROC_LOCK_STATUS);
910
erts_smp_mtx_lock(&schdlq_mtx);
911
if (rp->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED) {
915
/* Mark rp pending for suspend by c_p */
916
add_to_proc_list(&rp->pending_suspenders, c_p->id);
917
ASSERT(is_nil(c_p->suspendee));
919
/* Suspend c_p (caller is assumed to return to process_main
920
immediately). When rp is suspended c_p will be resumed. */
921
suspend_process(c_p);
922
c_p->freason = RESCHEDULE;
923
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
927
Uint32 need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
928
if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
929
erts_smp_mtx_unlock(&schdlq_mtx);
930
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
931
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
932
pid, pid_locks|ERTS_PROC_LOCK_STATUS);
935
erts_smp_mtx_lock(&schdlq_mtx);
936
if (rp->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED) {
938
erts_smp_proc_unlock(rp,
939
pid_locks & ~ERTS_PROC_LOCK_STATUS);
944
/* rp is not scheduled and we got the locks we want... */
946
erts_smp_mtx_unlock(&schdlq_mtx);
950
if (rp && !(pid_locks & ERTS_PROC_LOCK_STATUS))
951
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
952
if (unlock_c_p_status)
953
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
 * erts_proc_get_locks() assumes that lckp->mtx is locked by the calling
 * thread and that one or more locks have been taken by other threads.
 * erts_proc_get_locks() returns when all locks in lock_flags
 * have been acquired if wait_for_locks != 0; otherwise, when
 * as many locks as possible have been acquired.
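 *
 * For example (an illustration of the rule above, not a case taken from
 * the code): if lock_flags asks for ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ
 * and another thread currently holds the msgq lock, a call with
 * wait_for_locks == 0 returns after acquiring only the main lock, while a
 * call with wait_for_locks != 0 blocks on lckp->cnd until the msgq lock
 * can be acquired as well.
 */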
erts_proc_get_locks(Process *p,
967
erts_proc_lock_t *lckp,
972
Uint32 got_locks = 0;
973
Uint32 need_locks = lock_flags & ERTS_PROC_LOCKS_ALL;
974
ASSERT(need_locks & (p->lock_flags & ERTS_PROC_LOCKS_ALL));
976
#ifdef ERTS_ENABLE_LOCK_CHECK
978
erts_proc_lc_lock(p, need_locks);
982
* Need to lock as many locks as possible (according to lock order)
983
* in order to avoid starvation.
987
Uint32 lock = (1 << i);
988
if (lock & need_locks) {
990
if (lock & p->lock_flags) {
991
if (wait_for_locks) {
992
p->lock_flags |= ERTS_PROC_LOCK_FLAG_WAITERS;
993
erts_smp_cnd_wait(&lckp->cnd, &lckp->mtx);
997
if (!(need_locks & p->lock_flags)) {
998
p->lock_flags |= need_locks; /* Got them all at once... */
999
#ifdef ERTS_ENABLE_LOCK_CHECK
1000
if (!wait_for_locks)
1001
erts_proc_lc_lock(p, need_locks);
1003
got_locks |= need_locks;
1004
ASSERT(got_locks == (lock_flags & ERTS_PROC_LOCKS_ALL));
1007
goto check_lock_again;
1010
p->lock_flags |= lock;
1011
#ifdef ERTS_ENABLE_LOCK_CHECK
1012
if (!wait_for_locks)
1013
erts_proc_lc_lock(p, lock);
1016
need_locks &= ~lock;
1018
ASSERT(got_locks == (lock_flags & ERTS_PROC_LOCKS_ALL));
1028
* proc_safelock_aux() is a helper function for erts_proc_safelock().
1030
* If no locks are held, process might have become exiting since the
1031
* last time we looked at it; therefore, we must check that process
1032
 * is not exiting each time we acquire the lckp->mtx if no locks
1036
proc_safelock_aux(Process *p, Uint pid, erts_proc_lock_t *lckp,
1037
Uint32 *have_locks, Uint32 *need_locks,
1038
Uint32 get_locks, Uint32 allow_exiting)
1040
#define SAME_PROC(PID, PIX, PROC) \
1041
((PROC) == process_tab[(PIX)] && (PROC)->id == (PID))
1042
#define EXITING_PROC(PROC) \
1043
((PROC)->lock_flags & ERTS_PROC_LOCK_FLAG_EXITING)
1045
Uint pix = internal_pid_index(pid);
1046
int check_same_proc = !*have_locks && pid != ERTS_INVALID_PID;
1047
int check_exiting_proc = (!allow_exiting && !*have_locks);
1048
Uint32 got_locks = 0;
1050
ASSERT((*have_locks & get_locks) == 0);
1051
ASSERT((*have_locks & *need_locks) == 0);
1052
ASSERT((*need_locks & get_locks) != 0);
1054
erts_smp_mtx_lock(&lckp->mtx);
1055
if (check_same_proc && (!SAME_PROC(pid, pix, p)
1056
|| (check_exiting_proc && EXITING_PROC(p))))
1060
if (p->lock_flags & get_locks) {
1061
Uint32 locks = erts_proc_get_locks(p, lckp, get_locks, 0);
1062
get_locks &= ~locks;
1065
p->lock_flags |= ERTS_PROC_LOCK_FLAG_WAITERS;
1066
erts_smp_cnd_wait(&lckp->cnd, &lckp->mtx);
1068
&& (check_same_proc = !got_locks)
1069
&& (!SAME_PROC(pid, pix, p)
1070
|| (check_exiting_proc
1071
&& (check_exiting_proc = !got_locks)
1072
&& EXITING_PROC(p))))
1078
p->lock_flags |= get_locks; /* Got them all at once... */
1079
#ifdef ERTS_ENABLE_LOCK_CHECK
1080
erts_proc_lc_lock(p, get_locks);
1082
got_locks |= get_locks;
1083
/* get_locks = 0; */
1088
erts_smp_mtx_unlock(&lckp->mtx);
1089
*have_locks |= got_locks;
1090
*need_locks &= ~got_locks;
 * erts_proc_safelock() locks process locks on two processes. this_proc
 * should be the currently running process. In order to avoid a deadlock,
 * erts_proc_safelock() unlocks those locks that need to be unlocked,
 * and then acquires locks in lock order (including the previously unlocked
 * ones).
 *
 * If other_proc becomes invalid during the locking, NULL is returned,
 * this_proc's lock state is restored, and all locks on other_proc are
 * released.
 *
 * If allow_this_exiting is true, this_proc is allowed to become invalid
 * (exiting); otherwise, if this_proc becomes invalid, NULL is returned
 * and both processes' lock states are restored.
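 *
 * Worked example (illustrative): suppose this_proc has the smaller pid,
 * the caller already holds ERTS_PROC_LOCK_MAIN on this_proc, and it also
 * needs ERTS_PROC_LOCK_MAIN on other_proc. Locks of the same class are
 * taken on the process with the smaller pid first, so the held lock can
 * be kept and only the lock on other_proc has to be acquired. Had
 * other_proc had the smaller pid, the lock on this_proc would first be
 * released, then taken on other_proc, and finally re-taken on this_proc,
 * in that order.
 */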
erts_proc_safelock(Process * this_proc,
1114
Uint32 this_have_locks,
1115
Uint32 this_need_locks,
1116
int allow_this_exiting,
1118
Process *other_proc,
1119
Uint32 other_have_locks,
1120
Uint32 other_need_locks,
1121
int allow_other_exiting)
1123
Process *p1, *p2, *exiting_p;
1125
Uint32 need_locks1, have_locks1, need_locks2, have_locks2;
1126
Uint32 unlock_mask, ax1, ax2;
1127
erts_proc_lock_t *lckp1, *lckp2;
1133
/* Determine inter process lock order...
1134
* Locks with the same lock order should be locked on p1 before p2.
1137
if (this_proc->id < other_pid) {
1139
pid1 = this_proc->id;
1140
need_locks1 = this_need_locks;
1141
have_locks1 = this_have_locks;
1142
lckp1 = &erts_proc_locks[ERTS_PID2LOCKIX(pid1)];
1143
ax1 = allow_this_exiting;
1146
need_locks2 = other_need_locks;
1147
have_locks2 = other_have_locks;
1148
lckp2 = &erts_proc_locks[ERTS_PID2LOCKIX(pid2)];
1149
ax2 = allow_other_exiting;
1151
else if (this_proc->id > other_pid) {
1154
need_locks1 = other_need_locks;
1155
have_locks1 = other_have_locks;
1156
lckp1 = &erts_proc_locks[ERTS_PID2LOCKIX(pid1)];
1157
ax1 = allow_other_exiting;
1159
pid2 = this_proc->id;
1160
need_locks2 = this_need_locks;
1161
have_locks2 = this_have_locks;
1162
lckp2 = &erts_proc_locks[ERTS_PID2LOCKIX(pid2)];
1163
ax2 = allow_this_exiting;
1166
ASSERT(this_proc == other_proc);
1167
ASSERT(this_proc->id == other_pid);
1169
pid1 = this_proc->id;
1170
need_locks1 = this_need_locks | other_need_locks;
1171
have_locks1 = this_have_locks | other_have_locks;
1172
lckp1 = &erts_proc_locks[ERTS_PID2LOCKIX(pid1)];
1173
ax1 = allow_this_exiting || allow_other_exiting;
1185
need_locks1 = other_need_locks;
1186
have_locks1 = other_have_locks;
1187
lckp1 = &erts_proc_locks[ERTS_PID2LOCKIX(pid1)];
1188
ax1 = allow_other_exiting;
1195
#ifdef ERTS_ENABLE_LOCK_CHECK
1196
this_need_locks = 0;
1197
this_have_locks = 0;
1201
res = 1; /* Prepare for success... */
1206
#ifdef ERTS_ENABLE_LOCK_CHECK
1208
erts_proc_lc_chk_proc_locks(p1, have_locks1);
1210
erts_proc_lc_chk_proc_locks(p2, have_locks2);
1212
if ((need_locks1 & have_locks1) != have_locks1)
1213
erts_lc_fail("Thread tries to release process lock(s) "
1214
"on %T via erts_proc_safelock().", pid1);
1215
if ((need_locks2 & have_locks2) != have_locks2)
1216
erts_lc_fail("Thread tries to release process lock(s) "
1217
"on %T via erts_proc_safelock().",
1222
need_locks1 &= ~have_locks1;
1223
need_locks2 &= ~have_locks2;
1225
/* Figure out the range of locks that needs to be unlocked... */
1226
unlock_mask = ERTS_PROC_LOCKS_ALL;
1228
lock_no <= ERTS_PROC_LOCK_MAX_BIT;
1230
Uint32 lock = (1 << lock_no);
1231
if (lock & need_locks1)
1233
unlock_mask &= ~lock;
1234
if (lock & need_locks2)
1238
/* ... and unlock locks in that range... */
1239
if (have_locks1 || have_locks2) {
1240
Uint32 unlock_locks;
1241
unlock_locks = unlock_mask & have_locks1;
1243
have_locks1 &= ~unlock_locks;
1244
need_locks1 |= unlock_locks;
1245
erts_proc_unlock(p1, unlock_locks);
1247
unlock_locks = unlock_mask & have_locks2;
1249
have_locks2 &= ~unlock_locks;
1250
need_locks2 |= unlock_locks;
1251
erts_proc_unlock(p2, unlock_locks);
1256
* lock_no equals the number of the first lock to lock on
1257
* either p1 *or* p2.
1261
#ifdef ERTS_ENABLE_LOCK_CHECK
1263
erts_proc_lc_chk_proc_locks(p1, have_locks1);
1265
erts_proc_lc_chk_proc_locks(p2, have_locks2);
1268
/* Lock locks in lock order... */
1269
while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
1271
Uint32 lock = (1 << lock_no);
1272
Uint32 lock_mask = 0;
1273
if (need_locks1 & lock) {
1275
lock = (1 << lock_no++);
1277
} while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
1278
&& !(need_locks2 & lock));
1279
if (need_locks2 & lock)
1281
locks = need_locks1 & lock_mask;
1282
if (!proc_safelock_aux(p1, pid1, lckp1,
1283
&have_locks1, &need_locks1,
1289
else if (need_locks2 & lock) {
1290
while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
1291
&& !(need_locks1 & lock)) {
1293
lock = (1 << ++lock_no);
1295
locks = need_locks2 & lock_mask;
1296
if (!proc_safelock_aux(p2, pid2, lckp2,
1297
&have_locks2, &need_locks2,
1310
#ifdef ERTS_ENABLE_LOCK_CHECK
1312
erts_proc_lc_chk_proc_locks(p1, have_locks1);
1314
erts_proc_lc_chk_proc_locks(p2, have_locks2);
1317
if (p1 == this_proc) {
1318
ERTS_SMP_LC_ASSERT(this_need_locks == have_locks1);
1319
ERTS_SMP_LC_ASSERT(other_need_locks == have_locks2);
1322
ERTS_SMP_LC_ASSERT(this_need_locks == have_locks2);
1323
ERTS_SMP_LC_ASSERT(other_need_locks == have_locks1);
1327
ERTS_SMP_LC_ASSERT(p1);
1329
ERTS_SMP_LC_ASSERT(have_locks1
1331
| other_need_locks));
1334
ERTS_SMP_LC_ASSERT(have_locks1 == other_need_locks);
1345
* Note: We may end up here two times if this_proc gets exiting
1346
* the first time we try to lock, and other_proc gets exiting
1347
* when we try to restore the lock states. This is no problem
1348
* and will work out fine.
1352
* We have no locks on the proc that got exiting.
1355
/* Piuhhhh!!! Fix the mess... */
1356
Uint32 restore_locks1, restore_locks2;
1357
Uint32 unlock_locks;
1358
if (this_proc == exiting_p) {
1359
/* Restore locks on both procs */
1360
if (this_proc == p1) {
1361
ASSERT(!have_locks1);
1362
restore_locks1 = this_have_locks;
1363
restore_locks2 = other_have_locks;
1367
ASSERT(this_proc == p2);
1368
ASSERT(!have_locks2);
1369
restore_locks1 = other_have_locks;
1370
restore_locks2 = this_have_locks;
1373
#ifdef ERTS_ENABLE_LOCK_CHECK
1374
this_need_locks = this_have_locks;
1375
other_need_locks = other_have_locks;
1379
/* Restore locks on this_proc */
1380
if (this_proc == p1) {
1381
ASSERT(!have_locks2);
1382
restore_locks1 = this_have_locks;
1387
ASSERT(this_proc == p2);
1388
ASSERT(!have_locks1);
1390
restore_locks2 = this_have_locks;
1393
#ifdef ERTS_ENABLE_LOCK_CHECK
1394
this_need_locks = this_have_locks;
1395
other_need_locks = 0;
1399
unlock_locks = have_locks1 & ~restore_locks1;
1401
erts_proc_unlock(p1, unlock_locks);
1402
have_locks1 &= ~unlock_locks;
1404
need_locks1 = restore_locks1;
1406
unlock_locks = have_locks2 & ~restore_locks2;
1408
erts_proc_unlock(p2, unlock_locks);
1409
have_locks2 &= ~unlock_locks;
1411
need_locks2 = restore_locks2;
1413
if (need_locks1 != have_locks1 || need_locks2 != have_locks2)
1417
ASSERT(exiting_p == other_proc);
1418
/* No this_proc and other_proc exiting == we are done */
1419
#ifdef ERTS_ENABLE_LOCK_CHECK
1420
need_locks1 = have_locks1 = need_locks2 = have_locks2
1421
= this_need_locks = other_need_locks = 0;
1428
#endif /* ERTS_SMP */
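/*
 * Illustrative analogy only (not ERTS code): the deadlock-avoidance idea
 * used by erts_proc_safelock() above -- release whatever is held "out of
 * order" and then (re)acquire everything in one global lock order -- shown
 * with two plain pthread mutexes. All names below are hypothetical.
 */
#if 0 /* example, not compiled with the emulator */
#include <pthread.h>
#include <stdint.h>

/* Caller holds `held` and additionally wants `wanted`; take both without
 * risking a deadlock by ordering the mutexes on their addresses. */
static void toy_safelock(pthread_mutex_t *held, pthread_mutex_t *wanted)
{
    if ((uintptr_t) wanted < (uintptr_t) held) {
        /* The lock we already hold comes after the one we want in the
         * global order: release it, then retake both in order. */
        pthread_mutex_unlock(held);
        pthread_mutex_lock(wanted);
        pthread_mutex_lock(held);
    } else {
        /* Already in order: just take the missing lock. */
        pthread_mutex_lock(wanted);
    }
}
#endif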
1430
Uint erts_get_no_schedulers(void)
1435
return (Uint) erts_smp_atomic_read(&atomic_no_schedulers);
1440
erts_set_no_schedulers(Process *c_p, Uint *oldp, Uint *actualp, Uint wanted, int *reschedule)
1455
ErtsSchedulerData *esdp;
1458
erts_smp_mtx_lock(&schdlq_mtx);
1461
*oldp = no_schedulers;
1468
if (changing_no_schedulers) {
1470
* Only one scheduler at a time is allowed to change the
1471
* number of schedulers. Currently, someone else is doing
1472
 * this, i.e. we need to reschedule c_p ...
1478
changing_no_schedulers = 1;
1480
use_no_schedulers = wanted;
1482
if (use_no_schedulers > ERTS_MAX_NO_OF_SCHEDULERS) {
1483
use_no_schedulers = ERTS_MAX_NO_OF_SCHEDULERS;
1487
while (no_schedulers > use_no_schedulers) {
1488
erts_smp_cnd_broadcast(&schdlq_cnd);
1490
/* Wait for another scheduler to terminate ... */
1491
erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
1495
erts_smp_cnd_wait(&schdlq_cnd, &schdlq_mtx);
1496
erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
1503
while (no_schedulers < use_no_schedulers) {
1505
erts_smp_chk_system_block(prepare_for_block,
1508
esdp = erts_alloc_fnf(ERTS_ALC_T_SCHDLR_DATA, sizeof(ErtsSchedulerData));
1511
use_no_schedulers = no_schedulers;
1514
last_scheduler_no++;
1515
init_sched_thr_data(esdp);
1516
cres = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,1);
1519
erts_free(ERTS_ALC_T_SCHDLR_DATA, (void *) esdp);
1520
last_scheduler_no--;
1521
use_no_schedulers = no_schedulers;
1526
erts_smp_atomic_inc(&atomic_no_schedulers);
1529
schedulers->prev = esdp;
1530
esdp->next = schedulers;
1535
changing_no_schedulers = 0;
1539
*actualp = no_schedulers;
1541
erts_smp_mtx_unlock(&schdlq_mtx);
1544
#endif /* ERTS_SMP */
150
1548
sched_q_len(void)
155
for (i = 0; i < NPRIORITY_LEVELS; i++) {
1555
erts_smp_mtx_lock(&schdlq_mtx);
1558
for (i = 0; i < NPRIORITY_LEVELS - 1; i++) {
158
1561
for (p = queue[i].first; p != NULL; p = p->next) {
1565
ASSERT(len == runq_len);
1570
erts_smp_mtx_unlock(&schdlq_mtx);
1577
is_proc_in_schdl_q(Process *p)
1580
for (i = 0; i < NPRIORITY_LEVELS - 1; i++) {
1582
for (rp = queue[i].first; rp; rp = rp->next) {
165
1591
/* schedule a process */
167
add_to_schedule_q(Process *p)
1592
static ERTS_INLINE void
1593
internal_add_to_schedule_q(Process *p)
169
ScheduleQ* sq = &queue[p->prio];
171
/* Never schedule a suspended process */
1596
 * ERTS_SMP: internal_add_to_schedule_q should only be used from:
1597
 * - add_to_schedule_q()
1598
* - schedule() when schdlq_mtx and scheduler is about
1599
* to schedule a new process.
1605
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
1606
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlq_mtx));
1608
if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
1610
else if (p->scheduler_flags & ERTS_PROC_SCHED_FLG_SCHEDULED) {
1611
ASSERT(p->status != P_SUSPENDED);
1613
ASSERT(!is_proc_in_schdl_q(p));
1615
p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
1618
ASSERT(!p->scheduler_data);
1622
ASSERT(!is_proc_in_schdl_q(p));
1628
sq = &queue[PRIORITY_NORMAL];
1630
case PRIORITY_NORMAL:
1633
sq = &queue[p->prio];
1637
/* Never schedule a suspended process (ok in smp case) */
172
1638
ASSERT(p->status != P_SUSPENDED);
174
1641
qmask |= (1 << p->prio);
176
1644
if (sq->first == (Process *) 0)
1018
3250
sz_token = size_object(token);
1019
3251
bp = new_message_buffer(term_size+sz_token);
1021
mess = copy_struct(exit_term, term_size, &hp, &MSO(to));
3253
mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
1022
3254
/* the trace token must in this case be updated by the caller */
1023
3255
seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL);
1024
temp_token = copy_struct(token, sz_token, &hp, &MSO(to));
1025
queue_message_tt(to, bp, mess, temp_token);
3256
temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
3257
erts_queue_message(to, *to_locksp, bp, mess, temp_token);
/*
 * *** Exit signal behavior ***
 *
 * Exit signals are asynchronous (truly asynchronous in the
 * SMP emulator). When the signal is received the receiver receives an
 * 'EXIT' message if it is trapping exits; otherwise, it will either
 * ignore the signal if the exit reason is normal, or go into an
 * exiting state (status P_EXITING). When a process has gone into the
 * exiting state it will not execute any more Erlang code, but it might
 * take a while before it actually exits. The exit signal is being
 * received when the 'EXIT' message is put in the message queue, the
 * signal is dropped, or when it changes state into exiting. The time it
 * is in the exiting state before actually exiting is undefined (it
 * might take a really long time under certain conditions). The
 * receiver of the exit signal does not break links or trigger monitors
 * until it actually exits.
 *
 * Exit signals and other signals, e.g. messages, have to be received
 * by a receiver in the same order as sent by a sender.
 *
 * Exit signal implementation in the SMP emulator:
 *
 * If the receiver is trapping exits, the signal is transformed
 * into an 'EXIT' message and sent as a normal message; if the
 * reason is normal, the signal is dropped; otherwise, the process
 * is determined to be exited. The interesting case is when the
 * process is to be exited, and this is what is described below.
 *
 * If it is possible, the receiver is set in the exiting state straight
 * away and we are done; otherwise, the sender places the exit reason
 * in the pending_exit field of the process struct and, if necessary,
 * adds the receiver to the run queue. It is typically not possible
 * to set a scheduled process, or a process which we cannot get all locks
 * on without releasing locks on it, in an exiting state straight away.
 *
 * The receiver will poll the pending_exit field when it reaches certain
 * places during its execution. When it discovers the pending exit
 * it will change state into the exiting state. If the receiver wasn't
 * scheduled when the pending exit was set, the first scheduler that
 * schedules a new process will set the receiving process in the exiting
 * state just before it schedules the next process.
 *
 * When the exit signal is placed in the pending_exit field, the signal
 * is considered as being in transit on the Erlang level. The signal is
 * actually in some kind of semi-transit state, since we have already
 * determined how it should be received. It will exit the process no
 * matter what if it is received (the process may exit by itself before
 * reception of the exit signal). The signal is received when it is
 * discovered in the pending_exit field by the receiver.
 *
 * The receiver has to poll the pending_exit field at least before:
 * - moving messages from the message in queue to the private message
 *   queue. This is in order to preserve signal order.
 * - unlink. Otherwise the process might get exited on a link that
 *   has been removed.
 * - changing the trap_exit flag to true. This is in order to simplify the
 *   implementation; otherwise, we would have to transform the signal
 *   into an 'EXIT' message when setting the trap_exit flag to true. We
 *   would also have to maintain a queue of exit signals in transit.
 * - being scheduled in or out.
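 *
 * Example of the ordering requirement above (illustrative): if process
 * A first sends a message M to process B and then sends B an exit
 * signal, B must not act on the exit signal before M has been put in
 * its message queue; acting on the exit first would violate the
 * "same order as sent" rule. This is why pending_exit is polled before
 * messages are moved from the message in queue to the private queue.
 */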
static ERTS_INLINE int
3327
send_exit_signal(Process *c_p, /* current process if and only
3328
if reason is stored on it */
3329
Eterm from, /* Id of sender of signal */
3330
Process *rp, /* receiving process */
3331
Uint32 *rp_locks, /* current locks on receiver */
3332
Eterm reason, /* exit reason */
3333
Eterm exit_tuple, /* Prebuild exit tuple
3335
Uint exit_tuple_sz, /* Size of prebuilt exit tuple
3336
(if exit_tuple != THE_NON_VALUE) */
3337
Eterm token, /* token */
3338
Process *token_update, /* token updater */
3339
Uint32 flags /* flags */
3342
Eterm rsn = reason == am_kill ? am_killed : reason;
3344
ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
3345
ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
3346
== ERTS_PROC_LOCKS_XSIG_SEND);
3348
ASSERT(reason != THE_NON_VALUE);
3350
if (ERTS_PROC_IS_TRAPPING_EXITS(rp)
3351
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
3352
if (is_not_nil(token) && token_update)
3353
seq_trace_update_send(token_update);
3354
if (is_value(exit_tuple))
3355
send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
3357
erts_deliver_exit_message(from, rp, rp_locks, rsn, token);
3358
return 1; /* Receiver will get a message */
3360
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
3362
if (!ERTS_PROC_PENDING_EXIT(rp) && !rp->is_exiting) {
3363
ASSERT(rp->status != P_EXITING);
3364
ASSERT(rp->status != P_FREE);
3365
ASSERT(!rp->pending_exit.bp);
3367
if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
3368
/* Ensure that all locks on c_p are locked before
3370
if (*rp_locks != ERTS_PROC_LOCKS_ALL) {
3371
Uint32 need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
3372
if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) {
3373
erts_smp_proc_unlock(c_p,
3374
*rp_locks & ~ERTS_PROC_LOCK_MAIN);
3375
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
3377
*rp_locks = ERTS_PROC_LOCKS_ALL;
3379
set_proc_exiting(c_p, rsn, NULL);
3381
else if (!(rp->status_flags & ERTS_PROC_SFLG_SCHEDULED)) {
3382
/* Process not scheduled ... */
3383
Uint32 need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
3385
&& erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
3386
/* ... but we haven't got all locks on it ... */
3387
save_pending_exiter(rp->id);
3389
* The pending exit will be discovered when next
3390
* process is scheduled in
3392
goto set_pending_exit;
3395
/* ...and we have all locks on it... */
3396
*rp_locks = ERTS_PROC_LOCKS_ALL;
3397
set_proc_exiting(rp,
3400
: copy_object(rsn, rp)),
3404
else { /* Process scheduled... */
3407
* The pending exit will be discovered when the process
3408
* is scheduled out if not discovered earlier.
3412
if (is_immed(rsn)) {
3413
rp->pending_exit.reason = rsn;
3417
Uint sz = size_object(rsn);
3418
ErlHeapFragment *bp = new_message_buffer(sz);
3421
rp->pending_exit.reason = copy_struct(rsn,
3425
rp->pending_exit.bp = bp;
3427
ASSERT(ERTS_PROC_PENDING_EXIT(rp));
3429
if (!(rp->status_flags
3430
& (ERTS_PROC_SFLG_INRUNQ|ERTS_PROC_SFLG_SCHEDULED)))
3431
add_to_schedule_q(rp);
3435
* The receiver already has a pending exit (or is exiting)
3436
* so we drop this signal.
3438
* NOTE: dropping this exit signal is based on the assumption
3439
* that the receiver *will* exit; either on the pending
3440
* exit or by itself before seeing the pending exit.
3442
#else /* !ERTS_SMP */
3444
rp->status = P_EXITING;
3447
else if (rp->status != P_EXITING) { /* No recursive process exits /PaN */
3448
Eterm old_status = rp->status;
3449
set_proc_exiting(rp,
3450
is_immed(rsn) ? rsn : copy_object(rsn, rp),
3453
if (old_status != P_RUNABLE && old_status != P_RUNNING)
3454
add_to_schedule_q(rp);
3457
return -1; /* Receiver will exit */
3460
return 0; /* Receiver unaffected */
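/*
 * Illustrative sketch only (not part of the emulator): the pending-exit
 * mechanism described above can be thought of as an atomic flag that the
 * sending thread sets and that the receiver polls at its safe points.
 * All names below are hypothetical; the real implementation uses the
 * pending_exit field together with the process locks, not a bare atomic.
 */
#if 0 /* example, not compiled with the emulator */
#include <stdatomic.h>

typedef struct {
    atomic_int exit_pending;   /* 0 = no pending exit, 1 = exit requested */
} toy_proc;

/* Sender side: request an asynchronous exit of p. */
static void toy_send_exit_signal(toy_proc *p)
{
    atomic_store_explicit(&p->exit_pending, 1, memory_order_release);
}

/* Receiver side: polled before fetching messages, before unlinking,
 * before enabling trap_exit, and when being scheduled in or out. */
static int toy_check_pending_exit(toy_proc *p)
{
    return atomic_load_explicit(&p->exit_pending, memory_order_acquire);
}
#endif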
3465
erts_send_exit_signal(Process *c_p,
3471
Process *token_update,
3474
return send_exit_signal(c_p,
3489
} ExitMonitorContext;
3491
static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
3493
ExitMonitorContext *pcontext = vpcontext;
3498
if (mon->type == MON_ORIGIN) {
3499
/* We are monitoring someone else, we need to demonitor that one.. */
3500
if (is_atom(mon->pid)) { /* remote by name */
3501
ASSERT(is_node_name_atom(mon->pid));
3502
dep = erts_sysname_to_connected_dist_entry(mon->pid);
3505
erts_smp_dist_entry_lock(dep);
3506
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
3508
dist_demonitor(NULL,0,dep,rmon->pid,mon->name,mon->ref,1);
3509
erts_destroy_monitor(rmon);
3511
erts_smp_io_unlock();
3512
erts_smp_dist_entry_unlock(dep);
3513
erts_deref_dist_entry(dep);
3516
ASSERT(is_pid(mon->pid));
3517
if (is_internal_pid(mon->pid)) { /* local by pid or name */
3518
rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
3522
rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
3523
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
3527
erts_destroy_monitor(rmon);
3528
} else { /* remote by pid */
3529
ASSERT(is_external_pid(mon->pid));
3530
dep = external_pid_dist_entry(mon->pid);
3531
ASSERT(dep != NULL);
3534
erts_smp_dist_entry_lock(dep);
3535
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
3537
dist_demonitor(NULL,0,dep,rmon->pid,mon->pid,mon->ref,1);
3538
erts_destroy_monitor(rmon);
3540
erts_smp_io_unlock();
3541
erts_smp_dist_entry_unlock(dep);
3545
} else { /* type == MON_TARGET */
3546
ASSERT(mon->type == MON_TARGET && is_pid(mon->pid));
3547
if (is_internal_pid(mon->pid)) {/* local by name or pid */
3550
Uint32 rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_MSG_SEND;
3551
rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
3555
rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
3557
erts_destroy_monitor(rmon);
3558
watched = (is_atom(mon->name)
3559
? TUPLE2(lhp, mon->name,
3560
erts_this_dist_entry->sysname)
3562
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
3563
watched, pcontext->reason);
3565
/* else: demonitor while we exited, i.e. do nothing... */
3566
erts_smp_proc_unlock(rp, rp_locks);
3567
} else { /* external by pid or name */
3568
ASSERT(is_external_pid(mon->pid));
3570
dep = external_pid_dist_entry(mon->pid);
3571
ASSERT(dep != NULL);
3573
erts_smp_dist_entry_lock(dep);
3574
rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
3576
dist_m_exit(NULL, 0,
3577
dep, mon->pid, (rmon->name != NIL)
3578
? rmon->name : rmon->pid,
3579
mon->ref, pcontext->reason);
3580
erts_destroy_monitor(rmon);
3582
erts_smp_dist_entry_unlock(dep);
3584
erts_smp_io_unlock();
3588
/* As the monitors are previously removed from the process,
3589
distribution operations will not cause monitors to disappear,
3590
we can safely delete it. */
3592
erts_destroy_monitor(mon);
3602
static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
3604
ExitLinkContext *pcontext = vpcontext;
3605
/* Unpack context, it's readonly */
3606
Process *p = pcontext->p;
3607
Eterm reason = pcontext->reason;
3608
Eterm exit_tuple = pcontext->exit_tuple;
3609
Uint exit_tuple_sz = pcontext->exit_tuple_sz;
3610
Eterm item = lnk->pid;
3618
if(is_internal_port(item)) {
3620
ix = internal_port_index(item);
3621
if (! INVALID_PORT(erts_port+ix, item)) {
3622
rlnk = erts_remove_link(&(erts_port[ix].nlinks),
3625
erts_destroy_link(rlnk);
3627
erts_do_exit_port(item, p->id, reason);
3629
erts_smp_io_unlock();
3631
else if(is_external_port(item)) {
3632
dep = external_port_dist_entry(item);
3633
if(dep != erts_this_dist_entry) {
3635
erts_smp_dist_entry_lock(dep);
3636
dist_exit(NULL, 0, dep, p->id, item, reason);
3637
erts_smp_dist_entry_unlock(dep);
3638
erts_smp_io_unlock();
3641
else if (is_internal_pid(item)) {
3642
Uint32 rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
3643
rp = erts_pid2proc(NULL, 0, item, rp_locks);
3645
rlnk = erts_remove_link(&(rp->nlinks), p->id);
3646
/* If rlnk == NULL, we got unlinked while exiting,
3647
i.e., do nothing... */
3650
erts_destroy_link(rlnk);
3651
xres = send_exit_signal(NULL,
3660
ERTS_XSIG_FLG_IGN_KILL);
3661
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
3662
/* We didn't exit the process and it is traced */
3663
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
3664
trace_proc(p, rp, am_getting_unlinked, p->id);
3669
erts_smp_proc_unlock(rp, rp_locks);
3672
else if (is_external_pid(item)) {
3673
dep = external_pid_dist_entry(item);
3674
if(dep != erts_this_dist_entry) {
3676
erts_smp_dist_entry_lock(dep);
3677
if (SEQ_TRACE_TOKEN(p) != NIL) {
3678
seq_trace_update_send(p);
3680
dist_exit_tt(NULL,0,dep,p->id,item,reason,SEQ_TRACE_TOKEN(p));
3681
erts_smp_io_unlock();
3682
erts_smp_dist_entry_unlock(dep);
3687
ASSERT(is_node_name_atom(item));
3688
dep = erts_sysname_to_connected_dist_entry(item);
3690
/* dist entries have node links in a separate structure to
3692
erts_smp_dist_entry_lock(dep);
3693
rlnk = erts_remove_link(&(dep->node_links), p->id);
3694
erts_smp_dist_entry_unlock(dep);
3696
erts_destroy_link(rlnk);
3698
erts_deref_dist_entry(dep);
3701
/* XXX Is this possible? Shouldn't this link
3702
previously have been removed if the node
3703
had previously been disconnected. */
3706
/* This is possible when smp support has been enabled,
3707
and dist port and process exits simultaneously. */
3712
erl_exit(1, "bad type in link list\n");
3715
erts_destroy_link(lnk);
/* this function finishes a process and propagates exit messages - called
   by process_main when a process dies */
1033
do_exit(Process* p, Eterm reason)
3722
erts_do_exit_process(Process* p, Eterm reason)
1041
Eterm exit_tuple = NIL;
1042
Uint exit_tuple_sz = 0;
1044
3727
p->arity = 0; /* No live registers */
1045
3728
p->fvalue = reason;
3731
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p->id);
/* By locking all locks (main lock is already locked) when going
   to status P_EXITING, it is enough to take any lock when
   looking up a process (erts_pid2proc()) to prevent the looked up
   process from exiting until the lock has been released. */
3736
erts_smp_proc_lock(p,
3737
ERTS_PROC_LOCKS_ALL_MINOR|ERTS_PROC_LOCK_FLAG_EXITING);
1046
3741
p->status = P_EXITING;
3745
if (ERTS_PROC_PENDING_EXIT(p)) {
3746
/* Process exited before pending exit was received... */
3747
p->pending_exit.reason = THE_NON_VALUE;
3748
if (p->pending_exit.bp) {
3749
free_message_buffer(p->pending_exit.bp);
3750
p->pending_exit.bp = NULL;
3754
cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
3756
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
1048
3759
if (IS_TRACED_FL(p,F_TRACE_PROCS))
1049
3760
trace_proc(p, p, am_exit, reason);
1051
if (p->flags & F_TRACER) {
1052
if (EQ(erts_default_tracer, p->id)) {
1053
erts_default_tracer = NIL;
1054
erts_default_process_flags &= ~TRACE_FLAGS;
1056
if (EQ(erts_system_monitor, p->id)) {
1057
erts_system_monitor_clear();
3762
erts_trace_check_exiting(p->id);
3764
cancel_timer(p); /* Always cancel timer just in case */
3767
erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
3769
if (p->flags & F_USING_DB)
3770
db_proc_dead(p->id);
3772
if (p->flags & F_USING_DDLL) {
3773
erts_ddll_proc_dead(p, ERTS_PROC_LOCKS_ALL);
3777
* The registered name *should* be the last "erlang resource" to
3781
(void) erts_unregister_name(p, ERTS_PROC_LOCKS_ALL, 0, p->reg->name);
3785
erts_smp_mtx_t *ptabix_mtxp;
3787
ptabix_mtxp = &(erts_proc_locks[ERTS_PID2LOCKIX(p->id)].mtx);
3792
ASSERT(internal_pid_index(p->id) < erts_max_processes);
3793
pix = internal_pid_index(p->id);
3795
erts_smp_mtx_lock(&proc_tab_mtx);
3796
erts_smp_mtx_lock(&schdlq_mtx);
3797
erts_smp_mtx_lock(ptabix_mtxp);
3800
ASSERT(p->scheduler_data);
3801
ASSERT(p->scheduler_data->current_process == p);
3802
ASSERT(p->scheduler_data->free_process == NULL);
3804
p->scheduler_data->current_process = NULL;
3805
p->scheduler_data->free_process = p;
3806
p->status_flags = 0;
3808
process_tab[pix] = NULL; /* Time of death! */
3809
ASSERT(erts_smp_atomic_read(&process_count) > 0);
3810
erts_smp_atomic_dec(&process_count);
3812
erts_smp_mtx_unlock(ptabix_mtxp);
3813
erts_smp_mtx_unlock(&schdlq_mtx);
3816
if (p_last >= p_next) {
3818
p_serial &= p_serial_mask;
3823
erts_smp_mtx_unlock(&proc_tab_mtx);
3827
* All "erlang resources" have to be deallocated before this point,
3828
* e.g. registered name, so monitoring and linked processes can
3829
* be sure that all interesting resources have been deallocated
3830
* when the monitors and/or links hit.
3834
p->monitors = NULL; /* to avoid recursive deletion during traversal */
3839
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
3842
if ((p->flags & F_DISTRIBUTION) && p->dist_entry)
3843
erts_do_net_exits(p->dist_entry);
1066
3846
* Pre-build the EXIT tuple if there are any links.
1070
if (HEAP_LIMIT(p) - HEAP_TOP(p) <= 4) {
1071
(void) erts_garbage_collect(p, 4, NULL, 0);
1076
3856
exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason);
1078
3858
exit_tuple_sz = size_object(exit_tuple);
1082
while (lnk != NULL) {
1086
if(is_internal_port(item)) {
1087
ix = internal_port_index(item);
1088
if (! INVALID_PORT(erts_port+ix, item)) {
1089
del_link(find_link(&erts_port[ix].links,LNK_LINK,
1091
do_exit_port(item, p->id, reason);
1094
else if(is_external_port(item)) {
1095
dep = external_port_dist_entry(item);
1096
if(dep != erts_this_dist_entry)
1097
dist_exit(dep, p->id, item, reason);
1099
else if (is_internal_pid(item)) {
1100
if ((rp = pid2proc(item)) != NULL) {
1102
find_link(&rp->links, LNK_LINK, p->id, NIL);
1104
if (rp->flags & F_TRAPEXIT) {
1105
if (SEQ_TRACE_TOKEN(p) != NIL ) {
1106
seq_trace_update_send(p);
1108
send_exit_message(rp, exit_tuple, exit_tuple_sz,
1109
SEQ_TRACE_TOKEN(p));
1110
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rlinkpp != NULL) {
1111
trace_proc(p, rp, am_getting_unlinked, p->id);
1113
} else if (reason == am_normal) {
1114
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rlinkpp != NULL) {
1115
trace_proc(p, rp, am_getting_unlinked, p->id);
1118
schedule_exit(rp, reason);
1122
else if (is_external_pid(item)) {
1123
dep = external_pid_dist_entry(item);
1124
if(dep != erts_this_dist_entry) {
1125
if (SEQ_TRACE_TOKEN(p) != NIL) {
1126
seq_trace_update_send(p);
1128
dist_exit_tt(dep, p->id, item, reason, SEQ_TRACE_TOKEN(p));
1134
if (item == p->id) {
1135
/* We are monitoring 'data' */
1136
if (is_atom(lnk->data)) {
1137
/* Monitoring a name on this node */
1138
ASSERT(is_node_name_atom(lnk->data));
1139
dep = erts_sysname_to_connected_dist_entry(lnk->data);
1144
ASSERT(is_pid(lnk->data));
1145
dep = pid_dist_entry(lnk->data);
1147
if (dep != erts_this_dist_entry) {
1149
lnkp = find_link_by_ref(&dep->links, ref);
1151
/* Force send, use the atom in dist slot
1152
* link list as data for the message.
1154
dist_demonitor(dep, item, (*lnkp)->data, ref, 1);
1155
/* dist_demonitor() may have removed the link;
1156
therefore, look it up again. */
1157
lnkp = find_link_by_ref(&dep->links, ref);
1161
if ((rp = pid2proc(lnk->data)) != NULL)
1162
del_link(find_link_by_ref(&rp->links, ref));
1165
/* 'Item' is monitoring us */
1166
if (is_internal_pid(item)) {
1167
if ((rp = pid2proc(item)) != NULL) {
1169
Eterm item = (is_atom(lnk->data)
1172
erts_this_dist_entry->sysname)
1174
ASSERT(lnk->data == p->id || is_atom(lnk->data));
1175
queue_monitor_message(rp, ref, am_process,
1177
del_link(find_link_by_ref(&rp->links, ref));
1179
} else if (is_external_pid(item)) {
1180
dep = external_pid_dist_entry(item);
1181
if(dep != erts_this_dist_entry)
1182
dist_m_exit(dep, item, lnk->data, ref, reason);
1191
ASSERT(is_node_name_atom(item));
1192
dep = erts_sysname_to_connected_dist_entry(item);
1194
del_link(find_link(&dep->links,LNK_NODE,p->id,NIL));
1196
/* XXX Is this possible? Shouldn't this link
1197
previously have been removed if the node
1198
had previously been disconnected. */
1206
erl_exit(1, "bad type in link list\n");
3861
ExitLinkContext context = {p, reason, exit_tuple, exit_tuple_sz};
3862
erts_sweep_links(lnk, &doit_exit_link, &context);
1209
del_link(&lnk); /* will set lnk to next as well !! */
1212
if ((p->flags & F_DISTRIBUTION) && p->dist_entry)
1213
do_net_exits(p->dist_entry);
3867
ExitMonitorContext context = {reason, p};
3868
erts_sweep_monitors(mon,&doit_exit_monitor,&context);
1215
3871
delete_process(p);
3873
#ifdef ERTS_ENABLE_LOCK_CHECK
3874
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); /* Make process_main() happy */
1218
3878
/* Callback for process timeout */