1
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* ***** BEGIN LICENSE BLOCK *****
3
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
5
* The contents of this file are subject to the Mozilla Public License Version
6
* 1.1 (the "License"); you may not use this file except in compliance with
7
* the License. You may obtain a copy of the License at
8
* http://www.mozilla.org/MPL/
10
* Software distributed under the License is distributed on an "AS IS" basis,
11
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12
* for the specific language governing rights and limitations under the
15
* The Original Code is the Netscape Portable Runtime (NSPR).
17
* The Initial Developer of the Original Code is
18
* Netscape Communications Corporation.
19
* Portions created by the Initial Developer are Copyright (C) 1998-2000
20
* the Initial Developer. All Rights Reserved.
24
* Alternatively, the contents of this file may be used under the terms of
25
* either the GNU General Public License Version 2 or later (the "GPL"), or
26
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
27
* in which case the provisions of the GPL or the LGPL are applicable instead
28
* of those above. If you wish to allow use of your version of this file only
29
* under the terms of either the GPL or the LGPL, and not to allow others to
30
* use your version of this file under the terms of the MPL, indicate your
31
* decision by deleting the provisions above and replace them with the notice
32
* and other provisions required by the GPL or the LGPL. If you do not delete
33
* the provisions above, a recipient may use your version of this file under
34
* the terms of any one of the MPL, the GPL or the LGPL.
36
* ***** END LICENSE BLOCK ***** */
44
** Some local variables report warnings on Win95 because the code paths
45
** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
46
** The pragma suppresses the warning.
49
#pragma warning(disable : 4101)
56
/* _pr_activeLock protects the following global variables */
57
PRLock *_pr_activeLock;
58
PRInt32 _pr_primordialExitCount; /* In PR_Cleanup(), the primordial thread
59
* waits until all other user (non-system)
60
* threads have terminated before it exits.
61
* So whenever we decrement _pr_userActive,
63
* _pr_primordialExitCount.
64
* If the primordial thread is a system
65
* thread, then _pr_primordialExitCount
66
* is 0. If the primordial thread is
67
* itself a user thread, then
68
* _pr_primordialExitCount is 1.
70
PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
71
* _pr_primordialExitCount, this condition
72
* variable is notified.
75
PRLock *_pr_deadQLock;
76
PRUint32 _pr_numNativeDead;
77
PRUint32 _pr_numUserDead;
78
PRCList _pr_deadNativeQ;
79
PRCList _pr_deadUserQ;
81
PRUint32 _pr_join_counter;
83
PRUint32 _pr_local_threads;
84
PRUint32 _pr_global_threads;
86
PRBool suspendAllOn = PR_FALSE;
87
PRThread *suspendAllThread = NULL;
89
extern PRCList _pr_active_global_threadQ;
90
extern PRCList _pr_active_local_threadQ;
92
static void _PR_DecrActiveThreadCount(PRThread *thread);
93
static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
94
static void _PR_InitializeNativeStack(PRThreadStack *ts);
95
static void _PR_InitializeRecycledThread(PRThread *thread);
96
static void _PR_UserRunThread(void);
98
void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
102
#pragma unused (maxPTDs)
106
PRThreadStack *stack;
108
_pr_terminationCVLock = PR_NewLock();
109
_pr_activeLock = PR_NewLock();
111
#ifndef HAVE_CUSTOM_USER_THREADS
112
stack = PR_NEWZAP(PRThreadStack);
113
#ifdef HAVE_STACK_GROWING_UP
114
stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
117
#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
118
stack->stackTop = (char*) &thread;
119
#elif defined(XP_MAC)
120
stack->stackTop = (char*) LMGetCurStackBase();
122
stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
123
>> _pr_pageShift) << _pr_pageShift);
127
/* If stack is NULL, we're using custom user threads like NT fibers. */
128
stack = PR_NEWZAP(PRThreadStack);
130
stack->stackSize = 0;
131
_PR_InitializeNativeStack(stack);
133
#endif /* HAVE_CUSTOM_USER_THREADS */
135
thread = _PR_AttachThread(type, priority, stack);
137
_PR_MD_SET_CURRENT_THREAD(thread);
139
if (type == PR_SYSTEM_THREAD) {
140
thread->flags = _PR_SYSTEM;
142
_pr_primordialExitCount = 0;
145
_pr_primordialExitCount = 1;
147
thread->no_sched = 1;
148
_pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
151
if (!thread) PR_Abort();
152
#ifdef _PR_LOCAL_THREADS_ONLY
153
thread->flags |= _PR_PRIMORDIAL;
155
thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
159
* Needs _PR_PRIMORDIAL flag set before calling
160
* _PR_MD_INIT_THREAD()
162
if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
168
if (_PR_IS_NATIVE_THREAD(thread)) {
169
PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
170
_pr_global_threads++;
172
PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
176
_pr_recycleThreads = 0;
177
_pr_deadQLock = PR_NewLock();
178
_pr_numNativeDead = 0;
180
PR_INIT_CLIST(&_pr_deadNativeQ);
181
PR_INIT_CLIST(&_pr_deadUserQ);
184
void _PR_CleanupThreads(void)
186
if (_pr_terminationCVLock) {
187
PR_DestroyLock(_pr_terminationCVLock);
188
_pr_terminationCVLock = NULL;
190
if (_pr_activeLock) {
191
PR_DestroyLock(_pr_activeLock);
192
_pr_activeLock = NULL;
194
if (_pr_primordialExitCVar) {
195
PR_DestroyCondVar(_pr_primordialExitCVar);
196
_pr_primordialExitCVar = NULL;
198
/* TODO _pr_dead{Native,User}Q need to be deleted */
200
PR_DestroyLock(_pr_deadQLock);
201
_pr_deadQLock = NULL;
206
** Initialize a stack for a native thread
208
static void _PR_InitializeNativeStack(PRThreadStack *ts)
210
if( ts && (ts->stackTop == 0) ) {
211
ts->allocSize = ts->stackSize;
214
** Setup stackTop and stackBottom values.
216
#ifdef HAVE_STACK_GROWING_UP
217
ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
219
ts->stackBottom = ts->allocBase + ts->stackSize;
220
ts->stackTop = ts->allocBase;
222
ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
223
>> _pr_pageShift) << _pr_pageShift);
224
ts->stackTop = ts->allocBase;
225
ts->stackBottom = ts->allocBase - ts->stackSize;
230
void _PR_NotifyJoinWaiters(PRThread *thread)
233
** Handle joinable threads. Change the state to waiting for join.
234
** Remove from our run Q and put it on global waiting to join Q.
235
** Notify on our "termination" condition variable so that joining
236
** thread will know about our termination. Switch our context and
237
** come back later on to continue the cleanup.
239
PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
240
if (thread->term != NULL) {
241
PR_Lock(_pr_terminationCVLock);
242
_PR_THREAD_LOCK(thread);
243
thread->state = _PR_JOIN_WAIT;
244
if ( !_PR_IS_NATIVE_THREAD(thread) ) {
245
_PR_MISCQ_LOCK(thread->cpu);
246
_PR_ADD_JOINQ(thread, thread->cpu);
247
_PR_MISCQ_UNLOCK(thread->cpu);
249
_PR_THREAD_UNLOCK(thread);
250
PR_NotifyCondVar(thread->term);
251
PR_Unlock(_pr_terminationCVLock);
252
_PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
253
PR_ASSERT(thread->state != _PR_JOIN_WAIT);
259
* Zero some of the data members of a recycled thread.
261
* Note that we can do this either when a dead thread is added to
262
* the dead thread queue or when it is reused. Here, we are doing
263
* this lazily, when the thread is reused in _PR_CreateThread().
265
static void _PR_InitializeRecycledThread(PRThread *thread)
268
* Assert that the following data members are already zeroed
269
* by _PR_CleanupThread().
272
if (thread->privateData) {
274
for (i = 0; i < thread->tpdLength; i++) {
275
PR_ASSERT(thread->privateData[i] == NULL);
279
PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
280
PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
281
PR_ASSERT(thread->errorStringLength == 0);
283
/* Reset data members in thread structure */
284
thread->errorCode = thread->osErrorCode = 0;
285
thread->io_pending = thread->io_suspended = PR_FALSE;
286
thread->environment = 0;
287
PR_INIT_CLIST(&thread->lockList);
290
PRStatus _PR_RecycleThread(PRThread *thread)
292
if ( _PR_IS_NATIVE_THREAD(thread) &&
293
_PR_NUM_DEADNATIVE < _pr_recycleThreads) {
295
PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
299
} else if ( !_PR_IS_NATIVE_THREAD(thread) &&
300
_PR_NUM_DEADUSER < _pr_recycleThreads) {
302
PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
311
* Decrement the active thread count, either _pr_systemActive or
312
* _pr_userActive, depending on whether the thread is a system thread
313
* or a user thread. If all the user threads, except possibly
314
* the primordial thread, have terminated, we notify the primordial
315
* thread of this condition.
317
* Since this function will lock _pr_activeLock, do not call this
318
* function while holding the _pr_activeLock lock, as this will result
323
_PR_DecrActiveThreadCount(PRThread *thread)
325
PR_Lock(_pr_activeLock);
326
if (thread->flags & _PR_SYSTEM) {
330
if (_pr_userActive == _pr_primordialExitCount) {
331
PR_NotifyCondVar(_pr_primordialExitCVar);
334
PR_Unlock(_pr_activeLock);
338
** Detach thread structure
341
_PR_DestroyThread(PRThread *thread)
343
_PR_MD_FREE_LOCK(&thread->threadLock);
348
_PR_NativeDestroyThread(PRThread *thread)
351
PR_DestroyCondVar(thread->term);
354
if (NULL != thread->privateData) {
355
PR_ASSERT(0 != thread->tpdLength);
356
PR_DELETE(thread->privateData);
357
thread->tpdLength = 0;
359
PR_DELETE(thread->stack);
360
_PR_DestroyThread(thread);
364
_PR_UserDestroyThread(PRThread *thread)
367
PR_DestroyCondVar(thread->term);
370
if (NULL != thread->privateData) {
371
PR_ASSERT(0 != thread->tpdLength);
372
PR_DELETE(thread->privateData);
373
thread->tpdLength = 0;
375
_PR_MD_FREE_LOCK(&thread->threadLock);
376
if (thread->threadAllocatedOnStack == 1) {
377
_PR_MD_CLEAN_THREAD(thread);
379
* Because the no_sched field is set, this thread/stack will
380
* will not be re-used until the flag is cleared by the thread
381
* we will context switch to.
383
_PR_FreeStack(thread->stack);
386
_PR_MD_CLEAN_THREAD(thread);
389
* This assertion does not apply to NT. On NT, every fiber
390
* has its threadAllocatedOnStack equal to 0. Elsewhere,
391
* only the primordial thread has its threadAllocatedOnStack
394
PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
401
** Run a thread's start function. When the start function returns the
402
** thread is done executing and no longer needs the CPU. If there are no
403
** more user threads running then we can exit the program.
405
void _PR_NativeRunThread(void *arg)
407
PRThread *thread = (PRThread *)arg;
409
_PR_MD_SET_CURRENT_THREAD(thread);
411
_PR_MD_SET_CURRENT_CPU(NULL);
413
/* Set up the thread stack information */
414
_PR_InitializeNativeStack(thread->stack);
416
/* Set up the thread md information */
417
if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
419
* thread failed to initialize itself, possibly due to
420
* failure to allocate per-thread resources
426
thread->state = _PR_RUNNING;
429
* Add to list of active threads
431
PR_Lock(_pr_activeLock);
432
PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
433
_pr_global_threads++;
434
PR_Unlock(_pr_activeLock);
436
(*thread->startFunc)(thread->arg);
439
* The following two assertions are meant for NT asynch io.
441
* The thread should have no asynch io in progress when it
442
* exits, otherwise the overlapped buffer, which is part of
443
* the thread structure, would become invalid.
445
PR_ASSERT(thread->io_pending == PR_FALSE);
447
* This assertion enforces the programming guideline that
448
* if an io function times out or is interrupted, the thread
449
* should close the fd to force the asynch io to abort
450
* before it exits. Right now, closing the fd is the only
451
* way to clear the io_suspended flag.
453
PR_ASSERT(thread->io_suspended == PR_FALSE);
456
* remove thread from list of active threads
458
PR_Lock(_pr_activeLock);
459
PR_REMOVE_LINK(&thread->active);
460
_pr_global_threads--;
461
PR_Unlock(_pr_activeLock);
463
PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
465
/* All done, time to go away */
466
_PR_CleanupThread(thread);
468
_PR_NotifyJoinWaiters(thread);
470
_PR_DecrActiveThreadCount(thread);
472
thread->state = _PR_DEAD_STATE;
474
if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
477
* thread not recycled
478
* platform-specific thread exit processing
479
* - for stuff like releasing native-thread resources, etc.
481
_PR_MD_EXIT_THREAD(thread);
483
* Free memory allocated for the thread
485
_PR_NativeDestroyThread(thread);
487
* thread gone, cannot de-reference thread now
492
/* Now wait for someone to activate us again... */
493
_PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
497
static void _PR_UserRunThread(void)
499
PRThread *thread = _PR_MD_CURRENT_THREAD();
502
if (_MD_LAST_THREAD())
503
_MD_LAST_THREAD()->no_sched = 0;
505
#ifdef HAVE_CUSTOM_USER_THREADS
506
if (thread->stack == NULL) {
507
thread->stack = PR_NEWZAP(PRThreadStack);
508
_PR_InitializeNativeStack(thread->stack);
510
#endif /* HAVE_CUSTOM_USER_THREADS */
513
/* Run thread main */
514
if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);
517
* Add to list of active threads
519
if (!(thread->flags & _PR_IDLE_THREAD)) {
520
PR_Lock(_pr_activeLock);
521
PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
523
PR_Unlock(_pr_activeLock);
526
(*thread->startFunc)(thread->arg);
529
* The following two assertions are meant for NT asynch io.
531
* The thread should have no asynch io in progress when it
532
* exits, otherwise the overlapped buffer, which is part of
533
* the thread structure, would become invalid.
535
PR_ASSERT(thread->io_pending == PR_FALSE);
537
* This assertion enforces the programming guideline that
538
* if an io function times out or is interrupted, the thread
539
* should close the fd to force the asynch io to abort
540
* before it exits. Right now, closing the fd is the only
541
* way to clear the io_suspended flag.
543
PR_ASSERT(thread->io_suspended == PR_FALSE);
545
PR_Lock(_pr_activeLock);
547
* remove thread from list of active threads
549
if (!(thread->flags & _PR_IDLE_THREAD)) {
550
PR_REMOVE_LINK(&thread->active);
553
PR_Unlock(_pr_activeLock);
554
PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
556
/* All done, time to go away */
557
_PR_CleanupThread(thread);
561
_PR_NotifyJoinWaiters(thread);
563
_PR_DecrActiveThreadCount(thread);
565
thread->state = _PR_DEAD_STATE;
567
if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
570
** Destroy the thread resources
572
_PR_UserDestroyThread(thread);
576
** Find another user thread to run. This cpu has finished the
577
** previous threads main and is now ready to run another thread.
582
_PR_MD_SWITCH_CONTEXT(thread);
585
/* Will land here when we get scheduled again if we are recycling... */
589
void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
591
PRThread *me = _PR_MD_CURRENT_THREAD();
594
if ( _PR_IS_NATIVE_THREAD(thread) ) {
595
_PR_MD_SET_PRIORITY(&(thread->md), newPri);
599
if (!_PR_IS_NATIVE_THREAD(me))
601
_PR_THREAD_LOCK(thread);
602
if (newPri != thread->priority) {
603
_PRCPU *cpu = thread->cpu;
605
switch (thread->state) {
607
/* Change my priority */
610
thread->priority = newPri;
611
if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
612
if (!_PR_IS_NATIVE_THREAD(me))
613
_PR_SET_RESCHED_FLAG();
615
_PR_RUNQ_UNLOCK(cpu);
621
/* Move to different runQ */
622
_PR_DEL_RUNQ(thread);
623
thread->priority = newPri;
624
PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
625
_PR_ADD_RUNQ(thread, cpu, newPri);
626
_PR_RUNQ_UNLOCK(cpu);
628
if (newPri > me->priority) {
629
if (!_PR_IS_NATIVE_THREAD(me))
630
_PR_SET_RESCHED_FLAG();
640
thread->priority = newPri;
644
_PR_THREAD_UNLOCK(thread);
645
if (!_PR_IS_NATIVE_THREAD(me))
650
** Suspend the named thread and copy its gc registers into regBuf
652
static void _PR_Suspend(PRThread *thread)
655
PRThread *me = _PR_MD_CURRENT_THREAD();
657
PR_ASSERT(thread != me);
658
PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));
660
if (!_PR_IS_NATIVE_THREAD(me))
662
_PR_THREAD_LOCK(thread);
663
switch (thread->state) {
665
if (!_PR_IS_NATIVE_THREAD(thread)) {
666
_PR_RUNQ_LOCK(thread->cpu);
667
_PR_DEL_RUNQ(thread);
668
_PR_RUNQ_UNLOCK(thread->cpu);
670
_PR_MISCQ_LOCK(thread->cpu);
671
_PR_ADD_SUSPENDQ(thread, thread->cpu);
672
_PR_MISCQ_UNLOCK(thread->cpu);
675
* Only LOCAL threads are suspended by _PR_Suspend
679
thread->state = _PR_SUSPENDED;
684
* The thread being suspended should be a LOCAL thread with
685
* _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
693
if (_PR_IS_NATIVE_THREAD(thread)) {
694
_PR_MD_SUSPEND_THREAD(thread);
696
thread->flags |= _PR_SUSPENDING;
702
_PR_THREAD_UNLOCK(thread);
703
if (!_PR_IS_NATIVE_THREAD(me))
707
static void _PR_Resume(PRThread *thread)
709
PRThreadPriority pri;
711
PRThread *me = _PR_MD_CURRENT_THREAD();
713
if (!_PR_IS_NATIVE_THREAD(me))
715
_PR_THREAD_LOCK(thread);
716
switch (thread->state) {
718
thread->state = _PR_RUNNABLE;
719
thread->flags &= ~_PR_SUSPENDING;
720
if (!_PR_IS_NATIVE_THREAD(thread)) {
721
_PR_MISCQ_LOCK(thread->cpu);
722
_PR_DEL_SUSPENDQ(thread);
723
_PR_MISCQ_UNLOCK(thread->cpu);
725
pri = thread->priority;
727
_PR_RUNQ_LOCK(thread->cpu);
728
_PR_ADD_RUNQ(thread, thread->cpu, pri);
729
_PR_RUNQ_UNLOCK(thread->cpu);
731
if (pri > _PR_MD_CURRENT_THREAD()->priority) {
732
if (!_PR_IS_NATIVE_THREAD(me))
733
_PR_SET_RESCHED_FLAG();
742
thread->flags &= ~_PR_SUSPENDING;
743
/* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
748
PRLock *wLock = thread->wait.lock;
750
thread->flags &= ~_PR_SUSPENDING;
752
_PR_LOCK_LOCK(wLock);
753
if (thread->wait.lock->owner == 0) {
754
_PR_UnblockLockWaiter(thread->wait.lock);
756
_PR_LOCK_UNLOCK(wLock);
763
* The thread being suspended should be a LOCAL thread with
764
* _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
771
* thread should have been in one of the above-listed blocked states
772
* (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
776
_PR_THREAD_UNLOCK(thread);
777
if (!_PR_IS_NATIVE_THREAD(me))
782
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
783
static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
789
PRIntn priMin, priMax;
792
r = _PR_RUNQREADYMASK(cpu);
794
priMin = priMax = PR_PRIORITY_FIRST;
795
} else if (r == (1<<PR_PRIORITY_NORMAL) ) {
796
priMin = priMax = PR_PRIORITY_NORMAL;
798
priMin = PR_PRIORITY_FIRST;
799
priMax = PR_PRIORITY_LAST;
802
for (pri = priMax; pri >= priMin ; pri-- ) {
803
if (r & (1 << pri)) {
804
for (qp = _PR_RUNQ(cpu)[pri].next;
805
qp != &_PR_RUNQ(cpu)[pri];
807
thread = _PR_THREAD_PTR(qp);
809
* skip non-schedulable threads
811
PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
812
if (thread->no_sched) {
815
* Need to wakeup cpus to avoid missing a
817
* Waking up all CPU's need happen only once.
820
*wakeup_cpus = PR_TRUE;
822
} else if (thread->flags & _PR_BOUND_THREAD) {
824
* Thread bound to cpu 0
829
_PR_MD_WAKEUP_PRIMORDIAL_CPU();
832
} else if (thread->io_pending == PR_TRUE) {
834
* A thread that is blocked for I/O needs to run
835
* on the same cpu on which it was blocked. This is because
836
* the cpu's ioq is accessed without lock protection and scheduling
837
* the thread on a different cpu would preclude this optimization.
842
/* Pull thread off of its run queue */
843
_PR_DEL_RUNQ(thread);
844
_PR_RUNQ_UNLOCK(cpu);
851
_PR_RUNQ_UNLOCK(cpu);
854
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
857
** Schedule this native thread by finding the highest priority nspr
858
** thread that is ready to run.
860
** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls
861
** PR_Schedule() rather than calling PR_Schedule. Otherwise if there
862
** is initialization required for switching from SWITCH_CONTEXT,
863
** it will not get done!
865
void _PR_Schedule(void)
867
PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
868
_PRCPU *cpu = _PR_MD_CURRENT_CPU();
872
PRIntn priMin, priMax;
873
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
877
/* Interrupts must be disabled */
878
PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);
880
/* Since we are rescheduling, we no longer want to */
881
_PR_CLEAR_RESCHED_FLAG();
884
** Find highest priority thread to run. Bigger priority numbers are
885
** higher priority threads
889
* if we are in SuspendAll mode, can schedule only the thread
890
* that called PR_SuspendAll
892
* The thread may be ready to run now, after completing an I/O
893
* operation, for example
895
if ((thread = suspendAllThread) != 0) {
896
if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
897
/* Pull thread off of its run queue */
898
_PR_DEL_RUNQ(thread);
899
_PR_RUNQ_UNLOCK(cpu);
903
_PR_RUNQ_UNLOCK(cpu);
907
r = _PR_RUNQREADYMASK(cpu);
909
priMin = priMax = PR_PRIORITY_FIRST;
910
} else if (r == (1<<PR_PRIORITY_NORMAL) ) {
911
priMin = priMax = PR_PRIORITY_NORMAL;
913
priMin = PR_PRIORITY_FIRST;
914
priMax = PR_PRIORITY_LAST;
917
for (pri = priMax; pri >= priMin ; pri-- ) {
918
if (r & (1 << pri)) {
919
for (qp = _PR_RUNQ(cpu)[pri].next;
920
qp != &_PR_RUNQ(cpu)[pri];
922
thread = _PR_THREAD_PTR(qp);
924
* skip non-schedulable threads
927
PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
929
if ((thread->no_sched) && (me != thread)){
933
/* Pull thread off of its run queue */
934
_PR_DEL_RUNQ(thread);
935
_PR_RUNQ_UNLOCK(cpu);
942
_PR_RUNQ_UNLOCK(cpu);
944
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
946
wakeup_cpus = PR_FALSE;
948
for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
949
if (cpu != _PR_CPU_PTR(qp)) {
950
if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
953
_PR_CPU_LIST_UNLOCK();
954
if (wakeup_cpus == PR_TRUE)
955
_PR_MD_WAKEUP_CPUS();
960
_PR_CPU_LIST_UNLOCK();
961
if (wakeup_cpus == PR_TRUE)
962
_PR_MD_WAKEUP_CPUS();
964
#endif /* _PR_LOCAL_THREADS_ONLY */
968
** There are no threads to run. Switch to the idle thread
970
PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
971
thread = _PR_MD_CURRENT_CPU()->idle_thread;
974
PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
975
(!(thread->no_sched))));
977
/* Resume the thread */
978
PR_LOG(_pr_sched_lm, PR_LOG_MAX,
979
("switching to %d[%p]", thread->id, thread));
980
PR_ASSERT(thread->state != _PR_RUNNING);
981
thread->state = _PR_RUNNING;
983
/* If we are on the runq, it just means that we went to sleep on some
984
* resource, and by the time we got here another real native thread had
985
* already given us the resource and put us back on the runqueue
987
PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
989
_PR_MD_RESTORE_CONTEXT(thread);
991
/* XXXMB; with setjmp/longjmp it is impossible to land here, but
992
* it is not with fibers... Is this a bad thing? I believe it is
995
PR_NOT_REACHED("impossible return from schedule");
1000
** Attaches a thread.
1001
** Does not set the _PR_MD_CURRENT_THREAD.
1002
** Does not specify the scope of the thread.
1005
_PR_AttachThread(PRThreadType type, PRThreadPriority priority,
1006
PRThreadStack *stack)
1009
#pragma unused (type)
1015
if (priority > PR_PRIORITY_LAST) {
1016
priority = PR_PRIORITY_LAST;
1017
} else if (priority < PR_PRIORITY_FIRST) {
1018
priority = PR_PRIORITY_FIRST;
1021
mem = (char*) PR_CALLOC(sizeof(PRThread));
1023
thread = (PRThread*) mem;
1024
thread->priority = priority;
1025
thread->stack = stack;
1026
thread->state = _PR_RUNNING;
1027
PR_INIT_CLIST(&thread->lockList);
1028
if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
1040
PR_IMPLEMENT(PRThread*)
1041
_PR_NativeCreateThread(PRThreadType type,
1042
void (*start)(void *arg),
1044
PRThreadPriority priority,
1045
PRThreadScope scope,
1046
PRThreadState state,
1051
#pragma unused (scope)
1056
thread = _PR_AttachThread(type, priority, NULL);
1059
PR_Lock(_pr_activeLock);
1060
thread->flags = (flags | _PR_GLOBAL_SCOPE);
1061
thread->id = ++_pr_utid;
1062
if (type == PR_SYSTEM_THREAD) {
1063
thread->flags |= _PR_SYSTEM;
1068
PR_Unlock(_pr_activeLock);
1070
thread->stack = PR_NEWZAP(PRThreadStack);
1071
if (!thread->stack) {
1072
PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1075
thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
1076
thread->stack->thr = thread;
1077
thread->startFunc = start;
1081
Set thread flags related to scope and joinable state. If joinable
1082
thread, allocate a "termination" condition variable.
1084
if (state == PR_JOINABLE_THREAD) {
1085
thread->term = PR_NewCondVar(_pr_terminationCVLock);
1086
if (thread->term == NULL) {
1087
PR_DELETE(thread->stack);
1092
thread->state = _PR_RUNNING;
1093
if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
1094
scope,state,stackSize) == PR_SUCCESS) {
1098
PR_DestroyCondVar(thread->term);
1099
thread->term = NULL;
1101
PR_DELETE(thread->stack);
1106
_PR_DecrActiveThreadCount(thread);
1107
_PR_DestroyThread(thread);
1112
/************************************************************************/
1114
PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
1115
void (*start)(void *arg),
1117
PRThreadPriority priority,
1118
PRThreadScope scope,
1119
PRThreadState state,
1124
PRThread *thread = NULL;
1125
PRThreadStack *stack;
1129
PRIntn useRecycled = 0;
1133
First, pin down the priority. Not all compilers catch passing out of
1134
range enum here. If we let bad values thru, priority queues won't work.
1136
if (priority > PR_PRIORITY_LAST) {
1137
priority = PR_PRIORITY_LAST;
1138
} else if (priority < PR_PRIORITY_FIRST) {
1139
priority = PR_PRIORITY_FIRST;
1142
if (!_pr_initialized) _PR_ImplicitInitialization();
1144
if (! (flags & _PR_IDLE_THREAD))
1145
me = _PR_MD_CURRENT_THREAD();
1147
#if defined(_PR_GLOBAL_THREADS_ONLY)
1149
* can create global threads only
1151
if (scope == PR_LOCAL_THREAD)
1152
scope = PR_GLOBAL_THREAD;
1155
if (_native_threads_only)
1156
scope = PR_GLOBAL_THREAD;
1158
native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
1159
&& _PR_IS_NATIVE_THREAD_SUPPORTED());
1161
_PR_ADJUST_STACKSIZE(stackSize);
1165
* clear the IDLE_THREAD flag which applies to LOCAL
1168
flags &= ~_PR_IDLE_THREAD;
1169
flags |= _PR_GLOBAL_SCOPE;
1170
if (_PR_NUM_DEADNATIVE > 0) {
1173
if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
1176
thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
1177
PR_REMOVE_LINK(&thread->links);
1181
_PR_InitializeRecycledThread(thread);
1182
thread->startFunc = start;
1184
thread->flags = (flags | _PR_GLOBAL_SCOPE);
1185
if (type == PR_SYSTEM_THREAD)
1187
thread->flags |= _PR_SYSTEM;
1188
PR_AtomicIncrement(&_pr_systemActive);
1190
else PR_AtomicIncrement(&_pr_userActive);
1192
if (state == PR_JOINABLE_THREAD) {
1194
thread->term = PR_NewCondVar(_pr_terminationCVLock);
1198
PR_DestroyCondVar(thread->term);
1203
thread->priority = priority;
1204
_PR_MD_SET_PRIORITY(&(thread->md), priority);
1205
/* XXX what about stackSize? */
1206
thread->state = _PR_RUNNING;
1207
_PR_MD_WAKEUP_WAITER(thread);
1211
thread = _PR_NativeCreateThread(type, start, arg, priority,
1212
scope, state, stackSize, flags);
1214
if (_PR_NUM_DEADUSER > 0) {
1217
if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
1222
/* Go down list checking for a recycled thread with a
1223
* large enough stack. XXXMB - this has a bad degenerate case.
1225
ptr = _PR_DEADUSERQ.next;
1226
while( ptr != &_PR_DEADUSERQ ) {
1227
thread = _PR_THREAD_PTR(ptr);
1228
if ((thread->stack->stackSize >= stackSize) &&
1229
(!thread->no_sched)) {
1230
PR_REMOVE_LINK(&thread->links);
1242
_PR_InitializeRecycledThread(thread);
1243
thread->startFunc = start;
1245
thread->priority = priority;
1246
if (state == PR_JOINABLE_THREAD) {
1248
thread->term = PR_NewCondVar(_pr_terminationCVLock);
1251
PR_DestroyCondVar(thread->term);
1259
if (thread == NULL) {
1260
#ifndef HAVE_CUSTOM_USER_THREADS
1261
stack = _PR_NewStack(stackSize);
1263
PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1267
/* Allocate thread object and per-thread data off the top of the stack*/
1268
top = stack->stackTop;
1269
#ifdef HAVE_STACK_GROWING_UP
1270
thread = (PRThread*) top;
1271
top = top + sizeof(PRThread);
1273
* Make stack 64-byte aligned
1275
if ((PRUptrdiff)top & 0x3f) {
1276
top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
1279
top = top - sizeof(PRThread);
1280
thread = (PRThread*) top;
1282
* Make stack 64-byte aligned
1284
if ((PRUptrdiff)top & 0x3f) {
1285
top = (char*)((PRUptrdiff)top & ~0x3f);
1288
#if defined(GC_LEAK_DETECTOR)
1290
* sorry, it is not safe to allocate the thread on the stack,
1291
* because we assign to this object before the GC can learn
1292
* about this thread. we'll just leak thread objects instead.
1294
thread = PR_NEW(PRThread);
1296
stack->thr = thread;
1297
memset(thread, 0, sizeof(PRThread));
1298
thread->threadAllocatedOnStack = 1;
1300
thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
1302
PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1305
thread->threadAllocatedOnStack = 0;
1310
/* Initialize thread */
1311
thread->tpdLength = 0;
1312
thread->privateData = NULL;
1313
thread->stack = stack;
1314
thread->priority = priority;
1315
thread->startFunc = start;
1317
PR_INIT_CLIST(&thread->lockList);
1319
if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
1320
if (thread->threadAllocatedOnStack == 1)
1321
_PR_FreeStack(thread->stack);
1325
PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
1329
if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
1330
if (thread->threadAllocatedOnStack == 1)
1331
_PR_FreeStack(thread->stack);
1333
PR_DELETE(thread->privateData);
1336
PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
1340
_PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);
1342
if (status == PR_FALSE) {
1343
_PR_MD_FREE_LOCK(&thread->threadLock);
1344
if (thread->threadAllocatedOnStack == 1)
1345
_PR_FreeStack(thread->stack);
1347
PR_DELETE(thread->privateData);
1354
Set thread flags related to scope and joinable state. If joinable
1355
thread, allocate a "termination" condition variable.
1357
if (state == PR_JOINABLE_THREAD) {
1358
thread->term = PR_NewCondVar(_pr_terminationCVLock);
1359
if (thread->term == NULL) {
1360
_PR_MD_FREE_LOCK(&thread->threadLock);
1361
if (thread->threadAllocatedOnStack == 1)
1362
_PR_FreeStack(thread->stack);
1364
PR_DELETE(thread->privateData);
1373
/* Update thread type counter */
1374
PR_Lock(_pr_activeLock);
1375
thread->flags = flags;
1376
thread->id = ++_pr_utid;
1377
if (type == PR_SYSTEM_THREAD) {
1378
thread->flags |= _PR_SYSTEM;
1384
/* Make thread runnable */
1385
thread->state = _PR_RUNNABLE;
1387
* Add to list of active threads
1389
PR_Unlock(_pr_activeLock);
1391
if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) )
1392
thread->cpu = _PR_GetPrimordialCPU();
1394
thread->cpu = _PR_MD_CURRENT_CPU();
1396
PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1398
if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
1400
_PR_RUNQ_LOCK(thread->cpu);
1401
_PR_ADD_RUNQ(thread, thread->cpu, priority);
1402
_PR_RUNQ_UNLOCK(thread->cpu);
1405
if (thread->flags & _PR_IDLE_THREAD) {
1407
** If the creating thread is a kernel thread, we need to
1408
** awaken the user thread idle thread somehow; potentially
1409
** it could be sleeping in its idle loop, and we need to poke
1410
** it. To do so, wake the idle thread...
1412
_PR_MD_WAKEUP_WAITER(NULL);
1413
} else if (_PR_IS_NATIVE_THREAD(me)) {
1414
_PR_MD_WAKEUP_WAITER(thread);
1416
if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) )
1423
PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
1424
void (*start)(void *arg),
1426
PRThreadPriority priority,
1427
PRThreadScope scope,
1428
PRThreadState state,
1431
return _PR_CreateThread(type, start, arg, priority, scope, state,
1436
** Associate a thread object with an existing native thread.
1437
** "type" is the type of thread object to attach
1438
** "priority" is the priority to assign to the thread
1439
** "stack" defines the shape of the threads stack
1441
** This can return NULL if some kind of error occurs, or if memory is
1444
** This call is not normally needed unless you create your own native
1445
** thread. PR_Init does this automatically for the primordial thread.
1447
PRThread* _PRI_AttachThread(PRThreadType type,
1448
PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags)
1452
if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
1455
_PR_MD_SET_CURRENT_THREAD(NULL);
1457
/* Clear out any state if this thread was attached before */
1458
_PR_MD_SET_CURRENT_CPU(NULL);
1460
thread = _PR_AttachThread(type, priority, stack);
1464
_PR_MD_SET_CURRENT_THREAD(thread);
1466
thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;
1469
thread->stack = PR_NEWZAP(PRThreadStack);
1470
if (!thread->stack) {
1471
_PR_DestroyThread(thread);
1474
thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
1476
PR_INIT_CLIST(&thread->links);
1478
if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
1479
PR_DELETE(thread->stack);
1480
_PR_DestroyThread(thread);
1484
_PR_MD_SET_CURRENT_CPU(NULL);
1486
if (_PR_MD_CURRENT_CPU()) {
1488
PR_Lock(_pr_activeLock);
1490
if (type == PR_SYSTEM_THREAD) {
1491
thread->flags |= _PR_SYSTEM;
1496
if (_PR_MD_CURRENT_CPU()) {
1497
PR_Unlock(_pr_activeLock);
1504
PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
1505
PRThreadPriority priority, PRThreadStack *stack)
1508
#pragma unused( type, priority, stack )
1510
return PR_GetCurrentThread();
1513
PR_IMPLEMENT(void) PR_DetachThread(void)
1516
* On IRIX, Solaris, and Windows, foreign threads are detached when
1519
#if !defined(IRIX) && !defined(WIN32) \
1520
&& !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
1522
if (_pr_initialized) {
1523
me = _PR_MD_GET_ATTACHED_THREAD();
1524
if ((me != NULL) && (me->flags & _PR_ATTACHED))
1525
_PRI_DetachThread();
1530
void _PRI_DetachThread(void)
1532
PRThread *me = _PR_MD_CURRENT_THREAD();
1534
if (me->flags & _PR_PRIMORDIAL) {
1536
* ignore, if primordial thread
1540
PR_ASSERT(me->flags & _PR_ATTACHED);
1541
PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
1542
_PR_CleanupThread(me);
1543
PR_DELETE(me->privateData);
1545
_PR_DecrActiveThreadCount(me);
1547
_PR_MD_CLEAN_THREAD(me);
1548
_PR_MD_SET_CURRENT_THREAD(NULL);
1549
if (!me->threadAllocatedOnStack)
1550
PR_DELETE(me->stack);
1551
_PR_MD_FREE_LOCK(&me->threadLock);
1556
** Wait for thread termination:
1557
** "thread" is the target thread
1559
** This can return PR_FAILURE if no joinable thread could be found
1560
** corresponding to the specified target thread.
1562
** The calling thread is suspended until the target thread completes.
1563
** Several threads cannot wait for the same thread to complete; one thread
1564
** will complete successfully and others will terminate with an error PR_FAILURE.
1565
** The calling thread will not be blocked if the target thread has already
1568
PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
1572
PRThread *me = _PR_MD_CURRENT_THREAD();
1574
if (!_PR_IS_NATIVE_THREAD(me))
1576
term = thread->term;
1577
/* can't join a non-joinable thread */
1579
PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1583
/* multiple threads can't wait on the same joinable thread */
1584
if (term->condQ.next != &term->condQ) {
1587
if (!_PR_IS_NATIVE_THREAD(me))
1590
/* wait for the target thread's termination cv invariant */
1591
PR_Lock (_pr_terminationCVLock);
1592
while (thread->state != _PR_JOIN_WAIT) {
1593
(void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
1595
(void) PR_Unlock (_pr_terminationCVLock);
1598
Remove target thread from global waiting to join Q; make it runnable
1599
again and put it back on its run Q. When it gets scheduled later in
1600
_PR_RunThread code, it will clean up its stack.
1602
if (!_PR_IS_NATIVE_THREAD(me))
1604
thread->state = _PR_RUNNABLE;
1605
if ( !_PR_IS_NATIVE_THREAD(thread) ) {
1606
_PR_THREAD_LOCK(thread);
1608
_PR_MISCQ_LOCK(thread->cpu);
1609
_PR_DEL_JOINQ(thread);
1610
_PR_MISCQ_UNLOCK(thread->cpu);
1612
_PR_AddThreadToRunQ(me, thread);
1613
_PR_THREAD_UNLOCK(thread);
1615
if (!_PR_IS_NATIVE_THREAD(me))
1618
_PR_MD_WAKEUP_WAITER(thread);
1623
if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);
1627
PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
1628
PRThreadPriority newPri)
1632
First, pin down the priority. Not all compilers catch passing out of
1633
range enum here. If we let bad values thru, priority queues won't work.
1635
if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
1636
newPri = PR_PRIORITY_LAST;
1637
} else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
1638
newPri = PR_PRIORITY_FIRST;
1641
if ( _PR_IS_NATIVE_THREAD(thread) ) {
1642
thread->priority = newPri;
1643
_PR_MD_SET_PRIORITY(&(thread->md), newPri);
1644
} else _PR_SetThreadPriority(thread, newPri);
1649
** This routine prevents all other threads from running. This call is needed by
1650
** the garbage collector.
1652
PR_IMPLEMENT(void) PR_SuspendAll(void)
1654
PRThread *me = _PR_MD_CURRENT_THREAD();
1658
* Stop all user and native threads which are marked GC able.
1660
PR_Lock(_pr_activeLock);
1661
suspendAllOn = PR_TRUE;
1662
suspendAllThread = _PR_MD_CURRENT_THREAD();
1663
_PR_MD_BEGIN_SUSPEND_ALL();
1664
for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1665
qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1666
if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1667
_PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
1668
_PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
1669
PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
1672
for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1673
qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1674
if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1675
_PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1676
/* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
1677
_PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1679
_PR_MD_END_SUSPEND_ALL();
1683
** This routine unblocks all other threads that were suspended from running by
1684
** PR_SuspendAll(). This call is needed by the garbage collector.
1686
PR_IMPLEMENT(void) PR_ResumeAll(void)
1688
PRThread *me = _PR_MD_CURRENT_THREAD();
1692
* Resume all user and native threads which are marked GC able.
1694
_PR_MD_BEGIN_RESUME_ALL();
1695
for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1696
qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1697
if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1698
_PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1699
_PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
1701
for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1702
qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1703
if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1704
_PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1705
_PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1707
_PR_MD_END_RESUME_ALL();
1708
suspendAllThread = NULL;
1709
suspendAllOn = PR_FALSE;
1710
PR_Unlock(_pr_activeLock);
1713
PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
1715
PRCList *qp, *qp_next;
1717
PRStatus rv = PR_SUCCESS;
1721
** Currently Enumerate threads happen only with suspension and
1722
** pr_activeLock held
1724
PR_ASSERT(suspendAllOn);
1726
/* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
1727
* qp->next after applying the function "func". In particular, "func"
1728
* might remove the thread from the queue and put it into another one in
1729
* which case qp->next no longer points to the next entry in the original
1732
* To get around this problem, we save qp->next in qp_next before applying
1733
* "func" and use that saved value as the next value after applying "func".
1737
* Traverse the list of local and global threads
1739
for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1740
qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
1743
t = _PR_ACTIVE_THREAD_PTR(qp);
1744
if (_PR_IS_GCABLE_THREAD(t))
1746
rv = (*func)(t, i, arg);
1747
if (rv != PR_SUCCESS)
1752
for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1753
qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
1756
t = _PR_ACTIVE_THREAD_PTR(qp);
1757
if (_PR_IS_GCABLE_THREAD(t))
1759
rv = (*func)(t, i, arg);
1760
if (rv != PR_SUCCESS)
1768
/* FUNCTION: _PR_AddSleepQ
1770
** Adds a thread to the sleep/pauseQ.
1772
** Caller must have the RUNQ lock.
1773
** Caller must be a user level thread
1776
_PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
1778
_PRCPU *cpu = thread->cpu;
1780
if (timeout == PR_INTERVAL_NO_TIMEOUT) {
1781
/* append the thread to the global pause Q */
1782
PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
1783
thread->flags |= _PR_ON_PAUSEQ;
1785
PRIntervalTime sleep;
1789
/* sort onto global sleepQ */
1792
/* Check if we are longest timeout */
1793
if (timeout >= _PR_SLEEPQMAX(cpu)) {
1794
PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
1795
thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
1796
_PR_SLEEPQMAX(cpu) = timeout;
1798
/* Sort thread into global sleepQ at appropriate point */
1799
q = _PR_SLEEPQ(cpu).next;
1801
/* Now scan the list for where to insert this entry */
1802
while (q != &_PR_SLEEPQ(cpu)) {
1803
t = _PR_THREAD_PTR(q);
1804
if (sleep < t->sleep) {
1805
/* Found sleeper to insert in front of */
1811
thread->sleep = sleep;
1812
PR_INSERT_BEFORE(&thread->links, q);
1815
** Subtract our sleep time from the sleeper that follows us (there
1816
** must be one) so that they remain relative to us.
1818
PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));
1820
t = _PR_THREAD_PTR(thread->links.next);
1821
PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
1825
thread->flags |= _PR_ON_SLEEPQ;
1829
/* FUNCTION: _PR_DelSleepQ
1831
** Removes a thread from the sleep/pauseQ.
1833
** If propogate_time is true, then the thread following the deleted
1834
** thread will be get the time from the deleted thread. This is used
1835
** when deleting a sleeper that has not timed out.
1837
** Caller must have the RUNQ lock.
1838
** Caller must be a user level thread
1841
_PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
1843
_PRCPU *cpu = thread->cpu;
1845
/* Remove from pauseQ/sleepQ */
1846
if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
1847
if (thread->flags & _PR_ON_SLEEPQ) {
1848
PRCList *q = thread->links.next;
1849
if (q != &_PR_SLEEPQ(cpu)) {
1850
if (propogate_time == PR_TRUE) {
1851
PRThread *after = _PR_THREAD_PTR(q);
1852
after->sleep += thread->sleep;
1854
_PR_SLEEPQMAX(cpu) -= thread->sleep;
1856
/* Check if prev is the beggining of the list; if so,
1857
* we are the only element on the list.
1859
if (thread->links.prev != &_PR_SLEEPQ(cpu))
1860
_PR_SLEEPQMAX(cpu) -= thread->sleep;
1862
_PR_SLEEPQMAX(cpu) = 0;
1864
thread->flags &= ~_PR_ON_SLEEPQ;
1866
thread->flags &= ~_PR_ON_PAUSEQ;
1868
PR_REMOVE_LINK(&thread->links);
1874
_PR_AddThreadToRunQ(
1875
PRThread *me, /* the current thread */
1876
PRThread *thread) /* the local thread to be added to a run queue */
1878
PRThreadPriority pri = thread->priority;
1879
_PRCPU *cpu = thread->cpu;
1881
PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1885
* On NT, we can only reliably know that the current CPU
1886
* is not idle. We add the awakened thread to the run
1887
* queue of its CPU if its CPU is the current CPU.
1888
* For any other CPU, we don't really know whether it
1889
* is busy or idle. So in all other cases, we just
1890
* "post" the awakened thread to the IO completion port
1891
* for the next idle CPU to execute (this is done in
1892
* _PR_MD_WAKEUP_WAITER).
1893
* Threads with a suspended I/O operation remain bound to
1894
* the same cpu until I/O is cancelled
1896
* NOTE: the boolean expression below must be the exact
1897
* opposite of the corresponding boolean expression in
1898
* _PR_MD_WAKEUP_WAITER.
1900
if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
1901
(thread->md.thr_bound_cpu)) {
1902
PR_ASSERT(!thread->md.thr_bound_cpu ||
1903
(thread->md.thr_bound_cpu == cpu));
1905
_PR_ADD_RUNQ(thread, cpu, pri);
1906
_PR_RUNQ_UNLOCK(cpu);
1910
_PR_ADD_RUNQ(thread, cpu, pri);
1911
_PR_RUNQ_UNLOCK(cpu);
1912
if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
1913
if (pri > me->priority) {
1914
_PR_SET_RESCHED_FLAG();