/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Netscape Portable Runtime (NSPR).
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998-2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include <Multiprocessing.h>

#include "mdcriticalregion.h"

// UPP for the Time Manager callback; created once in _MD_StartInterrupts().
TimerUPP gTimerCallbackUPP = NULL;

// The first NSPR thread, recorded in _MD_StartInterrupts().
PRThread *gPrimaryThread = NULL;

// This process's PSN; the timer task passes it to WakeUpProcess().
// NOTE(review): per the comment below, this must be initialized before the
// timer task can fire (reportedly in _MD_EarlyInit, not visible here).
ProcessSerialNumber gApplicationProcess;
57
// Return the primary (first) NSPR thread of this process.
PR_IMPLEMENT(PRThread *) PR_GetPrimaryThread()
{
    return gPrimaryThread;
}
62
//##############################################################################
//##############################################################################
#pragma mark CREATING MACINTOSH THREAD STACKS

#if defined(GC_LEAK_DETECTOR)
// Leak-detector builds allocate stacks through the GC so they are scanned.
extern void* GC_malloc_atomic(PRUint32 size);
#endif
72
** Allocate a new memory segment. We allocate it from our figment heap. Currently,
73
** it is being used for per thread stack space.
75
** Return the segment's access rights and size. vaddr is used on Unix platforms to
76
** map an existing address for the segment.
78
PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
82
PR_ASSERT(vaddr == 0);
85
** Take the actual memory for the segment out of our Figment heap.
88
#if defined(GC_LEAK_DETECTOR)
89
seg->vaddr = (char *)GC_malloc_atomic(size);
91
seg->vaddr = (char *)malloc(size);
94
if (seg->vaddr == NULL) {
97
DebugStr("\p_MD_AllocSegment failed.");
110
** Free previously allocated memory segment.
112
void _MD_FreeSegment(PRSegment *seg)
114
PR_ASSERT((seg->flags & _PR_SEG_VM) == 0);
116
if (seg->vaddr != NULL)
122
** The thread's stack has been allocated and its fields are already properly filled
123
** in by PR. Perform any debugging related initialization here.
125
** Put a recognizable pattern so that we can find it from Macsbug.
126
** Put a cookie at the top of the stack so that we can find it from Macsbug.
128
void _MD_InitStack(PRThreadStack *ts, int redZoneBytes)
130
#pragma unused (redZoneBytes)
132
// Put a cookie at the top of the stack so that we can find
135
memset(ts->allocBase, 0xDC, ts->stackSize);
137
((UInt32 *)ts->stackTop)[-1] = 0xBEEFCAFE;
138
((UInt32 *)ts->stackTop)[-2] = (UInt32)gPrimaryThread;
139
((UInt32 *)ts->stackTop)[-3] = (UInt32)(ts);
140
((UInt32 *)ts->stackBottom)[0] = 0xCAFEBEEF;
146
// Undo _MD_InitStack: scribble over the stack and clear the Macsbug cookies.
extern void _MD_ClearStack(PRThreadStack *ts)
{
    // Clear out our cookies.
    memset(ts->allocBase, 0xEF, ts->allocSize);

    ((UInt32 *)ts->stackTop)[-1] = 0;
    ((UInt32 *)ts->stackTop)[-2] = 0;
    ((UInt32 *)ts->stackTop)[-3] = 0;
    ((UInt32 *)ts->stackBottom)[0] = 0;
}
162
//##############################################################################
163
//##############################################################################
165
#pragma mark TIME MANAGER-BASED CLOCK
167
// On Mac OS X, it's possible for the application to spend lots of time
168
// in WaitNextEvent, yielding to other applications. Since NSPR threads are
169
// cooperative here, this means that NSPR threads will also get very little
170
// time to run. To kick ourselves out of a WaitNextEvent call when we have
171
// determined that it's time to schedule another thread, the Timer Task
172
// (which fires every 8ms, even when other apps have the CPU) calls WakeUpProcess.
173
// We only want to do this on Mac OS X; the gTimeManagerTaskDoesWUP variable
174
// indicates when we're running on that OS.
176
// Note that the TimerCallback makes use of gApplicationProcess. We need to
177
// have set this up before the first possible run of the timer task; we do
178
// so in _MD_EarlyInit().
179
static Boolean gTimeManagerTaskDoesWUP;
181
static TMTask gTimeManagerTaskElem;
183
extern void _MD_IOInterrupt(void);
184
_PRInterruptTable _pr_interruptTable[] = {
185
{ "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, },
186
{ "i/o", _PR_MISSED_IO, _MD_IOInterrupt, },
190
#define kMacTimerInMiliSecs 8L
192
// Time Manager task, fired every kMacTimerInMiliSecs ms. Delivers the NSPR
// clock interrupt, wakes the process out of WaitNextEvent when another NSPR
// thread is runnable (OS X only), and re-primes itself.
pascal void TimerCallback(TMTaskPtr tmTaskPtr)
{
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();

    if (_PR_MD_GET_INTSOFF()) {
        // NSPR interrupts are off: just record the missed tick and re-arm.
        cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
        PrimeTime((QElemPtr)tmTaskPtr, kMacTimerInMiliSecs);
        // NOTE(review): early return reconstructed — without it the body
        // below would run with interrupts off, contradicting the guard.
        return;
    }

    // And tell nspr that a clock interrupt occured.
    _PR_ClockInterrupt();

    // Is a thread of equal or higher priority than the current one ready?
    if ((_PR_RUNQREADYMASK(cpu)) >> ((_PR_MD_CURRENT_THREAD()->priority))) {
        if (gTimeManagerTaskDoesWUP) {
            // We only want to call WakeUpProcess if we know that NSPR has managed to switch threads
            // since the last call, otherwise we end up spewing out WakeUpProcess() calls while the
            // application is blocking somewhere. This can interfere with events loops other than
            // our own (see bug 158927).
            if (UnsignedWideToUInt64(cpu->md.lastThreadSwitch) > UnsignedWideToUInt64(cpu->md.lastWakeUpProcess))
            {
                WakeUpProcess(&gApplicationProcess);
                cpu->md.lastWakeUpProcess = UpTime();
            }
        }
        _PR_SET_RESCHED_FLAG();
    }

    // Reset the clock timer so that we fire again.
    PrimeTime((QElemPtr)tmTaskPtr, kMacTimerInMiliSecs);
}
230
void _MD_StartInterrupts(void)
232
gPrimaryThread = _PR_MD_CURRENT_THREAD();
234
gTimeManagerTaskDoesWUP = RunningOnOSX();
236
if ( !gTimerCallbackUPP )
237
gTimerCallbackUPP = NewTimerUPP(TimerCallback);
239
// Fill in the Time Manager queue element
241
gTimeManagerTaskElem.tmAddr = (TimerUPP)gTimerCallbackUPP;
242
gTimeManagerTaskElem.tmCount = 0;
243
gTimeManagerTaskElem.tmWakeUp = 0;
244
gTimeManagerTaskElem.tmReserved = 0;
246
// Make sure that our time manager task is ready to go.
247
InsTime((QElemPtr)&gTimeManagerTaskElem);
249
PrimeTime((QElemPtr)&gTimeManagerTaskElem, kMacTimerInMiliSecs);
252
void _MD_StopInterrupts(void)
254
if (gTimeManagerTaskElem.tmAddr != NULL) {
255
RmvTime((QElemPtr)&gTimeManagerTaskElem);
256
gTimeManagerTaskElem.tmAddr = NULL;
261
#define MAX_PAUSE_TIMEOUT_MS 500
263
void _MD_PauseCPU(PRIntervalTime timeout)
265
if (timeout != PR_INTERVAL_NO_WAIT)
267
// There is a race condition entering the critical section
268
// in AsyncIOCompletion (and probably elsewhere) that can
269
// causes deadlock for the duration of this timeout. To
270
// work around this, use a max 500ms timeout for now.
271
// See bug 99561 for details.
272
if (PR_IntervalToMilliseconds(timeout) > MAX_PAUSE_TIMEOUT_MS)
273
timeout = PR_MillisecondsToInterval(MAX_PAUSE_TIMEOUT_MS);
275
WaitOnIdleSemaphore(timeout);
276
(void) _MD_IOInterrupt();
280
// Per-CPU init: on OS X, start tracking thread-switch vs. WakeUpProcess
// times so TimerCallback can throttle WakeUpProcess calls.
void _MD_InitRunningCPU(_PRCPU* cpu)
{
    cpu->md.trackScheduling = RunningOnOSX();
    if (cpu->md.trackScheduling) {
        AbsoluteTime zeroTime = {0, 0};
        cpu->md.lastThreadSwitch = UpTime();
        cpu->md.lastWakeUpProcess = zeroTime;
    }
}
291
//##############################################################################
//##############################################################################
#pragma mark THREAD SUPPORT FUNCTIONS

#include <OpenTransport.h> /* for error codes */
298
// Create the per-thread lock/condvar pair used to block on async I/O.
// Returns PR_FAILURE if either allocation fails.
PRStatus _MD_InitThread(PRThread *thread)
{
    thread->md.asyncIOLock = PR_NewLock();
    PR_ASSERT(thread->md.asyncIOLock != NULL);
    thread->md.asyncIOCVar = PR_NewCondVar(thread->md.asyncIOLock);
    PR_ASSERT(thread->md.asyncIOCVar != NULL);

    if (thread->md.asyncIOLock == NULL || thread->md.asyncIOCVar == NULL)
        return PR_FAILURE;
    else
        return PR_SUCCESS;
}
311
// Cooperative wait: just yield to another NSPR thread. The timeout is
// handled by the scheduler, not here.
PRStatus _MD_wait(PRThread *thread, PRIntervalTime timeout)
{
#pragma unused (timeout)

    _MD_SWITCH_CONTEXT(thread);
    return PR_SUCCESS;
}
320
// Block the calling thread until its pending async I/O completes, it is
// interrupted, or the timeout elapses. Sets md.osErrCode on interrupt/timeout.
void WaitOnThisThread(PRThread *thread, PRIntervalTime timeout)
{
    intn is;
    PRIntervalTime timein = PR_IntervalNow();
    PRStatus status = PR_SUCCESS;

    // Turn interrupts off to avoid a race over lock ownership with the callback
    // (which can fire at any time). Interrupts may stay off until we leave
    // this function, or another NSPR thread turns them back on. They certainly
    // stay off until PR_WaitCondVar() relinquishes the asyncIOLock lock, which
    // is what we care about.
    // NOTE(review): the _PR_INTSOFF/_PR_FAST_INTSON pair is reconstructed
    // from the comment above; the original lines are missing from this copy.
    _PR_INTSOFF(is);

    PR_Lock(thread->md.asyncIOLock);
    if (timeout == PR_INTERVAL_NO_TIMEOUT) {
        while ((thread->io_pending) && (status == PR_SUCCESS))
            status = PR_WaitCondVar(thread->md.asyncIOCVar, PR_INTERVAL_NO_TIMEOUT);
    } else {
        while ((thread->io_pending) && ((PRIntervalTime)(PR_IntervalNow() - timein) < timeout) && (status == PR_SUCCESS))
            status = PR_WaitCondVar(thread->md.asyncIOCVar, timeout);
    }
    if ((status == PR_FAILURE) && (PR_GetError() == PR_PENDING_INTERRUPT_ERROR)) {
        thread->md.osErrCode = kEINTRErr;
    } else if (thread->io_pending) {
        // Timed out with I/O still outstanding.
        thread->md.osErrCode = kETIMEDOUTErr;
        PR_SetError(PR_IO_TIMEOUT_ERROR, kETIMEDOUTErr);
    }

    thread->io_pending = PR_FALSE;
    PR_Unlock(thread->md.asyncIOLock);
    _PR_FAST_INTSON(is);
}
353
// Completion-side counterpart of WaitOnThisThread(): mark the thread's I/O
// done and wake it. Must only be called when NSPR interrupts are ON (callers
// with interrupts off set the missed-I/O flag instead — see below).
void DoneWaitingOnThisThread(PRThread *thread)
{
    intn is;

    PR_ASSERT(thread->md.asyncIOLock->owner == NULL);

    // DoneWaitingOnThisThread() is called from OT notifiers and async file I/O
    // callbacks that can run at "interrupt" time (Classic Mac OS) or on pthreads
    // that may run concurrently with the main threads (Mac OS X). They can thus
    // be called when any NSPR thread is running, or even while NSPR is in a
    // thread context switch. It is therefore vital that we can guarantee to
    // be able to get the asyncIOLock without blocking (thus avoiding code
    // that makes assumptions about the current NSPR thread etc). To achieve
    // this, we use NSPR interrrupts as a semaphore on the lock; all code
    // that grabs the lock also disables interrupts for the time the lock
    // is held. Callers of DoneWaitingOnThisThread() thus have to check whether
    // interrupts are already off, and, if so, simply set the missed_IO flag on
    // the CPU rather than calling this function.
    // NOTE(review): interrupt mask save/restore reconstructed per the comment
    // above; the original lines are missing from this copy.
    _PR_INTSOFF(is);
    PR_Lock(thread->md.asyncIOLock);
    thread->io_pending = PR_FALSE;
    /* let the waiting thread know that async IO completed */
    PR_NotifyCondVar(thread->md.asyncIOCVar);
    PR_Unlock(thread->md.asyncIOLock);
    _PR_FAST_INTSON(is);
}
382
PR_IMPLEMENT(void) PR_Mac_WaitForAsyncNotify(PRIntervalTime timeout)
385
PRIntervalTime timein = PR_IntervalNow();
386
PRStatus status = PR_SUCCESS;
387
PRThread *thread = _PR_MD_CURRENT_THREAD();
389
// See commments in WaitOnThisThread()
391
PR_Lock(thread->md.asyncIOLock);
392
if (timeout == PR_INTERVAL_NO_TIMEOUT) {
393
while ((!thread->md.asyncNotifyPending) && (status == PR_SUCCESS))
394
status = PR_WaitCondVar(thread->md.asyncIOCVar, PR_INTERVAL_NO_TIMEOUT);
396
while ((!thread->md.asyncNotifyPending) && ((PRIntervalTime)(PR_IntervalNow() - timein) < timeout) && (status == PR_SUCCESS))
397
status = PR_WaitCondVar(thread->md.asyncIOCVar, timeout);
399
if ((status == PR_FAILURE) && (PR_GetError() == PR_PENDING_INTERRUPT_ERROR)) {
400
thread->md.osErrCode = kEINTRErr;
401
} else if (!thread->md.asyncNotifyPending) {
402
thread->md.osErrCode = kETIMEDOUTErr;
403
PR_SetError(PR_IO_TIMEOUT_ERROR, kETIMEDOUTErr);
405
thread->md.asyncNotifyPending = PR_FALSE;
406
PR_Unlock(thread->md.asyncIOLock);
411
// Deliver an async notify to `thread`: set the pending flag and wake any
// waiter in PR_Mac_WaitForAsyncNotify(). Same locking discipline as
// DoneWaitingOnThisThread().
void AsyncNotify(PRThread *thread)
{
    intn is;

    PR_ASSERT(thread->md.asyncIOLock->owner == NULL);

    // See comments in DoneWaitingOnThisThread()
    // NOTE(review): interrupt mask save/restore reconstructed per that
    // comment; the original lines are missing from this copy.
    _PR_INTSOFF(is);
    PR_Lock(thread->md.asyncIOLock);
    thread->md.asyncNotifyPending = PR_TRUE;
    /* let the waiting thread know that async IO completed */
    PR_NotifyCondVar(thread->md.asyncIOCVar);
    PR_Unlock(thread->md.asyncIOLock);
    _PR_FAST_INTSON(is);
}
428
PR_IMPLEMENT(void) PR_Mac_PostAsyncNotify(PRThread *thread)
430
_PRCPU * cpu = _PR_MD_CURRENT_CPU();
432
if (_PR_MD_GET_INTSOFF()) {
433
thread->md.missedAsyncNotify = PR_TRUE;
434
cpu->u.missed[cpu->where] |= _PR_MISSED_IO;
441
//##############################################################################
442
//##############################################################################
444
#pragma mark PROCESS SUPPORT FUNCTIONS
446
PRProcess * _MD_CreateProcess(
450
const PRProcessAttr *attr)
452
#pragma unused (path, argv, envp, attr)
454
PR_SetError(PR_NOT_IMPLEMENTED_ERROR, unimpErr);
458
// Not supported on classic Mac OS; always fails.
PRStatus _MD_DetachProcess(PRProcess *process)
{
#pragma unused (process)

    PR_SetError(PR_NOT_IMPLEMENTED_ERROR, unimpErr);
    return PR_FAILURE;
}
466
// Not supported on classic Mac OS; always fails.
PRStatus _MD_WaitProcess(PRProcess *process, PRInt32 *exitCode)
{
#pragma unused (process, exitCode)

    PR_SetError(PR_NOT_IMPLEMENTED_ERROR, unimpErr);
    return PR_FAILURE;
}
474
// Not supported on classic Mac OS; always fails.
PRStatus _MD_KillProcess(PRProcess *process)
{
#pragma unused (process)

    PR_SetError(PR_NOT_IMPLEMENTED_ERROR, unimpErr);
    return PR_FAILURE;
}
482
//##############################################################################
//##############################################################################
#pragma mark ATOMIC OPERATIONS

#ifdef _PR_HAVE_ATOMIC_OPS
// Atomically store `newval` into *val and return the previous value.
// Retries until the compare-and-swap observes an unchanged *val.
// NOTE(review): loop body reconstructed around the visible
// OTCompareAndSwap32 condition; return type/value inferred from the
// read-modify-write pattern — confirm against the original.
PRInt32
_MD_AtomicSet(PRInt32 *val, PRInt32 newval)
{
    PRInt32 rv;
    do {
        rv = *val;
    } while (!OTCompareAndSwap32(rv, newval, (UInt32*)val));

    return rv;
}
#endif // _PR_HAVE_ATOMIC_OPS
501
//##############################################################################
//##############################################################################
#pragma mark INTERRUPT SUPPORT

#if TARGET_CARBON

/*
This critical region support is required for Mac NSPR to work correctly on dual CPU
machines on Mac OS X. This note explains why.

NSPR uses a timer task, and has callbacks for async file I/O and Open Transport
whose runtime behaviour differs depending on environment. On "Classic" Mac OS
these run at "interrupt" time (OS-level interrupts, that is, not NSPR interrupts),
and can thus preempt other code, but they always run to completion.

On Mac OS X, these are all emulated using MP tasks, which sit atop pthreads. Thus,
they can be preempted at any time (and not necessarily run to completion), and can
also run *concurrently* with each other, and with application code, on multiple
CPU machines. Note that all NSPR threads are emulated, and all run on the main
MP thread.

We thus have to use MP critical sections to protect data that is shared between
the various callbacks and the main MP thread. It so happens that NSPR has this
concept of software interrupts, and making interrupt-off times be critical
sections works well.
*/

/*
Whether to use critical regions. True if running on Mac OS X and later.
*/
PRBool gUseCriticalRegions;

/*
Count of the number of times we've entered the critical region.
We need this because ENTER_CRITICAL_REGION() will *not* block when
called from different NSPR threads (which all run on one MP thread),
and we need to ensure that when code turns interrupts back on (by
settings _pr_intsOff to 0) we exit the critical section enough times
to leave it.
*/
PRInt32 gCriticalRegionEntryCount;

// Record the NSPR interrupt state, entering the MP critical region on each
// "off" and leaving it (as many times as it was entered) when interrupts
// are turned back on.
void _MD_SetIntsOff(PRInt32 ints)
{
    ENTER_CRITICAL_REGION();
    gCriticalRegionEntryCount ++;

    // NOTE(review): the store of `ints` into _pr_intsOff is reconstructed
    // from the comment above ("code turns interrupts back on by settings
    // _pr_intsOff to 0") — confirm against the original.
    _pr_intsOff = ints;

    if (!ints)
    {
        PRInt32 i = gCriticalRegionEntryCount;

        gCriticalRegionEntryCount = 0;
        for ( ; i > 0; i --) {
            LEAVE_CRITICAL_REGION();
        }
    }
}

#endif /* TARGET_CARBON */
571
//##############################################################################
572
//##############################################################################
574
#pragma mark CRITICAL REGION SUPPORT
577
static PRBool RunningOnOSX()
580
OSErr err = Gestalt(gestaltSystemVersion, &systemVersion);
581
return (err == noErr) && (systemVersion >= 0x00001000);
585
#if MAC_CRITICAL_REGIONS

// The single MP critical region used to emulate interrupt-off sections.
MDCriticalRegionID gCriticalRegion;

// Create the critical region. No-op unless running on Mac OS X.
void InitCriticalRegion()
{
    OSStatus err;

    // we only need to do critical region stuff on Mac OS X
    gUseCriticalRegions = RunningOnOSX();
    if (!gUseCriticalRegions) return;

    err = MD_CriticalRegionCreate(&gCriticalRegion);
    PR_ASSERT(err == noErr);
}
601
void TermCriticalRegion()
605
if (!gUseCriticalRegions) return;
607
err = MD_CriticalRegionDelete(gCriticalRegion);
608
PR_ASSERT(err == noErr);
612
void EnterCritialRegion()
616
if (!gUseCriticalRegions) return;
618
PR_ASSERT(gCriticalRegion != kInvalidID);
620
/* Change to a non-infinite timeout for debugging purposes */
621
err = MD_CriticalRegionEnter(gCriticalRegion, kDurationForever /* 10000 * kDurationMillisecond */ );
622
PR_ASSERT(err == noErr);
625
void LeaveCritialRegion()
629
if (!gUseCriticalRegions) return;
631
PR_ASSERT(gCriticalRegion != kInvalidID);
633
err = MD_CriticalRegionExit(gCriticalRegion);
634
PR_ASSERT(err == noErr);
638
#endif // MAC_CRITICAL_REGIONS
640
//##############################################################################
641
//##############################################################################
643
#pragma mark IDLE SEMAPHORE SUPPORT
646
Since the WaitNextEvent() in _MD_PauseCPU() is causing all sorts of
647
headache under Mac OS X we're going to switch to MPWaitOnSemaphore()
648
which should do what we want
652
PRBool gUseIdleSemaphore = PR_FALSE;
653
MPSemaphoreID gIdleSemaphore = NULL;
656
void InitIdleSemaphore()
658
// we only need to do idle semaphore stuff on Mac OS X
660
gUseIdleSemaphore = RunningOnOSX();
661
if (gUseIdleSemaphore)
663
OSStatus err = MPCreateSemaphore(1 /* max value */, 0 /* initial value */, &gIdleSemaphore);
664
PR_ASSERT(err == noErr);
669
void TermIdleSemaphore()
672
if (gUseIdleSemaphore)
674
OSStatus err = MPDeleteSemaphore(gIdleSemaphore);
675
PR_ASSERT(err == noErr);
676
gUseIdleSemaphore = NULL;
682
// Idle until SignalIdleSemaphore() fires or `timeout` elapses. On OS X this
// waits on the MP semaphore; elsewhere it falls back to WaitNextEvent.
void WaitOnIdleSemaphore(PRIntervalTime timeout)
{
    if (gUseIdleSemaphore)
    {
        OSStatus err = MPWaitOnSemaphore(gIdleSemaphore, kDurationMillisecond * PR_IntervalToMilliseconds(timeout));
        PR_ASSERT(err == noErr);
    }
    else
    {
        EventRecord theEvent;
        /*
        ** Calling WaitNextEvent() here is suboptimal. This routine should
        ** pause the process until IO or the timeout occur, yielding time to
        ** other processes on operating systems that require this (Mac OS classic).
        ** WaitNextEvent() may incur too much latency, and has other problems,
        ** such as the potential to drop suspend/resume events.
        */
        (void)WaitNextEvent(nullEvent, &theEvent, 1, NULL);
    }
}
706
void SignalIdleSemaphore()
709
if (gUseIdleSemaphore)
711
// often we won't be waiting on the semaphore here, so ignore any errors
712
(void)MPSignalSemaphore(gIdleSemaphore);
717
WakeUpProcess(&gApplicationProcess);