/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * @brief Wait queue.
 *
 * Wait queue is the basic synchronization primitive upon which all
 * other synchronization primitives build.
 *
 * It allows threads to wait for an event in first-come, first-served
 * fashion. Conditional operation as well as timeouts and interruptions
 * are supported.
 */
45
#include <synch/waitq.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <time/timeout.h>
#include <arch.h>
#include <context.h>
57
static void waitq_sleep_timed_out(void *data);
59
/** Initialize wait queue
61
* Initialize wait queue.
63
* @param wq Pointer to wait queue to be initialized.
65
void waitq_initialize(waitq_t *wq)
67
spinlock_initialize(&wq->lock, "waitq_lock");
68
list_initialize(&wq->head);
69
wq->missed_wakeups = 0;
72
/** Handle timeout during waitq_sleep_timeout() call
74
* This routine is called when waitq_sleep_timeout() times out.
75
* Interrupts are disabled.
77
* It is supposed to try to remove 'its' thread from the wait queue;
78
* it can eventually fail to achieve this goal when these two events
79
* overlap. In that case it behaves just as though there was no
82
* @param data Pointer to the thread that called waitq_sleep_timeout().
84
void waitq_sleep_timed_out(void *data)
86
thread_t *t = (thread_t *) data;
88
bool do_wakeup = false;
89
DEADLOCK_PROBE_INIT(p_wqlock);
91
spinlock_lock(&threads_lock);
92
if (!thread_exists(t))
96
spinlock_lock(&t->lock);
97
if ((wq = t->sleep_queue)) { /* assignment */
98
if (!spinlock_trylock(&wq->lock)) {
99
spinlock_unlock(&t->lock);
100
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
101
goto grab_locks; /* avoid deadlock */
104
list_remove(&t->wq_link);
105
t->saved_context = t->sleep_timeout_context;
107
t->sleep_queue = NULL;
108
spinlock_unlock(&wq->lock);
111
t->timeout_pending = false;
112
spinlock_unlock(&t->lock);
118
spinlock_unlock(&threads_lock);
121
/** Interrupt sleeping thread.
123
* This routine attempts to interrupt a thread from its sleep in a waitqueue.
124
* If the thread is not found sleeping, no action is taken.
126
* @param t Thread to be interrupted.
128
void waitq_interrupt_sleep(thread_t *t)
131
bool do_wakeup = false;
133
DEADLOCK_PROBE_INIT(p_wqlock);
135
ipl = interrupts_disable();
136
spinlock_lock(&threads_lock);
137
if (!thread_exists(t))
141
spinlock_lock(&t->lock);
142
if ((wq = t->sleep_queue)) { /* assignment */
143
if (!(t->sleep_interruptible)) {
145
* The sleep cannot be interrupted.
147
spinlock_unlock(&t->lock);
151
if (!spinlock_trylock(&wq->lock)) {
152
spinlock_unlock(&t->lock);
153
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
154
goto grab_locks; /* avoid deadlock */
157
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
158
t->timeout_pending = false;
160
list_remove(&t->wq_link);
161
t->saved_context = t->sleep_interruption_context;
163
t->sleep_queue = NULL;
164
spinlock_unlock(&wq->lock);
166
spinlock_unlock(&t->lock);
172
spinlock_unlock(&threads_lock);
173
interrupts_restore(ipl);
176
/** Sleep until either wakeup, timeout or interruption occurs
178
* This is a sleep implementation which allows itself to time out or to be
179
* interrupted from the sleep, restoring a failover context.
181
* Sleepers are organised in a FIFO fashion in a structure called wait queue.
183
* This function is really basic in that other functions as waitq_sleep()
184
* and all the *_timeout() functions use it.
186
* @param wq Pointer to wait queue.
187
* @param usec Timeout in microseconds.
188
* @param flags Specify mode of the sleep.
190
* The sleep can be interrupted only if the
191
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
193
* If usec is greater than zero, regardless of the value of the
194
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
195
* timeout, interruption or wakeup comes.
197
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
198
* the call will not return until wakeup or interruption comes.
200
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
201
* call will immediately return, reporting either success or failure.
203
* @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT,
204
* ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and
207
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of
208
* the call there was no pending wakeup.
210
* @li ESYNCH_TIMEOUT means that the sleep timed out.
212
* @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread.
214
* @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was
215
* a pending wakeup at the time of the call. The caller was not put
218
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was
221
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags)
226
ipl = waitq_sleep_prepare(wq);
227
rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
228
waitq_sleep_finish(wq, rc, ipl);
232
/** Prepare to sleep in a waitq.
234
* This function will return holding the lock of the wait queue
235
* and interrupts disabled.
237
* @param wq Wait queue.
239
* @return Interrupt level as it existed on entry to this function.
241
ipl_t waitq_sleep_prepare(waitq_t *wq)
246
ipl = interrupts_disable();
248
if (THREAD) { /* needed during system initiailzation */
250
* Busy waiting for a delayed timeout.
251
* This is an important fix for the race condition between
252
* a delayed timeout and a next call to waitq_sleep_timeout().
253
* Simply, the thread is not allowed to go to sleep if
254
* there are timeouts in progress.
256
spinlock_lock(&THREAD->lock);
257
if (THREAD->timeout_pending) {
258
spinlock_unlock(&THREAD->lock);
259
interrupts_restore(ipl);
262
spinlock_unlock(&THREAD->lock);
265
spinlock_lock(&wq->lock);
269
/** Finish waiting in a wait queue.
271
* This function restores interrupts to the state that existed prior
272
* to the call to waitq_sleep_prepare(). If necessary, the wait queue
275
* @param wq Wait queue.
276
* @param rc Return code of waitq_sleep_timeout_unsafe().
277
* @param ipl Interrupt level returned by waitq_sleep_prepare().
279
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
282
case ESYNCH_WOULD_BLOCK:
283
case ESYNCH_OK_ATOMIC:
284
spinlock_unlock(&wq->lock);
289
interrupts_restore(ipl);
292
/** Internal implementation of waitq_sleep_timeout().
294
* This function implements logic of sleeping in a wait queue.
295
* This call must be preceded by a call to waitq_sleep_prepare()
296
* and followed by a call to waitq_sleep_finish().
298
* @param wq See waitq_sleep_timeout().
299
* @param usec See waitq_sleep_timeout().
300
* @param flags See waitq_sleep_timeout().
302
* @return See waitq_sleep_timeout().
304
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags)
306
/* checks whether to go to sleep at all */
307
if (wq->missed_wakeups) {
308
wq->missed_wakeups--;
309
return ESYNCH_OK_ATOMIC;
312
if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
313
/* return immediatelly instead of going to sleep */
314
return ESYNCH_WOULD_BLOCK;
319
* Now we are firmly decided to go to sleep.
321
spinlock_lock(&THREAD->lock);
323
if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
326
* If the thread was already interrupted,
327
* don't go to sleep at all.
329
if (THREAD->interrupted) {
330
spinlock_unlock(&THREAD->lock);
331
spinlock_unlock(&wq->lock);
332
return ESYNCH_INTERRUPTED;
336
* Set context that will be restored if the sleep
337
* of this thread is ever interrupted.
339
THREAD->sleep_interruptible = true;
340
if (!context_save(&THREAD->sleep_interruption_context)) {
341
/* Short emulation of scheduler() return code. */
342
spinlock_unlock(&THREAD->lock);
343
return ESYNCH_INTERRUPTED;
347
THREAD->sleep_interruptible = false;
351
/* We use the timeout variant. */
352
if (!context_save(&THREAD->sleep_timeout_context)) {
353
/* Short emulation of scheduler() return code. */
354
spinlock_unlock(&THREAD->lock);
355
return ESYNCH_TIMEOUT;
357
THREAD->timeout_pending = true;
358
timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
359
waitq_sleep_timed_out, THREAD);
362
list_append(&THREAD->wq_link, &wq->head);
367
THREAD->state = Sleeping;
368
THREAD->sleep_queue = wq;
370
spinlock_unlock(&THREAD->lock);
372
/* wq->lock is released in scheduler_separated_stack() */
375
return ESYNCH_OK_BLOCKED;
379
/** Wake up first thread sleeping in a wait queue
381
* Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
382
* wrapper meant for general use.
384
* Besides its 'normal' wakeup operation, it attempts to unregister possible
387
* @param wq Pointer to wait queue.
388
* @param mode Wakeup mode.
390
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
394
ipl = interrupts_disable();
395
spinlock_lock(&wq->lock);
397
_waitq_wakeup_unsafe(wq, mode);
399
spinlock_unlock(&wq->lock);
400
interrupts_restore(ipl);
403
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
405
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
406
* assumes wq->lock is already locked and interrupts are already disabled.
408
* @param wq Pointer to wait queue.
409
* @param mode If mode is WAKEUP_FIRST, then the longest waiting
410
* thread, if any, is woken up. If mode is WAKEUP_ALL, then
411
* all waiting threads, if any, are woken up. If there are
412
* no waiting threads to be woken up, the missed wakeup is
413
* recorded in the wait queue.
415
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
421
if (list_empty(&wq->head)) {
422
wq->missed_wakeups++;
423
if (count && mode == WAKEUP_ALL)
424
wq->missed_wakeups--;
429
t = list_get_instance(wq->head.next, thread_t, wq_link);
432
* Lock the thread prior to removing it from the wq.
433
* This is not necessary because of mutual exclusion
434
* (the link belongs to the wait queue), but because
435
* of synchronization with waitq_sleep_timed_out()
436
* and thread_interrupt_sleep().
438
* In order for these two functions to work, the following
439
* invariant must hold:
441
* t->sleep_queue != NULL <=> t sleeps in a wait queue
443
* For an observer who locks the thread, the invariant
444
* holds only when the lock is held prior to removing
445
* it from the wait queue.
447
spinlock_lock(&t->lock);
448
list_remove(&t->wq_link);
450
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
451
t->timeout_pending = false;
452
t->sleep_queue = NULL;
453
spinlock_unlock(&t->lock);
457
if (mode == WAKEUP_ALL)