/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @file
 * @brief Reader/Writer locks.
 *
 * A reader/writer lock can be held by multiple readers at a time.
 * Or it can be exclusively held by a sole writer at a time.
 *
 * These locks are not recursive.
 * Because a technique called direct hand-off is used and because
 * waiting takes place in a single wait queue, neither readers
 * nor writers will suffer starvation.
 *
 * If there is a writer followed by a reader waiting for the rwlock
 * and the writer times out, all leading readers are automatically woken up.
 *
 * NOTE ON rwlock_holder_type
 *
 * This field is set on an attempt to acquire the exclusive mutex
 * to the respective value depending whether the caller is a reader
 * or a writer. The field is examined only if the thread had been
 * previously blocked on the exclusive mutex. Thus it is safe
 * to store the rwlock type in the thread structure, because
 * each thread can block on only one rwlock at a time.
 */
#include <synch/rwlock.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/waitq.h>
#include <synch/synch.h>
#include <adt/list.h>
#include <arch/asm.h>
#include <arch.h>
#include <proc/thread.h>
#include <panic.h>
#define ALLOW_READERS_ONLY 1
74
static void let_others_in(rwlock_t *rwl, int readers_only);
75
static void release_spinlock(void *arg);
77
/** Initialize reader/writer lock
79
* Initialize reader/writer lock.
81
* @param rwl Reader/Writer lock.
83
void rwlock_initialize(rwlock_t *rwl) {
84
spinlock_initialize(&rwl->lock, "rwlock_t");
85
mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
89
/** Acquire reader/writer lock for reading
91
* Acquire reader/writer lock for reading.
92
* Timeout and willingness to block may be specified.
94
* @param rwl Reader/Writer lock.
95
* @param usec Timeout in microseconds.
96
* @param flags Specify mode of operation.
98
* For exact description of possible combinations of
99
* usec and flags, see comment for waitq_sleep_timeout().
101
* @return See comment for waitq_sleep_timeout().
103
int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
108
ipl = interrupts_disable();
109
spinlock_lock(&THREAD->lock);
110
THREAD->rwlock_holder_type = RWLOCK_WRITER;
111
spinlock_unlock(&THREAD->lock);
112
interrupts_restore(ipl);
115
* Writers take the easy part.
116
* They just need to acquire the exclusive mutex.
118
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
119
if (SYNCH_FAILED(rc)) {
122
* Lock operation timed out or was interrupted.
123
* The state of rwl is UNKNOWN at this point.
124
* No claims about its holder can be made.
127
ipl = interrupts_disable();
128
spinlock_lock(&rwl->lock);
130
* Now when rwl is locked, we can inspect it again.
131
* If it is held by some readers already, we can let
132
* readers from the head of the wait queue in.
135
let_others_in(rwl, ALLOW_READERS_ONLY);
136
spinlock_unlock(&rwl->lock);
137
interrupts_restore(ipl);
143
/** Acquire reader/writer lock for writing
145
* Acquire reader/writer lock for writing.
146
* Timeout and willingness to block may be specified.
148
* @param rwl Reader/Writer lock.
149
* @param usec Timeout in microseconds.
150
* @param flags Select mode of operation.
152
* For exact description of possible combinations of
153
* usec and flags, see comment for waitq_sleep_timeout().
155
* @return See comment for waitq_sleep_timeout().
157
int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
162
ipl = interrupts_disable();
163
spinlock_lock(&THREAD->lock);
164
THREAD->rwlock_holder_type = RWLOCK_READER;
165
spinlock_unlock(&THREAD->lock);
167
spinlock_lock(&rwl->lock);
170
* Find out whether we can get what we want without blocking.
172
rc = mutex_trylock(&rwl->exclusive);
173
if (SYNCH_FAILED(rc)) {
176
* 'exclusive' mutex is being held by someone else.
177
* If the holder is a reader and there is no one
178
* else waiting for it, we can enter the critical
182
if (rwl->readers_in) {
183
spinlock_lock(&rwl->exclusive.sem.wq.lock);
184
if (list_empty(&rwl->exclusive.sem.wq.head)) {
188
spinlock_unlock(&rwl->exclusive.sem.wq.lock);
191
spinlock_unlock(&rwl->exclusive.sem.wq.lock);
195
* In order to prevent a race condition when a reader
196
* could block another reader at the head of the waitq,
197
* we register a function to unlock rwl->lock
198
* after this thread is put asleep.
201
thread_register_call_me(release_spinlock, &rwl->lock);
203
thread_register_call_me(release_spinlock, NULL);
206
rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
208
case ESYNCH_WOULD_BLOCK:
210
* release_spinlock() wasn't called
212
thread_register_call_me(NULL, NULL);
213
spinlock_unlock(&rwl->lock);
215
case ESYNCH_INTERRUPTED:
217
* The sleep timed out.
218
* We just restore interrupt priority level.
220
case ESYNCH_OK_BLOCKED:
222
* We were woken with rwl->readers_in already
225
* Note that this arrangement avoids race condition
226
* between two concurrent readers. (Race is avoided if
227
* 'exclusive' is locked at the same time as
228
* 'readers_in' is incremented. Same time means both
229
* events happen atomically when rwl->lock is held.)
231
interrupts_restore(ipl);
233
case ESYNCH_OK_ATOMIC:
234
panic("_mutex_lock_timeout() == ESYNCH_OK_ATOMIC.");
237
panic("Invalid ESYNCH.");
246
* We can increment readers_in only if we didn't go to sleep.
247
* For sleepers, rwlock_let_others_in() will do the job.
251
spinlock_unlock(&rwl->lock);
252
interrupts_restore(ipl);
254
return ESYNCH_OK_ATOMIC;
257
/** Release reader/writer lock held by writer
259
* Release reader/writer lock held by writer.
260
* Handoff reader/writer lock ownership directly
261
* to waiting readers or a writer.
263
* @param rwl Reader/Writer lock.
265
void rwlock_write_unlock(rwlock_t *rwl)
269
ipl = interrupts_disable();
270
spinlock_lock(&rwl->lock);
271
let_others_in(rwl, ALLOW_ALL);
272
spinlock_unlock(&rwl->lock);
273
interrupts_restore(ipl);
277
/** Release reader/writer lock held by reader
279
* Release reader/writer lock held by reader.
280
* Handoff reader/writer lock ownership directly
281
* to a waiting writer or don't do anything if more
282
* readers poses the lock.
284
* @param rwl Reader/Writer lock.
286
void rwlock_read_unlock(rwlock_t *rwl)
290
ipl = interrupts_disable();
291
spinlock_lock(&rwl->lock);
292
if (!--rwl->readers_in)
293
let_others_in(rwl, ALLOW_ALL);
294
spinlock_unlock(&rwl->lock);
295
interrupts_restore(ipl);
299
/** Direct handoff of reader/writer lock ownership.
301
* Direct handoff of reader/writer lock ownership
302
* to waiting readers or a writer.
304
* Must be called with rwl->lock locked.
305
* Must be called with interrupts_disable()'d.
307
* @param rwl Reader/Writer lock.
308
* @param readers_only See the description below.
310
* If readers_only is false: (unlock scenario)
311
* Let the first sleeper on 'exclusive' mutex in, no matter
312
* whether it is a reader or a writer. If there are more leading
313
* readers in line, let each of them in.
315
* Otherwise: (timeout scenario)
316
* Let all leading readers in.
318
void let_others_in(rwlock_t *rwl, int readers_only)
320
rwlock_type_t type = RWLOCK_NONE;
322
bool one_more = true;
324
spinlock_lock(&rwl->exclusive.sem.wq.lock);
326
if (!list_empty(&rwl->exclusive.sem.wq.head))
327
t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
331
spinlock_lock(&t->lock);
332
type = t->rwlock_holder_type;
333
spinlock_unlock(&t->lock);
337
* If readers_only is true, we wake all leading readers
338
* if and only if rwl is locked by another reader.
339
* Assumption: readers_only ==> rwl->readers_in
341
if (readers_only && (type != RWLOCK_READER))
345
if (type == RWLOCK_READER) {
347
* Waking up a reader.
348
* We are responsible for incrementing rwl->readers_in
355
* Only the last iteration through this loop can increment
356
* rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
357
* iterations will wake up a thread.
359
/* We call the internal version of waitq_wakeup, which
360
* relies on the fact that the waitq is already locked.
362
_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
365
if (!list_empty(&rwl->exclusive.sem.wq.head)) {
366
t = list_get_instance(rwl->exclusive.sem.wq.head.next,
369
spinlock_lock(&t->lock);
370
if (t->rwlock_holder_type != RWLOCK_READER)
372
spinlock_unlock(&t->lock);
375
} while ((type == RWLOCK_READER) && t && one_more);
377
spinlock_unlock(&rwl->exclusive.sem.wq.lock);
380
/** Release spinlock callback
382
* This is a callback function invoked from the scheduler.
383
* The callback is registered in _rwlock_read_lock_timeout().
385
* @param arg Spinlock.
387
void release_spinlock(void *arg)
389
spinlock_unlock((spinlock_t *) arg);