/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

/** Before making any changes in the implementation, please emulate algorithmic changes
    with the SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml.
    Some code below may look like it "can be restructured", but its structure does matter! */

#include "tbb/tbb_machine.h"
#include "tbb/tbb_stddef.h"
#include "tbb/queuing_rw_mutex.h"
#include "itt_notify.h"

namespace tbb {

using namespace internal;

//! Flag bits in a state_t that specify information about a locking request.
enum state_t_flags {
    STATE_NONE                   = 0,
    STATE_WRITER                 = 1,
    STATE_READER                 = 1<<1,
    STATE_READER_UNBLOCKNEXT     = 1<<2,
    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
    STATE_ACTIVEREADER           = 1<<3,
    STATE_COMBINED_READER        = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
    STATE_UPGRADE_REQUESTED      = 1<<4,
    STATE_UPGRADE_WAITING        = 1<<5,
    STATE_UPGRADE_LOSER          = 1<<6,
    STATE_COMBINED_UPGRADING     = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
};

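/* Illustrative note (an addition, not original TBB code): the STATE_COMBINED_*
   values are unions of the individual flag bits, so a single mask test
   classifies a node, as the methods below do. A hypothetical sketch:

       unsigned short s = node->state;            // 'node' is hypothetical
       if( s & STATE_COMBINED_READER )   ...      // waiting or active reader
       if( s & STATE_COMBINED_UPGRADING ) ...     // an upgrade is in progress
*/
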
const unsigned char RELEASED = 0;
const unsigned char ACQUIRED = 1;

template<typename T>
inline atomic<T>& as_atomic( T& t ) {
    return *(atomic<T>*)&t;
}

//! Try to acquire the internal lock. Returns true if the lock was acquired.
inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()
{
    return as_atomic(internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED;
}

//! Acquire the internal lock.
inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock()
{
    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
    // But so far, experiments indicate there is no value in doing so here.
    while( !try_acquire_internal_lock() ) {
        __TBB_Pause(1);
    }
}

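/* For reference, a sketch of the test-test-and-set idiom with exponential
   backoff mentioned above (an illustration of the rejected alternative, not
   used by this implementation; it assumes TBB's atomic_backoff helper):

       atomic_backoff backoff;
       for(;;) {
           // "test": spin on plain reads until the lock looks free
           while( internal_lock!=RELEASED )
               backoff.pause();                 // exponential backoff
           // "test-and-set": only then attempt the atomic read-modify-write
           if( try_acquire_internal_lock() )
               return;
       }
*/
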
//! Release the internal lock.
inline void queuing_rw_mutex::scoped_lock::release_internal_lock()
{
    __TBB_store_with_release(internal_lock,RELEASED);
}

//! Wait for the internal lock to be released.
inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()
{
    spin_wait_until_eq(internal_lock, RELEASED);
}

//! Depending on the flag, either wait for the internal lock to be released, or release it.
inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {
    if( flag )
        wait_for_release_of_internal_lock();
    else
        release_internal_lock();
}

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4311 4312)
#endif

//! A view of a T* with additional functionality for twiddling low-order bits.
template<typename T>
class tricky_atomic_pointer: no_copy {
public:
    typedef typename atomic_rep<sizeof(T*)>::word word;

    template<memory_semantics M>
    static T* fetch_and_add( T* volatile * location, word addend ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
    }
    template<memory_semantics M>
    static T* fetch_and_store( T* volatile * location, T* value ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
    }
    template<memory_semantics M>
    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
        return reinterpret_cast<T*>(
            atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
                                                          reinterpret_cast<word>(comparand))
        );
    }

    T* volatile & ref;
    tricky_atomic_pointer( T*& original ) : ref(original) {};
    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};
    T* operator&( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
    }
    T* operator|( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
    }
};

typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (pop)
#endif

//! Mask for low order bit of a pointer.
static const tricky_pointer::word FLAG = 0x1;

uintptr get_flag( queuing_rw_mutex::scoped_lock* ptr ) {
    return uintptr(tricky_pointer(ptr)&FLAG);
}

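/* Illustrative note (an addition, not original TBB code): tricky_pointer and
   FLAG let the queue code carry an "in use"/"wait" marker in the low-order
   bit of a scoped_lock* while still performing atomic pointer operations:

       queuing_rw_mutex::scoped_lock* p = ...;                           // 'p' is hypothetical
       queuing_rw_mutex::scoped_lock* flagged = tricky_pointer(p)|FLAG;  // set the low bit
       queuing_rw_mutex::scoped_lock* clean = tricky_pointer(flagged)&~FLAG; // strip it
       uintptr bit = get_flag(flagged);                                  // == FLAG
*/
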
//------------------------------------------------------------------------
// Methods of queuing_rw_mutex::scoped_lock
//------------------------------------------------------------------------

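/* Typical client-side usage of the methods below, via the RAII interface
   declared in tbb/queuing_rw_mutex.h (a sketch; 'my_mutex', 'shared_data'
   and 'needs_update' are hypothetical):

       tbb::queuing_rw_mutex my_mutex;
       {
           tbb::queuing_rw_mutex::scoped_lock lock( my_mutex, false ); // reader
           ... read shared_data ...
           if( needs_update ) {
               // returns false if the lock had to be released temporarily
               bool stayed_locked = lock.upgrade_to_writer();
               ... write shared_data ...
           }
       } // destructor releases the lock
*/
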
void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    mutex = &m;
    prev  = NULL;
    next  = NULL;
    going = 0;
    state = state_t(write ? STATE_WRITER : STATE_READER);
    internal_lock = RELEASED;

    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);

    if( write ) {       // Acquiring for write

        if( pred ) {
            ITT_NOTIFY(sync_prepare, mutex);
            pred = tricky_pointer(pred) & ~FLAG;
            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
            // ensure release semantics on IPF
            __TBB_store_with_release(pred->next,this);
            spin_wait_until_eq(going, 1);
        }

    } else {            // Acquiring for read
#if DO_ITT_NOTIFY
        bool sync_prepare_done = false;
#endif
        if( pred ) {
            unsigned short pred_state;
            __TBB_ASSERT( !this->prev, "the predecessor is already set" );
            if( tricky_pointer(pred)&FLAG ) {
                /* this is only possible if pred is an upgrading reader and it signals us to wait */
                pred_state = STATE_UPGRADE_WAITING;
                pred = tricky_pointer(pred) & ~FLAG;
            } else {
                // Load pred->state now, because once pred->next becomes
                // non-NULL, we must assume that *pred might be destroyed.
                pred_state = pred->state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);
            }
            this->prev = pred;
            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
            // ensure release semantics on IPF
            __TBB_store_with_release(pred->next,this);
            if( pred_state != STATE_ACTIVEREADER ) {
#if DO_ITT_NOTIFY
                sync_prepare_done = true;
                ITT_NOTIFY(sync_prepare, mutex);
#endif
                spin_wait_until_eq(going, 1);
            }
        }

        unsigned short old_state = state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);
        if( old_state!=STATE_READER ) {
#if DO_ITT_NOTIFY
            if( !sync_prepare_done )
                ITT_NOTIFY(sync_prepare, mutex);
#endif
            // Failed to become active reader -> need to unblock the next waiting reader first
            __TBB_ASSERT( state==STATE_READER_UNBLOCKNEXT, "unexpected state" );
            spin_wait_while_eq(next, (scoped_lock*)NULL);
            /* state should be changed before unblocking the next; otherwise it might finish
               and another thread could pick up our old state and be left blocked */
            state = STATE_ACTIVEREADER;
            // ensure release semantics on IPF
            __TBB_store_with_release(next->going,1);
        }
    }

    ITT_NOTIFY(sync_acquired, mutex);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(going);
}

bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    prev  = NULL;
    next  = NULL;
    going = 0;
    state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);
    internal_lock = RELEASED;

    if( m.q_tail ) return false;
    // The CAS must have release semantics, because we are
    // "sending" the fields initialized above to other processors.
    queuing_rw_mutex::scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    // try_acquire should always have acquire semantic, even if failed.
    __TBB_load_with_acquire(going);

    if( !pred ) {
        mutex = &m;
        ITT_NOTIFY(sync_acquired, mutex);
        return true;
    } else return false;
}

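/* Client-side sketch of the non-blocking path (an illustration; 'my_mutex'
   is hypothetical):

       tbb::queuing_rw_mutex::scoped_lock lock;     // not holding anything yet
       if( lock.try_acquire( my_mutex, true ) ) {   // writer, but never blocks
           ... exclusive access; destructor releases ...
       }
*/
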
void queuing_rw_mutex::scoped_lock::release( )
{
    __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");

    ITT_NOTIFY(sync_releasing, mutex);

    if( state == STATE_WRITER ) { // Acquired for write

        // The logic below is the same as "writerUnlock", but restructured to remove "return" in the middle of routine.
        // In the statement below, acquire semantics of reading 'next' is required
        // so that following operations with fields of 'next' are safe.
        scoped_lock* n = __TBB_load_with_acquire(next);
        if( !n ) {
            if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                // this was the only item in the queue, and the queue is now empty.
                goto done;
            }
            spin_wait_while_eq( next, (scoped_lock*)NULL );
            n = __TBB_load_with_acquire(next);
        }
        n->going = 2; // protect next queue node from being destroyed too early
        if( n->state==STATE_UPGRADE_WAITING ) {
            // the next waiting for upgrade means this writer was upgraded before.
            acquire_internal_lock();
            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            n->state = STATE_UPGRADE_LOSER;
            __TBB_store_with_release(n->going,1);
            unblock_or_wait_on_internal_lock(get_flag(tmp));
        } else {
            __TBB_ASSERT( state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
            __TBB_ASSERT( !( tricky_pointer(n->prev) & FLAG ), "use of corrupted pointer!" );
            n->prev = NULL;
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }

    } else { // Acquired for read

        queuing_rw_mutex::scoped_lock *tmp = NULL;
retry:
        // Addition to the original paper: Mark this->prev as in use
        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);

        if( pred ) {
            if( !(pred->try_acquire_internal_lock()) )
            {
                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.
                // In the second case, it could or could not know my "in use" flag - need to check
                tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
                if( !(tricky_pointer(tmp)&FLAG) ) {
                    // Wait for the predecessor to change this->prev (e.g. during unlink)
                    spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
                    // Now owner of pred is waiting for _us_ to release its lock
                    pred->release_internal_lock();
                }
                else ; // The "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do

                tmp = NULL;
                goto retry;
            }
            __TBB_ASSERT(pred && pred->internal_lock==ACQUIRED, "predecessor's lock is not acquired");
            this->prev = pred;
            acquire_internal_lock();

            __TBB_store_with_release(pred->next,reinterpret_cast<scoped_lock *>(NULL));

            if( !next && this != mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
                spin_wait_while_eq( next, (void*)NULL );
            }
            __TBB_ASSERT( !get_flag(next), "use of corrupted pointer" );

            // ensure acquire semantics of reading 'next'
            if( __TBB_load_with_acquire(next) ) { // I->next != nil
                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(next->prev), pred);
                // I->prev->next = I->next;
                __TBB_ASSERT(this->prev==pred, NULL);
                __TBB_store_with_release(pred->next,next);
            }
            // Safe to release in the order opposite to acquiring which makes the code simpler
            pred->release_internal_lock();

        } else { // No predecessor when we looked
            acquire_internal_lock(); // "exclusiveLock(&I->EL)"
            // ensure acquire semantics of reading 'next'
            scoped_lock* n = __TBB_load_with_acquire(next);
            if( !n ) {
                if( this != mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                    spin_wait_while_eq( next, (scoped_lock*)NULL );
                    n = next;
                } else {
                    goto unlock_self;
                }
            }
            n->going = 2; // protect next queue node from being destroyed too early
            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }
unlock_self:
        unblock_or_wait_on_internal_lock(get_flag(tmp));
    }
done:
    spin_wait_while_eq( going, 2 );

    initialize();
}

bool queuing_rw_mutex::scoped_lock::downgrade_to_reader()
{
    __TBB_ASSERT( state==STATE_WRITER, "no sense to downgrade a reader" );

    ITT_NOTIFY(sync_releasing, mutex);

    // ensure acquire semantics of reading 'next'
    if( ! __TBB_load_with_acquire(next) ) {
        state = STATE_READER;
        if( this==mutex->q_tail ) {
            unsigned short old_state = state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);
            if( old_state==STATE_READER ) {
                // Downgrade completed
                return true;
            }
        }
        /* wait for the next to register */
        spin_wait_while_eq( next, (void*)NULL );
    }
    __TBB_ASSERT( next, "still no successor at this point!" );
    if( next->state & STATE_COMBINED_WAITINGREADER )
        __TBB_store_with_release(next->going,1);
    else if( next->state==STATE_UPGRADE_WAITING )
        // the next waiting for upgrade means this writer was upgraded before.
        next->state = STATE_UPGRADE_LOSER;
    state = STATE_ACTIVEREADER;
    return true;
}

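/* Client-side sketch (an illustration; 'my_mutex' is hypothetical): a writer
   that has finished mutating can keep a consistent view without re-queueing:

       tbb::queuing_rw_mutex::scoped_lock lock( my_mutex, true ); // writer
       ... write ...
       lock.downgrade_to_reader();
       ... read under the reader lock; other readers may join ...
*/
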
bool queuing_rw_mutex::scoped_lock::upgrade_to_writer()
{
    __TBB_ASSERT( state==STATE_ACTIVEREADER, "only active reader can be upgraded" );

    queuing_rw_mutex::scoped_lock * tmp;
    queuing_rw_mutex::scoped_lock * me = this;

    ITT_NOTIFY(sync_releasing, mutex);
    state = STATE_UPGRADE_REQUESTED;

    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
    acquire_internal_lock();
    if( this != mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {
        spin_wait_while_eq( next, (void*)NULL );
        queuing_rw_mutex::scoped_lock * n;
        n = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->next), FLAG);
        unsigned short n_state = n->state;
        /* the next reader can be blocked by our state. the best thing to do is to unblock it */
        if( n_state & STATE_COMBINED_WAITINGREADER )
            __TBB_store_with_release(n->going,1);
        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), this);
        unblock_or_wait_on_internal_lock(get_flag(tmp));
        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
            // save n|FLAG for simplicity of following comparisons
            tmp = tricky_pointer(n)|FLAG;
            atomic_backoff backoff;
            while( next==tmp ) {
                if( state & STATE_COMBINED_UPGRADING ) {
                    if( __TBB_load_with_acquire(next)==tmp )
                        next = n;
                    goto waiting;
                }
                backoff.pause();
            }
            __TBB_ASSERT(next!=(tricky_pointer(n)|FLAG), NULL);
        } else {
            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
            __TBB_ASSERT( (tricky_pointer(n)|FLAG)==next, NULL);
            next = n;
        }
    } else {
        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
        release_internal_lock();
    } // if( this != mutex->q_tail... )
    state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);

waiting:
    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
    __TBB_ASSERT( state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
    __TBB_ASSERT( me==this, NULL );
    ITT_NOTIFY(sync_prepare, mutex);
    /* if no one was blocked by the "corrupted" q_tail, turn it back */
    mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );
    queuing_rw_mutex::scoped_lock * pred;
    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);
    if( pred ) {
        bool success = pred->try_acquire_internal_lock();
        pred->state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
        if( !success ) {
            tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
            if( tricky_pointer(tmp)&FLAG ) {
                spin_wait_while_eq(this->prev, pred);
                pred = this->prev;
            } else {
                spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
                pred->release_internal_lock();
            }
        } else {
            this->prev = pred;
            pred->release_internal_lock();
            spin_wait_while_eq(this->prev, pred);
            pred = this->prev;
        }
        if( pred )
            goto waiting;
    } else {
        // restore the corrupted prev field for possible further use (e.g. if downgrade back to reader)
        this->prev = pred;
    }
    __TBB_ASSERT( !pred && !this->prev, NULL );

    // additional lifetime issue prevention checks
    // wait for the successor to finish working with my fields
    wait_for_release_of_internal_lock();
    // now wait for the predecessor to finish working with my fields
    spin_wait_while_eq( going, 2 );
    // there is an acquire semantics statement in the end of spin_wait_while_eq.

    bool result = ( state != STATE_UPGRADE_LOSER );
    state = STATE_WRITER;
    going = 1;

    ITT_NOTIFY(sync_acquired, mutex);
    return result;
}

void queuing_rw_mutex::internal_construct() {
    ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T(""));
}

} // namespace tbb