~ubuntu-branches/ubuntu/wily/deal.ii/wily-proposed


Viewing changes to contrib/tbb/tbb22_20090809oss/src/tbb/queuing_rw_mutex.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Adam C. Powell, IV, Denis Barbier
  • Date: 2010-07-29 13:47:01 UTC
  • mfrom: (1.1.2 upstream)
  • Revision ID: james.westby@ubuntu.com-20100729134701-qk60t2om7u7oklkb
Tags: 6.3.1-1
[ Adam C. Powell, IV ]
* Changed to source format 3.0 (quilt).
* Changed maintainer to debian-science with Adam Powell as uploader.
* Added source lintian overrides about Adam Powell's name.
* Added Vcs info on git repository.
* Bumped Standards-Version.
* Changed stamp-patch to patch target and fixed its application criterion.
* Moved make_dependencies and expand_instantiations to a versioned directory
  to avoid shlib package conflicts.

[ Denis Barbier ]
* New upstream release (closes: #562332).
  + Added libtbb support.
  + Forward-ported all patches.
* Updates for new PETSc version, including a workaround for differing petsc
  and slepc versions.
* Add debian/watch.
* Update to debhelper 7.
* Added pdebuild patch.

/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
 
/** Before making any changes in the implementation, please emulate algorithmic changes
    with the SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml.
    Some code may look as though it "can be restructured", but its structure does matter! */

#include "tbb/tbb_machine.h"
#include "tbb/tbb_stddef.h"
#include "tbb/tbb_machine.h"
#include "tbb/queuing_rw_mutex.h"
#include "itt_notify.h"


namespace tbb {

using namespace internal;

//! Flag bits in a state_t that specify information about a locking request.
enum state_t_flags {
    STATE_NONE = 0,
    STATE_WRITER = 1,
    STATE_READER = 1<<1,
    STATE_READER_UNBLOCKNEXT = 1<<2,
    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
    STATE_ACTIVEREADER = 1<<3,
    STATE_COMBINED_READER = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
    STATE_UPGRADE_REQUESTED = 1<<4,
    STATE_UPGRADE_WAITING = 1<<5,
    STATE_UPGRADE_LOSER = 1<<6,
    STATE_COMBINED_UPGRADING = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
};

const unsigned char RELEASED = 0;
const unsigned char ACQUIRED = 1;

template<typename T>
inline atomic<T>& as_atomic( T& t ) {
    return *(atomic<T>*)&t;
}

inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()
{
    return as_atomic(internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED;
}

inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock()
{
    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
    // But so far, experiments indicate there is no value in doing so here.
    while( !try_acquire_internal_lock() ) {
        __TBB_Pause(1);
    }
}

inline void queuing_rw_mutex::scoped_lock::release_internal_lock()
{
    __TBB_store_with_release(internal_lock,RELEASED);
}

inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()
{
    spin_wait_until_eq(internal_lock, RELEASED);
}

inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {
    if( flag )
        wait_for_release_of_internal_lock();
    else
        release_internal_lock();
}

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4311 4312)
#endif

//! A view of a T* with additional functionality for twiddling low-order bits.
template<typename T>
class tricky_atomic_pointer: no_copy {
public:
    typedef typename atomic_rep<sizeof(T*)>::word word;

    template<memory_semantics M>
    static T* fetch_and_add( T* volatile * location, word addend ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
    }
    template<memory_semantics M>
    static T* fetch_and_store( T* volatile * location, T* value ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
    }
    template<memory_semantics M>
    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
        return reinterpret_cast<T*>(
                 atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
                                                              reinterpret_cast<word>(comparand))
               );
    }

    T* & ref;
    tricky_atomic_pointer( T*& original ) : ref(original) {};
    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};
    T* operator&( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
    }
    T* operator|( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
    }
};

typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (pop)
#endif

//! Mask for low order bit of a pointer.
static const tricky_pointer::word FLAG = 0x1;

inline
uintptr get_flag( queuing_rw_mutex::scoped_lock* ptr ) {
    return uintptr(tricky_pointer(ptr)&FLAG);
}
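The tagging helpers above exploit the fact that a scoped_lock object is at least word-aligned, so the low-order bit of a scoped_lock* is normally zero and can be borrowed as a marker. As an illustrative aside (not part of the original TBB source; the function name is hypothetical), the idiom round-trips like this:

static void tricky_pointer_tagging_example( queuing_rw_mutex::scoped_lock* p ) {
    // Borrow the low-order bit as a mark ("this link is in use / wait here").
    queuing_rw_mutex::scoped_lock* tagged = tricky_pointer(p) | FLAG;
    // The mark is visible through get_flag() ...
    __TBB_ASSERT( get_flag(tagged)==FLAG, "low-order bit should carry the mark" );
    // ... and masking it off recovers the original, properly aligned pointer.
    __TBB_ASSERT( (tricky_pointer(tagged) & ~FLAG)==p, "original pointer must be recoverable" );
}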
 
//------------------------------------------------------------------------
// Methods of queuing_rw_mutex::scoped_lock
//------------------------------------------------------------------------

void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    mutex = &m;
    prev  = NULL;
    next  = NULL;
    going = 0;
    state = state_t(write ? STATE_WRITER : STATE_READER);
    internal_lock = RELEASED;

    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);

    if( write ) {       // Acquiring for write

        if( pred ) {
            ITT_NOTIFY(sync_prepare, mutex);
            pred = tricky_pointer(pred) & ~FLAG;
            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
            // ensure release semantics on IPF
            __TBB_store_with_release(pred->next,this);
            spin_wait_until_eq(going, 1);
        }

    } else {            // Acquiring for read
#if DO_ITT_NOTIFY
        bool sync_prepare_done = false;
#endif
        if( pred ) {
            unsigned short pred_state;
            __TBB_ASSERT( !this->prev, "the predecessor is already set" );
            if( tricky_pointer(pred)&FLAG ) {
                /* this is only possible if pred is an upgrading reader and it signals us to wait */
                pred_state = STATE_UPGRADE_WAITING;
                pred = tricky_pointer(pred) & ~FLAG;
            } else {
                // Load pred->state now, because once pred->next becomes
                // non-NULL, we must assume that *pred might be destroyed.
                pred_state = pred->state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);
            }
            this->prev = pred;
            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
            // ensure release semantics on IPF
            __TBB_store_with_release(pred->next,this);
            if( pred_state != STATE_ACTIVEREADER ) {
#if DO_ITT_NOTIFY
                sync_prepare_done = true;
                ITT_NOTIFY(sync_prepare, mutex);
#endif
                spin_wait_until_eq(going, 1);
            }
        }
        unsigned short old_state = state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);
        if( old_state!=STATE_READER ) {
#if DO_ITT_NOTIFY
            if( !sync_prepare_done )
                ITT_NOTIFY(sync_prepare, mutex);
#endif
            // Failed to become active reader -> need to unblock the next waiting reader first
            __TBB_ASSERT( state==STATE_READER_UNBLOCKNEXT, "unexpected state" );
            spin_wait_while_eq(next, (scoped_lock*)NULL);
            /* state should be changed before unblocking the next; otherwise the successor might
               finish and another thread could pick up our old state and be left blocked */
            state = STATE_ACTIVEREADER;
            // ensure release semantics on IPF
            __TBB_store_with_release(next->going,1);
        }
    }

    ITT_NOTIFY(sync_acquired, mutex);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(going);
}
 
bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    prev  = NULL;
    next  = NULL;
    going = 0;
    state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);
    internal_lock = RELEASED;

    if( m.q_tail ) return false;
    // The CAS must have release semantics, because we are
    // "sending" the fields initialized above to other processors.
    queuing_rw_mutex::scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    // try_acquire should always have acquire semantics, even if it failed.
    __TBB_load_with_acquire(going);

    if( !pred ) {
        mutex = &m;
        ITT_NOTIFY(sync_acquired, mutex);
        return true;
    } else return false;

}
 
void queuing_rw_mutex::scoped_lock::release( )
{
    __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");

    ITT_NOTIFY(sync_releasing, mutex);

    if( state == STATE_WRITER ) { // Acquired for write

        // The logic below is the same as "writerUnlock", but restructured to remove "return" in the middle of the routine.
        // In the statement below, acquire semantics of reading 'next' is required
        // so that following operations with fields of 'next' are safe.
        scoped_lock* n = __TBB_load_with_acquire(next);
        if( !n ) {
            if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                // this was the only item in the queue, and the queue is now empty.
                goto done;
            }
            spin_wait_while_eq( next, (scoped_lock*)NULL );
            n = next;
        }
        n->going = 2; // protect next queue node from being destroyed too early
        if( n->state==STATE_UPGRADE_WAITING ) {
            // a successor waiting for an upgrade means this writer was upgraded earlier.
            acquire_internal_lock();
            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            n->state = STATE_UPGRADE_LOSER;
            __TBB_store_with_release(n->going,1);
            unblock_or_wait_on_internal_lock(get_flag(tmp));
        } else {
            __TBB_ASSERT( state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
            __TBB_ASSERT( !( tricky_pointer(n->prev) & FLAG ), "use of corrupted pointer!" );
            n->prev = NULL;
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }

    } else { // Acquired for read

        queuing_rw_mutex::scoped_lock *tmp = NULL;
retry:
        // Addition to the original paper: Mark this->prev as in use
        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);

        if( pred ) {
            if( !(pred->try_acquire_internal_lock()) )
            {
                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.
                // In the second case, it may or may not have seen my "in use" flag - need to check
                tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
                if( !(tricky_pointer(tmp)&FLAG) ) {
                    // Wait for the predecessor to change this->prev (e.g. during unlink)
                    spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
                    // Now owner of pred is waiting for _us_ to release its lock
                    pred->release_internal_lock();
                }
                else ; // The "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do

                tmp = NULL;
                goto retry;
            }
            __TBB_ASSERT(pred && pred->internal_lock==ACQUIRED, "predecessor's lock is not acquired");
            this->prev = pred;
            acquire_internal_lock();

            __TBB_store_with_release(pred->next,reinterpret_cast<scoped_lock *>(NULL));

            if( !next && this != mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
                spin_wait_while_eq( next, (void*)NULL );
            }
            __TBB_ASSERT( !get_flag(next), "use of corrupted pointer" );

            // ensure acquire semantics of reading 'next'
            if( __TBB_load_with_acquire(next) ) { // I->next != nil
                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(next->prev), pred);
                // I->prev->next = I->next;
                __TBB_ASSERT(this->prev==pred, NULL);
                __TBB_store_with_release(pred->next,next);
            }
            // Safe to release in the order opposite to acquiring, which makes the code simpler
            pred->release_internal_lock();

        } else { // No predecessor when we looked
            acquire_internal_lock();  // "exclusiveLock(&I->EL)"
            // ensure acquire semantics of reading 'next'
            scoped_lock* n = __TBB_load_with_acquire(next);
            if( !n ) {
                if( this != mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                    spin_wait_while_eq( next, (scoped_lock*)NULL );
                    n = next;
                } else {
                    goto unlock_self;
                }
            }
            n->going = 2; // protect next queue node from being destroyed too early
            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
            // ensure release semantics on IPF
            __TBB_store_with_release(n->going,1);
        }
unlock_self:
        unblock_or_wait_on_internal_lock(get_flag(tmp));
    }
done:
    spin_wait_while_eq( going, 2 );

    initialize();
}
 
bool queuing_rw_mutex::scoped_lock::downgrade_to_reader()
{
    __TBB_ASSERT( state==STATE_WRITER, "no sense to downgrade a reader" );

    ITT_NOTIFY(sync_releasing, mutex);

    // ensure acquire semantics of reading 'next'
    if( ! __TBB_load_with_acquire(next) ) {
        state = STATE_READER;
        if( this==mutex->q_tail ) {
            unsigned short old_state = state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);
            if( old_state==STATE_READER ) {
                goto downgrade_done;
            }
        }
        /* wait for the next to register */
        spin_wait_while_eq( next, (void*)NULL );
    }
    __TBB_ASSERT( next, "still no successor at this point!" );
    if( next->state & STATE_COMBINED_WAITINGREADER )
        __TBB_store_with_release(next->going,1);
    else if( next->state==STATE_UPGRADE_WAITING )
        // a successor waiting for an upgrade means this writer was upgraded earlier.
        next->state = STATE_UPGRADE_LOSER;
    state = STATE_ACTIVEREADER;

downgrade_done:
    return true;
}
 
bool queuing_rw_mutex::scoped_lock::upgrade_to_writer()
{
    __TBB_ASSERT( state==STATE_ACTIVEREADER, "only active reader can be upgraded" );

    queuing_rw_mutex::scoped_lock * tmp;
    queuing_rw_mutex::scoped_lock * me = this;

    ITT_NOTIFY(sync_releasing, mutex);
    state = STATE_UPGRADE_REQUESTED;
requested:
    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
    acquire_internal_lock();
    if( this != mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {
        spin_wait_while_eq( next, (void*)NULL );
        queuing_rw_mutex::scoped_lock * n;
        n = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->next), FLAG);
        unsigned short n_state = n->state;
        /* the next reader can be blocked by our state; the best thing to do is to unblock it */
        if( n_state & STATE_COMBINED_WAITINGREADER )
            __TBB_store_with_release(n->going,1);
        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), this);
        unblock_or_wait_on_internal_lock(get_flag(tmp));
        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
            // save n|FLAG for simplicity of following comparisons
            tmp = tricky_pointer(n)|FLAG;
            atomic_backoff backoff;
            while(next==tmp) {
                if( state & STATE_COMBINED_UPGRADING ) {
                    if( __TBB_load_with_acquire(next)==tmp )
                        next = n;
                    goto waiting;
                }
                backoff.pause();
            }
            __TBB_ASSERT(next!=(tricky_pointer(n)|FLAG), NULL);
            goto requested;
        } else {
            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
            __TBB_ASSERT( (tricky_pointer(n)|FLAG)==next, NULL);
            next = n;
        }
    } else {
        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
        release_internal_lock();
    } // if( this != mutex->q_tail... )
    state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);

waiting:
    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
    __TBB_ASSERT( state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
    __TBB_ASSERT( me==this, NULL );
    ITT_NOTIFY(sync_prepare, mutex);
    /* if no one was blocked by the "corrupted" q_tail, turn it back */
    mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );
    queuing_rw_mutex::scoped_lock * pred;
    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);
    if( pred ) {
        bool success = pred->try_acquire_internal_lock();
        pred->state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
        if( !success ) {
            tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
            if( tricky_pointer(tmp)&FLAG ) {
                spin_wait_while_eq(this->prev, pred);
                pred = this->prev;
            } else {
                spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
                pred->release_internal_lock();
            }
        } else {
            this->prev = pred;
            pred->release_internal_lock();
            spin_wait_while_eq(this->prev, pred);
            pred = this->prev;
        }
        if( pred )
            goto waiting;
    } else {
        // restore the corrupted prev field for possible further use (e.g. if we downgrade back to a reader)
        this->prev = pred;
    }
    __TBB_ASSERT( !pred && !this->prev, NULL );

    // additional lifetime issue prevention checks
    // wait for the successor to finish working with my fields
    wait_for_release_of_internal_lock();
    // now wait for the predecessor to finish working with my fields
    spin_wait_while_eq( going, 2 );
    // there is an acquire-semantics statement at the end of spin_wait_while_eq.

    bool result = ( state != STATE_UPGRADE_LOSER );
    state = STATE_WRITER;
    going = 1;

    ITT_NOTIFY(sync_acquired, mutex);
    return result;
}
 
void queuing_rw_mutex::internal_construct() {
    ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T(""));
}

} // namespace tbb
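
For context, the following is a minimal usage sketch of the public interface whose internals appear above. It is not part of the original file: rw_mutex, shared_value, and read_mostly_update are illustrative names, and the sketch assumes the scoped_lock constructor and destructor declared in tbb/queuing_rw_mutex.h, which forward to the acquire() and release() methods defined in this file.

#include "tbb/queuing_rw_mutex.h"

tbb::queuing_rw_mutex rw_mutex;
int shared_value = 0;

void read_mostly_update() {
    // Acquire for read (write = false); readers and writers queue in FIFO order.
    tbb::queuing_rw_mutex::scoped_lock lock( rw_mutex, /*write=*/false );
    if( shared_value == 0 ) {
        // upgrade_to_writer() returns false if the lock had to be released
        // temporarily while upgrading (the STATE_UPGRADE_LOSER path above);
        // in that case the protected state must be re-examined.
        if( !lock.upgrade_to_writer() && shared_value != 0 )
            return; // another writer initialized the value in the meantime
        shared_value = 1;
        lock.downgrade_to_reader();
    }
    // The destructor releases the lock; lock.release() may also be called explicitly.
}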