/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include <cstring>
#include "atomic.h"
#include "tbb_profiling.h"

namespace tbb {

//! Queuing lock with local-only spinning.
/** Competing threads form a FIFO queue; each waiter spins only on its own
    `going` flag, so waiting causes no traffic on a shared cache line.
    @ingroup synchronization */
class queuing_mutex {
public:
    //! Construct unacquired mutex.
    queuing_mutex() {
        q_tail = NULL;
#if TBB_USE_THREADING_TOOLS
        // Register the mutex with threading analysis tools.
        internal_construct();
#endif
    }

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock: internal::no_copy {
        //! Initialize fields to mean "no lock held".
        void initialize() {
            mutex = NULL;
#if TBB_USE_ASSERT
            // Poison 'next' so that use of an unqueued node is caught in debug builds.
            internal::poison_pointer(next);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        /** Upon entry, *this should not be in the "have acquired a mutex" state. */
        scoped_lock( queuing_mutex& m ) {
            initialize();
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( mutex ) release();
        }

        //! Acquire lock on given mutex.
        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );

        //! Release lock.
        void __TBB_EXPORTED_METHOD release();

    private:
        //! The pointer to the mutex owned, or NULL if not holding a mutex.
        queuing_mutex* mutex;

        //! The pointer to the next competitor for a mutex
        scoped_lock* next;

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization.  Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        internal::uintptr going;
    };

    //! Register the mutex with threading analysis tools; defined in the library.
    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = true;

    friend class scoped_lock;
private:
    //! The last competitor requesting the lock
    atomic<scoped_lock*> q_tail;

};

__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)

} // namespace tbb

#endif /* __TBB_queuing_mutex_H */