/*
 *	PROGRAM:	JRD Access Method
 *	MODULE:		GlobalRWLock.cpp
 *	DESCRIPTION:	GlobalRWLock
 *
 *  The contents of this file are subject to the Initial
 *  Developer's Public License Version 1.0 (the "License");
 *  you may not use this file except in compliance with the
 *  License. You may obtain a copy of the License at
 *  http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 *
 *  Software distributed under the License is distributed AS IS,
 *  WITHOUT WARRANTY OF ANY KIND, either express or implied.
 *  See the License for the specific language governing rights
 *  and limitations under the License.
 *
 *  The Original Code was created by Nickolay Samofatov
 *  for the Firebird Open Source RDBMS project.
 *
 *  Copyright (c) 2006 Nickolay Samofatov
 *  and all contributors signed below.
 *
 *  All Rights Reserved.
 *  Contributor(s): ______________________________________.
 */
30
#include "GlobalRWLock.h"
31
#include "../lock/lock.h"
32
#include "../lock/lock_proto.h"
33
#include "iberr_proto.h"
34
#include "isc_proto.h"
36
#include "lck_proto.h"
37
#include "err_proto.h"
40
IMPLEMENT_TRACE_ROUTINE(cos_trace, "COS")
45
int GlobalRWLock::blocking_ast_cached_lock(void* ast_object)
47
Jrd::GlobalRWLock *GlobalRWLock =
48
static_cast<Jrd::GlobalRWLock*>(ast_object);
52
/* Since this routine will be called asynchronously, we must establish
54
Jrd::thread_db thd_context, *tdbb;
55
JRD_set_thread_data(tdbb, thd_context);
57
ISC_STATUS_ARRAY ast_status;
58
Jrd::Database* dbb = GlobalRWLock->cached_lock->lck_dbb;
60
tdbb->setDatabase(dbb);
61
tdbb->setAttachment(NULL);
62
tdbb->tdbb_quantum = QUANTUM;
63
tdbb->setRequest(NULL);
64
tdbb->setTransaction(NULL);
65
tdbb->tdbb_status_vector = ast_status;
67
GlobalRWLock->blockingAstHandler(tdbb);
69
/* Restore the prior thread context */
71
JRD_restore_thread_data();
78
// Construct a global read-write lock backed by a lock-manager lock.
// lckType/lockLen/lockStr identify the lock-manager key; the AST is attached
// only when lock caching is enabled.
// NOTE(review): reconstructed from a mangled extraction — verify against the
// upstream Firebird GlobalRWLock.cpp before committing.
GlobalRWLock::GlobalRWLock(thread_db* tdbb, MemoryPool& p, locktype_t lckType,
	size_t lockLen, const UCHAR* lockStr, lck_owner_t physical_lock_owner,
	lck_owner_t default_logical_lock_owner, bool lock_caching)
	: PermanentStorage(p), internal_blocking(0), external_blocking(false),
	  physicalLockOwner(physical_lock_owner), defaultLogicalLockOwner(default_logical_lock_owner),
	  lockCaching(lock_caching), readers(p)
{
	Database* dbb = tdbb->getDatabase();

	// Allocate the lock with lockLen bytes of tail space for the key.
	cached_lock = FB_NEW_RPT(getPool(), lockLen) Lock();
	cached_lock->lck_type = static_cast<Jrd::lck_t>(lckType);
	cached_lock->lck_owner_handle = 0;
	cached_lock->lck_length = lockLen;

	cached_lock->lck_dbb = dbb;
	cached_lock->lck_parent = dbb->dbb_lock;
	cached_lock->lck_object = reinterpret_cast<blk*>(this);
	// Without caching there is no need to hear about blockers asynchronously.
	cached_lock->lck_ast = lockCaching ? blocking_ast_cached_lock : NULL;
	memcpy(&cached_lock->lck_key, lockStr, lockLen);

	// No logical writer yet.
	writer.owner_handle = 0;
	writer.entry_count = 0;
}
104
// Release the underlying lock-manager lock on destruction (RAII).
// NOTE(review): reconstructed from a mangled extraction; presumably the pool
// reclaims cached_lock's storage — confirm against upstream.
GlobalRWLock::~GlobalRWLock()
{
	thread_db* tdbb = JRD_get_thread_data();
	LCK_release(tdbb, cached_lock);
}
111
// Acquire the lock at the given level (LCK_read or LCK_write) on behalf of
// logical owner owner_handle. Fast path re-uses the cached physical lock when
// there is no contention; otherwise a new lock is requested from the lock
// manager (may wait up to `wait`). Returns false if the lock cannot be got.
// NOTE(review): large portions of this routine were missing from the mangled
// extraction (recursion-case returns, internal_blocking bookkeeping, failure
// paths, fetch-on-refresh); reconstructed to match the visible fragments —
// verify against the upstream Firebird GlobalRWLock.cpp before committing.
bool GlobalRWLock::lock(thread_db* tdbb, locklevel_t level, SSHORT wait, SLONG owner_handle)
{
	fb_assert(owner_handle);

	size_t n;
	{ // this is a first scope for a code where counters are locked
		CountersLockHolder lockHolder(lockMutex);

		COS_TRACE(("lock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));

		// Check if this is a recursion case
		if (level == LCK_read) {
			if (readers.find(owner_handle, n)) {
				readers[n].entry_count++;
				return true;
			}
		}
		else {
			if (writer.owner_handle == owner_handle) {
				writer.entry_count++;
				return true;
			}
		}

		const bool all_compatible = !writer.entry_count && (level == LCK_read || readers.getCount() == 0);

		// We own the lock and all present requests are compatible with us.
		// In case of any congestion we force all requests through the lock
		// manager to ensure lock ordering.
		if (cached_lock->lck_physical >= level && all_compatible &&
			!internal_blocking && !external_blocking)
		{
			if (level == LCK_read) {
				ObjectOwnerData ownerData;
				ownerData.owner_handle = owner_handle;
				ownerData.entry_count++;
				readers.insert(n, ownerData);
			}
			else {
				writer.owner_handle = owner_handle;
				writer.entry_count++;
			}
			return true;
		}

		// Congested path from here on: make concurrent fast-path users fall
		// through to the lock manager while we re-request.
		internal_blocking++;

		// We need to release lock to get new level lock
		if ( (cached_lock->lck_physical > 0) && (writer.entry_count == 0) && (readers.getCount() == 0) )
		{
			LCK_release(tdbb, cached_lock);
			invalidate(tdbb, false);
			external_blocking = false;
			COS_TRACE(("release our lock to get new level lock, type=%i, level=%i", cached_lock->lck_type, cached_lock->lck_physical));
		}
	}

	// There is some congestion. Need to use the lock manager.
	// Request new lock at the new level. Several concurrent lock requests may
	// wait here in the same process in parallel.
	Lock* newLock = FB_NEW_RPT(getPool(), cached_lock->lck_length) Lock;
	newLock->lck_type = cached_lock->lck_type;
	newLock->lck_owner_handle = owner_handle;
	newLock->lck_length = cached_lock->lck_length;

	newLock->lck_dbb = cached_lock->lck_dbb;
	newLock->lck_parent = cached_lock->lck_parent;
	newLock->lck_object = cached_lock->lck_object;
	newLock->lck_ast = cached_lock->lck_ast;
	memcpy(&newLock->lck_key, &cached_lock->lck_key, cached_lock->lck_length);

	COS_TRACE(("request new lock, type=%i, level=%i", cached_lock->lck_type, level));
	if (!LCK_lock(tdbb, newLock, level, wait)) {
		COS_TRACE(("Can't get a lock"));
		CountersLockHolder lockHolder(lockMutex);
		fb_assert(internal_blocking > 0);
		internal_blocking--;
		delete newLock;
		return false;
	}
	COS_TRACE(("Lock is got, type=%i", cached_lock->lck_type));

	{ // this is a second scope for a code where counters are locked
		CountersLockHolder lockHolder(lockMutex);

		fb_assert(internal_blocking > 0);
		internal_blocking--;

		// Here old lock is not protecting shared object. We must refresh state by fetch.
		if (newLock->lck_physical >= LCK_read) {
			try {
				fetch(tdbb);
			}
			catch(const Firebird::Exception&) {
				LCK_release(tdbb, newLock);
				delete newLock;
				return false;
			}
		}

		// Record the logical ownership we just obtained.
		if (level == LCK_read) {
			ObjectOwnerData ownerData;
			ownerData.entry_count++;
			ownerData.owner_handle = owner_handle;
			readers.add(ownerData);
		}
		else {
			writer.owner_handle = owner_handle;
			writer.entry_count++;
		}

		// Replace cached lock with the new lock if needed
		COS_TRACE(("Replace lock, type=%i", cached_lock->lck_type));
		if (newLock->lck_physical > cached_lock->lck_physical) {
			LCK_release(tdbb, cached_lock);
			delete cached_lock;
			cached_lock = newLock;
			// The cached lock must belong to the physical owner, not to the
			// logical owner that happened to request it.
			if (!LCK_set_owner_handle(tdbb, cached_lock, LCK_get_owner_handle_by_type(tdbb, physicalLockOwner))) {
				COS_TRACE(("Error: set owner handle for captured lock, type=%i", cached_lock->lck_type));
				LCK_release(tdbb, cached_lock);
				return false;
			}
		}
		else {
			LCK_release(tdbb, newLock);
			delete newLock;
		}
	}

	return true;
}
243
// NOTE: unlock method must be signal safe
244
// This function may be called in AST. The function doesn't wait.
245
void GlobalRWLock::unlock(thread_db* tdbb, locklevel_t level, SLONG owner_handle)
249
CountersLockHolder lockHolder(lockMutex);
251
COS_TRACE(("unlock level=%i", level));
253
// Check if this is a recursion case
254
if (level == LCK_read) {
256
if (!readers.find(owner_handle, n)) {
257
ERR_bugcheck_msg("Attempt to call GlobalRWLock::unlock() while not holding a valid lock for logical owner");
259
fb_assert(readers[n].entry_count > 0);
260
readers[n].entry_count--;
261
if (readers[n].entry_count == 0)
265
fb_assert(writer.owner_handle == owner_handle);
266
fb_assert(writer.entry_count == 1);
267
fb_assert(cached_lock->lck_physical == LCK_write);
269
writer.entry_count = 0;
270
writer.owner_handle = 0;
272
// Optimize non-contention case - downgrade to PR and re-use the lock
273
if (!internal_blocking && !external_blocking && lockCaching)
275
if (!LCK_convert(tdbb, cached_lock, LCK_read, 0))
276
ERR_bugcheck_msg("LCK_convert call failed in GlobalRWLock::unlock()");
281
if ( (readers.getCount() == 0) && (writer.entry_count == 0) ) {
282
COS_TRACE(("check for release a lock, type=%i", cached_lock->lck_type));
283
if (internal_blocking || !lockCaching) {
284
LCK_release(tdbb, cached_lock);
285
invalidate(tdbb, false);
286
external_blocking = false;
288
else if (external_blocking) {
289
LCK_downgrade(tdbb, cached_lock);
290
if (cached_lock->lck_physical < LCK_read)
291
invalidate(tdbb, false);
292
external_blocking = false;
296
COS_TRACE(("unlock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));
299
// Handle a blocking AST: downgrade the cached lock if no logical holders are
// present and no internal request is in flight; otherwise remember that an
// external owner is blocked so unlock() releases eagerly.
// Fixes the trace-message typo "bloackingAstHandler".
// NOTE(review): reconstructed from a mangled extraction — verify against the
// upstream Firebird GlobalRWLock.cpp before committing.
void GlobalRWLock::blockingAstHandler(thread_db* tdbb)
{
	CountersLockHolder lockHolder(lockMutex);

	COS_TRACE_AST("blockingAstHandler");
	// When we request a new lock counters are not updated until we get it.
	// As such, we need to check internal_blocking flag that is set during such situation.
	if ( !internal_blocking && (readers.getCount() == 0) && (writer.entry_count == 0) ) {
		COS_TRACE_AST("downgrade");
		LCK_downgrade(tdbb, cached_lock);
		// Downgraded below read level means the object is no longer protected.
		if (cached_lock->lck_physical < LCK_read) {
			invalidate(tdbb, true);
			external_blocking = false;
		}
	}
	else
		external_blocking = true;
}
320
// Store lck_data into the cached lock via the lock manager.
void GlobalRWLock::setLockData(SLONG lck_data)
{
	LCK_write_data(cached_lock, lck_data);
}
325
// Transfer one recursion level of logical ownership from old_owner_handle to
// new_owner_handle without touching the physical lock. Bugchecks if the old
// owner does not actually hold a read lock.
// NOTE(review): branch structure and reader removal were missing from the
// mangled extraction; reconstructed — verify against upstream.
void GlobalRWLock::changeLockOwner(thread_db* tdbb, locklevel_t level, SLONG old_owner_handle, SLONG new_owner_handle)
{
	if (old_owner_handle == new_owner_handle)
		return;

	CountersLockHolder lockHolder(lockMutex);

	if (level == LCK_read) {
		size_t n;
		if (readers.find(old_owner_handle, n)) {
			fb_assert(readers[n].entry_count > 0);
			readers[n].entry_count--;
			if (readers[n].entry_count == 0)
				readers.remove(n);

			// Credit the new owner: bump its count or create a fresh entry.
			if (readers.find(new_owner_handle, n))
				readers[n].entry_count++;
			else {
				ObjectOwnerData ownerData;
				ownerData.entry_count++;
				ownerData.owner_handle = new_owner_handle;
				readers.insert(n, ownerData);
			}
		}
		else {
			ERR_bugcheck_msg("Attempt to perform GlobalRWLock::change_lock_owner() while not holding a valid lock for logical owner");
		}
	}
	else {
		fb_assert(writer.entry_count == 1);
		writer.owner_handle = new_owner_handle;
	}
}
361
bool GlobalRWLock::tryReleaseLock(thread_db* tdbb)
363
CountersLockHolder lockHolder(lockMutex);
364
if (!writer.entry_count && !readers.getCount())
366
LCK_release(tdbb, cached_lock);
367
invalidate(tdbb, false);