~ubuntu-branches/ubuntu/karmic/firebird2.1/karmic

« back to all changes in this revision

Viewing changes to src/jrd/GlobalRWLock.cpp

  • Committer: Bazaar Package Importer
  • Author(s): Damyan Ivanov
  • Date: 2008-05-26 23:59:25 UTC
  • Revision ID: james.westby@ubuntu.com-20080526235925-2pnqj6nxpppoeaer
Tags: upstream-2.1.0.17798-0.ds2
Import upstream version 2.1.0.17798-0.ds2

Show diffs side-by-side

added added

removed removed

Lines of Context:
 
1
/*
 
2
 *      PROGRAM:        JRD Access Method
 
3
 *      MODULE:         GlobalRWLock.cpp
 
4
 *      DESCRIPTION:    GlobalRWLock
 
5
 *
 
6
 *  The contents of this file are subject to the Initial
 
7
 *  Developer's Public License Version 1.0 (the "License");
 
8
 *  you may not use this file except in compliance with the
 
9
 *  License. You may obtain a copy of the License at
 
10
 *  http://www.ibphoenix.com/main.nfs?a=ibphoenix&page=ibp_idpl.
 
11
 *
 
12
 *  Software distributed under the License is distributed AS IS,
 
13
 *  WITHOUT WARRANTY OF ANY KIND, either express or implied.
 
14
 *  See the License for the specific language governing rights
 
15
 *  and limitations under the License.
 
16
 *
 
17
 *  The Original Code was created by Nickolay Samofatov
 
18
 *  for the Firebird Open Source RDBMS project.
 
19
 *
 
20
 *  Copyright (c) 2006 Nickolay Samofatov
 
21
 *  and all contributors signed below.
 
22
 *
 
23
 *  All Rights Reserved.
 
24
 *  Contributor(s): ______________________________________.
 
25
 *
 
26
 *
 
27
 */
 
28
 
 
29
#include "firebird.h"
 
30
#include "GlobalRWLock.h"
 
31
#include "../lock/lock.h"
 
32
#include "../lock/lock_proto.h"
 
33
#include "iberr_proto.h"
 
34
#include "isc_proto.h"
 
35
#include "jrd.h"
 
36
#include "lck_proto.h"
 
37
#include "err_proto.h"
 
38
 
 
39
#ifdef COS_DEBUG
 
40
IMPLEMENT_TRACE_ROUTINE(cos_trace, "COS")
 
41
#endif
 
42
 
 
43
namespace Jrd {
 
44
 
 
45
int GlobalRWLock::blocking_ast_cached_lock(void* ast_object)
 
46
{
 
47
        Jrd::GlobalRWLock *GlobalRWLock = 
 
48
                static_cast<Jrd::GlobalRWLock*>(ast_object);
 
49
 
 
50
        ISC_ast_enter();
 
51
 
 
52
        /* Since this routine will be called asynchronously, we must establish
 
53
                a thread context. */
 
54
        Jrd::thread_db thd_context, *tdbb;
 
55
        JRD_set_thread_data(tdbb, thd_context);
 
56
 
 
57
        ISC_STATUS_ARRAY ast_status;
 
58
        Jrd::Database* dbb = GlobalRWLock->cached_lock->lck_dbb;
 
59
 
 
60
        tdbb->setDatabase(dbb);
 
61
        tdbb->setAttachment(NULL);
 
62
        tdbb->tdbb_quantum = QUANTUM;
 
63
        tdbb->setRequest(NULL);
 
64
        tdbb->setTransaction(NULL);
 
65
        tdbb->tdbb_status_vector = ast_status;
 
66
 
 
67
        GlobalRWLock->blockingAstHandler(tdbb);
 
68
 
 
69
        /* Restore the prior thread context */
 
70
 
 
71
        JRD_restore_thread_data();
 
72
 
 
73
        ISC_ast_exit();
 
74
 
 
75
        return 0;       
 
76
}
 
77
 
 
78
// Construct a GlobalRWLock and its underlying cached lock-manager lock.
//   tdbb     - current thread context
//   p        - pool this object allocates from (readers array, cached Lock)
//   lckType  - lock series, stored into lck_type (cast to Jrd::lck_t)
//   lockLen / lockStr - key bytes identifying the lock in the lock manager
//   physical_lock_owner        - owner type used for the physical lock
//   default_logical_lock_owner - owner type assumed for logical requests
//   lock_caching - when true, a blocking AST is registered so the physical
//                  lock can be kept after the last logical release and
//                  surrendered on demand; when false no AST is installed
GlobalRWLock::GlobalRWLock(thread_db* tdbb, MemoryPool& p, locktype_t lckType, 
                                                   size_t lockLen, const UCHAR* lockStr, lck_owner_t physical_lock_owner,
                                                   lck_owner_t default_logical_lock_owner, bool lock_caching)
        : PermanentStorage(p), internal_blocking(0), external_blocking(false), 
          physicalLockOwner(physical_lock_owner), defaultLogicalLockOwner(default_logical_lock_owner), 
          lockCaching(lock_caching), readers(p)
{
        SET_TDBB(tdbb);

        Database* dbb = tdbb->getDatabase();

        // FB_NEW_RPT allocates the Lock with lockLen bytes of trailing
        // storage for the key copied below.
        cached_lock = FB_NEW_RPT(getPool(), lockLen) Lock();
        cached_lock->lck_type = static_cast<Jrd::lck_t>(lckType);
        cached_lock->lck_owner_handle = 0;
        cached_lock->lck_length = lockLen;

        cached_lock->lck_dbb = dbb;
        cached_lock->lck_parent = dbb->dbb_lock;
        // Store "this" so blocking_ast_cached_lock() can recover the object.
        cached_lock->lck_object = reinterpret_cast<blk*>(this);
        cached_lock->lck_ast = lockCaching ? blocking_ast_cached_lock : NULL;
        memcpy(&cached_lock->lck_key, lockStr, lockLen);
        
        // No logical writer initially.
        writer.owner_handle = 0;
        writer.entry_count = 0;
}
 
103
 
 
104
// Release the physical lock (if held) and free the cached Lock object.
GlobalRWLock::~GlobalRWLock()
{
        thread_db* const threadData = JRD_get_thread_data();

        LCK_release(threadData, cached_lock);
        delete cached_lock;
}
 
110
 
 
111
// Acquire the lock at the given level (LCK_read or a write level) for a
// logical owner. Returns true on success, false if the lock-manager request
// fails (or fetch() throws after the lock is granted).
//   level        - requested logical level
//   wait         - lock-manager wait flag, passed through to LCK_lock
//   owner_handle - logical owner identity used for recursion/handoff tracking
// Fast path: if the cached physical lock already covers the request and no
// blocking is pending, only the in-process counters are updated. Otherwise
// the request goes through the lock manager to preserve lock ordering.
bool GlobalRWLock::lock(thread_db* tdbb, locklevel_t level, SSHORT wait, SLONG owner_handle)
{
        SET_TDBB(tdbb);
        fb_assert(owner_handle);

        { // this is a first scope for a code where counters are locked
                CountersLockHolder lockHolder(lockMutex);

                COS_TRACE(("lock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));
                // Check if this is a recursion case
                size_t n;
                if (level == LCK_read) {
                        if (readers.find(owner_handle, n)) {
                                readers[n].entry_count++;
                                return true;
                        }
                } 
                else {
                        if (writer.owner_handle == owner_handle) {
                                writer.entry_count++;
                                return true;
                        }
                }

                // Compatible means: no writer, and either a read request or
                // no readers at all (so a write can be granted locally).
                const bool all_compatible = !writer.entry_count && (level == LCK_read || readers.getCount() == 0);

                // We own the lock and all present requests are compatible with us
                // In case of any congestion we force all requests through the lock
                // manager to ensure lock ordering.
                if (cached_lock->lck_physical >= level && all_compatible && 
                        !internal_blocking && !external_blocking) 
                {
                        if (level == LCK_read) {
                                // n was positioned by the failed find() above,
                                // keeping the readers array sorted.
                                ObjectOwnerData ownerData;
                                ownerData.owner_handle = owner_handle;
                                ownerData.entry_count++;
                                readers.insert(n, ownerData);
                        }
                        else
                        {
                                writer.owner_handle = owner_handle;
                                writer.entry_count++;
                        }

                        return true;
                }

                // We need to release lock to get new level lock
                if ( (cached_lock->lck_physical > 0) && (writer.entry_count == 0) && (readers.getCount() == 0) )
                {
                        LCK_release(tdbb, cached_lock);
                        invalidate(tdbb, false);
                        external_blocking = false;
                        COS_TRACE(("release our lock to get new level lock, type=%i, level=%i", cached_lock->lck_type, cached_lock->lck_physical));
                }

                // Mark a request in flight so the AST handler will not
                // downgrade the cached lock underneath us.
                internal_blocking++;
        }

        // There is some congestion. Need to use the lock manager.
        // Request new lock at the new level. Several concurrent lock requests may 
        // wait here in the same process in parallel.
        Lock* newLock = FB_NEW_RPT(getPool(), cached_lock->lck_length) Lock;
        newLock->lck_type = cached_lock->lck_type;
        newLock->lck_owner_handle = owner_handle;
        newLock->lck_length = cached_lock->lck_length;

        newLock->lck_dbb = cached_lock->lck_dbb;
        newLock->lck_parent = cached_lock->lck_parent;
        newLock->lck_object = cached_lock->lck_object;
        newLock->lck_ast = cached_lock->lck_ast;
        memcpy(&newLock->lck_key, &cached_lock->lck_key, cached_lock->lck_length);

        COS_TRACE(("request new lock, type=%i, level=%i", cached_lock->lck_type, level));
        if (!LCK_lock(tdbb, newLock, level, wait)) {
                COS_TRACE(("Can't get a lock"));
                delete newLock;
                return false;
                // NOTE(review): internal_blocking is not decremented on this
                // failure path - presumably intentional or a latent issue;
                // verify against later upstream revisions.
        }
        COS_TRACE(("Lock is got, type=%i", cached_lock->lck_type));

        { // this is a second scope for a code where counters are locked
                CountersLockHolder lockHolder(lockMutex);

                fb_assert(internal_blocking > 0);
                internal_blocking--;

                // Here old lock is not protecting shared object. We must refresh state by fetch.
                if (newLock->lck_physical >= LCK_read) {
                        try {
                                fetch(tdbb);
                        }
                        catch(const Firebird::Exception&) {
                                LCK_release(tdbb, newLock);
                                delete newLock;
                                return false;
                        }
                }

                if (level == LCK_read) {
                        ObjectOwnerData ownerData;
                        ownerData.entry_count++;
                        ownerData.owner_handle = owner_handle;
                        readers.add(ownerData);
                }
                else
                {
                        writer.owner_handle = owner_handle;
                        writer.entry_count++;
                }

                // Replace cached lock with the new lock if needed
                COS_TRACE(("Replace lock, type=%i", cached_lock->lck_type));
                if (newLock->lck_physical > cached_lock->lck_physical) {
                        LCK_release(tdbb, cached_lock);
                        delete cached_lock;
                        cached_lock = newLock;
                        // Re-home the new lock under the physical owner so it
                        // survives the logical owner going away.
                        if (!LCK_set_owner_handle(tdbb, cached_lock, LCK_get_owner_handle_by_type(tdbb, physicalLockOwner))) {
                                COS_TRACE(("Error: set owner handle for captured lock, type=%i", cached_lock->lck_type));
                                LCK_release(tdbb, cached_lock);
                                return false;
                        }
                }
                else {
                        // Cached lock already at a sufficient level; the
                        // temporary lock is no longer needed.
                        LCK_release(tdbb, newLock);
                        delete newLock;
                }
        }

        return true;
}
 
242
 
 
243
// NOTE: unlock method must be signal safe
// This function may be called in AST. The function doesn't wait.
//
// Release one logical acquisition made by owner_handle at the given level.
// Read releases decrement the matching reader's entry count; a write release
// clears the writer slot. When the last logical owner is gone, the physical
// lock is released or downgraded depending on pending blocking and caching.
void GlobalRWLock::unlock(thread_db* tdbb, locklevel_t level, SLONG owner_handle)
{
        SET_TDBB(tdbb);

        CountersLockHolder lockHolder(lockMutex);

        COS_TRACE(("unlock level=%i", level));

        // Check if this is a recursion case
        if (level == LCK_read) {
                size_t n;
                if (!readers.find(owner_handle, n)) {
                        ERR_bugcheck_msg("Attempt to call GlobalRWLock::unlock() while not holding a valid lock for logical owner");
                }
                fb_assert(readers[n].entry_count > 0);
                readers[n].entry_count--;
                if (readers[n].entry_count == 0)
                        readers.remove(n);
        }
        else {
                // Write locks are not re-entered across owners: exactly one
                // writer with a single entry is expected here.
                fb_assert(writer.owner_handle == owner_handle);
                fb_assert(writer.entry_count == 1);
                fb_assert(cached_lock->lck_physical == LCK_write);
                
                writer.entry_count = 0;
                writer.owner_handle = 0;

                // Optimize non-contention case - downgrade to PR and re-use the lock
                if (!internal_blocking && !external_blocking && lockCaching)
                {
                        if (!LCK_convert(tdbb, cached_lock, LCK_read, 0))
                                ERR_bugcheck_msg("LCK_convert call failed in GlobalRWLock::unlock()");
                        return;
                }
        }

        // No logical owners remain - decide what to do with the physical lock.
        if ( (readers.getCount() == 0) && (writer.entry_count == 0) ) {
                COS_TRACE(("check for release a lock, type=%i", cached_lock->lck_type));
                if (internal_blocking || !lockCaching) {
                        // Either an in-process request is queued or caching is
                        // off: give the lock up entirely.
                        LCK_release(tdbb, cached_lock);
                        invalidate(tdbb, false);
                        external_blocking = false;
                } 
                else if (external_blocking) {
                        // Another process is waiting: downgrade, and if that
                        // drops us below read level, invalidate local state.
                        LCK_downgrade(tdbb, cached_lock);
                        if (cached_lock->lck_physical < LCK_read)
                                invalidate(tdbb, false);
                        external_blocking = false;
                }
        }

        COS_TRACE(("unlock type=%i, level=%i, readerscount=%i, owner=%i", cached_lock->lck_type, level, readers.getCount(), owner_handle));
}
 
298
 
 
299
void GlobalRWLock::blockingAstHandler(thread_db* tdbb)
 
300
{
 
301
        SET_TDBB(tdbb);
 
302
 
 
303
        CountersLockHolder lockHolder(lockMutex);
 
304
 
 
305
        COS_TRACE_AST("bloackingAstHandler");
 
306
        // When we request a new lock counters are not updated until we get it.
 
307
        // As such, we need to check internal_blocking flag that is set during such situation.
 
308
        if ( !internal_blocking && (readers.getCount() == 0) && (writer.entry_count == 0) ) {
 
309
                COS_TRACE_AST("downgrade");
 
310
                LCK_downgrade(tdbb, cached_lock);
 
311
                if (cached_lock->lck_physical < LCK_read) {
 
312
                        invalidate(tdbb, true);
 
313
                        external_blocking = false;
 
314
                }
 
315
        }
 
316
        else
 
317
                external_blocking = true;
 
318
}
 
319
 
 
320
void GlobalRWLock::setLockData(SLONG lck_data)
 
321
{
 
322
        LCK_write_data(cached_lock, lck_data);
 
323
 
324
 
 
325
// Transfer one logical acquisition at the given level from old_owner_handle
// to new_owner_handle without touching the physical lock. For reads, one
// entry is moved between reader slots (creating/removing slots as needed);
// for writes, the writer slot is simply re-labelled. Bugchecks if the old
// owner does not actually hold a read entry.
void GlobalRWLock::changeLockOwner(thread_db* tdbb, locklevel_t level, SLONG old_owner_handle, SLONG new_owner_handle)
{
        SET_TDBB(tdbb);

        // Nothing to do when the owner is unchanged.
        if (old_owner_handle == new_owner_handle)
                return;

        CountersLockHolder lockHolder(lockMutex);

        if (level == LCK_read) {
                size_t n;
                if (readers.find(old_owner_handle, n)) {
                        // Take one entry away from the old owner...
                        fb_assert(readers[n].entry_count > 0);
                        readers[n].entry_count--;
                        if (readers[n].entry_count == 0)
                                readers.remove(n);

                        // ...and credit it to the new owner, inserting a new
                        // slot at the position find() computed if absent.
                        if (readers.find(new_owner_handle, n))
                                readers[n].entry_count++;
                        else {
                                ObjectOwnerData ownerData;
                                ownerData.entry_count++;
                                ownerData.owner_handle = new_owner_handle;
                                readers.insert(n, ownerData);
                        }
                }
                else {
                        ERR_bugcheck_msg("Attempt to perform GlobalRWLock::change_lock_owner() while not holding a valid lock for logical owner");
                }
        }
        else {
                fb_assert(writer.entry_count == 1);
                writer.owner_handle = new_owner_handle;
        }
}
 
360
 
 
361
bool GlobalRWLock::tryReleaseLock(thread_db* tdbb)
 
362
{
 
363
        CountersLockHolder lockHolder(lockMutex);
 
364
        if (!writer.entry_count && !readers.getCount())
 
365
        {
 
366
                LCK_release(tdbb, cached_lock);
 
367
                invalidate(tdbb, false);
 
368
                return true;
 
369
        }
 
370
        return false;
 
371
}
 
372
 
 
373
} // namespace Jrd