/*-------------------------------------------------------------------------
 *
 * lock.c
 *	  POSTGRES primary lock mechanism
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES
 *	  A lock table is a shared memory hash table.  When
 *	  a process tries to acquire a lock of a type that conflicts
 *	  with existing locks, it is put to sleep using the routines
 *	  in storage/lmgr/proc.c.
 *
 *	  For the most part, this code should be invoked via lmgr.c
 *	  or another lock-management module, not directly.
 *
 *	Interface:
 *
 *	InitLocks(), GetLocksMethodTable(),
 *	LockAcquire(), LockRelease(), LockReleaseAll(),
 *	LockCheckConflicts(), GrantLock()
 *
 *-------------------------------------------------------------------------
 */
35
#include "postgres.h"

#include <signal.h>
#include <unistd.h>

#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
46
/* This configuration variable is used to set the lock table size */
int			max_locks_per_xact; /* set by guc.c */

/*
 * Upper bound on the number of shared lock-table entries: each backend
 * (plus each potential prepared transaction) may hold up to
 * max_locks_per_xact locks.  mul_size/add_size check for overflow.
 */
#define NLOCKENTS() \
	mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
54
* Data structures defining the semantics of the standard lock methods.
56
* The conflict table defines the semantics of the various lock modes.
58
static const LOCKMASK LockConflicts[] = {
62
(1 << AccessExclusiveLock),
65
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
67
/* RowExclusiveLock */
68
(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
69
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
71
/* ShareUpdateExclusiveLock */
72
(1 << ShareUpdateExclusiveLock) |
73
(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
74
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
77
(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
78
(1 << ShareRowExclusiveLock) |
79
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
81
/* ShareRowExclusiveLock */
82
(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
83
(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
84
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
88
(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
89
(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
90
(1 << ExclusiveLock) | (1 << AccessExclusiveLock),
92
/* AccessExclusiveLock */
93
(1 << AccessShareLock) | (1 << RowShareLock) |
94
(1 << RowExclusiveLock) | (1 << ShareUpdateExclusiveLock) |
95
(1 << ShareLock) | (1 << ShareRowExclusiveLock) |
96
(1 << ExclusiveLock) | (1 << AccessExclusiveLock)
100
/* Names of lock modes, for debug printouts */
/* Indexed by lock mode; slot 0 is a placeholder for the invalid mode. */
static const char *const lock_mode_names[] =
{
	"INVALID",
	"AccessShareLock",
	"RowShareLock",
	"RowExclusiveLock",
	"ShareUpdateExclusiveLock",
	"ShareLock",
	"ShareRowExclusiveLock",
	"ExclusiveLock",
	"AccessExclusiveLock"
};
static bool Dummy_trace = false;
118
static const LockMethodData default_lockmethod = {
119
AccessExclusiveLock, /* highest valid lock mode number */
130
static const LockMethodData user_lockmethod = {
131
AccessExclusiveLock, /* highest valid lock mode number */
143
* map from lock method id to the lock table data structures
145
static const LockMethod LockMethods[] = {
152
/* Record that's written to 2PC state file when a lock is persisted */
153
typedef struct TwoPhaseLockRecord
157
} TwoPhaseLockRecord;
161
* Pointers to hash tables containing lock state
163
* The LockMethodLockHash and LockMethodProcLockHash hash tables are in
164
* shared memory; LockMethodLocalHash is local to each backend.
166
static HTAB *LockMethodLockHash;
167
static HTAB *LockMethodProcLockHash;
168
static HTAB *LockMethodLocalHash;
171
/* private state for GrantAwaitedLock */
172
static LOCALLOCK *awaitedLock;
173
static ResourceOwner awaitedOwner;
#ifdef LOCK_DEBUG

/*------
 * The following configuration options are available for lock debugging:
 *
 *	   TRACE_LOCKS		-- give a bunch of output what's going on in this file
 *	   TRACE_USERLOCKS	-- same but for user locks
 *	   TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
 *						   (use to avoid output on system tables)
 *	   TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
 *	   DEBUG_DEADLOCKS	-- currently dumps locks at untimely occasions ;)
 *
 * Furthermore, but in storage/lmgr/lwlock.c:
 *	   TRACE_LWLOCKS	-- trace lightweight locks (pretty useless)
 *
 * Define LOCK_DEBUG at compile time to get all these enabled.
 * --------
 */

int			Trace_lock_oidmin = FirstNormalObjectId;
bool		Trace_locks = false;
bool		Trace_userlocks = false;
int			Trace_lock_table = 0;
bool		Debug_deadlocks = false;


/* Should this lock's activity be traced, per the settings above? */
inline static bool
LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
{
	return
		(*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
		 ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
		|| (Trace_lock_table &&
			(tag->locktag_field2 == Trace_lock_table));
}

/* Dump a LOCK's identity, grant/request counts, and wait-queue length */
inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
	if (LOCK_DEBUG_ENABLED(&lock->tag))
		elog(LOG,
			 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
			 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
			 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
			 lock->requested[1], lock->requested[2], lock->requested[3],
			 lock->requested[4], lock->requested[5], lock->requested[6],
			 lock->requested[7], lock->nRequested,
			 lock->granted[1], lock->granted[2], lock->granted[3],
			 lock->granted[4], lock->granted[5], lock->granted[6],
			 lock->granted[7], lock->nGranted,
			 lock->waitProcs.size,
			 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}

/* Dump a PROCLOCK's identity and held-mode bitmask */
inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
	if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
		elog(LOG,
			 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
			 where, proclockP, proclockP->tag.myLock,
			 PROCLOCK_LOCKMETHOD(*(proclockP)),
			 proclockP->tag.myProc, (int) proclockP->holdMask);
}

#else							/* not LOCK_DEBUG */

#define LOCK_PRINT(where, lock, type)
#define PROCLOCK_PRINT(where, proclockP)
#endif   /* not LOCK_DEBUG */
254
static uint32 proclock_hash(const void *key, Size keysize);
255
static void RemoveLocalLock(LOCALLOCK *locallock);
256
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
257
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
258
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
259
PROCLOCK *proclock, LockMethod lockMethodTable);
260
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
261
LockMethod lockMethodTable, uint32 hashcode,
266
* InitLocks -- Initialize the lock manager's data structures.
268
* This is called from CreateSharedMemoryAndSemaphores(), which see for
269
* more comments. In the normal postmaster case, the shared hash tables
270
* are created here, as well as a locallock hash table that will remain
271
* unused and empty in the postmaster itself. Backends inherit the pointers
272
* to the shared tables via fork(), and also inherit an image of the locallock
273
* hash table, which they proceed to use. In the EXEC_BACKEND case, each
274
* backend re-executes this code to obtain pointers to the already existing
275
* shared hash tables and to create its locallock hash table.
282
long init_table_size,
286
* Compute init/max size to request for lock hashtables. Note these
287
* calculations must agree with LockShmemSize!
289
max_table_size = NLOCKENTS();
290
init_table_size = max_table_size / 2;
293
* Allocate hash table for LOCK structs. This stores per-locked-object
296
MemSet(&info, 0, sizeof(info));
297
info.keysize = sizeof(LOCKTAG);
298
info.entrysize = sizeof(LOCK);
299
info.hash = tag_hash;
300
info.num_partitions = NUM_LOCK_PARTITIONS;
301
hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
303
LockMethodLockHash = ShmemInitHash("LOCK hash",
308
if (!LockMethodLockHash)
309
elog(FATAL, "could not initialize lock hash table");
311
/* Assume an average of 2 holders per lock */
313
init_table_size *= 2;
316
* Allocate hash table for PROCLOCK structs. This stores
317
* per-lock-per-holder information.
319
info.keysize = sizeof(PROCLOCKTAG);
320
info.entrysize = sizeof(PROCLOCK);
321
info.hash = proclock_hash;
322
info.num_partitions = NUM_LOCK_PARTITIONS;
323
hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
325
LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
330
if (!LockMethodProcLockHash)
331
elog(FATAL, "could not initialize proclock hash table");
334
* Allocate non-shared hash table for LOCALLOCK structs. This stores lock
335
* counts and resource owner information.
337
* The non-shared table could already exist in this process (this occurs
338
* when the postmaster is recreating shared memory after a backend crash).
339
* If so, delete and recreate it. (We could simply leave it, since it
340
* ought to be empty in the postmaster, but for safety let's zap it.)
342
if (LockMethodLocalHash)
343
hash_destroy(LockMethodLocalHash);
345
info.keysize = sizeof(LOCALLOCKTAG);
346
info.entrysize = sizeof(LOCALLOCK);
347
info.hash = tag_hash;
348
hash_flags = (HASH_ELEM | HASH_FUNCTION);
350
LockMethodLocalHash = hash_create("LOCALLOCK hash",
358
* Fetch the lock method table associated with a given lock
361
GetLocksMethodTable(const LOCK *lock)
363
LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
365
Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
366
return LockMethods[lockmethodid];
371
* Compute the hash code associated with a LOCKTAG.
373
* To avoid unnecessary recomputations of the hash code, we try to do this
374
* just once per function, and then pass it around as needed. Aside from
375
* passing the hashcode to hash_search_with_hash_value(), we can extract
376
* the lock partition number from the hashcode.
379
LockTagHashCode(const LOCKTAG *locktag)
381
return get_hash_value(LockMethodLockHash, (const void *) locktag);
385
* Compute the hash code associated with a PROCLOCKTAG.
387
* Because we want to use just one set of partition locks for both the
388
* LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
389
* fall into the same partition number as their associated LOCKs.
390
* dynahash.c expects the partition number to be the low-order bits of
391
* the hash code, and therefore a PROCLOCKTAG's hash code must have the
392
* same low-order bits as the associated LOCKTAG's hash code. We achieve
393
* this with this specialized hash function.
396
proclock_hash(const void *key, Size keysize)
398
const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
402
Assert(keysize == sizeof(PROCLOCKTAG));
404
/* Look into the associated LOCK object, and compute its hash code */
405
lockhash = LockTagHashCode(&proclocktag->myLock->tag);
408
* To make the hash code also depend on the PGPROC, we xor the proc
409
* struct's address into the hash code, left-shifted so that the
410
* partition-number bits don't change. Since this is only a hash, we
411
* don't care if we lose high-order bits of the address; use an
412
* intermediate variable to suppress cast-pointer-to-int warnings.
414
procptr = PointerGetDatum(proclocktag->myProc);
415
lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
421
* Compute the hash code associated with a PROCLOCKTAG, given the hashcode
422
* for its underlying LOCK.
424
* We use this just to avoid redundant calls of LockTagHashCode().
427
ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
429
uint32 lockhash = hashcode;
433
* This must match proclock_hash()!
435
procptr = PointerGetDatum(proclocktag->myProc);
436
lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
443
* LockAcquire -- Check for lock conflicts, sleep if conflict found,
444
* set lock if/when no conflicts.
447
* locktag: unique identifier for the lockable object
448
* lockmode: lock mode to acquire
449
* sessionLock: if true, acquire lock for session not current transaction
450
* dontWait: if true, don't wait to acquire lock
453
* LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
454
* LOCKACQUIRE_OK lock successfully acquired
455
* LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
457
* In the normal case where dontWait=false and the caller doesn't need to
458
* distinguish a freshly acquired lock from one already taken earlier in
459
* this same transaction, there is no need to examine the return value.
461
* Side Effects: The lock is acquired and recorded in lock tables.
463
* NOTE: if we wait for the lock, there is no way to abort the wait
464
* short of aborting the transaction.
467
LockAcquire(const LOCKTAG *locktag,
472
LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
473
LockMethod lockMethodTable;
474
LOCALLOCKTAG localtag;
475
LOCALLOCK *locallock;
478
PROCLOCKTAG proclocktag;
482
uint32 proclock_hashcode;
484
LWLockId partitionLock;
487
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
488
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
489
lockMethodTable = LockMethods[lockmethodid];
490
if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
491
elog(ERROR, "unrecognized lock mode: %d", lockmode);
494
if (LOCK_DEBUG_ENABLED(locktag))
495
elog(LOG, "LockAcquire: lock [%u,%u] %s",
496
locktag->locktag_field1, locktag->locktag_field2,
497
lockMethodTable->lockModeNames[lockmode]);
500
/* Session locks are never transactional, else check table */
501
if (!sessionLock && lockMethodTable->transactional)
502
owner = CurrentResourceOwner;
507
* Find or create a LOCALLOCK entry for this lock and lockmode
509
MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
510
localtag.lock = *locktag;
511
localtag.mode = lockmode;
513
locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
518
* if it's a new locallock object, initialize it
522
locallock->lock = NULL;
523
locallock->proclock = NULL;
524
locallock->hashcode = LockTagHashCode(&(localtag.lock));
525
locallock->nLocks = 0;
526
locallock->numLockOwners = 0;
527
locallock->maxLockOwners = 8;
528
locallock->lockOwners = NULL;
529
locallock->lockOwners = (LOCALLOCKOWNER *)
530
MemoryContextAlloc(TopMemoryContext,
531
locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
535
/* Make sure there will be room to remember the lock */
536
if (locallock->numLockOwners >= locallock->maxLockOwners)
538
int newsize = locallock->maxLockOwners * 2;
540
locallock->lockOwners = (LOCALLOCKOWNER *)
541
repalloc(locallock->lockOwners,
542
newsize * sizeof(LOCALLOCKOWNER));
543
locallock->maxLockOwners = newsize;
548
* If we already hold the lock, we can just increase the count locally.
550
if (locallock->nLocks > 0)
552
GrantLockLocal(locallock, owner);
553
return LOCKACQUIRE_ALREADY_HELD;
557
* Otherwise we've got to mess with the shared lock table.
559
hashcode = locallock->hashcode;
560
partition = LockHashPartition(hashcode);
561
partitionLock = LockHashPartitionLock(hashcode);
563
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
566
* Find or create a lock with this tag.
568
* Note: if the locallock object already existed, it might have a pointer
569
* to the lock already ... but we probably should not assume that that
570
* pointer is valid, since a lock object with no locks can go away
573
lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
580
LWLockRelease(partitionLock);
582
(errcode(ERRCODE_OUT_OF_MEMORY),
583
errmsg("out of shared memory"),
584
errhint("You might need to increase max_locks_per_transaction.")));
586
locallock->lock = lock;
589
* if it's a new lock object, initialize it
595
SHMQueueInit(&(lock->procLocks));
596
ProcQueueInit(&(lock->waitProcs));
597
lock->nRequested = 0;
599
MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
600
MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
601
LOCK_PRINT("LockAcquire: new", lock, lockmode);
605
LOCK_PRINT("LockAcquire: found", lock, lockmode);
606
Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
607
Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
608
Assert(lock->nGranted <= lock->nRequested);
612
* Create the hash key for the proclock table.
614
proclocktag.myLock = lock;
615
proclocktag.myProc = MyProc;
617
proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
620
* Find or create a proclock entry with this tag
622
proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
623
(void *) &proclocktag,
629
/* Ooops, not enough shmem for the proclock */
630
if (lock->nRequested == 0)
633
* There are no other requestors of this lock, so garbage-collect
634
* the lock object. We *must* do this to avoid a permanent leak
635
* of shared memory, because there won't be anything to cause
636
* anyone to release the lock object later.
638
Assert(SHMQueueEmpty(&(lock->procLocks)));
639
if (!hash_search_with_hash_value(LockMethodLockHash,
640
(void *) &(lock->tag),
644
elog(PANIC, "lock table corrupted");
646
LWLockRelease(partitionLock);
648
(errcode(ERRCODE_OUT_OF_MEMORY),
649
errmsg("out of shared memory"),
650
errhint("You might need to increase max_locks_per_transaction.")));
652
locallock->proclock = proclock;
655
* If new, initialize the new entry
659
proclock->holdMask = 0;
660
proclock->releaseMask = 0;
661
/* Add proclock to appropriate lists */
662
SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
663
SHMQueueInsertBefore(&(MyProc->myProcLocks[partition]),
664
&proclock->procLink);
665
PROCLOCK_PRINT("LockAcquire: new", proclock);
669
PROCLOCK_PRINT("LockAcquire: found", proclock);
670
Assert((proclock->holdMask & ~lock->grantMask) == 0);
672
#ifdef CHECK_DEADLOCK_RISK
675
* Issue warning if we already hold a lower-level lock on this object
676
* and do not hold a lock of the requested level or higher. This
677
* indicates a deadlock-prone coding practice (eg, we'd have a
678
* deadlock if another backend were following the same code path at
679
* about the same time).
681
* This is not enabled by default, because it may generate log entries
682
* about user-level coding practices that are in fact safe in context.
683
* It can be enabled to help find system-level problems.
685
* XXX Doing numeric comparison on the lockmodes is a hack; it'd be
686
* better to use a table. For now, though, this works.
691
for (i = lockMethodTable->numLockModes; i > 0; i--)
693
if (proclock->holdMask & LOCKBIT_ON(i))
695
if (i >= (int) lockmode)
696
break; /* safe: we have a lock >= req level */
697
elog(LOG, "deadlock risk: raising lock level"
698
" from %s to %s on object %u/%u/%u",
699
lockMethodTable->lockModeNames[i],
700
lockMethodTable->lockModeNames[lockmode],
701
lock->tag.locktag_field1, lock->tag.locktag_field2,
702
lock->tag.locktag_field3);
707
#endif /* CHECK_DEADLOCK_RISK */
711
* lock->nRequested and lock->requested[] count the total number of
712
* requests, whether granted or waiting, so increment those immediately.
713
* The other counts don't increment till we get the lock.
716
lock->requested[lockmode]++;
717
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
720
* We shouldn't already hold the desired lock; else locallock table is
723
if (proclock->holdMask & LOCKBIT_ON(lockmode))
724
elog(ERROR, "lock %s on object %u/%u/%u is already held",
725
lockMethodTable->lockModeNames[lockmode],
726
lock->tag.locktag_field1, lock->tag.locktag_field2,
727
lock->tag.locktag_field3);
730
* If lock requested conflicts with locks requested by waiters, must join
731
* wait queue. Otherwise, check for conflict with already-held locks.
732
* (That's last because most complex check.)
734
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
735
status = STATUS_FOUND;
737
status = LockCheckConflicts(lockMethodTable, lockmode,
738
lock, proclock, MyProc);
740
if (status == STATUS_OK)
742
/* No conflict with held or previously requested locks */
743
GrantLock(lock, proclock, lockmode);
744
GrantLockLocal(locallock, owner);
748
Assert(status == STATUS_FOUND);
751
* We can't acquire the lock immediately. If caller specified no
752
* blocking, remove useless table entries and return NOT_AVAIL without
757
if (proclock->holdMask == 0)
759
SHMQueueDelete(&proclock->lockLink);
760
SHMQueueDelete(&proclock->procLink);
761
if (!hash_search_with_hash_value(LockMethodProcLockHash,
762
(void *) &(proclock->tag),
766
elog(PANIC, "proclock table corrupted");
769
PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
771
lock->requested[lockmode]--;
772
LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
773
Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
774
Assert(lock->nGranted <= lock->nRequested);
775
LWLockRelease(partitionLock);
776
if (locallock->nLocks == 0)
777
RemoveLocalLock(locallock);
778
return LOCKACQUIRE_NOT_AVAIL;
782
* Set bitmask of locks this process already holds on this object.
784
MyProc->heldLocks = proclock->holdMask;
787
* Sleep till someone wakes me up.
790
TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
791
locktag->locktag_field2,
792
locktag->locktag_field3,
793
locktag->locktag_field4,
794
locktag->locktag_type,
797
WaitOnLock(locallock, owner);
799
TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
800
locktag->locktag_field2,
801
locktag->locktag_field3,
802
locktag->locktag_field4,
803
locktag->locktag_type,
807
* NOTE: do not do any material change of state between here and
808
* return. All required changes in locktable state must have been
809
* done when the lock was granted to us --- see notes in WaitOnLock.
813
* Check the proclock entry status, in case something in the ipc
814
* communication doesn't work correctly.
816
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
818
PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
819
LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
820
/* Should we retry ? */
821
LWLockRelease(partitionLock);
822
elog(ERROR, "LockAcquire failed");
824
PROCLOCK_PRINT("LockAcquire: granted", proclock);
825
LOCK_PRINT("LockAcquire: granted", lock, lockmode);
828
LWLockRelease(partitionLock);
830
return LOCKACQUIRE_OK;
834
* Subroutine to free a locallock entry
837
RemoveLocalLock(LOCALLOCK *locallock)
839
pfree(locallock->lockOwners);
840
locallock->lockOwners = NULL;
841
if (!hash_search(LockMethodLocalHash,
842
(void *) &(locallock->tag),
844
elog(WARNING, "locallock table corrupted");
848
* LockCheckConflicts -- test whether requested lock conflicts
849
* with those already granted
851
* Returns STATUS_FOUND if conflict, STATUS_OK if no conflict.
854
* Here's what makes this complicated: one process's locks don't
855
* conflict with one another, no matter what purpose they are held for
856
* (eg, session and transaction locks do not conflict).
857
* So, we must subtract off our own locks when determining whether the
858
* requested new lock conflicts with those already held.
861
LockCheckConflicts(LockMethod lockMethodTable,
867
int numLockModes = lockMethodTable->numLockModes;
873
* first check for global conflicts: If no locks conflict with my request,
874
* then I get the lock.
876
* Checking for conflict: lock->grantMask represents the types of
877
* currently held locks. conflictTable[lockmode] has a bit set for each
878
* type of lock that conflicts with request. Bitwise compare tells if
879
* there is a conflict.
881
if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask))
883
PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
888
* Rats. Something conflicts. But it could still be my own lock. We have
889
* to construct a conflict mask that does not reflect our own locks, but
890
* only lock types held by other processes.
892
myLocks = proclock->holdMask;
894
for (i = 1; i <= numLockModes; i++)
896
int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
898
if (lock->granted[i] > myHolding)
899
otherLocks |= LOCKBIT_ON(i);
903
* now check again for conflicts. 'otherLocks' describes the types of
904
* locks held by other processes. If one of these conflicts with the kind
905
* of lock that I want, there is a conflict and I have to sleep.
907
if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
909
/* no conflict. OK to get the lock */
910
PROCLOCK_PRINT("LockCheckConflicts: resolved", proclock);
914
PROCLOCK_PRINT("LockCheckConflicts: conflicting", proclock);
919
* GrantLock -- update the lock and proclock data structures to show
920
* the lock request has been granted.
922
* NOTE: if proc was blocked, it also needs to be removed from the wait list
923
* and have its waitLock/waitProcLock fields cleared. That's not done here.
925
* NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
926
* table entry; but since we may be awaking some other process, we can't do
927
* that here; it's done by GrantLockLocal, instead.
930
GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
933
lock->granted[lockmode]++;
934
lock->grantMask |= LOCKBIT_ON(lockmode);
935
if (lock->granted[lockmode] == lock->requested[lockmode])
936
lock->waitMask &= LOCKBIT_OFF(lockmode);
937
proclock->holdMask |= LOCKBIT_ON(lockmode);
938
LOCK_PRINT("GrantLock", lock, lockmode);
939
Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
940
Assert(lock->nGranted <= lock->nRequested);
944
* UnGrantLock -- opposite of GrantLock.
946
* Updates the lock and proclock data structures to show that the lock
947
* is no longer held nor requested by the current holder.
949
* Returns true if there were any waiters waiting on the lock that
950
* should now be woken up with ProcLockWakeup.
953
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
954
PROCLOCK *proclock, LockMethod lockMethodTable)
956
bool wakeupNeeded = false;
958
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
959
Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
960
Assert(lock->nGranted <= lock->nRequested);
963
* fix the general lock stats
966
lock->requested[lockmode]--;
968
lock->granted[lockmode]--;
970
if (lock->granted[lockmode] == 0)
972
/* change the conflict mask. No more of this lock type. */
973
lock->grantMask &= LOCKBIT_OFF(lockmode);
976
LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
979
* We need only run ProcLockWakeup if the released lock conflicts with at
980
* least one of the lock types requested by waiter(s). Otherwise whatever
981
* conflict made them wait must still exist. NOTE: before MVCC, we could
982
* skip wakeup if lock->granted[lockmode] was still positive. But that's
983
* not true anymore, because the remaining granted locks might belong to
984
* some waiter, who could now be awakened because he doesn't conflict with
987
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
991
* Now fix the per-proclock state.
993
proclock->holdMask &= LOCKBIT_OFF(lockmode);
994
PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1000
* CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1001
* proclock and lock objects if possible, and call ProcLockWakeup if there
1002
* are remaining requests and the caller says it's OK. (Normally, this
1003
* should be called after UnGrantLock, and wakeupNeeded is the result from
1006
* The appropriate partition lock must be held at entry, and will be
1010
CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1011
LockMethod lockMethodTable, uint32 hashcode,
1015
* If this was my last hold on this lock, delete my entry in the proclock
1018
if (proclock->holdMask == 0)
1020
uint32 proclock_hashcode;
1022
PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1023
SHMQueueDelete(&proclock->lockLink);
1024
SHMQueueDelete(&proclock->procLink);
1025
proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1026
if (!hash_search_with_hash_value(LockMethodProcLockHash,
1027
(void *) &(proclock->tag),
1031
elog(PANIC, "proclock table corrupted");
1034
if (lock->nRequested == 0)
1037
* The caller just released the last lock, so garbage-collect the lock
1040
LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1041
Assert(SHMQueueEmpty(&(lock->procLocks)));
1042
if (!hash_search_with_hash_value(LockMethodLockHash,
1043
(void *) &(lock->tag),
1047
elog(PANIC, "lock table corrupted");
1049
else if (wakeupNeeded)
1051
/* There are waiters on this lock, so wake them up. */
1052
ProcLockWakeup(lockMethodTable, lock);
1057
* GrantLockLocal -- update the locallock data structures to show
1058
* the lock request has been granted.
1060
* We expect that LockAcquire made sure there is room to add a new
1061
* ResourceOwner entry.
1064
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1066
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1069
Assert(locallock->numLockOwners < locallock->maxLockOwners);
1070
/* Count the total */
1071
locallock->nLocks++;
1072
/* Count the per-owner lock */
1073
for (i = 0; i < locallock->numLockOwners; i++)
1075
if (lockOwners[i].owner == owner)
1077
lockOwners[i].nLocks++;
1081
lockOwners[i].owner = owner;
1082
lockOwners[i].nLocks = 1;
1083
locallock->numLockOwners++;
1087
* GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1090
* proc.c needs this for the case where we are booted off the lock by
1091
* timeout, but discover that someone granted us the lock anyway.
1093
* We could just export GrantLockLocal, but that would require including
1094
* resowner.h in lock.h, which creates circularity.
1097
GrantAwaitedLock(void)
1099
GrantLockLocal(awaitedLock, awaitedOwner);
1103
/*
 * NOTE(review): corrupted extraction -- bare numeric lines are stray line
 * numbers; braces, some declarations (e.g. 'int len'), and the PG_TRY /
 * PG_CATCH / PG_END_TRY markers referenced by the comments below are
 * missing from this view.  Code lines preserved byte-for-byte.
 */
* WaitOnLock -- wait to acquire a lock
1105
* Caller must have set MyProc->heldLocks to reflect locks already held
1106
* on the lockable object by this process.
1108
* The appropriate partition lock must be held at entry.
1111
WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1113
LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1114
LockMethod lockMethodTable = LockMethods[lockmethodid];
1115
/* volatile: survives longjmp out of the (missing) PG_TRY block */
char * volatile new_status = NULL;
1117
LOCK_PRINT("WaitOnLock: sleeping on lock",
1118
locallock->lock, locallock->tag.mode);
1120
/* Report change to waiting status */
1121
if (update_process_title)
1123
const char *old_status;
1126
old_status = get_ps_display(&len);
1127
/* 8 = strlen(" waiting"); +1 for the NUL terminator */
new_status = (char *) palloc(len + 8 + 1);
1128
memcpy(new_status, old_status, len);
1129
strcpy(new_status + len, " waiting");
1130
set_ps_display(new_status, false);
1131
new_status[len] = '\0'; /* truncate off " waiting" */
1133
pgstat_report_waiting(true);
1135
/* stash lock/owner so GrantAwaitedLock can finish the grant for us */
awaitedLock = locallock;
1136
awaitedOwner = owner;
1139
* NOTE: Think not to put any shared-state cleanup after the call to
1140
* ProcSleep, in either the normal or failure path. The lock state must
1141
* be fully set by the lock grantor, or by CheckDeadLock if we give up
1142
* waiting for the lock. This is necessary because of the possibility
1143
* that a cancel/die interrupt will interrupt ProcSleep after someone else
1144
* grants us the lock, but before we've noticed it. Hence, after granting,
1145
* the locktable state must fully reflect the fact that we own the lock;
1146
* we can't do additional work on return.
1148
* We can and do use a PG_TRY block to try to clean up after failure,
1149
* but this still has a major limitation: elog(FATAL) can occur while
1150
* waiting (eg, a "die" interrupt), and then control won't come back here.
1151
* So all cleanup of essential state should happen in LockWaitCancel,
1152
* not here. We can use PG_TRY to clear the "waiting" status flags,
1153
* since doing that is unimportant if the process exits.
1157
if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
1160
* We failed as a result of a deadlock, see CheckDeadLock().
1164
LOCK_PRINT("WaitOnLock: aborting on lock",
1165
locallock->lock, locallock->tag.mode);
1166
LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1169
* Now that we aren't holding the partition lock, we can give an
1170
* error report including details about the detected deadlock.
1178
/* In this path, awaitedLock remains set until LockWaitCancel */
1180
/* Report change to non-waiting status */
1181
pgstat_report_waiting(false);
1182
if (update_process_title)
1184
/* new_status was already truncated back to the pre-" waiting" text */
set_ps_display(new_status, false);
1188
/* and propagate the error */
1195
/* Report change to non-waiting status */
1196
pgstat_report_waiting(false);
1197
if (update_process_title)
1199
set_ps_display(new_status, false);
1203
LOCK_PRINT("WaitOnLock: wakeup on lock",
1204
locallock->lock, locallock->tag.mode);
1208
/*
 * NOTE(review): corrupted extraction -- bare numbers are stray line
 * numbers; return type, braces, and some statements are missing.
 */
* Remove a proc from the wait-queue it is on (caller must know it is on one).
1209
* This is only used when the proc has failed to get the lock, so we set its
1210
* waitStatus to STATUS_ERROR.
1212
* Appropriate partition lock must be held by caller. Also, caller is
1213
* responsible for signaling the proc if needed.
1215
* NB: this does not clean up any locallock object that may exist for the lock.
1218
RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1220
LOCK *waitLock = proc->waitLock;
1221
PROCLOCK *proclock = proc->waitProcLock;
1222
LOCKMODE lockmode = proc->waitLockMode;
1223
LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1225
/* Make sure proc is waiting */
1226
Assert(proc->waitStatus == STATUS_WAITING);
1227
Assert(proc->links.next != NULL);
1229
Assert(waitLock->waitProcs.size > 0);
1230
Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1232
/* Remove proc from lock's wait queue */
1233
SHMQueueDelete(&(proc->links));
1234
waitLock->waitProcs.size--;
1236
/* Undo increments of request counts by waiting process */
1237
Assert(waitLock->nRequested > 0);
1238
Assert(waitLock->nRequested > proc->waitLock->nGranted);
1239
waitLock->nRequested--;
1240
Assert(waitLock->requested[lockmode] > 0);
1241
waitLock->requested[lockmode]--;
1242
/* don't forget to clear waitMask bit if appropriate */
1243
/* equal counts mean nobody is still waiting for this mode */
if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1244
waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1246
/* Clean up the proc's own state, and pass it the ok/fail signal */
1247
proc->waitLock = NULL;
1248
proc->waitProcLock = NULL;
1249
proc->waitStatus = STATUS_ERROR;
1252
* Delete the proclock immediately if it represents no already-held locks.
1253
* (This must happen now because if the owner of the lock decides to
1254
* release it, and the requested/granted counts then go to zero,
1255
* LockRelease expects there to be no remaining proclocks.) Then see if
1256
* any other waiters for the lock can be woken up now.
1258
CleanUpLock(waitLock, proclock,
1259
LockMethods[lockmethodid], hashcode,
1264
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type 'bool', braces, the hash_search() argument list, several
 * 'return' statements, and some declarations are missing from this view.
 */
* LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1265
* Release a session lock if 'sessionLock' is true, else release a
1266
* regular transaction lock.
1268
* Side Effects: find any waiting processes that are now wakable,
1269
* grant them their requested locks and awaken them.
1270
* (We have to grant the lock here to avoid a race between
1271
* the waking process and any new process to
1272
* come along and request the lock.)
1275
LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1277
LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1278
LockMethod lockMethodTable;
1279
LOCALLOCKTAG localtag;
1280
LOCALLOCK *locallock;
1283
LWLockId partitionLock;
1286
/* validate lockmethodid and lockmode before touching any tables */
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1287
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1288
lockMethodTable = LockMethods[lockmethodid];
1289
if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1290
elog(ERROR, "unrecognized lock mode: %d", lockmode);
1293
if (LOCK_DEBUG_ENABLED(locktag))
1294
elog(LOG, "LockRelease: lock [%u,%u] %s",
1295
locktag->locktag_field1, locktag->locktag_field2,
1296
lockMethodTable->lockModeNames[lockmode]);
1300
* Find the LOCALLOCK entry for this lock and lockmode
1302
MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1303
localtag.lock = *locktag;
1304
localtag.mode = lockmode;
1306
locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1311
* let the caller print its own error message, too. Do not ereport(ERROR).
1313
if (!locallock || locallock->nLocks <= 0)
1315
elog(WARNING, "you don't own a lock of type %s",
1316
lockMethodTable->lockModeNames[lockmode]);
1321
* Decrease the count for the resource owner.
1324
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1325
ResourceOwner owner;
1328
/* Session locks are never transactional, else check table */
1329
if (!sessionLock && lockMethodTable->transactional)
1330
owner = CurrentResourceOwner;
1334
for (i = locallock->numLockOwners - 1; i >= 0; i--)
1336
if (lockOwners[i].owner == owner)
1338
Assert(lockOwners[i].nLocks > 0);
1339
if (--lockOwners[i].nLocks == 0)
1341
/* compact out unused slot */
1342
locallock->numLockOwners--;
1343
/* move the last slot down into the vacated position */
if (i < locallock->numLockOwners)
1344
lockOwners[i] = lockOwners[locallock->numLockOwners];
1351
/* don't release a lock belonging to another owner */
1352
elog(WARNING, "you don't own a lock of type %s",
1353
lockMethodTable->lockModeNames[lockmode]);
1359
* Decrease the total local count. If we're still holding the lock, we're
1362
locallock->nLocks--;
1364
/* still held locally: no shared-memory work needed */
if (locallock->nLocks > 0)
1368
* Otherwise we've got to mess with the shared lock table.
1370
partitionLock = LockHashPartitionLock(locallock->hashcode);
1372
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1375
* We don't need to re-find the lock or proclock, since we kept their
1376
* addresses in the locallock table, and they couldn't have been removed
1377
* while we were holding a lock on them.
1379
lock = locallock->lock;
1380
LOCK_PRINT("LockRelease: found", lock, lockmode);
1381
proclock = locallock->proclock;
1382
PROCLOCK_PRINT("LockRelease: found", proclock);
1385
* Double-check that we are actually holding a lock of the type we want to
1388
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1390
PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
1391
LWLockRelease(partitionLock);
1392
elog(WARNING, "you don't own a lock of type %s",
1393
lockMethodTable->lockModeNames[lockmode]);
1394
RemoveLocalLock(locallock);
1399
* Do the releasing. CleanUpLock will waken any now-wakable waiters.
1401
wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
1403
CleanUpLock(lock, proclock,
1404
lockMethodTable, locallock->hashcode,
1407
LWLockRelease(partitionLock);
1409
RemoveLocalLock(locallock);
1414
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type, braces, several declarations (status variables, loop
 * counters), and the 'continue' statements referenced by nearby comments
 * are missing from this view.  Code lines preserved byte-for-byte.
 */
* LockReleaseAll -- Release all locks of the specified lock method that
1415
* are held by the current process.
1417
* Well, not necessarily *all* locks. The available behaviors are:
1418
* allLocks == true: release all locks including session locks.
1419
* allLocks == false: release all non-session locks.
1422
LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
1424
HASH_SEQ_STATUS status;
1425
LockMethod lockMethodTable;
1428
LOCALLOCK *locallock;
1433
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1434
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1435
lockMethodTable = LockMethods[lockmethodid];
1438
if (*(lockMethodTable->trace_flag))
1439
elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
1442
numLockModes = lockMethodTable->numLockModes;
1445
* First we run through the locallock table and get rid of unwanted
1446
* entries, then we scan the process's proclocks and get rid of those. We
1447
* do this separately because we may have multiple locallock entries
1448
* pointing to the same proclock, and we daren't end up with any dangling
1451
hash_seq_init(&status, LockMethodLocalHash);
1453
while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
1455
if (locallock->proclock == NULL || locallock->lock == NULL)
1458
* We must've run out of shared memory while trying to set up this
1459
* lock. Just forget the local entry.
1461
Assert(locallock->nLocks == 0);
1462
RemoveLocalLock(locallock);
1466
/* Ignore items that are not of the lockmethod to be removed */
1467
if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
1471
* If we are asked to release all locks, we can just zap the entry.
1472
* Otherwise, must scan to see if there are session locks. We assume
1473
* there is at most one lockOwners entry for session locks.
1477
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1479
/* If it's above array position 0, move it down to 0 */
1480
for (i = locallock->numLockOwners - 1; i > 0; i--)
1482
/* NULL owner marks a session lock (no resource owner) */
if (lockOwners[i].owner == NULL)
1484
lockOwners[0] = lockOwners[i];
1489
if (locallock->numLockOwners > 0 &&
1490
lockOwners[0].owner == NULL &&
1491
lockOwners[0].nLocks > 0)
1493
/* Fix the locallock to show just the session locks */
1494
locallock->nLocks = lockOwners[0].nLocks;
1495
locallock->numLockOwners = 1;
1496
/* We aren't deleting this locallock, so done */
1501
/* Mark the proclock to show we need to release this lockmode */
1502
if (locallock->nLocks > 0)
1503
locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
1505
/* And remove the locallock hashtable entry */
1506
RemoveLocalLock(locallock);
1510
* Now, scan each lock partition separately.
1512
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
1514
LWLockId partitionLock = FirstLockMgrLock + partition;
1515
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
1517
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1518
offsetof(PROCLOCK, procLink));
1521
continue; /* needn't examine this partition */
1523
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1527
bool wakeupNeeded = false;
1528
PROCLOCK *nextplock;
1530
/* Get link first, since we may unlink/delete this proclock */
1531
nextplock = (PROCLOCK *)
1532
SHMQueueNext(procLocks, &proclock->procLink,
1533
offsetof(PROCLOCK, procLink));
1535
Assert(proclock->tag.myProc == MyProc);
1537
lock = proclock->tag.myLock;
1539
/* Ignore items that are not of the lockmethod to be removed */
1540
if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
1544
* In allLocks mode, force release of all locks even if locallock
1545
* table had problems
1548
proclock->releaseMask = proclock->holdMask;
1550
Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
1553
* Ignore items that have nothing to be released, unless they have
1554
* holdMask == 0 and are therefore recyclable
1556
if (proclock->releaseMask == 0 && proclock->holdMask != 0)
1559
PROCLOCK_PRINT("LockReleaseAll", proclock);
1560
LOCK_PRINT("LockReleaseAll", lock, 0);
1561
Assert(lock->nRequested >= 0);
1562
Assert(lock->nGranted >= 0);
1563
Assert(lock->nGranted <= lock->nRequested);
1564
Assert((proclock->holdMask & ~lock->grantMask) == 0);
1567
* Release the previously-marked lock modes
1569
/* lock modes are numbered from 1, hence the 1-based loop */
for (i = 1; i <= numLockModes; i++)
1571
if (proclock->releaseMask & LOCKBIT_ON(i))
1572
wakeupNeeded |= UnGrantLock(lock, i, proclock,
1575
Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
1576
Assert(lock->nGranted <= lock->nRequested);
1577
LOCK_PRINT("LockReleaseAll: updated", lock, 0);
1579
proclock->releaseMask = 0;
1581
/* CleanUpLock will wake up waiters if needed. */
1582
CleanUpLock(lock, proclock,
1584
LockTagHashCode(&lock->tag),
1588
proclock = nextplock;
1589
} /* loop over PROCLOCKs within this partition */
1591
LWLockRelease(partitionLock);
1592
} /* loop over partitions */
1595
if (*(lockMethodTable->trace_flag))
1596
elog(LOG, "LockReleaseAll done");
1601
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type, braces, 'int i', and some control-flow lines are missing.
 */
* LockReleaseCurrentOwner
1602
* Release all locks belonging to CurrentResourceOwner
1605
LockReleaseCurrentOwner(void)
1607
HASH_SEQ_STATUS status;
1608
LOCALLOCK *locallock;
1609
LOCALLOCKOWNER *lockOwners;
1612
hash_seq_init(&status, LockMethodLocalHash);
1614
while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
1616
/* Ignore items that must be nontransactional */
1617
if (!LockMethods[LOCALLOCK_LOCKMETHOD(*locallock)]->transactional)
1620
/* Scan to see if there are any locks belonging to current owner */
1621
lockOwners = locallock->lockOwners;
1622
for (i = locallock->numLockOwners - 1; i >= 0; i--)
1624
if (lockOwners[i].owner == CurrentResourceOwner)
1626
Assert(lockOwners[i].nLocks > 0);
1627
if (lockOwners[i].nLocks < locallock->nLocks)
1630
* We will still hold this lock after forgetting this
1633
locallock->nLocks -= lockOwners[i].nLocks;
1634
/* compact out unused slot */
1635
locallock->numLockOwners--;
1636
if (i < locallock->numLockOwners)
1637
lockOwners[i] = lockOwners[locallock->numLockOwners];
1641
/* this owner holds every remaining count on the lock */
Assert(lockOwners[i].nLocks == locallock->nLocks);
1642
/* We want to call LockRelease just once */
1643
lockOwners[i].nLocks = 1;
1644
locallock->nLocks = 1;
1645
if (!LockRelease(&locallock->tag.lock,
1646
locallock->tag.mode,
1648
elog(WARNING, "LockReleaseCurrentOwner: failed??");
1657
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type, braces, the 'ic'/'ip' declarations and their
 * initializations, and the branch structure around the slot merge are
 * missing from this view.
 */
* LockReassignCurrentOwner
1658
* Reassign all locks belonging to CurrentResourceOwner to belong
1659
* to its parent resource owner
1662
LockReassignCurrentOwner(void)
1664
ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
1665
HASH_SEQ_STATUS status;
1666
LOCALLOCK *locallock;
1667
LOCALLOCKOWNER *lockOwners;
1669
Assert(parent != NULL);
1671
hash_seq_init(&status, LockMethodLocalHash);
1673
while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
1679
/* Ignore items that must be nontransactional */
1680
if (!LockMethods[LOCALLOCK_LOCKMETHOD(*locallock)]->transactional)
1684
* Scan to see if there are any locks belonging to current owner or
1687
lockOwners = locallock->lockOwners;
1688
for (i = locallock->numLockOwners - 1; i >= 0; i--)
1690
/* 'ic' = child's slot index, 'ip' = parent's slot index (decls lost) */
if (lockOwners[i].owner == CurrentResourceOwner)
1692
else if (lockOwners[i].owner == parent)
1697
continue; /* no current locks */
1701
/* Parent has no slot, so just give it child's slot */
1702
lockOwners[ic].owner = parent;
1706
/* Merge child's count with parent's */
1707
lockOwners[ip].nLocks += lockOwners[ic].nLocks;
1708
/* compact out unused slot */
1709
locallock->numLockOwners--;
1710
if (ic < locallock->numLockOwners)
1711
lockOwners[ic] = lockOwners[locallock->numLockOwners];
1719
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * braces, the hash_search_with_hash_value() argument list, the
 * 'count' declaration, the proclock-scan loop header, and the final
 * 'return vxids;' are missing from this view.
 */
* Get an array of VirtualTransactionIds of xacts currently holding locks
1720
* that would conflict with the specified lock/lockmode.
1721
* xacts merely awaiting such a lock are NOT reported.
1723
* The result array is palloc'd and is terminated with an invalid VXID.
1725
* Of course, the result could be out of date by the time it's returned,
1726
* so use of this function has to be thought about carefully.
1728
* Note we never include the current xact's vxid in the result array,
1729
* since an xact never blocks itself. Also, prepared transactions are
1730
* ignored, which is a bit more debatable but is appropriate for current
1731
* uses of the result.
1733
VirtualTransactionId *
1734
GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
1736
VirtualTransactionId *vxids;
1737
LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1738
LockMethod lockMethodTable;
1740
LOCKMASK conflictMask;
1741
SHM_QUEUE *procLocks;
1744
LWLockId partitionLock;
1747
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1748
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1749
lockMethodTable = LockMethods[lockmethodid];
1750
if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1751
elog(ERROR, "unrecognized lock mode: %d", lockmode);
1754
* Allocate memory to store results, and fill with InvalidVXID. We only
1755
* need enough space for MaxBackends + a terminator, since prepared xacts
1758
vxids = (VirtualTransactionId *)
1759
palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
1762
* Look up the lock object matching the tag.
1764
hashcode = LockTagHashCode(locktag);
1765
partitionLock = LockHashPartitionLock(hashcode);
1767
/* read-only scan, so shared mode suffices */
LWLockAcquire(partitionLock, LW_SHARED);
1769
lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1777
* If the lock object doesn't exist, there is nothing holding a lock
1778
* on this lockable object.
1780
LWLockRelease(partitionLock);
1785
* Examine each existing holder (or awaiter) of the lock.
1787
conflictMask = lockMethodTable->conflictTab[lockmode];
1789
procLocks = &(lock->procLocks);
1791
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1792
offsetof(PROCLOCK, lockLink));
1796
if (conflictMask & proclock->holdMask)
1798
PGPROC *proc = proclock->tag.myProc;
1800
/* A backend never blocks itself */
1803
VirtualTransactionId vxid;
1805
GET_VXID_FROM_PGPROC(vxid, *proc);
1808
* If we see an invalid VXID, then either the xact has already
1809
* committed (or aborted), or it's a prepared xact. In either
1810
* case we may ignore it.
1812
if (VirtualTransactionIdIsValid(vxid))
1813
vxids[count++] = vxid;
1817
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1818
offsetof(PROCLOCK, lockLink));
1821
LWLockRelease(partitionLock);
1823
if (count > MaxBackends) /* should never happen */
1824
elog(PANIC, "too many conflicting locks found");
1832
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type, braces, 'int i', and the 'continue' statements implied by
 * the "Ignore ..." comments are missing from this view.
 */
* Do the preparatory work for a PREPARE: make 2PC state file records
1833
* for all locks currently held.
1835
* Non-transactional locks are ignored, as are VXID locks.
1837
* There are some special cases that we error out on: we can't be holding
1838
* any session locks (should be OK since only VACUUM uses those) and we
1839
* can't be holding any locks on temporary objects (since that would mess
1840
* up the current backend if it tries to exit before the prepared xact is
1844
AtPrepare_Locks(void)
1846
HASH_SEQ_STATUS status;
1847
LOCALLOCK *locallock;
1850
* We don't need to touch shared memory for this --- all the necessary
1851
* state information is in the locallock table.
1853
hash_seq_init(&status, LockMethodLocalHash);
1855
while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
1857
TwoPhaseLockRecord record;
1858
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1861
/* Ignore nontransactional locks */
1862
if (!LockMethods[LOCALLOCK_LOCKMETHOD(*locallock)]->transactional)
1866
* Ignore VXID locks. We don't want those to be held by prepared
1867
* transactions, since they aren't meaningful after a restart.
1869
if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
1872
/* Ignore it if we don't actually hold the lock */
1873
if (locallock->nLocks <= 0)
1876
/* Scan to verify there are no session locks */
1877
for (i = locallock->numLockOwners - 1; i >= 0; i--)
1879
/* elog not ereport since this should not happen */
1880
/* NULL owner marks a session lock, which cannot survive PREPARE */
if (lockOwners[i].owner == NULL)
1881
elog(ERROR, "cannot PREPARE when session locks exist")
1885
* Create a 2PC record.
1887
memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
1888
record.lockmode = locallock->tag.mode;
1890
RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
1891
&record, sizeof(TwoPhaseLockRecord));
1897
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * return type, braces, several declarations ('bool found', 'LOCKMASK
 * holdMask', loop headers over proclocks), 'continue' statements, and the
 * trailing END_CRIT_SECTION() are missing from this view.
 */
* Clean up after successful PREPARE
1899
* Here, we want to transfer ownership of our locks to a dummy PGPROC
1900
* that's now associated with the prepared transaction, and we want to
1901
* clean out the corresponding entries in the LOCALLOCK table.
1903
* Note: by removing the LOCALLOCK entries, we are leaving dangling
1904
* pointers in the transaction's resource owner. This is OK at the
1905
* moment since resowner.c doesn't try to free locks retail at a toplevel
1906
* transaction commit or abort. We could alternatively zero out nLocks
1907
* and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
1908
* but that probably costs more cycles.
1911
PostPrepare_Locks(TransactionId xid)
1913
PGPROC *newproc = TwoPhaseGetDummyProc(xid);
1914
HASH_SEQ_STATUS status;
1915
LOCALLOCK *locallock;
1918
PROCLOCKTAG proclocktag;
1922
/* This is a critical section: any error means big trouble */
1923
START_CRIT_SECTION();
1926
* First we run through the locallock table and get rid of unwanted
1927
* entries, then we scan the process's proclocks and transfer them to the
1930
* We do this separately because we may have multiple locallock entries
1931
* pointing to the same proclock, and we daren't end up with any dangling
1934
hash_seq_init(&status, LockMethodLocalHash);
1936
while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
1938
if (locallock->proclock == NULL || locallock->lock == NULL)
1941
* We must've run out of shared memory while trying to set up this
1942
* lock. Just forget the local entry.
1944
Assert(locallock->nLocks == 0);
1945
RemoveLocalLock(locallock);
1949
/* Ignore nontransactional locks */
1950
if (!LockMethods[LOCALLOCK_LOCKMETHOD(*locallock)]->transactional)
1953
/* Ignore VXID locks */
1954
if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
1957
/* We already checked there are no session locks */
1959
/* Mark the proclock to show we need to release this lockmode */
1960
if (locallock->nLocks > 0)
1961
locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
1963
/* And remove the locallock hashtable entry */
1964
RemoveLocalLock(locallock);
1968
* Now, scan each lock partition separately.
1970
for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
1972
LWLockId partitionLock = FirstLockMgrLock + partition;
1973
SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
1975
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1976
offsetof(PROCLOCK, procLink));
1979
continue; /* needn't examine this partition */
1981
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1985
PROCLOCK *nextplock;
1987
PROCLOCK *newproclock;
1989
/* Get link first, since we may unlink/delete this proclock */
1990
nextplock = (PROCLOCK *)
1991
SHMQueueNext(procLocks, &proclock->procLink,
1992
offsetof(PROCLOCK, procLink));
1994
Assert(proclock->tag.myProc == MyProc);
1996
lock = proclock->tag.myLock;
1998
/* Ignore nontransactional locks */
1999
if (!LockMethods[LOCK_LOCKMETHOD(*lock)]->transactional)
2002
/* Ignore VXID locks */
2003
if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
2006
PROCLOCK_PRINT("PostPrepare_Locks", proclock);
2007
LOCK_PRINT("PostPrepare_Locks", lock, 0);
2008
Assert(lock->nRequested >= 0);
2009
Assert(lock->nGranted >= 0);
2010
Assert(lock->nGranted <= lock->nRequested);
2011
Assert((proclock->holdMask & ~lock->grantMask) == 0);
2014
* Since there were no session locks, we should be releasing all
2017
if (proclock->releaseMask != proclock->holdMask)
2018
elog(PANIC, "we seem to have dropped a bit somewhere");
2020
holdMask = proclock->holdMask;
2023
* We cannot simply modify proclock->tag.myProc to reassign
2024
* ownership of the lock, because that's part of the hash key and
2025
* the proclock would then be in the wrong hash chain. So, unlink
2026
* and delete the old proclock; create a new one with the right
2027
* contents; and link it into place. We do it in this order to be
2028
* certain we won't run out of shared memory (the way dynahash.c
2029
* works, the deleted object is certain to be available for
2032
SHMQueueDelete(&proclock->lockLink);
2033
SHMQueueDelete(&proclock->procLink);
2034
if (!hash_search(LockMethodProcLockHash,
2035
(void *) &(proclock->tag),
2037
elog(PANIC, "proclock table corrupted");
2040
* Create the hash key for the new proclock table.
2042
proclocktag.myLock = lock;
2043
proclocktag.myProc = newproc;
2045
newproclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2046
(void *) &proclocktag,
2047
HASH_ENTER_NULL, &found);
2049
ereport(PANIC, /* should not happen */
2050
(errcode(ERRCODE_OUT_OF_MEMORY),
2051
errmsg("out of shared memory"),
2052
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
2055
* If new, initialize the new entry
2059
newproclock->holdMask = 0;
2060
newproclock->releaseMask = 0;
2061
/* Add new proclock to appropriate lists */
2062
SHMQueueInsertBefore(&lock->procLocks, &newproclock->lockLink);
2063
/* link into the dummy proc's per-partition proclock list */
SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
2064
&newproclock->procLink);
2065
PROCLOCK_PRINT("PostPrepare_Locks: new", newproclock);
2069
PROCLOCK_PRINT("PostPrepare_Locks: found", newproclock);
2070
Assert((newproclock->holdMask & ~lock->grantMask) == 0);
2074
* Pass over the identified lock ownership.
2076
Assert((newproclock->holdMask & holdMask) == 0);
2077
newproclock->holdMask |= holdMask;
2080
proclock = nextplock;
2081
} /* loop over PROCLOCKs within this partition */
2083
LWLockRelease(partitionLock);
2084
} /* loop over partitions */
2091
/*
 * NOTE(review): corrupted extraction -- the function's signature line
 * (presumably 'Size LockShmemSize(void)' -- TODO confirm against the
 * original file), braces, and the final 'return size;' are missing.
 */
* Estimate shared-memory space used for lock tables
2097
long max_table_size;
2099
/* lock hash table */
2100
max_table_size = NLOCKENTS();
2101
size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
2103
/* proclock hash table */
2104
/* assumes roughly two proclocks per lock entry on average */
max_table_size *= 2;
2105
size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
2108
* Since NLOCKENTS is only an estimate, add 10% safety margin.
2110
size = add_size(size, size / 10);
2116
/*
 * NOTE(review): corrupted extraction -- stray line numbers interleaved;
 * the return type (LockData *), braces, the declarations of 'data', 'el',
 * 'els', 'proclock', 'i', the 'el++' increment, and the final
 * 'return data;' are missing from this view.
 */
* GetLockStatusData - Return a summary of the lock manager's internal
2117
* status, for use in a user-level reporting function.
2119
* The return data consists of an array of PROCLOCK objects, with the
2120
* associated PGPROC and LOCK objects for each. Note that multiple
2121
* copies of the same PGPROC and/or LOCK objects are likely to appear.
2122
* It is the caller's responsibility to match up duplicates if wanted.
2124
* The design goal is to hold the LWLocks for as short a time as possible;
2125
* thus, this function simply makes a copy of the necessary data and releases
2126
* the locks, allowing the caller to contemplate and format the data for as
2127
* long as it pleases.
2130
GetLockStatusData(void)
2134
HASH_SEQ_STATUS seqstat;
2139
data = (LockData *) palloc(sizeof(LockData));
2142
* Acquire lock on the entire shared lock data structure. We can't
2143
* operate one partition at a time if we want to deliver a self-consistent
2144
* view of the state.
2146
* Since this is a read-only operation, we take shared instead of
2147
* exclusive lock. There's not a whole lot of point to this, because all
2148
* the normal operations require exclusive lock, but it doesn't hurt
2149
* anything either. It will at least allow two backends to do
2150
* GetLockStatusData in parallel.
2152
* Must grab LWLocks in partition-number order to avoid LWLock deadlock.
2154
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
2155
LWLockAcquire(FirstLockMgrLock + i, LW_SHARED);
2157
/* Now we can safely count the number of proclocks */
2158
els = hash_get_num_entries(LockMethodProcLockHash);
2160
data->nelements = els;
2161
data->proclocks = (PROCLOCK *) palloc(sizeof(PROCLOCK) * els);
2162
data->procs = (PGPROC *) palloc(sizeof(PGPROC) * els);
2163
data->locks = (LOCK *) palloc(sizeof(LOCK) * els);
2165
/* Now scan the tables to copy the data */
2166
hash_seq_init(&seqstat, LockMethodProcLockHash);
2169
while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
2171
PGPROC *proc = proclock->tag.myProc;
2172
LOCK *lock = proclock->tag.myLock;
2174
/* flat copies of the shared structs; safe while LWLocks are held */
memcpy(&(data->proclocks[el]), proclock, sizeof(PROCLOCK));
2175
memcpy(&(data->procs[el]), proc, sizeof(PGPROC));
2176
memcpy(&(data->locks[el]), lock, sizeof(LOCK));
2182
* And release locks. We do this in reverse order for two reasons: (1)
2183
* Anyone else who needs more than one of the locks will be trying to lock
2184
* them in increasing order; we don't want to release the other process
2185
* until it can get all the locks it needs. (2) This avoids O(N^2)
2186
* behavior inside LWLockRelease.
2188
for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
2189
LWLockRelease(FirstLockMgrLock + i);
2191
Assert(el == data->nelements);
2196
/*
 * NOTE(review): corrupted extraction -- the return type line
 * ('const char *') and braces are missing; stray line numbers remain.
 */
/* Provide the textual name of any lock mode */
2198
GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
2200
Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
2201
Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
2202
return LockMethods[lockmethodid]->lockModeNames[mode];
2207
/*
 * NOTE(review): corrupted extraction -- return type, braces, several
 * declarations ('proclock', 'lock', 'i'), the guard on proc being NULL,
 * and the inner while-loop header are missing.  Debug-only routine
 * (surrounding LOCK_DEBUG #ifdef is visible at the end of this section).
 */
* Dump all locks in the given proc's myProcLocks lists.
2209
* Caller is responsible for having acquired appropriate LWLocks.
2212
DumpLocks(PGPROC *proc)
2214
SHM_QUEUE *procLocks;
2223
LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
2225
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
2227
procLocks = &(proc->myProcLocks[i]);
2229
proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2230
offsetof(PROCLOCK, procLink));
2234
Assert(proclock->tag.myProc == proc);
2236
lock = proclock->tag.myLock;
2238
PROCLOCK_PRINT("DumpLocks", proclock);
2239
LOCK_PRINT("DumpLocks", lock, 0);
2241
/* advance to next proclock in this partition's list */
proclock = (PROCLOCK *)
2242
SHMQueueNext(procLocks, &proclock->procLink,
2243
offsetof(PROCLOCK, procLink));
2249
/*
 * NOTE(review): corrupted extraction -- the function signature (by the
 * trailing #endif, presumably 'DumpAllLocks(void)' under LOCK_DEBUG --
 * TODO confirm), braces, and local declarations are missing.
 */
* Dump all lmgr locks.
2251
* Caller is responsible for having acquired appropriate LWLocks.
2259
HASH_SEQ_STATUS status;
2263
if (proc && proc->waitLock)
2264
LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
2266
hash_seq_init(&status, LockMethodProcLockHash);
2268
while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
2270
PROCLOCK_PRINT("DumpAllLocks", proclock);
2272
lock = proclock->tag.myLock;
2274
LOCK_PRINT("DumpAllLocks", lock, 0);
2276
/* proclock with no parent LOCK indicates shared-state corruption */
elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
2279
#endif /* LOCK_DEBUG */
2282
* LOCK 2PC resource manager's routines
2286
* Re-acquire a lock belonging to a transaction that was prepared.
2288
* Because this function is run at db startup, re-acquiring the locks should
2289
* never conflict with running transactions because there are none. We
2290
* assume that the lock state represented by the stored 2PC files is legal.
2293
lock_twophase_recover(TransactionId xid, uint16 info,
2294
void *recdata, uint32 len)
2296
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
2297
PGPROC *proc = TwoPhaseGetDummyProc(xid);
2300
LOCKMETHODID lockmethodid;
2303
PROCLOCKTAG proclocktag;
2306
uint32 proclock_hashcode;
2308
LWLockId partitionLock;
2309
LockMethod lockMethodTable;
2311
Assert(len == sizeof(TwoPhaseLockRecord));
2312
locktag = &rec->locktag;
2313
lockmode = rec->lockmode;
2314
lockmethodid = locktag->locktag_lockmethodid;
2316
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2317
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2318
lockMethodTable = LockMethods[lockmethodid];
2320
hashcode = LockTagHashCode(locktag);
2321
partition = LockHashPartition(hashcode);
2322
partitionLock = LockHashPartitionLock(hashcode);
2324
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2327
* Find or create a lock with this tag.
2329
lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2336
LWLockRelease(partitionLock);
2338
(errcode(ERRCODE_OUT_OF_MEMORY),
2339
errmsg("out of shared memory"),
2340
errhint("You might need to increase max_locks_per_transaction.")));
2344
* if it's a new lock object, initialize it
2348
lock->grantMask = 0;
2350
SHMQueueInit(&(lock->procLocks));
2351
ProcQueueInit(&(lock->waitProcs));
2352
lock->nRequested = 0;
2354
MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
2355
MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
2356
LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
2360
LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
2361
Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
2362
Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
2363
Assert(lock->nGranted <= lock->nRequested);
2367
* Create the hash key for the proclock table.
2369
proclocktag.myLock = lock;
2370
proclocktag.myProc = proc;
2372
proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
2375
* Find or create a proclock entry with this tag
2377
proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
2378
(void *) &proclocktag,
2384
/* Ooops, not enough shmem for the proclock */
2385
if (lock->nRequested == 0)
2388
* There are no other requestors of this lock, so garbage-collect
2389
* the lock object. We *must* do this to avoid a permanent leak
2390
* of shared memory, because there won't be anything to cause
2391
* anyone to release the lock object later.
2393
Assert(SHMQueueEmpty(&(lock->procLocks)));
2394
if (!hash_search_with_hash_value(LockMethodLockHash,
2395
(void *) &(lock->tag),
2399
elog(PANIC, "lock table corrupted");
2401
LWLockRelease(partitionLock);
2403
(errcode(ERRCODE_OUT_OF_MEMORY),
2404
errmsg("out of shared memory"),
2405
errhint("You might need to increase max_locks_per_transaction.")));
2409
* If new, initialize the new entry
2413
proclock->holdMask = 0;
2414
proclock->releaseMask = 0;
2415
/* Add proclock to appropriate lists */
2416
SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
2417
SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
2418
&proclock->procLink);
2419
PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
2423
PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
2424
Assert((proclock->holdMask & ~lock->grantMask) == 0);
2428
* lock->nRequested and lock->requested[] count the total number of
2429
* requests, whether granted or waiting, so increment those immediately.
2432
lock->requested[lockmode]++;
2433
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
2436
* We shouldn't already hold the desired lock.
2438
if (proclock->holdMask & LOCKBIT_ON(lockmode))
2439
elog(ERROR, "lock %s on object %u/%u/%u is already held",
2440
lockMethodTable->lockModeNames[lockmode],
2441
lock->tag.locktag_field1, lock->tag.locktag_field2,
2442
lock->tag.locktag_field3);
2445
* We ignore any possible conflicts and just grant ourselves the lock.
2447
GrantLock(lock, proclock, lockmode);
2449
LWLockRelease(partitionLock);
2453
* 2PC processing routine for COMMIT PREPARED case.
2455
* Find and release the lock indicated by the 2PC record.
2458
lock_twophase_postcommit(TransactionId xid, uint16 info,
2459
void *recdata, uint32 len)
2461
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
2462
PGPROC *proc = TwoPhaseGetDummyProc(xid);
2465
LOCKMETHODID lockmethodid;
2468
PROCLOCKTAG proclocktag;
2470
uint32 proclock_hashcode;
2471
LWLockId partitionLock;
2472
LockMethod lockMethodTable;
2475
Assert(len == sizeof(TwoPhaseLockRecord));
2476
locktag = &rec->locktag;
2477
lockmode = rec->lockmode;
2478
lockmethodid = locktag->locktag_lockmethodid;
2480
if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2481
elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2482
lockMethodTable = LockMethods[lockmethodid];
2484
hashcode = LockTagHashCode(locktag);
2485
partitionLock = LockHashPartitionLock(hashcode);
2487
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2490
* Re-find the lock object (it had better be there).
2492
lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2498
elog(PANIC, "failed to re-find shared lock object");
2501
* Re-find the proclock object (ditto).
2503
proclocktag.myLock = lock;
2504
proclocktag.myProc = proc;
2506
proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
2508
proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
2509
(void *) &proclocktag,
2514
elog(PANIC, "failed to re-find shared proclock object");
2517
* Double-check that we are actually holding a lock of the type we want to
2520
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2522
PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
2523
LWLockRelease(partitionLock);
2524
elog(WARNING, "you don't own a lock of type %s",
2525
lockMethodTable->lockModeNames[lockmode]);
2530
* Do the releasing. CleanUpLock will waken any now-wakable waiters.
2532
wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2534
CleanUpLock(lock, proclock,
2535
lockMethodTable, hashcode,
2538
LWLockRelease(partitionLock);
2542
* 2PC processing routine for ROLLBACK PREPARED case.
2544
* This is actually just the same as the COMMIT case.
2547
lock_twophase_postabort(TransactionId xid, uint16 info,
2548
void *recdata, uint32 len)
2550
lock_twophase_postcommit(xid, info, recdata, len);