2
* See the file LICENSE for redistribution information.
4
* Copyright (c) 1996-2004
5
* Sleepycat Software. All rights reserved.
7
* $Id: lock_deadlock.c,v 11.86 2004/10/15 16:59:42 bostic Exp $
10
#include "db_config.h"
12
#ifndef NO_SYSTEM_INCLUDES
13
#include <sys/types.h>
19
#include "dbinc/db_shash.h"
20
#include "dbinc/lock.h"
21
#include "dbinc/log.h"
22
#include "dbinc/txn.h"
24
/*
 * Waits-for bitmap helpers: one bit per deadlock-detector id, packed
 * into arrays of 32-bit words.  M/D/S are word arrays; N/B are bit
 * indices (except in CLEAR_MAP/OR_MAP, where N is a word count).
 *
 * NOTE(review): this chunk is garbled -- stray decimal line numbers are
 * interleaved between source lines and the multi-line macro bodies
 * below are truncated.  Verify against the pristine lock_deadlock.c.
 */
#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << ((N) % 32)))
26
/* Presumably zeroes the first N words of map M -- body truncated here. */
#define CLEAR_MAP(M, N) { \
28
for (__i = 0; __i < (N); __i++) \
32
/* Set / clear bit B in map M. */
#define SET_MAP(M, B) ((M)[(B) / 32] |= (1 << ((B) % 32)))
33
#define CLR_MAP(M, B) ((M)[(B) / 32] &= ~(1 << ((B) % 32)))
35
/* Presumably ORs N words of map S into map D -- body truncated here. */
#define OR_MAP(D, S, N) { \
37
for (__i = 0; __i < (N); __i++) \
40
/* Sentinel id meaning "no abort victim selected". */
#define BAD_KILLID 0xffffffff
50
/*
 * NOTE(review): the locker_info struct declaration is missing from this
 * chunk; last_locker_id below appears to be one of its fields (the id
 * of the locker holding the recorded last lock) -- confirm against the
 * full source.
 */
u_int32_t last_locker_id;
54
/* Forward declarations for the static deadlock-detector helpers. */
static int __dd_abort __P((DB_ENV *, locker_info *));
55
static int __dd_build __P((DB_ENV *,
56
u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
57
static int __dd_find __P((DB_ENV *,
58
u_int32_t *, locker_info *, u_int32_t, u_int32_t, u_int32_t ***));
59
static int __dd_isolder __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
60
static int __dd_verify __P((locker_info *, u_int32_t *, u_int32_t *,
61
u_int32_t *, u_int32_t, u_int32_t, u_int32_t));
64
static void __dd_debug
65
__P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t, u_int32_t));
70
* DB_ENV->lock_detect pre/post processing.
72
* PUBLIC: int __lock_detect_pp __P((DB_ENV *, u_int32_t, u_int32_t, int *));
75
/*
 * __lock_detect_pp --
 *	DB_ENV->lock_detect pre/post processing: verifies the lock
 *	subsystem is configured, validates flags and the detection mode
 *	(atype), brackets the real work with replication enter/exit, and
 *	delegates to __lock_detect().
 *
 * NOTE(review): many lines of this function (declarations, the switch
 * header, braces, returns) are missing from this chunk; only comments
 * were added, code bytes are untouched.
 */
__lock_detect_pp(dbenv, flags, atype, abortp)
77
u_int32_t flags, atype;
83
/* The environment must have been opened with DB_INIT_LOCK. */
ENV_REQUIRES_CONFIG(dbenv,
84
dbenv->lk_handle, "DB_ENV->lock_detect", DB_INIT_LOCK);
86
/* Validate arguments. */
87
if ((ret = __db_fchk(dbenv, "DB_ENV->lock_detect", flags, 0)) != 0)
92
/* Accept only the known detection disciplines. */
case DB_LOCK_MAXLOCKS:
93
case DB_LOCK_MAXWRITE:
94
case DB_LOCK_MINLOCKS:
95
case DB_LOCK_MINWRITE:
98
case DB_LOCK_YOUNGEST:
102
"DB_ENV->lock_detect: unknown deadlock detection mode specified");
106
/* Bracket the detector pass with replication enter/exit. */
rep_check = IS_ENV_REPLICATED(dbenv) ? 1 : 0;
108
__env_rep_enter(dbenv);
109
ret = __lock_detect(dbenv, atype, abortp);
111
__env_db_rep_exit(dbenv);
117
* DB_ENV->lock_detect.
119
* PUBLIC: int __lock_detect __P((DB_ENV *, u_int32_t, int *));
122
/*
 * __lock_detect --
 *	Run one deadlock-detector pass: build the waits-for matrix
 *	(__dd_build), find deadlock cycles (__dd_find), pick a victim
 *	per the detection policy (atype), verify it (__dd_verify) and
 *	abort it (__dd_abort).
 *
 * NOTE(review): this chunk is heavily garbled -- original line numbers
 * are interleaved, many lines are missing, and HTML-entity mojibake
 * corrupts tokens ("&region" appears as "(R)ion"-style text,
 * "&copymap" as a copyright-sign + "map").  Code bytes were left
 * untouched; only comments were added.
 */
__lock_detect(dbenv, atype, abortp)
127
DB_LOCKREGION *region;
132
u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
133
u_int32_t i, cid, keeper, killid, limit, nalloc, nlockers;
134
u_int32_t lock_max, txn_max;
138
* If this environment is a replication client, then we must use the
139
* MINWRITE detection discipline.
141
if (__rep_is_client(dbenv))
142
atype = DB_LOCK_MINWRITE;
146
lt = dbenv->lk_handle;
150
/* Check if a detector run is necessary. */
151
LOCKREGION(dbenv, lt);
153
/* Make a pass only if auto-detect would run. */
154
region = lt->reginfo.primary;
156
LOCK_SET_TIME_INVALID(&now);
157
/* Skip the pass when nothing is pending and no timeout expired. */
if (region->need_dd == 0 &&
158
(!LOCK_TIME_ISVALID(&region->next_timeout) ||
159
!__lock_expired(dbenv, &now, &region->next_timeout))) {
160
UNLOCKREGION(dbenv, lt);
163
/* Nothing waiting: only expire timed-out locks, no full pass. */
if (region->need_dd == 0)
164
atype = DB_LOCK_EXPIRE;
166
/* Reset need_dd, so we know we've run the detector. */
169
/* Build the waits-for bitmap. */
170
ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
171
lock_max = region->stat.st_cur_maxid;
172
UNLOCKREGION(dbenv, lt);
175
* We need the cur_maxid from the txn region as well. In order
176
* to avoid tricky synchronization between the lock and txn
177
* regions, we simply unlock the lock region and then lock the
178
* txn region. This introduces a small window during which the
179
* transaction system could then wrap. We're willing to return
180
* the wrong answer for "oldest" or "youngest" in those rare
183
tmgr = dbenv->tx_handle;
185
R_LOCK(dbenv, &tmgr->reginfo);
186
txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
187
R_UNLOCK(dbenv, &tmgr->reginfo);
189
txn_max = TXN_MAXIMUM;
190
if (ret != 0 || atype == DB_LOCK_EXPIRE)
196
if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
197
__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
199
/* Now duplicate the bitmaps so we can verify deadlock participants. */
200
if ((ret = __os_calloc(dbenv, (size_t)nlockers,
201
sizeof(u_int32_t) * nalloc, &copymap)) != 0)
203
memcpy(copymap, bitmap, nlockers * sizeof(u_int32_t) * nalloc);
205
if ((ret = __os_calloc(dbenv, sizeof(u_int32_t), nalloc, &tmpmap)) != 0)
208
/* Find a deadlock. */
210
__dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
215
/* Process each cycle found; deadp is a NULL-terminated array. */
for (; *deadp != NULL; deadp++) {
218
/* Initial victim candidate: the locker owning this matrix row. */
killid = (u_int32_t)(*deadp - bitmap) / nalloc;
222
* There are cases in which our general algorithm will
223
* fail. Returning 1 from verify indicates that the
224
* particular locker is not only involved in a deadlock,
225
* but that killing him will allow others to make forward
226
* progress. Unfortunately, there are cases where we need
227
* to abort someone, but killing them will not necessarily
228
* ensure forward progress (imagine N readers all trying to
229
* acquire a write lock).
230
* killid is only set to lockers that pass the db_verify test.
231
* keeper will hold the best candidate even if it does
232
* not pass db_verify. Once we fill in killid then we do
233
* not need a keeper, but we keep updating it anyway.
236
keeper = idmap[killid].in_abort == 0 ? killid : BAD_KILLID;
237
if (keeper == BAD_KILLID ||
238
__dd_verify(idmap, *deadp,
239
tmpmap, copymap, nlockers, nalloc, keeper) == 0)
242
/* DEFAULT/RANDOM policies accept the first verified victim. */
if (killid != BAD_KILLID &&
243
(atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM))
247
* Start with the id that we know is deadlocked, then examine
248
* all other set bits and see if any are a better candidate
249
* for abortion and they are genuinely part of the deadlock.
250
* The definition of "best":
251
* MAXLOCKS: maximum count
252
* MAXWRITE: maximum write count
253
* MINLOCKS: minimum count
254
* MINWRITE: minimum write count
255
* OLDEST: smallest id
256
* YOUNGEST: largest id
258
for (i = (limit + 1) % nlockers;
260
i = (i + 1) % nlockers) {
261
/* Only consider cycle participants not already aborting. */
if (!ISSET_MAP(*deadp, i) || idmap[i].in_abort)
265
* Determine if we have a verified candidate
266
* in killid, if not then compare with the
267
* non-verified candidate in keeper.
269
if (killid == BAD_KILLID) {
270
if (keeper == BAD_KILLID)
279
if (__dd_isolder(idmap[cid].id,
280
idmap[i].id, lock_max, txn_max))
283
case DB_LOCK_YOUNGEST:
284
if (__dd_isolder(idmap[i].id,
285
idmap[cid].id, lock_max, txn_max))
288
case DB_LOCK_MAXLOCKS:
289
if (idmap[i].count < idmap[cid].count)
292
case DB_LOCK_MAXWRITE:
293
/* NOTE(review): compares .count here too -- in the original,
 * .count already holds nwrites for the MAXWRITE/MINWRITE
 * policies (see __dd_build), so this is intentional. */
if (idmap[i].count < idmap[cid].count)
296
case DB_LOCK_MINLOCKS:
297
case DB_LOCK_MINWRITE:
298
if (idmap[i].count > idmap[cid].count)
301
case DB_LOCK_DEFAULT:
311
use_next: keeper = i;
312
if (__dd_verify(idmap, *deadp,
313
tmpmap, copymap, nlockers, nalloc, i))
317
dokill: if (killid == BAD_KILLID) {
318
if (keeper == BAD_KILLID)
320
* It's conceivable that under XA, the
321
* locker could have gone away.
326
* Removing a single locker will not
327
* break the deadlock, signal to run
330
LOCKREGION(dbenv, lt);
332
UNLOCKREGION(dbenv, lt);
337
/* Kill the locker with lockid idmap[killid]. */
338
if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
340
* It's possible that the lock was already aborted;
341
* this isn't necessarily a problem, so do not treat
344
if (ret == DB_ALREADY_ABORTED)
348
"warning: unable to abort locker %lx",
349
(u_long)idmap[killid].id);
350
} else if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
352
"Aborting locker %lx", (u_long)idmap[killid].id);
354
/* Cleanup: release the scratch maps and the detector arrays. */
__os_free(dbenv, tmpmap);
355
err1: __os_free(dbenv, copymap);
357
err: if (free_me != NULL)
358
__os_free(dbenv, free_me);
359
__os_free(dbenv, bitmap);
360
__os_free(dbenv, idmap);
366
* ========================================================================
370
/* Marker for a locker not yet assigned a deadlock-detector id. */
# define DD_INVALID_ID ((u_int32_t) -1)
373
/*
 * __dd_build --
 *	Build the waits-for matrix: assign each master locker a
 *	deadlock-detector id, walk the objects that have waiters
 *	(dd_objs) setting a bit for every waiter/holder pair, expire
 *	timed-out locks along the way, and record each locker's most
 *	recent lock so __dd_abort() can find it again.
 *
 * NOTE(review): this chunk is heavily garbled -- original line numbers
 * are interleaved, many lines are missing, and HTML-entity mojibake
 * corrupts tokens ("&region" and "&lt" were mangled by extraction).
 * Only comments were added; code bytes are untouched.
 */
__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
375
u_int32_t atype, **bmp, *nlockers, *allocp;
378
struct __db_lock *lp;
379
DB_LOCKER *lip, *lockerp, *child;
381
DB_LOCKREGION *region;
383
locker_info *id_array;
384
db_timeval_t now, min_timeout;
385
u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
387
int expire_only, is_first, ret;
389
lt = dbenv->lk_handle;
390
region = lt->reginfo.primary;
391
LOCK_SET_TIME_INVALID(&now);
392
LOCK_SET_TIME_MAX(&min_timeout);
393
expire_only = atype == DB_LOCK_EXPIRE;
396
* While we always check for expired timeouts, if we are called
397
* with DB_LOCK_EXPIRE, then we are only checking for timeouts
398
* (i.e., not doing deadlock detection at all). If we aren't
399
* doing real deadlock detection, then we can skip a significant,
400
* amount of the processing. In particular we do not build
401
* the conflict array and our caller needs to expect this.
410
* We'll check how many lockers there are, add a few more in for
411
* good measure and then allocate all the structures. Then we'll
412
* verify that we have enough room when we go back in and get the
413
* mutex the second time.
415
retry: count = region->stat.st_nlockers;
422
if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
423
__db_msg(dbenv, "%lu lockers", (u_long)count);
426
/* One row per locker; each row is nentries 32-bit words. */
nentries = (u_int32_t)DB_ALIGN(count, 32) / 32;
429
* Allocate enough space for a count by count bitmap matrix.
432
* We can probably save the malloc's between iterations just
433
* reallocing if necessary because count grew by too much.
435
if ((ret = __os_calloc(dbenv, (size_t)count,
436
sizeof(u_int32_t) * nentries, &bitmap)) != 0)
439
if ((ret = __os_calloc(dbenv,
440
sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
441
__os_free(dbenv, bitmap);
445
if ((ret = __os_calloc(dbenv,
446
(size_t)count, sizeof(locker_info), &id_array)) != 0) {
447
__os_free(dbenv, bitmap);
448
__os_free(dbenv, tmpmap);
453
* Now go back in and actually fill in the matrix.
455
/* Locker population grew while unlocked: free and retry. */
if (region->stat.st_nlockers > count) {
456
__os_free(dbenv, bitmap);
457
__os_free(dbenv, tmpmap);
458
__os_free(dbenv, id_array);
463
* First we go through and assign each locker a deadlock detector id.
465
for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
467
lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
468
/* Only master lockers get their own detector id. */
if (lip->master_locker == INVALID_ROFF) {
470
id_array[lip->dd_id].id = lip->id;
472
/* Seed the policy "count" (locks or writes per atype). */
case DB_LOCK_MINLOCKS:
473
case DB_LOCK_MAXLOCKS:
474
id_array[lip->dd_id].count = lip->nlocks;
476
case DB_LOCK_MINWRITE:
477
case DB_LOCK_MAXWRITE:
478
id_array[lip->dd_id].count = lip->nwrites;
481
if (F_ISSET(lip, DB_LOCKER_INABORT))
482
id_array[lip->dd_id].in_abort = 1;
484
lip->dd_id = DD_INVALID_ID;
489
* We only need consider objects that have waiters, so we use
490
* the list of objects with waiters (dd_objs) instead of traversing
491
* the entire hash table. For each object, we traverse the waiters
492
* list and add an entry in the waitsfor matrix for each waiter/holder
496
for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
497
op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
500
CLEAR_MAP(tmpmap, nentries);
503
* First we go through and create a bit map that
504
* represents all the holders of this object.
506
for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
508
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
509
LOCKER_LOCK(lt, region, lp->holder, ndx);
510
if ((ret = __lock_getlocker(lt,
511
lp->holder, ndx, 0, &lockerp)) != 0)
514
/* Child locker: charge its master's detector id. */
if (lockerp->dd_id == DD_INVALID_ID) {
515
dd = ((DB_LOCKER *)R_ADDR(<->reginfo,
516
lockerp->master_locker))->dd_id;
519
case DB_LOCK_MINLOCKS:
520
case DB_LOCK_MAXLOCKS:
521
id_array[dd].count += lockerp->nlocks;
523
case DB_LOCK_MINWRITE:
524
case DB_LOCK_MAXWRITE:
525
id_array[dd].count += lockerp->nwrites;
528
if (F_ISSET(lockerp, DB_LOCKER_INABORT))
529
id_array[dd].in_abort = 1;
533
id_array[dd].valid = 1;
536
* If the holder has already been aborted, then
537
* we should ignore it for now.
539
if (lp->status == DB_LSTAT_HELD)
544
* Next, for each waiter, we set its row in the matrix
545
* equal to the map of holders we set up above.
549
lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
552
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
553
LOCKER_LOCK(lt, region, lp->holder, ndx);
554
if ((ret = __lock_getlocker(lt,
555
lp->holder, ndx, 0, &lockerp)) != 0)
557
/* Expire the waiter in place if its timeout passed;
 * otherwise track the soonest upcoming timeout. */
if (lp->status == DB_LSTAT_WAITING) {
558
if (__lock_expired(dbenv,
559
&now, &lockerp->lk_expire)) {
560
lp->status = DB_LSTAT_EXPIRED;
561
MUTEX_UNLOCK(dbenv, &lp->mutex);
564
if (LOCK_TIME_GREATER(
565
&min_timeout, &lockerp->lk_expire))
566
min_timeout = lockerp->lk_expire;
573
if (lockerp->dd_id == DD_INVALID_ID) {
574
dd = ((DB_LOCKER *)R_ADDR(<->reginfo,
575
lockerp->master_locker))->dd_id;
578
case DB_LOCK_MINLOCKS:
579
case DB_LOCK_MAXLOCKS:
580
id_array[dd].count += lockerp->nlocks;
582
case DB_LOCK_MINWRITE:
583
case DB_LOCK_MAXWRITE:
584
id_array[dd].count += lockerp->nwrites;
589
id_array[dd].valid = 1;
592
* If the transaction is pending abortion, then
593
* ignore it on this iteration.
595
if (lp->status != DB_LSTAT_WAITING)
598
/* Waiter's row = OR of all holders of this object. */
entryp = bitmap + (nentries * dd);
599
OR_MAP(entryp, tmpmap, nentries);
601
* If this is the first waiter on the queue,
602
* then we remove the waitsfor relationship
603
* with oneself. However, if it's anywhere
604
* else on the queue, then we have to keep
605
* it and we have an automatic deadlock.
608
if (ISSET_MAP(entryp, dd))
609
id_array[dd].self_wait = 1;
615
/* Record the earliest pending lock timeout for auto-detect. */
if (LOCK_TIME_ISVALID(&region->next_timeout)) {
616
if (LOCK_TIME_ISMAX(&min_timeout))
617
LOCK_SET_TIME_INVALID(&region->next_timeout);
619
region->next_timeout = min_timeout;
624
/* Now for each locker; record its last lock. */
625
for (id = 0; id < count; id++) {
626
if (!id_array[id].valid)
628
LOCKER_LOCK(lt, region, id_array[id].id, ndx);
629
if ((ret = __lock_getlocker(lt,
630
id_array[id].id, ndx, 0, &lockerp)) != 0) {
632
"No locks for locker %lu", (u_long)id_array[id].id);
637
* If this is a master transaction, try to
638
* find one of its children's locks first,
639
* as they are probably more recent.
641
child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
644
lp = SH_LIST_FIRST(&child->heldby, __db_lock);
646
lp->status == DB_LSTAT_WAITING) {
647
id_array[id].last_locker_id = child->id;
650
child = SH_LIST_NEXT(
651
child, child_link, __db_locker);
652
} while (child != NULL);
654
lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
656
id_array[id].last_locker_id = lockerp->id;
657
/* Remember where the lock lives so __dd_abort can re-check it. */
get_lock: id_array[id].last_lock = R_OFFSET(<->reginfo, lp);
658
id_array[id].last_obj = lp->obj;
659
lo = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
660
pptr = SH_DBT_PTR(&lo->lockobj);
661
/* Stash the page number (when the object looks like one)
 * purely for debug output. */
if (lo->lockobj.size >= sizeof(db_pgno_t))
662
memcpy(&id_array[id].pgno,
663
pptr, sizeof(db_pgno_t));
665
id_array[id].pgno = 0;
670
* Pass complete, reset the deadlock detector bit.
675
* Now we can release everything except the bitmap matrix that we
682
__os_free(dbenv, tmpmap);
687
/*
 * __dd_find --
 *	Take the waits-for matrix to transitive closure (OR each
 *	waited-on locker's row into the waiter's row); a locker whose
 *	own bit ends up set in its row is on a cycle.  Collects the
 *	deadlocked rows into a NULL-terminated array returned via deadp.
 *
 * NOTE(review): lines (braces, continue/return statements, the realloc
 * tail) are missing from this chunk; code bytes are untouched.
 */
__dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
689
u_int32_t *bmp, nlockers, nalloc;
693
u_int32_t i, j, k, *mymap, *tmpmap, **retp;
694
u_int ndead, ndeadalloc;
697
#undef INITIAL_DEAD_ALLOC
698
#define INITIAL_DEAD_ALLOC 8
700
/* Initial guess at the number of deadlocks; grown on demand below. */
ndeadalloc = INITIAL_DEAD_ALLOC;
702
if ((ret = __os_malloc(dbenv,
703
ndeadalloc * sizeof(u_int32_t *), &retp)) != 0)
707
* For each locker, OR in the bits from the lockers on which that
710
for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) {
713
for (j = 0; j < nlockers; j++) {
714
if (!ISSET_MAP(mymap, j))
717
/* Find the map for this bit. */
718
tmpmap = bmp + (nalloc * j);
719
OR_MAP(mymap, tmpmap, nalloc);
720
/* Own bit set after closure => locker i is deadlocked. */
if (!ISSET_MAP(mymap, i))
723
/* Make sure we leave room for NULL. */
724
if (ndead + 2 >= ndeadalloc) {
727
* If the alloc fails, then simply return the
728
* deadlocks that we already have.
730
if (__os_realloc(dbenv,
731
ndeadalloc * sizeof(u_int32_t),
738
retp[ndead++] = mymap;
740
/* Mark all participants in this deadlock invalid. */
741
for (k = 0; k < nlockers; k++)
742
if (ISSET_MAP(mymap, k))
753
/*
 * __dd_abort --
 *	Abort the victim described by info: re-find its pending lock,
 *	mark it DB_LSTAT_ABORTED, remove it from the object's waiter
 *	list, promote any now-grantable waiters and wake the victim.
 *	Returns DB_ALREADY_ABORTED when the locker or its lock vanished
 *	or changed since the detection pass.
 *
 * NOTE(review): garbled chunk -- "&lt" and "&region" were mangled by
 * extraction and several lines are missing; code bytes are untouched.
 */
__dd_abort(dbenv, info)
757
struct __db_lock *lockp;
760
DB_LOCKREGION *region;
765
lt = dbenv->lk_handle;
766
region = lt->reginfo.primary;
768
LOCKREGION(dbenv, lt);
771
* Get the locker. If its gone or was aborted while
772
* we were detecting return that.
774
LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
775
if ((ret = __lock_getlocker(lt,
776
info->last_locker_id, ndx, 0, &lockerp)) != 0 ||
777
lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
779
ret = DB_ALREADY_ABORTED;
784
* Find the locker's last lock.
785
* It is possible for this lock to have been freed,
786
* either though a timeout or another detector run.
788
if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
789
ret = DB_ALREADY_ABORTED;
792
/* Verify the lock is still the one recorded during detection. */
if (R_OFFSET(<->reginfo, lockp) != info->last_lock ||
793
lockp->holder != lockerp->id ||
794
lockp->obj != info->last_obj || lockp->status != DB_LSTAT_WAITING) {
795
ret = DB_ALREADY_ABORTED;
799
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
801
/* Abort lock, take it off list, and wake up this lock. */
802
SHOBJECT_LOCK(lt, region, sh_obj, ndx);
803
lockp->status = DB_LSTAT_ABORTED;
804
SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
807
* Either the waiters list is now empty, in which case we remove
808
* it from dd_objs, or it is not empty, in which case we need to
811
if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
812
SH_TAILQ_REMOVE(&region->dd_objs,
813
sh_obj, dd_links, __db_lockobj);
815
ret = __lock_promote(lt, sh_obj, 0);
816
/* Wake the victim so it can observe DB_LSTAT_ABORTED. */
MUTEX_UNLOCK(dbenv, &lockp->mutex);
818
region->stat.st_ndeadlocks++;
819
UNLOCKREGION(dbenv, lt);
823
out: UNLOCKREGION(dbenv, lt);
829
/*
 * __dd_debug --
 *	Dump the waits-for matrix through the message channel (enabled
 *	by DB_VERB_WAITSFOR): one line per waiter listing the lockers it
 *	waits on and its recorded last-lock offset.
 *
 * NOTE(review): declarations and braces are missing from this chunk;
 * code bytes are untouched.
 */
__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc)
832
u_int32_t *bitmap, nlockers, nalloc;
835
u_int32_t i, j, *mymap;
839
__db_msg(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
840
for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) {
844
__db_msgadd(dbenv, &mb, /* Waiter. */
845
"%lx/%lu:\t", (u_long)idmap[i].id, (u_long)idmap[i].pgno);
846
for (j = 0; j < nlockers; j++)
847
if (ISSET_MAP(mymap, j))
849
&mb, " %lx", (u_long)idmap[j].id);
850
__db_msgadd(dbenv, &mb, " %lu", (u_long)idmap[i].last_lock);
851
DB_MSGBUF_FLUSH(dbenv, &mb);
857
* Given a bitmap that contains a deadlock, verify that the bit
858
* specified in the which parameter indicates a transaction that
859
* is actually deadlocked. Return 1 if really deadlocked, 0 otherwise.
860
* deadmap -- the array that identified the deadlock.
861
* tmpmap -- a copy of the initial bitmaps from the dd_build phase.
862
* origmap -- a temporary bit map into which we can OR things.
863
* nlockers -- the number of actual lockers under consideration.
864
* nalloc -- the number of words allocated for the bitmap.
865
* which -- the locker in question.
868
/*
 * __dd_verify --
 *	Check whether locker "which" genuinely participates in the
 *	deadlock described by deadmap: OR together the original rows of
 *	every other participant; if some participant's bit then fails
 *	to appear, the cycle required "which", so it really is
 *	deadlocked (returns 1; 0 otherwise -- per the header comment
 *	above this function).
 *
 * NOTE(review): lines (declarations, returns, braces) are missing from
 * this chunk; code bytes are untouched.
 */
__dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which)
870
u_int32_t *deadmap, *tmpmap, *origmap;
871
u_int32_t nlockers, nalloc, which;
877
memset(tmpmap, 0, sizeof(u_int32_t) * nalloc);
880
* In order for "which" to be actively involved in
881
* the deadlock, removing him from the evaluation
882
* must remove the deadlock. So, we OR together everyone
883
* except which; if all the participants still have their
884
* bits set, then the deadlock persists and which does
885
* not participate. If the deadlock does not persist
886
* then "which" does participate.
889
for (j = 0; j < nlockers; j++) {
890
if (!ISSET_MAP(deadmap, j) || j == which)
893
/* Find the map for this bit. */
894
tmap = origmap + (nalloc * j);
897
* We special case the first waiter who is also a holder, so
898
* we don't automatically call that a deadlock. However, if
899
* it really is a deadlock, we need the bit set now so that
900
* we treat the first waiter like other waiters.
902
if (idmap[j].self_wait)
904
OR_MAP(tmpmap, tmap, nalloc);
912
* Now check the resulting map and see whether
913
* all participants still have their bit set.
915
for (j = 0; j < nlockers; j++) {
916
if (!ISSET_MAP(deadmap, j) || j == which)
918
if (!ISSET_MAP(tmpmap, j))
927
* Figure out the relative age of two lockers. We make all lockers
928
* older than all transactions, because that's how it's worked
929
* historically (because lockers are lower ids).
932
/*
 * __dd_isolder --
 *	Decide whether id "a" is older than id "b".  Lock ids
 *	(<= DB_LOCK_MAXID) are treated as older than all transaction
 *	ids, and wraparound of the id space around lock_max/txn_max is
 *	accounted for (per the header comment above this function).
 *
 * NOTE(review): the function's tail -- including all of its return
 * statements and the selection of "max" -- falls outside this chunk;
 * code bytes are untouched.
 */
__dd_isolder(a, b, lock_max, txn_max)
934
u_int32_t lock_max, txn_max;
938
/* Check for comparing lock-id and txnid. */
939
if (a <= DB_LOCK_MAXID && b > DB_LOCK_MAXID)
941
if (b <= DB_LOCK_MAXID && a > DB_LOCK_MAXID)
944
/* In the same space; figure out which one. */
946
/* "max" is presumably set to lock_max or txn_max here -- the
 * assignment is missing from this chunk. */
if (a <= DB_LOCK_MAXID)
950
* We can't get a 100% correct ordering, because we don't know
951
* where the current interval started and if there were older
952
* lockers outside the interval. We do the best we can.
956
* Check for a wrapped case with ids above max.
958
if (a > max && b < max)
960
if (b > max && a < max)