2
* See the file LICENSE for redistribution information.
4
* Copyright (c) 1996-2002
5
* Sleepycat Software. All rights reserved.
14
* Some of the Berkeley DB ports require single-threading at various
15
* places in the code. In those cases, these #defines will be set.
17
#define DB_BEGIN_SINGLE_THREAD
18
#define DB_END_SINGLE_THREAD
20
/*********************************************************************
21
* POSIX.1 pthreads interface.
22
*********************************************************************/
23
#ifdef HAVE_MUTEX_PTHREADS
26
#define MUTEX_FIELDS \
27
pthread_mutex_t mutex; /* Mutex. */ \
28
pthread_cond_t cond; /* Condition variable. */
31
/*********************************************************************
32
* Solaris lwp threads interface.
35
* We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
36
* which are available), for two reasons. First, the Solaris C library
37
* includes versions of both the UI and POSIX thread mutex interfaces, but
38
* they are broken in that they don't support inter-process locking, and
39
* there's no way to detect it, e.g., calls to configure the mutexes for
40
* inter-process locking succeed without error. So, we use LWP mutexes so
41
* that we don't fail in fairly undetectable ways because the application
42
* wasn't linked with the appropriate threads library. Second, there were
43
* bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
44
* before loading the libthread/libpthread threads libraries (e.g., by using
45
* dlopen to load the DB library), the pwrite64 interface would be translated
46
* into a call to pwrite and DB would drop core.
47
*********************************************************************/
48
#ifdef HAVE_MUTEX_SOLARIS_LWP
51
* Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
52
* Solaris manual page as the correct include to use, it causes the Solaris
53
* compiler on SunOS 2.6 to fail.
57
#define MUTEX_FIELDS \
58
lwp_mutex_t mutex; /* Mutex. */ \
59
lwp_cond_t cond; /* Condition variable. */
62
/*********************************************************************
63
* Solaris/Unixware threads interface.
64
*********************************************************************/
65
#ifdef HAVE_MUTEX_UI_THREADS
69
#define MUTEX_FIELDS \
70
mutex_t mutex; /* Mutex. */ \
71
cond_t cond; /* Condition variable. */
74
/*********************************************************************
75
* AIX C library functions.
76
*********************************************************************/
77
#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
78
#include <sys/atomic_op.h>
80
#define MUTEX_ALIGN sizeof(int)
82
#ifdef LOAD_ACTUAL_MUTEX_CODE
83
/* AIX check-locks need no explicit setup; MUTEX_INIT always reports success (0). */
#define MUTEX_INIT(x) 0
84
/*
 * _check_lock presumably returns 0 when the lock is acquired -- the negation
 * here matches the file-wide convention that MUTEX_SET is nonzero on success
 * (cf. the semTake(...) == OK and !msem_lock(...) variants). TODO confirm.
 */
#define MUTEX_SET(x) (!_check_lock(x, 0, 1))
85
#define MUTEX_UNSET(x) _clear_lock(x, 0)
89
/*********************************************************************
90
* General C library functions (msemaphore).
93
* Check for HPPA as a special case, because it requires unusual alignment,
94
* and doesn't support semaphores in malloc(3) or shmget(2) memory.
97
* Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single
98
* process makes two msem_lock() calls in a row, the second one returns an
99
* error. We depend on the fact that we can lock against ourselves in the
100
* locking subsystem, where we set up a mutex so that we can block ourselves.
101
* Tested on OSF1 v4.0.
102
*********************************************************************/
103
#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
104
#define MUTEX_NO_MALLOC_LOCKS
105
#define MUTEX_NO_SHMGET_LOCKS
107
#define MUTEX_ALIGN 16
110
#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
111
#include <sys/mman.h>
112
typedef msemaphore tsl_t;
115
#define MUTEX_ALIGN sizeof(int)
118
#ifdef LOAD_ACTUAL_MUTEX_CODE
119
#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
120
#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
121
#define MUTEX_UNSET(x) msem_unlock(x, 0)
125
/*********************************************************************
126
* Plan 9 library functions.
127
*********************************************************************/
128
#ifdef HAVE_MUTEX_PLAN9
131
#define MUTEX_ALIGN sizeof(int)
133
#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
134
#define MUTEX_SET(x) canlock(x)
135
#define MUTEX_UNSET(x) unlock(x)
138
/*********************************************************************
139
* Reliant UNIX C library functions.
140
*********************************************************************/
141
#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
143
typedef spinlock_t tsl_t;
145
#ifdef LOAD_ACTUAL_MUTEX_CODE
146
#define MUTEX_INIT(x) (initspin(x, 1), 0)
147
#define MUTEX_SET(x) (cspinlock(x) == 0)
148
#define MUTEX_UNSET(x) spinunlock(x)
152
/*********************************************************************
153
* General C library functions (POSIX 1003.1 sema_XXX).
156
* Never selected by autoconfig in this release (semaphore calls are known
157
* to not work in Solaris 5.5).
158
*********************************************************************/
159
#ifdef HAVE_MUTEX_SEMA_INIT
161
typedef sema_t tsl_t;
162
#define MUTEX_ALIGN sizeof(int)
164
#ifdef LOAD_ACTUAL_MUTEX_CODE
165
#define MUTEX_DESTROY(x) sema_destroy(x)
166
#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
167
#define MUTEX_SET(x) (sema_wait(x) == 0)
168
#define MUTEX_UNSET(x) sema_post(x)
172
/*********************************************************************
173
* SGI C library functions.
174
*********************************************************************/
175
#ifdef HAVE_MUTEX_SGI_INIT_LOCK
176
#include <abi_mutex.h>
177
typedef abilock_t tsl_t;
178
#define MUTEX_ALIGN sizeof(int)
180
#ifdef LOAD_ACTUAL_MUTEX_CODE
181
#define MUTEX_INIT(x) (init_lock(x) != 0)
182
#define MUTEX_SET(x) (!acquire_lock(x))
183
#define MUTEX_UNSET(x) release_lock(x)
187
/*********************************************************************
188
* Solaris C library functions.
191
* These are undocumented functions, but they're the only ones that work
192
* correctly as far as we know.
193
*********************************************************************/
194
#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
195
#include <sys/machlock.h>
196
typedef lock_t tsl_t;
197
#define MUTEX_ALIGN sizeof(int)
199
#ifdef LOAD_ACTUAL_MUTEX_CODE
200
#define MUTEX_INIT(x) 0
201
#define MUTEX_SET(x) _lock_try(x)
202
#define MUTEX_UNSET(x) _lock_clear(x)
206
/*********************************************************************
208
*********************************************************************/
209
#ifdef HAVE_MUTEX_VMS
210
/* NOTE(review): the trailing ';' after this #include is a stray token -- confirm against upstream. */
#include <sys/mman.h>;
211
#include <builtins.h>
212
typedef unsigned char tsl_t;
213
#define MUTEX_ALIGN sizeof(unsigned int)
215
#ifdef LOAD_ACTUAL_MUTEX_CODE
217
/*
 * NOTE(review): two MUTEX_SET definitions appear back to back; upstream
 * selects between the __TESTBITSSI and _BBSSI builtins with an architecture
 * #ifdef (Alpha vs. VAX) that is missing from this copy -- confirm.
 */
#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
219
#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
221
/* Plain byte store releases the lock; init is just "start unlocked". */
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
222
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
226
/*********************************************************************
228
* Use basic binary semaphores in VxWorks, as we currently do not need
229
* any special features. We do need the ability to single-thread the
230
* entire system, however, because VxWorks doesn't support the open(2)
231
* flag O_EXCL, the mechanism we normally use to single thread access
232
* when we're first looking for a DB environment.
233
*********************************************************************/
234
#ifdef HAVE_MUTEX_VXWORKS
236
/* Under VxWorks the "test-and-set lock" is really a binary-semaphore handle. */
typedef SEM_ID tsl_t;
237
#define MUTEX_ALIGN sizeof(unsigned int)
239
#ifdef LOAD_ACTUAL_MUTEX_CODE
240
/* Block indefinitely (WAIT_FOREVER) until the semaphore is taken; true iff semTake returns OK. */
#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
241
#define MUTEX_UNSET(tsl) (semGive((*tsl)))
242
/* Create a full (unlocked) FIFO binary semaphore; the expression is true (failure) on NULL. */
#define MUTEX_INIT(tsl) \
243
((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
244
#define MUTEX_DESTROY(tsl) semDelete(*tsl)
248
* Use the taskLock() mutex to eliminate a race where two tasks are
249
* trying to initialize the global lock at the same time.
251
#undef DB_BEGIN_SINGLE_THREAD
252
#define DB_BEGIN_SINGLE_THREAD \
254
if (DB_GLOBAL(db_global_init)) \
255
(void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER); \
258
if (DB_GLOBAL(db_global_init)) { \
260
(void)semTake(DB_GLOBAL(db_global_lock), \
264
DB_GLOBAL(db_global_lock) = \
265
semBCreate(SEM_Q_FIFO, SEM_EMPTY); \
266
if (DB_GLOBAL(db_global_lock) != NULL) \
267
DB_GLOBAL(db_global_init) = 1; \
270
} while (DB_GLOBAL(db_global_init) == 0)
271
#undef DB_END_SINGLE_THREAD
272
#define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock))
275
/*********************************************************************
278
* Win16 spinlocks are simple because we cannot possibly be preempted.
281
* We should simplify this by always returning a no-need-to-lock lock
282
* when we initialize the mutex.
283
*********************************************************************/
284
#ifdef HAVE_MUTEX_WIN16
285
typedef unsigned int tsl_t;
286
#define MUTEX_ALIGN sizeof(unsigned int)
288
#ifdef LOAD_ACTUAL_MUTEX_CODE
289
#define MUTEX_INIT(x) 0
290
#define MUTEX_SET(tsl) (*(tsl) = 1)
291
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
295
/*********************************************************************
297
*********************************************************************/
298
#ifdef HAVE_MUTEX_WIN32
299
#define MUTEX_FIELDS \
302
u_int32_t id; /* ID used for creating events */ \
304
#if defined(LOAD_ACTUAL_MUTEX_CODE)
305
#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
306
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
307
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
311
/*********************************************************************
313
*********************************************************************/
314
#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
315
typedef unsigned char tsl_t;
317
#ifdef LOAD_ACTUAL_MUTEX_CODE
319
* For gcc/68K, 0 is clear, 1 is set.
321
#define MUTEX_SET(tsl) ({ \
322
register tsl_t *__l = (tsl); \
324
asm volatile("tas %1; \n \
326
: "=dm" (__r), "=m" (*__l) \
332
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
333
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
337
/*********************************************************************
338
* ALPHA/gcc assembly.
339
*********************************************************************/
340
#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
341
typedef u_int32_t tsl_t;
342
#define MUTEX_ALIGN 4
344
#ifdef LOAD_ACTUAL_MUTEX_CODE
346
* For gcc/alpha. Should return 0 if could not acquire the lock, 1 if
347
* lock was acquired properly.
351
MUTEX_SET(tsl_t *tsl) {
352
register tsl_t *__l = tsl;
364
: "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
369
* Unset mutex. Judging by Alpha Architecture Handbook, the mb instruction
370
* might be necessary before unlocking
373
MUTEX_UNSET(tsl_t *tsl) {
374
asm volatile(" mb\n");
380
#include <alpha/builtins.h>
381
#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
382
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
385
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
389
/*********************************************************************
391
*********************************************************************/
392
#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
393
typedef unsigned char tsl_t;
395
#ifdef LOAD_ACTUAL_MUTEX_CODE
397
* For arm/gcc, 0 is clear, 1 is set.
399
#define MUTEX_SET(tsl) ({ \
401
asm volatile("swpb %0, %1, [%2]" \
403
: "0" (1), "r" (tsl) \
409
#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
410
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
414
/*********************************************************************
416
*********************************************************************/
417
#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
418
typedef u_int32_t tsl_t;
419
#define MUTEX_ALIGN 16
421
#ifdef LOAD_ACTUAL_MUTEX_CODE
423
* The PA-RISC has a "load and clear" instead of a "test and set" instruction.
424
* The 32-bit word used by that instruction must be 16-byte aligned. We could
425
* use the "aligned" attribute in GCC but that doesn't work for stack variables.
427
#define MUTEX_SET(tsl) ({ \
428
register tsl_t *__l = (tsl); \
430
asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
434
#define MUTEX_UNSET(tsl) (*(tsl) = -1)
435
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
439
/*********************************************************************
441
*********************************************************************/
442
#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
443
typedef unsigned char tsl_t;
445
#ifdef LOAD_ACTUAL_MUTEX_CODE
447
* For gcc/ia64, 0 is clear, 1 is set.
449
#define MUTEX_SET(tsl) ({ \
450
register tsl_t *__l = (tsl); \
452
asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1));\
457
* Store through a "volatile" pointer so we get a store with "release"
460
#define MUTEX_UNSET(tsl) (*(volatile unsigned char *)(tsl) = 0)
461
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
465
/*********************************************************************
466
* PowerPC/gcc assembly.
467
*********************************************************************/
468
#if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) || \
469
(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
470
typedef u_int32_t tsl_t;
472
#ifdef LOAD_ACTUAL_MUTEX_CODE
474
* The PowerPC does a sort of pseudo-atomic locking. You set up a
475
* 'reservation' on a chunk of memory containing a mutex by loading the
476
* mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary)
477
* value, you then try storing into it with STWCX. If no other process or
478
* thread broke your 'reservation' by modifying the memory containing the
479
* mutex, then the STWCX succeeds; otherwise it fails and you try to get
480
* a reservation again.
482
* While mutexes are explicitly 4 bytes, a 'reservation' applies to an
483
* entire cache line, normally 32 bytes, aligned naturally. If the mutex
484
* lives near data that gets changed a lot, there's a chance that you'll
485
* see more broken reservations than you might otherwise. The only
486
* situation in which this might be a problem is if one processor is
487
* beating on a variable in the same cache block as the mutex while another
488
* processor tries to acquire the mutex. That's bad news regardless
489
* because of the way it bashes caches, but if you can't guarantee that a
490
* mutex will reside in a relatively quiescent cache line, you might
491
* consider padding the mutex to force it to live in a cache line by
492
* itself. No, you aren't guaranteed that cache lines are 32 bytes. Some
493
* embedded processors use 16-byte cache lines, while some 64-bit
494
* processors use 128-byte cache lines. But assuming a 32-byte cache line
495
* won't get you into trouble for now.
497
* If mutex locking is a bottleneck, then you can speed it up by adding a
498
* regular LWZ load before the LWARX load, so that you can test for the
499
* common case of a locked mutex without wasting cycles making a reservation.
501
* 'set' mutexes have the value 1, like on Intel; the returned value from
502
* MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
504
* Mutexes on Mac OS X work the same way as the standard PowerPC version, but
505
* the assembler syntax is subtly different -- the standard PowerPC version
506
* assembles but doesn't work correctly. This version makes (unnecessary?)
507
* use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
508
* ___db_mutex_set label is used as a function name.
510
#ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
511
extern int __db_mutex_set __P((volatile tsl_t *));
513
__db_mutex_tas_dummy()
515
__asm__ __volatile__(" \n\
516
.globl ___db_mutex_set \n\
531
#define MUTEX_SET(tsl) __db_mutex_set(tsl)
533
#ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
534
#define MUTEX_SET(tsl) ({ \
537
tsl_t *__l = (tsl); \
548
: "r" (__l), "r" (__one)); \
552
#define MUTEX_UNSET(tsl) ({ \
553
asm volatile("lwsync":::"memory"); \
556
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
560
/*********************************************************************
561
* S/390 32-bit assembly.
562
*********************************************************************/
563
#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
566
#ifdef LOAD_ACTUAL_MUTEX_CODE
568
* For gcc/S390, 0 is clear, 1 is set.
571
MUTEX_SET(tsl_t *tsl) { \
572
register tsl_t *__l = (tsl); \
578
"0: cs %0,0,0(1)\n" \
580
: "=&d" (__r), "+m" (*__l) \
581
: : "0", "1", "cc"); \
585
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
586
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
590
/*********************************************************************
592
*********************************************************************/
593
#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
594
typedef unsigned char tsl_t;
596
#ifdef LOAD_ACTUAL_MUTEX_CODE
598
* UnixWare has threads in libthread, but OpenServer doesn't (yet).
600
* For cc/x86, 0 is clear, 1 is set.
603
#if defined(__USLC__)
616
#define MUTEX_SET(tsl) _tsl_set(tsl)
617
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
618
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
622
/*********************************************************************
623
* Sparc/gcc assembly.
624
*********************************************************************/
625
#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
626
typedef unsigned char tsl_t;
628
#ifdef LOAD_ACTUAL_MUTEX_CODE
631
* The ldstub instruction takes the location specified by its first argument
632
* (a register containing a memory address) and loads its contents into its
633
* second argument (a register) and atomically sets the contents the location
634
* specified by its first argument to a byte of 1s. (The value in the second
635
* argument is never read, but only overwritten.)
637
* The stbar is needed for v8, and is implemented as membar #sync on v9,
638
* so is functional there as well. For v7, stbar may generate an illegal
639
* instruction and we have no way to tell what we're running on. Some
640
* operating systems notice and skip this instruction in the fault handler.
642
* For gcc/sparc, 0 is clear, 1 is set.
644
#define MUTEX_SET(tsl) ({ \
645
register tsl_t *__l = (tsl); \
646
register tsl_t __r; \
648
("ldstub [%1],%0; stbar" \
649
: "=r"( __r) : "r" (__l)); \
653
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
654
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
658
/*********************************************************************
660
*********************************************************************/
661
#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
664
#define MUTEX_ALIGN sizeof(int)
665
#ifdef LOAD_ACTUAL_MUTEX_CODE
666
#define MUTEX_INIT(x) 0
667
#define MUTEX_SET(x) (!uts_lock(x, 1))
668
#define MUTEX_UNSET(x) (*(x) = 0)
672
/*********************************************************************
674
*********************************************************************/
675
#ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
676
typedef unsigned int tsl_t;
677
#define MUTEX_ALIGN sizeof(unsigned int)
679
#ifdef LOAD_ACTUAL_MUTEX_CODE
683
#define MUTEX_SET(tsl) ({ \
685
register tsl_t *__l = (tsl); \
686
__asm__ __volatile__( \
687
".set\tnoreorder\t\t# test_and_set_bit\n" \
688
"1:\tll\t%0, %1\n\t" \
689
"ori\t%2, %0, 1\n\t" \
692
" andi\t%2, %0, 1\n\t" \
695
: "=&r" (tmp), "=m" (*__l), "=&r" (res) \
701
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
702
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
706
/*********************************************************************
708
*********************************************************************/
709
#ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
710
typedef unsigned char tsl_t;
712
#ifdef LOAD_ACTUAL_MUTEX_CODE
714
* For gcc/x86, 0 is clear, 1 is set.
716
#define MUTEX_SET(tsl) ({ \
717
register tsl_t *__l = (tsl); \
719
asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"\
720
: "=&a" (__r), "=m" (*__l) \
726
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
727
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
731
/*********************************************************************
732
* x86_64/gcc assembly.
733
*********************************************************************/
734
#ifdef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
735
typedef unsigned char tsl_t;
737
#ifdef LOAD_ACTUAL_MUTEX_CODE
739
* For gcc/x86-64, 0 is clear, 1 is set.
741
#define MUTEX_SET(tsl) ({ \
742
register tsl_t *__l = (tsl); \
744
asm volatile("movq $1,%%rax; lock; xchgb %1,%%al; xorq $1,%%rax"\
745
: "=&a" (__r), "=m" (*__l) \
751
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
752
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
756
/*********************************************************************
757
* alphalinux/gcc assembly.
758
*********************************************************************/
759
#ifdef HAVE_MUTEX_ALPHA_LINUX_ASSEMBLY
760
typedef unsigned long int tsl_t;
762
#define MUTEX_ALIGN 8
765
/*********************************************************************
766
* sparc32linux/gcc assembly.
767
*********************************************************************/
768
#ifdef HAVE_MUTEX_SPARC32_LINUX_ASSEMBLY
769
typedef unsigned char tsl_t;
772
/*********************************************************************
773
* sparc64linux/gcc assembly.
774
*********************************************************************/
775
#ifdef HAVE_MUTEX_SPARC64_LINUX_ASSEMBLY
776
typedef unsigned char tsl_t;
779
/*********************************************************************
780
* s390linux/gcc assembly.
781
*********************************************************************/
782
#ifdef HAVE_MUTEX_S390_LINUX_ASSEMBLY
783
typedef volatile int tsl_t;
787
* Mutex alignment defaults to one byte.
790
* Various systems require different alignments for mutexes (the worst we've
791
* seen so far is 16-bytes on some HP architectures). Malloc(3) is assumed
792
* to return reasonable alignment, all other mutex users must ensure proper
796
#define MUTEX_ALIGN 1
800
* Mutex destruction defaults to a no-op.
802
#ifdef LOAD_ACTUAL_MUTEX_CODE
803
#ifndef MUTEX_DESTROY
804
#define MUTEX_DESTROY(x)
810
* These defines are separated into the u_int8_t flags stored in the
811
* mutex below, and the 32 bit flags passed to __db_mutex_setup.
812
* But they must co-exist and not overlap. Flags to __db_mutex_setup are:
814
* MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
815
* The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
816
* you use this flag. If this flag is not set, the 'ptr' arg is
818
* MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
819
* Otherwise the mutex will be recorded by default. If you set
820
* this you need to understand why you don't need it recorded. The
821
* *only* ones not recorded are those that are part of region structures
822
* that only get destroyed when the regions are destroyed.
823
* MUTEX_NO_RLOCK - Explicitly do not lock the given region otherwise
824
* the region will be locked by default.
825
* MUTEX_SELF_BLOCK - Set if self blocking mutex.
826
* MUTEX_THREAD - Set if mutex is a thread-only mutex.
828
#define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
829
#define MUTEX_INITED 0x002 /* Mutex is successfully initialized */
830
#define MUTEX_MPOOL 0x004 /* Allocated from mpool. */
831
#define MUTEX_SELF_BLOCK 0x008 /* Must block self. */
832
/* Flags only, may be larger than 0xff. */
833
#define MUTEX_ALLOC 0x00000100 /* Allocate and init a mutex */
834
#define MUTEX_NO_RECORD 0x00000200 /* Do not record lock */
835
#define MUTEX_NO_RLOCK 0x00000400 /* Do not acquire region lock */
836
#define MUTEX_THREAD 0x00000800 /* Thread-only mutex. */
840
#ifdef HAVE_MUTEX_THREADS
844
tsl_t tas; /* Test and set. */
846
u_int32_t spins; /* Spins before block. */
847
u_int32_t locked; /* !0 if locked. */
849
u_int32_t off; /* Byte offset to lock. */
850
u_int32_t pid; /* Lock holder: 0 or process pid. */
852
u_int32_t mutex_set_wait; /* Granted after wait. */
853
u_int32_t mutex_set_nowait; /* Granted without waiting. */
854
u_int32_t mutex_set_spin; /* Granted without spinning. */
855
u_int32_t mutex_set_spins; /* Total number of spins. */
856
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
857
roff_t reg_off; /* Shared lock info offset. */
860
u_int8_t flags; /* MUTEX_XXX */
863
/* Redirect calls to the correct functions. */
864
#ifdef HAVE_MUTEX_THREADS
865
#if defined(HAVE_MUTEX_PTHREADS) || \
866
defined(HAVE_MUTEX_SOLARIS_LWP) || \
867
defined(HAVE_MUTEX_UI_THREADS)
868
#define __db_mutex_init_int(a, b, c, d) __db_pthread_mutex_init(a, b, d)
869
#define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b)
870
#define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
871
#define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
872
#elif defined(HAVE_MUTEX_WIN32)
873
#define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d)
874
#define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b)
875
#define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b)
876
#define __db_mutex_destroy(a) __db_win32_mutex_destroy(a)
878
#define __db_mutex_init_int(a, b, c, d) __db_tas_mutex_init(a, b, d)
879
#define __db_mutex_lock(a, b) __db_tas_mutex_lock(a, b)
880
#define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
881
#define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
884
#define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
885
#define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b)
886
#define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
887
#define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
890
/* Redirect system resource calls to correct functions */
891
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
892
#define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
893
#define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
894
#define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
895
#define __db_mutex_init(a, b, c, d, e, f) \
896
__db_shreg_mutex_init(a, b, c, d, e, f)
898
#define __db_maintinit(a, b, c)
899
#define __db_shlocks_clear(a, b, c)
900
#define __db_shlocks_destroy(a, b)
901
#define __db_mutex_init(a, b, c, d, e, f) __db_mutex_init_int(a, b, c, d)
905
* Lock/unlock a mutex. If the mutex was marked as uninteresting, the thread
906
* of control can proceed without it.
908
* If the lock is for threads-only, then it was optionally not allocated and
909
* file handles aren't necessary, as threaded applications aren't supported by
915
* We want to switch threads as often as possible. Yield every time
916
* we get a mutex to ensure contention.
918
#define MUTEX_LOCK(dbenv, mp) \
919
if (!F_ISSET((mp), MUTEX_IGNORE)) \
920
DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0); \
921
if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) \
924
#define MUTEX_LOCK(dbenv, mp) \
925
if (!F_ISSET((mp), MUTEX_IGNORE)) \
926
(void)__db_mutex_lock(dbenv, mp);
928
#define MUTEX_UNLOCK(dbenv, mp) \
929
if (!F_ISSET((mp), MUTEX_IGNORE)) \
930
(void)__db_mutex_unlock(dbenv, mp);
931
#define MUTEX_THREAD_LOCK(dbenv, mp) \
933
MUTEX_LOCK(dbenv, mp)
934
#define MUTEX_THREAD_UNLOCK(dbenv, mp) \
936
MUTEX_UNLOCK(dbenv, mp)
939
* We use a single file descriptor for fcntl(2) locking, and (generally) the
940
* object's offset in a shared region as the byte that we're locking. So,
941
* there's a (remote) possibility that two objects might have the same offsets
942
* such that the locks could conflict, resulting in deadlock. To avoid this
943
* possibility, we offset the region offset by a small integer value, using a
944
* different offset for each subsystem's locks. Since all region objects are
945
* suitably aligned, the offset guarantees that we don't collide with another
948
#define DB_FCNTL_OFF_GEN 0 /* Everything else. */
949
#define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
950
#define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
952
#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
954
* When the underlying mutexes require library (most likely heap) or system
955
* resources, we have to clean up when we discard mutexes (for the library
956
* resources) and both when discarding mutexes and after application failure
957
* (for the mutexes requiring system resources). This violates the rule that
958
* we never look at a shared region after application failure, but we've no
959
* other choice. In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
962
* To support mutex release after application failure, allocate thread-handle
963
* mutexes in shared memory instead of in the heap. The number of slots we
964
* allocate for this purpose isn't configurable, but this tends to be an issue
965
* only on embedded systems where we don't expect large server applications.
967
#define DB_MAX_HANDLES 100 /* Mutex slots for handles. */
969
#endif /* !_DB_MUTEX_H_ */