/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the hardware-independent macros in spin.h.
 *
 * The following hardware-dependent macros must be provided for each
 * supported platform:
 *
 * void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 * void S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *
 * void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 * bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 * void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
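 *
 * For illustration, direct use of this API (real callers go through the
 * hardware-independent wrappers in spin.h instead) follows this pattern:
 *
 *		slock_t lock;
 *
 *		S_INIT_LOCK(&lock);
 *		S_LOCK(&lock);
 *		... read/modify shared state ...
 *		S_UNLOCK(&lock);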
 *
 * Note to implementors: there are default implementations for all these
 * macros at the bottom of the file.  Check if your platform can use
 * these or needs to override them.
 *
 * Usually, S_LOCK() is implemented in terms of an even lower-level macro
 * TAS():
 *
 * int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 * TAS() is NOT part of the API, and should never be called directly.
 *
 * CAUTION: on some platforms TAS() may sometimes report failure to acquire
 * a lock even when the lock is not locked.  For example, on Alpha TAS()
 * will "fail" if interrupted.  Therefore TAS() should always be invoked
 * in a retry loop, even if you are certain the lock is free.
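 *
 * A minimal sketch of such a retry loop (the real loop, with timeout
 * handling, is the s_lock() routine declared at the bottom of this file):
 *
 *		while (TAS(lock))
 *			SPIN_DELAY();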
 *
 * ANOTHER CAUTION: be sure that TAS() and S_UNLOCK() represent sequence
 * points, ie, loads and stores of other values must not be moved across
 * a lock or unlock.  In most cases it suffices to make the operation be
 * done through a "volatile" pointer.
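 *
 * For example, the default S_UNLOCK at the bottom of this file releases
 * the lock with
 *
 *		*((volatile slock_t *) (lock)) = 0;
 *
 * rather than a plain "*lock = 0"; the volatile-qualified store is what
 * keeps the compiler from migrating other loads and stores across the
 * unlock.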
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 * defined), then we fall back on an emulation that uses SysV semaphores
 * (see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 * implementation, because of the cost of a kernel call per lock or unlock.
 * An old report is that Postgres spends around 40% of its time in semop(2)
 * when using the SysV semaphore code.
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  $PostgreSQL: pgsql/src/include/storage/s_lock.h,v 1.133 2004-12-31 22:03:42 pgsql Exp $
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#include "storage/pg_sema.h"

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */
#if defined(__GNUC__) || defined(__ICC)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):
 *
 *	__asm__ __volatile__(
 *		"	instruction	\n"
 *		"	instruction	\n"
 *		"	instruction	\n"
 *:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
 *:		"r"(lock)					// lock pointer, in input register
 *:		"memory", "cc");			// show clobbered registers here
 *
 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */
#if defined(__i386__) || defined(__x86_64__)	/* AMD Opteron */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/* Use a non-locking test before asserting the bus lock */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock 			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}
#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *	   The PAUSE instruction improves the performance of IA-32
	 *	   processors supporting Hyper-Threading Technology when
	 *	   executing spin-wait loops and other routines where one
	 *	   thread is accessing a shared lock or semaphore in a tight
	 *	   polling loop. When executing a spin-wait loop, the
	 *	   processor can suffer a severe performance penalty when
	 *	   exiting the loop because it detects a possible memory order
	 *	   violation and flushes the core processor's pipeline. The
	 *	   PAUSE instruction provides a hint to the processor that the
	 *	   code sequence is a spin-wait loop. The processor uses this
	 *	   hint to avoid the memory order violation and prevent the
	 *	   pipeline flush. In addition, the PAUSE instruction
	 *	   de-pipelines the spin-wait loop to prevent it from
	 *	   consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__ia64)	/* __ia64 used by ICC compiler? */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#endif	 /* __ia64__ || __ia64 */
#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	swpb 	%0, %0, [%2]	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __arm__ || __arm */
#if defined(__s390__) || defined(__s390x__)
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__	__volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */
#if defined(__sparc__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __sparc__ */
#if defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

#if defined(__powerpc64__)
typedef unsigned long slock_t;
#else
typedef unsigned int slock_t;
#endif

#define TAS(lock) tas(lock)

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t _t;
	int _res;

	__asm__ __volatile__(
"	lwarx   %0,0,%3		\n"
"	cmpwi   %0,0		\n"
"	bne     1f			\n"
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     2f			\n"
"1:	li      %1,1		\n"
"	b		3f			\n"
"2:						\n"
"	isync				\n"
"	li      %1,0		\n"
"3:						\n"
:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* powerpc */
#if defined(__mc68000__) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__	__volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:
:		"memory", "cc");
	return rv;
}

#endif	 /* defined(__mc68000__) && defined(__linux__) */
#if defined(__vax__)
/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl 	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */
#if defined(__ns32k__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	sbitb	0, %1	\n"
		"	sfsd	%0		\n"
:		"=r"(_res), "+m"(*lock)
:
:		"memory");
	return _res;
}

#endif	 /* __ns32k__ */
#if defined(__alpha) || defined(__alpha__)
/*
 * Correct multi-processor locking methods are explained in section 5.5.3
 * of the Alpha AXP Architecture Handbook, which at this writing can be
 * found at ftp://ftp.netbsd.org/pub/NetBSD/misc/dec-docs/index.html.
 * For gcc we implement the handbook's code directly with inline assembler.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#define TAS(lock)  tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	__asm__	__volatile__(
		"	ldq		$0, %1	\n"
		"	bne		$0, 2f	\n"
		"	ldq_l	%0, %1	\n"
		"	bne		%0, 2f	\n"
		"	mov		1,  $0	\n"
		"	stq_c	$0, %1	\n"
		"	beq		$0, 2f	\n"
		"	mb				\n"
		"	br		3f		\n"
		"2:	mov		1, %0	\n"
		"3:					\n"
:		"=&r"(_res), "+m"(*lock)
:
:		"memory", "0");
	return (int) _res;
}

#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	mb \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __alpha || __alpha__ */
/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#if defined(__mips__) && !defined(__sgi)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;
#endif


#endif	/* __GNUC__ */
/***************************************************************************
 * Platforms that use non-gcc inline assembly:
 ***************************************************************************/

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */


#if defined(USE_UNIVEL_CC)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)	tas(lock)

asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
	pushl %ebx
	movl s_lock, %ebx
	movl $255, %eax
	lock
	xchgb %al, (%ebx)
	popl %ebx
}

#endif	 /* defined(USE_UNIVEL_CC) */
#if defined(__alpha) || defined(__alpha__)
/*
 * The Tru64 compiler doesn't support gcc-style inline asm, but it does
 * have some builtin functions that accomplish much the same results.
 * For simplicity, slock_t is defined as long (ie, quadword) on Alpha
 * regardless of the compiler in use.  LOCK_LONG and UNLOCK_LONG only
 * operate on an int (ie, longword), but that's OK as long as we define
 * S_INIT_LOCK to zero out the whole quadword.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include <alpha/builtins.h>
#define S_INIT_LOCK(lock)  (*(lock) = 0)
#define TAS(lock)		   (__LOCK_LONG_RETRY((lock), 1) == 0)
#define S_UNLOCK(lock)	   __UNLOCK_LONG(lock)

#endif	 /* __alpha || __alpha__ */
#if defined(__hppa) || defined(__hppa__)
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.  Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct.  The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int			sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((long) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

#endif /* __GNUC__ */

#define S_UNLOCK(lock)	(*TAS_ACTIVE_WORD(lock) = -1)

#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */
#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)

#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)

#endif	/* HPUX on IA64, non gcc */
#if defined(__QNX__) && defined(__WATCOMC__)
/*
 * QNX 4 using WATCOM C
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) wc_tas(lock)
extern slock_t wc_tas(volatile slock_t *lock);
#pragma aux wc_tas =\
		"	mov   al,1    " \
		" lock  xchg    al,[esi]" \
		parm [esi]        \
		value [al];

#endif	 /* __QNX__ and __WATCOMC__*/
#if defined(__sgi)	/* SGI compiler */
/*
 * SGI IRIX 5
 * slock_t is defined as an unsigned long. We use the standard SGI
 * mutex API.
 *
 * The following comment is left for historical reasons, but is probably
 * not a good idea since the mutex ABI is supported.
 *
 * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
 * assembly from his NECEWS SVR4 port, but we probably ought to retain this
 * for the R3000 chips out there.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include "mutex.h"
#define TAS(lock)	(test_and_set(lock,1))
#define S_UNLOCK(lock)	(test_then_and(lock,0))
#define S_INIT_LOCK(lock)	(test_then_and(lock,0))
#define S_LOCK_FREE(lock)	(test_then_add(lock,0) == 0)
#endif	 /* __sgi */
#ifdef sinix
/*
 * SINIX / Reliant UNIX
 * slock_t is defined as a struct abilock_t, which has a single unsigned long
 * member. (Basically same as SGI)
 */
#define HAS_TEST_AND_SET

#include "abi_mutex.h"
typedef abilock_t slock_t;

#define TAS(lock)	(!acquire_lock(lock))
#define S_UNLOCK(lock)	release_lock(lock)
#define S_INIT_LOCK(lock)	init_lock(lock)
#define S_LOCK_FREE(lock)	(stat_lock(lock) == UNLOCKED)
#endif	 /* sinix */
#if defined(_AIX)	/* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)			_check_lock(lock, 0, 1)
#define S_UNLOCK(lock)		_clear_lock(lock, 0)
#endif	 /* _AIX */
#if defined (nextstep)
#define HAS_TEST_AND_SET

typedef struct mutex slock_t;

#define S_LOCK(lock)	mutex_lock(lock)
#define S_UNLOCK(lock)	mutex_unlock(lock)
#define S_INIT_LOCK(lock)	mutex_init(lock)
/* For Mach, we have to delve inside the entrails of `struct mutex'.  Ick! */
#define S_LOCK_FREE(alock)	((alock)->lock == 0)
#endif	 /* nextstep */
/* These are in s_lock.c */


#if defined(sun3)		/* Sun3 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#if defined(__sparc__) || defined(__sparc)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


/* out-of-line assembler from src/backend/port/tas/foo.s */

#if defined(__sun) && defined(__i386)
/*
 * Solaris/386 (we only get here for non-gcc case)
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#endif	/* !defined(HAS_TEST_AND_SET) */
/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif
#else	/* !HAVE_SPINLOCKS */

/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef PGSemaphoreData slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock)
#define TAS(lock)	tas_sema(lock)
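
/*
 * For illustration, a minimal sketch of how one of these could map onto the
 * PGSemaphore API from storage/pg_sema.h (the real implementations are in
 * spin.c and may differ in detail):
 *
 *		int
 *		tas_sema(volatile slock_t *lock)
 *		{
 *			// TAS() is expected to return 0 on success
 *			return PGSemaphoreTryLock((PGSemaphore) lock) ? 0 : 1;
 *		}
 */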

#endif	/* HAVE_SPINLOCKS */
/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	do { \
		if (TAS(lock)) \
			s_lock((lock), __FILE__, __LINE__); \
	} while (0)
#endif	 /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)		(*((volatile slock_t *) (lock)) = 0)
#endif	 /* S_UNLOCK */

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */
#if !defined(TAS)
extern int	tas(volatile slock_t *lock);		/* in port/.../tas.s, or
												 * s_lock.c */

#define TAS(lock)		tas(lock)
#endif	 /* TAS */


/*
 * Platform-independent out-of-line support routines
 */
extern void s_lock(volatile slock_t *lock, const char *file, int line);
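
/*
 * A rough sketch of the wait loop inside s_lock() (the real routine lives in
 * s_lock.c; the "delays" counter and MAX_DELAYS bound shown here are
 * illustrative assumptions, not the actual tuning):
 *
 *		int delays = 0;
 *
 *		while (TAS(lock))
 *		{
 *			SPIN_DELAY();
 *			if (++delays > MAX_DELAYS)
 *				... timed out: report the stuck spinlock and abort() ...
 *		}
 */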

#endif	 /* S_LOCK_H */