/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1
#include "qemu/compiler.h"

/* For C11 atomic ops */

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

#ifndef __ATOMIC_RELAXED
/*
 * We use GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)
/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

#endif /* C11 atomics */
/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
#else
#define smp_wmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
#else
#define smp_rmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
#else
#define smp_read_barrier_depends()   barrier()
#endif
#endif
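
/*
 * Illustrative sketch, not part of the original header: how a
 * producer/consumer pair is expected to use smp_wmb()/smp_rmb().
 * The producer orders its payload store before the flag store, the
 * consumer orders the flag load before the payload load.  The
 * function names are hypothetical.
 */
static inline void example_smp_publish(int *data, int *ready, int val)
{
    *data = val;     /* 1. write the payload */
    smp_wmb();       /* 2. StoreStore: payload store ordered before the flag store */
    *ready = 1;      /* 3. publish */
}

static inline int example_smp_consume(int *data, int *ready)
{
    if (*ready) {    /* 1. observe the flag */
        smp_rmb();   /* 2. LoadLoad: flag load ordered before the payload load */
        return *data;
    }
    return -1;       /* payload not published yet */
}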
#define atomic_read(ptr)       (*(__typeof__(*ptr) *volatile) (ptr))

#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store."
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier. This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 * "3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_rmb();                              \
    _val;                                   \
})
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {         \
    smp_wmb();                              \
    atomic_set(ptr, i);                     \
    smp_mb();                               \
} while (0)
#endif
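
/*
 * Illustrative sketch, not part of the original header: the publish/consume
 * pattern expressed with atomic_mb_set()/atomic_mb_read(), which already
 * issue the barriers required by rules 1-3 above, so no explicit smp_*()
 * calls are needed.  The function names are hypothetical.
 */
static inline void example_mb_publish(int *data, int *flag, int val)
{
    atomic_set(data, val);       /* plain store of the payload */
    atomic_mb_set(flag, 1);      /* wmb before + mb after the flag store */
}

static inline int example_mb_consume(int *data, int *flag)
{
    if (atomic_mb_read(flag)) {  /* rmb after the flag load */
        return atomic_read(data);
    }
    return -1;                   /* not published yet */
}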
#ifndef atomic_xchg
#ifdef __ATOMIC_SEQ_CST
#define atomic_xchg(ptr, i)    ({                           \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
})
#elif defined __clang__
#define atomic_xchg(ptr, i)    __sync_exchange(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
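
/*
 * Illustrative sketch, not part of the original header: a minimal
 * test-and-set lock on top of atomic_xchg(), which has at least acquire
 * semantics in every variant above (and is a full barrier on most).
 * The names are hypothetical and only serve as an example.
 */
static inline void example_spin_lock(int *lock)
{
    while (atomic_xchg(lock, 1)) {
        /* spin until the current holder stores 0 again */
    }
}

static inline void example_spin_unlock(int *lock)
{
    atomic_mb_set(lock, 0);    /* barrier before the store releases the critical section */
}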
/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap
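
/*
 * Illustrative sketch, not part of the original header: the classic
 * compare-and-swap retry loop, here a saturating increment built on
 * atomic_cmpxchg(), which returns the value found in memory.  The
 * function name is hypothetical.
 */
static inline int example_inc_saturating(int *counter, int max)
{
    int old_val, new_val;

    do {
        old_val = atomic_read(counter);
        if (old_val >= max) {
            return old_val;            /* already saturated, store nothing */
        }
        new_val = old_val + 1;
        /* retry if another thread changed *counter in the meantime */
    } while (atomic_cmpxchg(counter, old_val, new_val) != old_val);

    return new_val;
}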
/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
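
/*
 * Illustrative sketch, not part of the original header: a reference
 * count using the shorthand macros above.  The "unref" side uses the
 * fetch variant because it needs the old value to detect the last
 * reference.  Names are hypothetical.
 */
static inline void example_ref(int *refcount)
{
    atomic_inc(refcount);                    /* result of the add is discarded */
}

static inline int example_unref(int *refcount)
{
    /* atomic_fetch_dec() returns the value before the decrement */
    return atomic_fetch_dec(refcount) == 1;  /* nonzero if we dropped the last ref */
}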