#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

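/*
 * For illustration, and assuming the ALT_SMP() expansion above, SEV below
 * expands to roughly:
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 *
 * i.e. the SMP instruction is emitted inline and its address is recorded in
 * .alt.smp.init together with the UP replacement, so that early boot code
 * can patch in the UP variant when running on a uniprocessor system.
 */
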
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

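/*
 * dsb_sev() issues a data synchronisation barrier (DSB on ARMv7, the
 * equivalent CP15 operation on ARMv6) followed by SEV, waking any CPUs
 * that are waiting in WFE for a lock to be released.
 */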
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

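/*
 * As an illustrative pseudo-C sketch (ldrex/strex/wfe stand in for the
 * actual instructions), arch_spin_lock() below behaves roughly like:
 *
 *	do {
 *		tmp = ldrex(&lock->lock);		// exclusive load
 *		if (tmp != 0)
 *			wfe();				// wait for an unlock event
 *	} while (tmp != 0 || strex(&lock->lock, 1));	// store 1 if still exclusive
 *	smp_mb();
 */
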
#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

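/*
 * Single attempt at the lock: returns 1 if the lock was acquired,
 * 0 otherwise (the lock was held or the exclusive store failed).
 * No spinning or waiting is done here.
 */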
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

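/*
 * Lock word layout: 0 means unlocked, a positive value is the number of
 * readers currently holding the lock, and bit 31 (0x80000000) set means
 * write-locked.
 */
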
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
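/*
 * As an illustrative pseudo-C sketch (ldrex/strex/wfe stand in for the
 * actual instructions), arch_read_lock() below behaves roughly like:
 *
 *	do {
 *		tmp = ldrex(&rw->lock) + 1;		// prospective reader count
 *		if ((int)tmp < 0)
 *			wfe();				// writer holds it: wait
 *	} while ((int)tmp < 0 || strex(&rw->lock, tmp));
 *	smp_mb();
 */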
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

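/*
 * Single attempt at a read lock: returns 1 if the reader count was
 * successfully incremented, 0 otherwise (a writer holds the lock or the
 * exclusive store failed).
 */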
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */