#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <linux/spinlock_types.h>

/*
 * Unlocked value of an rwlock: RW_LOCK_BIAS means "no readers and no
 * writer". Readers decrement it; a writer takes it all the way to 0.
 */
#define RW_LOCK_BIAS 0x01000000

/* Low-level lock primitives implemented out of line for this arch. */
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
/*
 * Test whether the spinlock is currently held.
 * The lock byte is > 0 when free; <= 0 means some CPU holds it.
 */
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}
/*
 * Release the spinlock by storing 1 (the "free" value) into the lock
 * word with a single move.d; the "memory" clobber keeps the compiler
 * from reordering prior accesses past the release.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ volatile ("move.d %1,%0" \
			  : "=m" (lock->slock) \
			  : "r" (1) \
			  : "memory");
}
/* Spin (politely, via cpu_relax) until the lock is released by its holder. */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}
/* Try to take the lock without spinning; nonzero on success. */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return cris_spin_trylock((void *)&lock->slock);
}
/* Acquire the lock, spinning in the out-of-line primitive if contended. */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	cris_spin_lock((void *)&lock->slock);
}
/*
 * Flags-aware lock entry point. This arch does not re-enable IRQs
 * while spinning, so the saved flags are simply ignored.
 */
static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	arch_spin_lock(lock);
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
/* A read lock can be taken while the count is positive (no writer holds it). */
static inline int arch_read_can_lock(arch_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}
/* A write lock needs the full bias: no readers and no writer present. */
static inline int arch_write_can_lock(arch_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}
/*
 * Take a read lock: wait until no writer holds the lock (count > 0),
 * then account for ourselves by decrementing the count.
 *
 * The guard spinlock must be DROPPED while we wait — the writer's
 * unlock path needs slock to restore the bias, so spinning with
 * slock held would deadlock against it.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock == 0) {
		arch_spin_unlock(&rw->slock);
		cpu_relax();
		arch_spin_lock(&rw->slock);
	}
	rw->lock--;
	arch_spin_unlock(&rw->slock);
}
/*
 * Take the write lock: wait until the count is back at the full bias
 * (no readers, no writer), then claim exclusivity by zeroing it.
 *
 * As in arch_read_lock(), slock is released around the wait so that
 * readers' unlock paths (which also take slock) can make progress.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS) {
		arch_spin_unlock(&rw->slock);
		cpu_relax();
		arch_spin_lock(&rw->slock);
	}
	rw->lock = 0;
	arch_spin_unlock(&rw->slock);
}
/* Drop a read lock: give our reference back by incrementing the count. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	rw->lock++;
	arch_spin_unlock(&rw->slock);
}
/*
 * Drop the write lock by restoring the full bias.
 *
 * The original code spun with `while (rw->lock != RW_LOCK_BIAS);`
 * before the store — but the writer itself set lock to 0 on acquire
 * and nothing else may modify it while the write lock is held, so
 * that wait could never terminate. The bogus busy-wait is removed.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	rw->lock = RW_LOCK_BIAS;
	arch_spin_unlock(&rw->slock);
}
/*
 * Try to take a read lock without blocking.
 * Returns 1 on success (count was positive and has been decremented),
 * 0 if a writer currently holds the lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
	if (rw->lock != 0) {
		rw->lock--;
		ret = 1;
	}
	arch_spin_unlock(&rw->slock);
	return ret;
}
/*
 * Try to take the write lock without blocking.
 * Returns 1 on success (lock was at the full bias and is now claimed),
 * 0 if any reader or writer is present.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
	if (rw->lock == RW_LOCK_BIAS) {
		rw->lock = 0;
		ret = 1;
	}
	arch_spin_unlock(&rw->slock);
	return ret;
}
/* Flags-aware raw rwlock entry points just ignore the flags here. */
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

/* Contention back-off hooks: plain cpu_relax() on this arch. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */