/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/system.h>
#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int __atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
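
/* Illustrative sketch, not part of this header: the *_and_test forms
 * exist for reference-count style patterns, where exactly one thread
 * observes the transition to zero and does the teardown.  Assuming a
 * hypothetical release(obj) cleanup hook:
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);			(take a reference)
 *	if (atomic_dec_and_test(&refcnt))	(drop it; true only at zero)
 *		release(obj);
 */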
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
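
/* Worked example of the encoding (illustrative only): atomic24_set(v, 5)
 * stores 5 << 8 == 0x500; while an updater holds the low lock byte the
 * word's bottom bits are non-zero, and atomic24_read() below spins until
 * they clear, then recovers the value with an arithmetic shift:
 * 0x500 >> 8 == 5.  A negative count such as -1 sits in the word as
 * 0xffffff00, and the arithmetic right shift restores the sign.
 */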
#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until no updater holds the low lock byte. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif
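
/* The two helpers below call out of line into arch/sparc/lib/atomic.S.
 * Operands are pinned to specific %g registers because the assembler
 * routines expect them there, and the "add %o7, 8, %o7" in the call's
 * delay slot pre-biases the return address: the out-of-line code returns
 * with a plain "jmpl %o7" rather than the usual "%o7 + 8".
 */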
static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
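
/* Illustrative sketch, not part of this header: the atomic24_* ops
 * mirror the regular API (minus cmpxchg/xchg), so a 24-bit counter is
 * used the same way:
 *
 *	static atomic24_t active = ATOMIC24_INIT(0);
 *
 *	atomic24_inc(&active);
 *	if (atomic24_dec_and_test(&active))
 *		(last user: tear down)
 */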
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */