#ifndef __ALSA_IATOMIC_H
#define __ALSA_IATOMIC_H

#if defined(__i386__) || defined(__x86_64__)

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_SMP_LOCK "lock ; "

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }
/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
/*
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		ATOMIC_SMP_LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
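/*
 * Illustrative usage (not part of the original header): a minimal
 * reference-counting sketch built on the primitives above.  The struct,
 * function names and free_buf() destructor are hypothetical.
 */
#if 0
struct refcounted_buf {
	atomic_t refs;		/* number of current owners */
	void *data;
};

static void buf_get(struct refcounted_buf *b)
{
	atomic_inc(&b->refs);			/* take a reference */
}

static void buf_put(struct refcounted_buf *b)
{
	if (atomic_dec_and_test(&b->refs))	/* true only when count hits 0 */
		free_buf(b);			/* hypothetical destructor */
}
#endif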
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(ATOMIC_SMP_LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(ATOMIC_SMP_LOCK "orl %0,%1" \
: : "r" (mask),"m" (*addr) : "memory")
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */

#ifdef __i386__
#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#endif
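/*
 * Illustrative sketch (not from the original header): the classic
 * producer/consumer pattern these barriers exist for.  All names below
 * are hypothetical.
 */
#if 0
static int shared_data;
static atomic_t data_ready = ATOMIC_INIT(0);

static void producer(int value)
{
	shared_data = value;		/* 1. write the payload            */
	wmb();				/* 2. order the store above before */
	atomic_set(&data_ready, 1);	/*    publishing the "ready" flag  */
}

static int consumer(void)
{
	while (!atomic_read(&data_ready))
		;			/* spin until the flag is visible  */
	rmb();				/* order flag read before data read */
	return shared_data;
}
#endif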
#undef ATOMIC_SMP_LOCK

#define IATOMIC_DEFINED		1

#endif /* __i386__ */
#ifdef __ia64__

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)		((atomic_t) { (i) })

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
/* stripped version - we need only the 4-byte version */
#define ia64_cmpxchg(sem,ptr,old,new,size) \
({ \
	__typeof__(ptr) _p_ = (ptr); \
	__typeof__(new) _n_ = (new); \
	unsigned long _o_, _r_; \
	_o_ = (unsigned int) (long) (old); \
	__asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_)); \
	__asm__ __volatile__ ("cmpxchg4."sem" %0=[%1],%2,ar.ccv" \
			      : "=r"(_r_) : "r"(_p_), "r"(_n_) : "memory"); \
	(__typeof__(old)) _r_; \
})
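/*
 * For readers unfamiliar with IA-64 cmpxchg: the macro above behaves
 * roughly like the portable sketch below (illustrative only; the real
 * work is done atomically by the cmpxchg4 instruction with acquire
 * semantics, not by separate C statements).
 */
#if 0
static int pseudo_cmpxchg4(volatile int *p, int old, int new_val)
{
	int cur = *p;		/* conceptually: compare and swap in one step */
	if (cur == old)
		*p = new_val;
	return cur;		/* caller retries when cur != old */
}
#endif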
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	int old, new;
	// CMPXCHG_BUGCHECK_DECL

	do {
		// CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg("acq", v, old, old + i, sizeof(atomic_t)) != old);
	return new;
}
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	int old, new;
	// CMPXCHG_BUGCHECK_DECL

	do {
		// CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
	return new;
}
#define IA64_FETCHADD(tmp,v,n,sz) \
({ \
	switch (sz) { \
	case 4: \
		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
		break; \
	case 8: \
		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
		break; \
	} \
})
#define ia64_fetch_and_add(i,v) \
({ \
	unsigned long _tmp; \
	volatile __typeof__(*(v)) *_v = (v); \
	switch (i) { \
	case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
	case  -8: IA64_FETCHADD(_tmp, _v,  -8, sizeof(*(v))); break; \
	case  -4: IA64_FETCHADD(_tmp, _v,  -4, sizeof(*(v))); break; \
	case  -1: IA64_FETCHADD(_tmp, _v,  -1, sizeof(*(v))); break; \
	case   1: IA64_FETCHADD(_tmp, _v,   1, sizeof(*(v))); break; \
	case   4: IA64_FETCHADD(_tmp, _v,   4, sizeof(*(v))); break; \
	case   8: IA64_FETCHADD(_tmp, _v,   8, sizeof(*(v))); break; \
	case  16: IA64_FETCHADD(_tmp, _v,  16, sizeof(*(v))); break; \
	} \
	(__typeof__(*v)) (_tmp + (i));	/* return new value */ \
})
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return ia64_atomic_add(i, v) < 0;
}
#define atomic_add_return(i,v) \
	((__builtin_constant_p(i) && \
	  (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16) \
	   || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
	 ? ia64_fetch_and_add(i, &(v)->counter) \
	 : ia64_atomic_add(i, v))

#define atomic_sub_return(i,v) \
	((__builtin_constant_p(i) && \
	  (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16) \
	   || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \
	 ? ia64_fetch_and_add(-(i), &(v)->counter) \
	 : ia64_atomic_sub(i, v))

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()

#define IATOMIC_DEFINED		1

#endif /* __ia64__ */
#ifdef __alpha__

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#define IATOMIC_DEFINED		1

#endif /* __alpha__ */
#ifdef __powerpc__

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
extern void atomic_set_mask(unsigned long mask, unsigned long *addr);

#define SMP_ISYNC	"\n\tisync"
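/*
 * Background note (illustrative, not from the original header): every
 * PowerPC routine below follows the same load-reserve/store-conditional
 * retry loop, which behaves roughly like this portable sketch.  The
 * store_conditional() helper is hypothetical.
 */
#if 0
static int llsc_add_return_sketch(volatile int *p, int a)
{
	int t;
	do {
		t = *p;				/* lwarx: load and reserve      */
		t += a;				/* modify the private copy      */
	} while (!store_conditional(p, t));	/* stwcx.: fails if reservation
						   was lost; retry (bne- 1b)    */
	return t;
}
#endif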
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n\
	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
/*
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")

#define IATOMIC_DEFINED		1

#endif /* __powerpc__ */
#ifdef __mips__

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)	((v)->counter = (i))

/*
 * for MIPS II and better we can use ll/sc instructions, and kernel 2.4.3+
 * will emulate them on MIPS I.
 */
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		"1:   ll      %0, %1      # atomic_add\n"
		"     addu    %0, %2\n"
		"     sc      %0, %1\n"
		"     beqz    %0, 1b\n"
		".set pop\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
extern __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		"1:   ll      %0, %1      # atomic_sub\n"
		"     subu    %0, %2\n"
		"     sc      %0, %1\n"
		"     beqz    %0, 1b\n"
		".set pop\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
}
/*
 * Same as above, but return the result value
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push               # atomic_add_return\n"
		".set noreorder\n"
		".set mips2\n"
		"1:   ll      %1, %2\n"
		"     addu    %0, %1, %3\n"
		"     sc      %0, %2\n"
		"     beqz    %0, 1b\n"
		"     addu    %0, %1, %3\n"
		".set pop\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder          # atomic_sub_return\n"
		"1:   ll      %1, %2\n"
		"     subu    %0, %1, %3\n"
		"     sc      %0, %2\n"
		"     beqz    %0, 1b\n"
		"     subu    %0, %1, %3\n"
		".set pop\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

	return result;
}
#define atomic_dec_return(v) atomic_sub_return(1,(v))
809
#define atomic_inc_return(v) atomic_add_return(1,(v))
812
* atomic_sub_and_test - subtract value from variable and test result
813
* @i: integer value to subtract
814
* @v: pointer of type atomic_t
816
* Atomically subtracts @i from @v and returns
817
* true if the result is zero, or false for all
818
* other cases. Note that the guaranteed
819
* useful range of an atomic_t is only 24 bits.
821
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
824
* atomic_inc_and_test - increment and test
825
* @v: pointer of type atomic_t
827
* Atomically increments @v by 1
828
* and returns true if the result is zero, or false for all
829
* other cases. Note that the guaranteed
830
* useful range of an atomic_t is only 24 bits.
832
#define atomic_inc_and_test(v) (atomic_inc_return(1, (v)) == 0)
/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_dec(v) atomic_sub(1,(v))
/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 *
 * Currently not implemented for MIPS.
 */

#define mb()						\
__asm__ __volatile__(					\
	"# prevent instructions being moved around\n\t"	\
	".set\tnoreorder\n\t"				\
	"# 8 nops to fool the R4400 pipeline\n\t"	\
	"nop;nop;nop;nop;nop;nop;nop;nop\n\t"		\
	".set\treorder"					\
	: : : "memory")
#define rmb() mb()
#define wmb() mb()

#define IATOMIC_DEFINED		1

#endif /* __mips__ */
#ifdef __arm__

/*
 * FIXME: the code below is valid only for SA11xx
 */

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
	unsigned long temp;					\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory")

#define __save_flags_cli(x) local_irq_save(x)
#define __restore_flags(x) local_irq_restore(x)
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
static __inline__ void atomic_add(int i, volatile atomic_t *v)
{
	unsigned long flags;

	__save_flags_cli(flags);
	v->counter += i;
	__restore_flags(flags);
}

static __inline__ void atomic_sub(int i, volatile atomic_t *v)
{
	unsigned long flags;

	__save_flags_cli(flags);
	v->counter -= i;
	__restore_flags(flags);
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	unsigned long flags;

	__save_flags_cli(flags);
	v->counter += 1;
	__restore_flags(flags);
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	unsigned long flags;

	__save_flags_cli(flags);
	v->counter -= 1;
	__restore_flags(flags);
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	unsigned long flags;
	int result;

	__save_flags_cli(flags);
	v->counter -= 1;
	result = (v->counter == 0);
	__restore_flags(flags);

	return result;
}

static inline int atomic_add_negative(int i, volatile atomic_t *v)
{
	unsigned long flags;
	int result;

	__save_flags_cli(flags);
	v->counter += i;
	result = (v->counter < 0);
	__restore_flags(flags);

	return result;
}

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	__save_flags_cli(flags);
	*addr &= ~mask;
	__restore_flags(flags);
}
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()

#define IATOMIC_DEFINED		1

#endif /* __arm__ */
#ifndef IATOMIC_DEFINED
/*
 * unsupported architecture.
 */
#warning "Atomic operations are not supported on this architecture."

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
#define atomic_add(i,v)	(((v)->counter) += (i))
#define atomic_sub(i,v)	(((v)->counter) -= (i))
#define atomic_inc(v)	(((v)->counter)++)
#define atomic_dec(v)	(((v)->counter)--)

#define mb()
#define rmb()
#define wmb()

#define IATOMIC_DEFINED	1

#endif /* IATOMIC_DEFINED */
/*
 * Copyright (c) 2001 by Abramo Bagnara <abramo@alsa-project.org>
 */

/* Max number of times we must spin on a spin-lock calling sched_yield().
   After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */

#ifndef MAX_SPIN_COUNT
#define MAX_SPIN_COUNT 50
#endif

/* Duration of sleep (in nanoseconds) when we can't acquire a spin-lock
   after MAX_SPIN_COUNT iterations of sched_yield().
   This MUST BE > 2ms.
   (Otherwise the kernel does busy-waiting for real-time threads,
   giving other threads no chance to run.) */

#ifndef SPIN_SLEEP_DURATION
#define SPIN_SLEEP_DURATION 2000001
#endif
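/*
 * Illustrative sketch (not from the original header) of the strategy the
 * two constants above describe: spin with sched_yield() a bounded number
 * of times, then sleep for SPIN_SLEEP_DURATION nanoseconds.  The
 * try_acquire() test shown here is a hypothetical placeholder.
 */
#if 0
#include <sched.h>
#include <time.h>

static void spin_then_sleep_lock(volatile int *lock)
{
	unsigned int spins = 0;
	struct timespec ts = { 0, SPIN_SLEEP_DURATION };

	while (!try_acquire(lock)) {		/* hypothetical test-and-set */
		if (++spins < MAX_SPIN_COUNT)
			sched_yield();		/* give other threads a chance */
		else {
			nanosleep(&ts, NULL);	/* back off to a real sleep */
			spins = 0;
		}
	}
}
#endif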
typedef struct {
	unsigned int begin, end;
} snd_atomic_write_t;

typedef struct {
	volatile const snd_atomic_write_t *write;
	unsigned int end;
} snd_atomic_read_t;

void snd_atomic_read_wait(snd_atomic_read_t *t);

static inline void snd_atomic_write_init(snd_atomic_write_t *w)
{
	w->begin = 0;
	w->end = 0;
}

static inline void snd_atomic_write_begin(snd_atomic_write_t *w)
{
	w->begin++;
	wmb();
}

static inline void snd_atomic_write_end(snd_atomic_write_t *w)
{
	wmb();
	w->end++;
}

static inline void snd_atomic_read_init(snd_atomic_read_t *r, snd_atomic_write_t *w)
{
	r->write = w;
}

static inline void snd_atomic_read_begin(snd_atomic_read_t *r)
{
	r->end = r->write->end;
	rmb();
}

static inline int snd_atomic_read_ok(snd_atomic_read_t *r)
{
	rmb();
	return r->end == r->write->begin;
}
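/*
 * Illustrative usage (not part of the original header): a writer publishes
 * a value and a reader retries until it observes a consistent snapshot.
 * The shared state and retry policy shown here are hypothetical.
 */
#if 0
static snd_atomic_write_t wstate;
static int shared_value;

static void writer(int v)
{
	snd_atomic_write_begin(&wstate);
	shared_value = v;			/* update protected data */
	snd_atomic_write_end(&wstate);
}

static int reader(void)
{
	snd_atomic_read_t rd;
	int v;

	snd_atomic_read_init(&rd, &wstate);
	do {
		snd_atomic_read_begin(&rd);
		v = shared_value;		/* take a snapshot */
	} while (!snd_atomic_read_ok(&rd));	/* retry if a write raced us */
	return v;
}
#endif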
#endif /* __ALSA_IATOMIC_H */