/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})
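/*
 * A minimal layout sketch (not part of this header; all names are
 * hypothetical): to satisfy the alignment and adjacency checks above,
 * the two scalars are normally placed back to back in one per cpu
 * struct whose alignment is twice the member size.
 */
struct pcpu_pair {
	unsigned long first;	/* lands on a 2 * sizeof(unsigned long) boundary */
	unsigned long second;	/* must directly follow "first" */
} __aligned(2 * sizeof(unsigned long));

static DEFINE_PER_CPU(struct pcpu_pair, example_pair);	/* illustrative */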
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	preempt_disable();						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	preempt_enable();						\
	ret__;								\
})
#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
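/*
 * Hedged usage sketch (not part of this header), loosely modeled on the
 * way SLUB pairs a freelist pointer with a transaction id; all names
 * below are illustrative.  Both words are replaced only if neither has
 * changed since they were read, and a truth value signals the outcome.
 */
struct cache_cpu {
	void *freelist;		/* first word, double word aligned */
	unsigned long tid;	/* second word, directly after freelist */
} __aligned(2 * sizeof(void *));

static DEFINE_PER_CPU(struct cache_cpu, cache);

static bool push_object(void *old_head, unsigned long tid, void *new_head)
{
	return this_cpu_cmpxchg_double(cache.freelist, cache.tid,
				       old_head, tid,
				       new_head, tid + 1);
}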
/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 */
#ifndef __this_cpu_cmpxchg
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif
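/*
 * Hedged usage sketch (illustrative names): callers of the __this_cpu_*
 * operations take care of preemption themselves, so the cheap
 * non-atomic variants are safe inside the critical section.
 */
static DEFINE_PER_CPU(unsigned long, hit_count);

static void count_hit(void)
{
	preempt_disable();	/* the caller, not the op, handles preemption */
	__this_cpu_inc(hit_count);
	preempt_enable();
}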
#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
			__this_cpu_read(pcp2) == (oval2)) {		\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})
#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from another
 * processor.
 */
#ifndef irqsafe_cpu_cmpxchg
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif
#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	local_irq_restore(flags);					\
	ret__;								\
})
#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
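/*
 * Hedged usage sketch (illustrative names): the irqsafe_cpu_* variants
 * disable local interrupts around the operation, so the update cannot
 * race with an interrupt handler on the same CPU that touches the same
 * variable.  Like plain cmpxchg, the previous value is returned.
 */
static DEFINE_PER_CPU(unsigned long, pending_token);

static bool claim_token(unsigned long old, unsigned long new)
{
	/* Succeeds only if the token still held @old at swap time. */
	return irqsafe_cpu_cmpxchg(pending_token, old, new) == old;
}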
#endif /* __LINUX_PERCPU_H */