~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise


Viewing changes to include/linux/percpu.h

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -255,6 +255,33 @@
         pscr2_ret__;                                                    \
 })
 
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)           \
+({                                                                      \
+        bool pdcrb_ret__;                                               \
+        __verify_pcpu_ptr(&pcp1);                                       \
+        BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
+        VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));         \
+        VM_BUG_ON((unsigned long)(&pcp2) !=                             \
+                  (unsigned long)(&pcp1) + sizeof(pcp1));               \
+        switch(sizeof(pcp1)) {                                          \
+        case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
+        case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
+        case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
+        case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
+        default:                                                        \
+                __bad_size_call_parameter(); break;                     \
+        }                                                               \
+        pdcrb_ret__;                                                    \
+})
+
 #define __pcpu_size_call(stem, variable, ...)                           \
 do {                                                                    \
         __verify_pcpu_ptr(&(variable));                                 \
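The BUILD_BUG_ON/VM_BUG_ON checks above encode the layout contract spelled out in the comment: both variables must have the same size, the first must sit on a boundary of twice that size, and the second must follow immediately after. A minimal sketch of a per-cpu pair that satisfies the contract (the struct and variable names here are hypothetical, not part of this patch):

        /* Hypothetical declaration: wrapping the two words in a struct
         * aligned to twice the word size gives the first member the
         * required alignment and makes the second member directly
         * adjacent.  Requires <linux/percpu.h>. */
        struct pcpu_pair {
                unsigned long first;    /* aligned to 2 * sizeof(long) */
                unsigned long second;   /* follows directly thereafter */
        } __attribute__((__aligned__(2 * sizeof(unsigned long))));

        static DEFINE_PER_CPU(struct pcpu_pair, my_pair);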
@@ -501,6 +528,45 @@
 #endif
 
 /*
+ * cmpxchg_double replaces two adjacent scalars at once.  The first
+ * two parameters are per cpu variables which have to be of the same
+ * size.  A truth value is returned to indicate success or failure
+ * (since a double register result is difficult to handle).  There is
+ * very limited hardware support for these operations, so only certain
+ * sizes may work.
+ */
+#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
+({                                                                      \
+        int ret__;                                                      \
+        preempt_disable();                                              \
+        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
+                        oval1, oval2, nval1, nval2);                    \
+        preempt_enable();                                               \
+        ret__;                                                          \
+})
+
+#ifndef this_cpu_cmpxchg_double
+# ifndef this_cpu_cmpxchg_double_1
+#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_2
+#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_4
+#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef this_cpu_cmpxchg_double_8
+#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
+        __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
+/*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
  * responsibility of handling preemption issues. Arch code can still
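Given a pair laid out like the hypothetical struct in the earlier sketch, this_cpu_cmpxchg_double() replaces both scalars in one step and returns the truth value described in the comment. A hedged usage sketch, reusing the hypothetical my_pair declaration:

        /* Hypothetical retry loop: install (new1, new2) only if neither
         * word changed since it was read; retry on failure. */
        static void pair_set(unsigned long new1, unsigned long new2)
        {
                unsigned long old1, old2;

                do {
                        old1 = this_cpu_read(my_pair.first);
                        old2 = this_cpu_read(my_pair.second);
                } while (!this_cpu_cmpxchg_double(my_pair.first, my_pair.second,
                                                  old1, old2, new1, new2));
        }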
@@ -703,6 +769,39 @@
         __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
 #endif
 
+#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
+({                                                                      \
+        int __ret = 0;                                                  \
+        if (__this_cpu_read(pcp1) == (oval1) &&                         \
+                         __this_cpu_read(pcp2)  == (oval2)) {           \
+                __this_cpu_write(pcp1, (nval1));                        \
+                __this_cpu_write(pcp2, (nval2));                        \
+                __ret = 1;                                              \
+        }                                                               \
+        (__ret);                                                        \
+})
+
+#ifndef __this_cpu_cmpxchg_double
+# ifndef __this_cpu_cmpxchg_double_1
+#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_2
+#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_4
+#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef __this_cpu_cmpxchg_double_8
+#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
+        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)      \
+        __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
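__this_cpu_generic_cmpxchg_double() above is a plain read-compare-write sequence with no atomicity of its own, so the double-underscore variants are only safe once the caller has excluded preemption. A sketch of a caller satisfying that precondition (function name hypothetical, continuing the my_pair example):

        /* Hypothetical caller: the __this_cpu_* form may only run with
         * preemption off, since the compare and the two writes are
         * separate operations that must not span a CPU migration. */
        static bool pair_try_set(unsigned long old1, unsigned long old2,
                                 unsigned long new1, unsigned long new2)
        {
                bool ok;

                preempt_disable();
                ok = __this_cpu_cmpxchg_double(my_pair.first, my_pair.second,
                                               old1, old2, new1, new2);
                preempt_enable();
                return ok;
        }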
@@ -823,4 +922,36 @@
         __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
 #endif
 
+#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)      \
+({                                                                      \
+        int ret__;                                                      \
+        unsigned long flags;                                            \
+        local_irq_save(flags);                                          \
+        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
+                        oval1, oval2, nval1, nval2);                    \
+        local_irq_restore(flags);                                       \
+        ret__;                                                          \
+})
+
+#ifndef irqsafe_cpu_cmpxchg_double
+# ifndef irqsafe_cpu_cmpxchg_double_1
+#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_2
+#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_4
+#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_double_8
+#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)  \
+        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+# endif
+# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
+        __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
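irqsafe_generic_cpu_cmpxchg_double() reuses the same non-atomic generic body but brackets it with local_irq_save()/local_irq_restore(), so the compare-and-swap cannot be torn by an interrupt handler on the same CPU. A final sketch, again with hypothetical names, of when that stronger guarantee is the right choice:

        /* Hypothetical use: my_pair is also updated from an interrupt
         * handler on this CPU, so disabling preemption alone would not
         * be enough; the irqsafe variant masks local interrupts for the
         * duration of the compare-and-swap. */
        static bool pair_try_set_irqsafe(unsigned long old1, unsigned long old2,
                                         unsigned long new1, unsigned long new2)
        {
                return irqsafe_cpu_cmpxchg_double(my_pair.first, my_pair.second,
                                                  old1, old2, new1, new2);
        }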