~ubuntu-branches/ubuntu/saucy/linux-ti-omap4/saucy-proposed

Viewing changes to arch/tile/include/asm/futex.h

  • Committer: Package Import Robot
  • Author(s): Paolo Pisati, Paolo Pisati, Stefan Bader, Upstream Kernel Changes
  • Date: 2012-08-15 17:17:43 UTC
  • Revision ID: package-import@ubuntu.com-20120815171743-h5wnuf51xe7pvdid
Tags: 3.5.0-207.13
[ Paolo Pisati ]

* Start new release

[ Stefan Bader ]

* (config) Enable getabis to use local package copies

[ Upstream Kernel Changes ]

* fixup: garbage collect iva_seq[0|1] init
* [Config] enable all SND_OMAP_SOC_*s
* fixup: cm2xxx_3xxx.o is needed for omap2_cm_read|write_reg
* fixup: add some snd_soc_dai* helper functions
* fixup: s/snd_soc_dpcm_params/snd_soc_dpcm/g
* fixup: typo, no_host_mode and useless SDP4430 init
* fixup: enable again aess hwmod

--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
-
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
-
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
+#include <asm/atomic.h>
+
+/*
+ * Support macros for futex operations.  Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
+
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+        asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"           \
+            ".pushsection .fixup,\"ax\"\n"                      \
+            "0: { movei %0, %5; j 9f }\n"                       \
+            ".section __ex_table,\"a\"\n"                       \
+            ".quad 1b, 0b\n"                                    \
+            ".popsection\n"                                     \
+            "9:"                                                \
+            : "=r" (ret), "=r" (val), "+m" (*(uaddr))           \
+            : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+        ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor()                                           \
+        ({                                                      \
+                u32 oldval, n = oparg;                          \
+                if ((ret = __get_user(oldval, uaddr)) == 0) {   \
+                        do {                                    \
+                                oparg = oldval ^ n;             \
+                                __futex_cmpxchg();              \
+                        } while (ret == 0 && oldval != val);    \
+                }                                               \
+        })
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
+
 #else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
-        struct __get_user asm_ret = __get_user_4(uaddr);
-        if (!asm_ret.err) {
-                int oldval, newval;
-                do {
-                        oldval = asm_ret.val;
-                        newval = oldval ^ n;
-                        asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-                } while (asm_ret.err == 0 && oldval != asm_ret.val);
-        }
-        return asm_ret;
-}
+
+#define __futex_call(FN)                                                \
+        {                                                               \
+                struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+                val = gu.val;                                           \
+                ret = gu.err;                                           \
+        }
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg()                                               \
+        {                                                               \
+                struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+                                                        lock, oldval, oparg); \
+                val = gu.val;                                           \
+                ret = gu.err;                                           \
+        }
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache.  Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog()                                        \
+        int *lock;                                              \
+        __insn_prefetch(uaddr);                                 \
+        lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@
         int cmp = (encoded_op >> 24) & 15;
         int oparg = (encoded_op << 8) >> 20;
         int cmparg = (encoded_op << 20) >> 20;
-        int ret;
-        struct __get_user asm_ret;
+        int uninitialized_var(val), ret;
+
+        __futex_prolog();
+
+        /* The 32-bit futex code makes this assumption, so validate it here. */
+        BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
@@ -71,46 +127,45 @@
         pagefault_disable();
         switch (op) {
         case FUTEX_OP_SET:
-                asm_ret = futex_set(uaddr, oparg);
+                __futex_set();
                 break;
         case FUTEX_OP_ADD:
-                asm_ret = futex_add(uaddr, oparg);
+                __futex_add();
                 break;
         case FUTEX_OP_OR:
-                asm_ret = futex_or(uaddr, oparg);
+                __futex_or();
                 break;
         case FUTEX_OP_ANDN:
-                asm_ret = futex_andn(uaddr, oparg);
+                __futex_andn();
                 break;
         case FUTEX_OP_XOR:
-                asm_ret = futex_xor(uaddr, oparg);
+                __futex_xor();
                 break;
         default:
-                asm_ret.err = -ENOSYS;
+                ret = -ENOSYS;
+                break;
         }
         pagefault_enable();
 
-        ret = asm_ret.err;
-
         if (!ret) {
                 switch (cmp) {
                 case FUTEX_OP_CMP_EQ:
-                        ret = (asm_ret.val == cmparg);
+                        ret = (val == cmparg);
                         break;
                 case FUTEX_OP_CMP_NE:
-                        ret = (asm_ret.val != cmparg);
+                        ret = (val != cmparg);
                         break;
                 case FUTEX_OP_CMP_LT:
-                        ret = (asm_ret.val < cmparg);
+                        ret = (val < cmparg);
                         break;
                 case FUTEX_OP_CMP_GE:
-                        ret = (asm_ret.val >= cmparg);
+                        ret = (val >= cmparg);
                         break;
                 case FUTEX_OP_CMP_LE:
-                        ret = (asm_ret.val <= cmparg);
+                        ret = (val <= cmparg);
                         break;
                 case FUTEX_OP_CMP_GT:
-                        ret = (asm_ret.val > cmparg);
+                        ret = (val > cmparg);
                         break;
                 default:
                         ret = -ENOSYS;
@@ -120,23 +175,21 @@
 }
 
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-                                                u32 oldval, u32 newval)
+                                                u32 oldval, u32 oparg)
 {
-        struct __get_user asm_ret;
+        int ret, val;
+
+        __futex_prolog();
 
         if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
-        asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-        *uval = asm_ret.val;
-        return asm_ret.err;
+        __futex_cmpxchg();
+
+        *uval = val;
+        return ret;
 }
 
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_FUTEX_H */