Viewing changes to xen/include/asm-ia64/uaccess.h

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user
 * trick us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed, if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated with it and, if so, sets r8 to -EFAULT, clears r9 to 0,
 * and then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#define __access_ok(addr) (!IS_VMM_ADDRESS((unsigned long)(addr)))
#define access_ok(addr, size) (__access_ok(addr))
#define array_access_ok(addr, count, size) (__access_ok(addr))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)        __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr)        __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
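
/*
 * Illustrative usage sketch (not part of the original header): both
 * macros evaluate to 0 on success and -EFAULT on a faulting access.
 * The helper below is hypothetical, assuming a user pointer handed in
 * from a guest/user context.
 *
 *      static int read_user_int (int __user *uptr, int *kval)
 *      {
 *              int v;
 *              if (get_user(v, uptr))          // 0 on success, -EFAULT on fault
 *                      return -EFAULT;
 *              *kval = v;
 *              return put_user(v + 1, uptr);   // write back, same convention
 *      }
 */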
 
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)      __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)      __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

extern long __put_user_unaligned_unknown (void);

#define __put_user_unaligned(x, ptr)                                            \
({                                                                              \
        long __ret;                                                             \
        switch (sizeof(*(ptr))) {                                               \
                case 1: __ret = __put_user((x), (ptr)); break;                  \
                case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))           \
                        | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
                case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))          \
                        | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
                case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))          \
                        | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
                default: __ret = __put_user_unaligned_unknown();                \
        }                                                                       \
        __ret;                                                                  \
})

extern long __get_user_unaligned_unknown (void);

#define __get_user_unaligned(x, ptr)                                            \
({                                                                              \
        long __ret;                                                             \
        switch (sizeof(*(ptr))) {                                               \
                case 1: __ret = __get_user((x), (ptr)); break;                  \
                case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))           \
                        | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
                case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))          \
                        | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
                case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))          \
                        | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
                default: __ret = __get_user_unaligned_unknown();                \
        }                                                                       \
        __ret;                                                                  \
})
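
/*
 * Worked example (hypothetical, for illustration only): for a 4-byte
 * store, __put_user_unaligned(0x11223344, (u32 __user *)p) splits into
 * two independently-faultable 2-byte stores,
 *
 *      __put_user(0x11223344, (u16 __user *)p)            // stores 0x3344
 *      __put_user(0x11223344 >> 16, (u16 __user *)p + 1)  // stores 0x1122
 *
 * which, on little-endian ia64, lays down the same bytes as an aligned
 * u32 store.  The error codes of the two halves are OR-ed together, so
 * the result is non-zero if either half faults.
 */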
 
#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)                                     \
do {                                                                            \
        register long __gu_r8 asm ("r8") = 0;                                   \
        register long __gu_r9 asm ("r9");                                       \
        asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
             "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"                         \
             "[1:]"                                                             \
             : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));    \
        (err) = __gu_r8;                                                        \
        (val) = __gu_r9;                                                        \
} while (0)

/*
 * The "__put_user_size()" macro tells gcc that it reads from memory
 * rather than writing to it.  This is safe because the store does not
 * write to any memory gcc knows about, so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)                                     \
do {                                                                            \
        register long __pu_r8 asm ("r8") = 0;                                   \
        asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
                      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"                  \
                      "[1:]"                                                    \
                      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
        (err) = __pu_r8;                                                        \
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE     2       /* ip-rel */
# define __get_user_size(val, addr, n, err)                             \
do {                                                                    \
        __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);   \
        (err) = ia64_getreg(_IA64_REG_R8);                              \
        (val) = ia64_getreg(_IA64_REG_R9);                              \
} while (0)
# define __put_user_size(val, addr, n, err)                                     \
do {                                                                            \
        __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
        (err) = ia64_getreg(_IA64_REG_R8);                                      \
} while (0)
#endif /* !ASM_SUPPORTED */
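
/*
 * Sketch of the fixup in action (illustrative, not part of the header):
 *
 *      long err; unsigned long v;
 *      __get_user_size(v, bad_ptr, 8, err);
 *      // the ld8 faults -> the handler finds the __ex_table entry,
 *      // sets r8 = -EFAULT, r9 = 0, and resumes at the continuation
 *      // label, so err == -EFAULT and v == 0
 *      // on success: err == 0 and v holds the loaded value
 */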
 
extern void __get_user_unknown (void);

/*
 * Evaluating the arguments X, PTR, SIZE, and SEGMENT may involve subroutine
 * calls, which could clobber r8 and r9 (among others).  Thus, be careful
 * not to evaluate them while using r8/r9.
 */
#define __do_get_user(check, x, ptr, size, segment)                                     \
({                                                                                      \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);                              \
        __typeof__ (size) __gu_size = (size);                                           \
        long __gu_err = -EFAULT, __gu_val = 0;                                          \
                                                                                        \
        if (!check || __access_ok(__gu_ptr))                                            \
                switch (__gu_size) {                                                    \
                      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;  \
                      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;  \
                      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;  \
                      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;  \
                      default: __get_user_unknown(); break;                             \
                }                                                                       \
        (x) = (__typeof__(*(__gu_ptr))) __gu_val;                                       \
        __gu_err;                                                                       \
})

#define __get_user_nocheck(x, ptr, size)        __do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment)

extern void __put_user_unknown (void);

/*
 * Evaluating the arguments X, PTR, SIZE, and SEGMENT may involve subroutine
 * calls, which could clobber r8 (among others).  Thus, be careful not to
 * evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size, segment)                                     \
({                                                                                      \
        __typeof__ (x) __pu_x = (x);                                                    \
        __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                                   \
        __typeof__ (size) __pu_size = (size);                                           \
        long __pu_err = -EFAULT;                                                        \
                                                                                        \
        if (!check || __access_ok(__pu_ptr))                                            \
                switch (__pu_size) {                                                    \
                      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;    \
                      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;    \
                      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;    \
                      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;    \
                      default: __put_user_unknown(); break;                             \
                }                                                                       \
        __pu_err;                                                                       \
})

#define __put_user_nocheck(x, ptr, size)        __do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment)
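
/*
 * Hypothetical usage sketch of the unchecked variants: validate the
 * whole range once with access_ok(), then skip per-access checking for
 * the repeated accesses.  read_pair() is an assumed example helper.
 *
 *      static int read_pair (u32 __user *uptr, u32 *a, u32 *b)
 *      {
 *              if (!access_ok(uptr, 2 * sizeof(u32)))
 *                      return -EFAULT;
 *              if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */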
 
/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
                                               unsigned long count);

static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
        return __copy_user(to, (void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
        return __copy_user((void __user *) to, from, count);
}

#define __copy_to_user_inatomic         __copy_to_user
#define __copy_from_user_inatomic       __copy_from_user
#define copy_to_user(to, from, n)                                                       \
({                                                                                      \
        void __user *__cu_to = (to);                                                    \
        const void *__cu_from = (from);                                                 \
        long __cu_len = (n);                                                            \
                                                                                        \
        if (__access_ok(__cu_to))                                                       \
                __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);   \
        __cu_len;                                                                       \
})

#define copy_from_user(to, from, n)                                                     \
({                                                                                      \
        void *__cu_to = (to);                                                           \
        const void __user *__cu_from = (from);                                          \
        long __cu_len = (n);                                                            \
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
        if (__access_ok(__cu_from))                                                     \
                __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);   \
        __cu_len;                                                                       \
})
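
/*
 * Sketch of the return convention (illustrative): copy_to_user() and
 * copy_from_user() return the number of bytes that could NOT be copied,
 * so zero means complete success.  struct foo is a hypothetical payload
 * type assumed for the example.
 *
 *      static int send_result (void __user *ubuf, const struct foo *res)
 *      {
 *              if (copy_to_user(ubuf, res, sizeof(*res)))
 *                      return -EFAULT; // some bytes were left uncopied
 *              return 0;
 *      }
 */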
 
#define __copy_in_user(to, from, size)  __copy_user((to), (from), (size))

static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
        if (likely(access_ok(from, n) && access_ok(to, n)))
                n = __copy_user(to, from, n);
        return n;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct exception_table_entry {
        int addr;       /* location-relative address of insn this fixup is for */
        int cont;       /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};

extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);

static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
        const struct exception_table_entry *e;
        e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
        if (e) {
                ia64_handle_exception(regs, e);
                return 1;
        }
        return 0;
}
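
/*
 * Hypothetical call site (for illustration): a fault handler would try
 * the fixup path before treating the fault as fatal.
 *
 *      void handle_fault (struct pt_regs *regs)
 *      {
 *              if (ia64_done_with_exception(regs))
 *                      return;         // faulting insn had a fixup; resumed
 *              // ... otherwise report an unrecoverable fault ...
 *      }
 */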
 
#endif /* _ASM_IA64_UACCESS_H */