/* Copyright (C) 1992-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Brendan Kehoe (brendan@zen.org).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
19
#include <sysdeps/unix/sysdep.h>
24
# include <alpha/regdef.h>
30
# include <dl-sysdep.h> /* Defines RTLD_PRIVATE_ERRNO. */
/* Paste a ':' onto X to form an assembler label definition.  Only
   valid when preprocessing assembler input; in strict C the paste of
   an identifier and ':' is not a valid preprocessing token.  */
#define __LABEL(x) x##:
/* Open a leaf assembler function NAME with frame size FRAMESIZE.
   NOTE(review): the .globl/.align/.ent/__LABEL lines were lost in
   extraction and have been restored from the conventional glibc Alpha
   definition -- confirm against the pristine source.  */
#define LEAF(name, framesize)			\
  .globl name;					\
  .align 4;					\
  .ent name, 0;					\
__LABEL(name)					\
  .frame sp, framesize, ra
/* Mark the end of function SYM.  */
#define END(sym) .end sym
55
# define PSEUDO_PROLOGUE \
60
jsr AT,(AT),_mcount; \
64
# define PSEUDO_PROLOGUE \
68
# define PSEUDO_PROLOGUE \
/* Value handed to the pseudo-op machinery for the "usepv" choice when
   profiling is in effect: the standard prologue ("std") under PROF,
   otherwise none ("no").
   NOTE(review): the #ifdef PROF / #else / #endif wrappers were lost in
   extraction and have been restored -- without them the two
   definitions would conflict.  Confirm against the pristine source.  */
#ifdef PROF
# define USEPV_PROF std
#else
# define USEPV_PROF no
#endif
/* How a failed system call is reported: the label the PSEUDO stub
   branches to on error, and the handler code PSEUDO_END must emit.
   NOTE(review): the tail of the rtld handler and the #elif/#else
   directives were lost in extraction and have been restored --
   confirm against the pristine source.  */
#if RTLD_PRIVATE_ERRNO
/* The dynamic linker keeps errno in its private rtld_errno variable.  */
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER			\
$syscall_error:					\
	stl v0, rtld_errno(gp) !gprel; \
	lda v0, -1;				\
	ret
#elif defined(PIC)
/* PIC: branch straight into the shared __syscall_error, sharing
   its gp, so no local handler code is needed.  */
# define SYSCALL_ERROR_LABEL __syscall_error !samegp
# define SYSCALL_ERROR_HANDLER
#else
/* Static: a local label that tail-jumps to __syscall_error.  */
# define SYSCALL_ERROR_LABEL $syscall_error
# define SYSCALL_ERROR_HANDLER			\
$syscall_error:					\
	jmp $31, __syscall_error
#endif /* RTLD_PRIVATE_ERRNO */
/* Hook for individual syscalls to marshal their arguments before the
   trap; expands to nothing by default.  */
/* Overridden by specific syscalls. */
#undef PSEUDO_PREPARE_ARGS
#define PSEUDO_PREPARE_ARGS /* Nothing. */
/* Emit a complete syscall stub: entry point, trap, branch on error.
   The result comes back in v0; the bne below tests a3 as the error
   flag set by the kernel.
   NOTE(review): the .globl/.align/.ent/__LABEL/PSEUDO_PROLOGUE lines
   were lost in extraction and have been restored -- confirm against
   the pristine source.  */
#define PSEUDO(name, syscall_name, args)	\
	.globl name;				\
	.align 4;				\
	.ent name, 0;				\
__LABEL(name)					\
	PSEUDO_PROLOGUE;			\
	PSEUDO_PREPARE_ARGS \
	lda v0, SYS_ify(syscall_name); \
	call_pal PAL_callsys; \
	bne a3, SYSCALL_ERROR_LABEL
/* Close a PSEUDO stub.  When errno handling is delegated to the
   shared __syscall_error (PIC, not rtld) nothing extra is emitted;
   otherwise the local error handler must be materialized here.
   NOTE(review): the #else/#endif structure and the END(sym) tail were
   lost in extraction and have been restored -- confirm.  */
#if defined(PIC) && !RTLD_PRIVATE_ERRNO
# define PSEUDO_END(sym) END(sym)
#else
# define PSEUDO_END(sym) \
	SYSCALL_ERROR_HANDLER; \
	END(sym)
#endif
/* Like PSEUDO, but for syscalls that never report an error, so no
   branch-on-error is emitted after the trap.
   NOTE(review): entry-point boilerplate restored after extraction
   damage -- confirm against the pristine source.  */
#define PSEUDO_NOERRNO(name, syscall_name, args)	\
	.globl name;					\
	.align 4;					\
	.ent name, 0;					\
__LABEL(name)						\
	PSEUDO_PROLOGUE;				\
	PSEUDO_PREPARE_ARGS \
	lda v0, SYS_ify(syscall_name); \
	call_pal PAL_callsys;
/* Closing and return for the no-error stub variant: nothing beyond
   the plain function end and a plain return.  */
#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(sym) END(sym)

#define ret_NOERRNO ret
/* Like PSEUDO, but the stub hands the kernel's error value back to
   the caller directly instead of going through errno, so no error
   branch is emitted.
   NOTE(review): entry-point boilerplate restored after extraction
   damage -- confirm against the pristine source.  */
#define PSEUDO_ERRVAL(name, syscall_name, args)	\
	.globl name;				\
	.align 4;				\
	.ent name, 0;				\
__LABEL(name)					\
	PSEUDO_PROLOGUE;			\
	PSEUDO_PREPARE_ARGS \
	lda v0, SYS_ify(syscall_name); \
	call_pal PAL_callsys;
/* Closing and return for the error-value stub variant.  */
#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(sym) END(sym)

#define ret_ERRVAL ret
/* Register-to-register move used by the generic syscall templates.  */
#define MOVE(x,y) mov x,y
155
#else /* !ASSEMBLER */
/* ??? Linux needs to be able to override INLINE_SYSCALL for one
   particular special case.  Make this easy.  */

#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
	INLINE_SYSCALL1(name, nr, args)
/* Perform the syscall and apply the usual libc convention: on
   failure store the kernel's error value into errno and yield -1.
   NOTE(review): the statement-expression braces and the error-branch
   tail were lost in extraction and have been restored -- confirm
   against the pristine source.  */
#define INLINE_SYSCALL1(name, nr, args...)	\
({						\
	long _sc_ret, _sc_err;			\
	inline_syscall##nr(__NR_##name, args); \
	if (__builtin_expect (_sc_err, 0))	\
	  {					\
	    __set_errno (_sc_ret); \
	    _sc_ret = -1L;			\
	  }					\
	_sc_ret;				\
})
/* Internal-interface variants: the error flag is reported through
   ERR_OUT instead of being folded into errno.  */
#define INTERNAL_SYSCALL(name, err_out, nr, args...) \
	INTERNAL_SYSCALL1(name, err_out, nr, args)

#define INTERNAL_SYSCALL1(name, err_out, nr, args...) \
	INTERNAL_SYSCALL_NCS(__NR_##name, err_out, nr, args)
/* Non-constant-syscall-number variant: NAME is an expression, not a
   __NR_* token.  Yields the raw result; the error flag goes to ERR_OUT.
   NOTE(review): the statement-expression braces and the err_out tail
   were lost in extraction and have been restored -- confirm.  */
#define INTERNAL_SYSCALL_NCS(name, err_out, nr, args...) \
({						\
	long _sc_ret, _sc_err;			\
	inline_syscall##nr(name, args); \
	err_out = _sc_err;			\
	_sc_ret;				\
})
/* Declare the error variable consumed by INTERNAL_SYSCALL; marked
   unused because some callers never inspect it.  */
#define INTERNAL_SYSCALL_DECL(err) \
	long int err __attribute__((unused))
/* The normal Alpha calling convention sign-extends 32-bit quantities
   no matter what the "real" sign of the 32-bit type.  We want to
   preserve that when filling in values for the kernel.  */
#define syscall_promote(arg) \
  (sizeof(arg) == 4 ? (long)(int)(long)(arg) : (long)(arg))
/* Make sure and "use" the variable that we're not returning,
   in order to suppress unused variable warnings.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((void)val, err)
#define INTERNAL_SYSCALL_ERRNO(val, err) ((void)err, val)
/* Registers the PALcode syscall trashes beyond the explicit operands;
   listed as clobbers on every inline_syscallN asm.  */
#define inline_syscall_clobbers \
	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
	"$22", "$23", "$24", "$25", "$27", "$28", "memory"
/* It is moderately important optimization-wise to limit the lifetime
   of the hard-register variables as much as possible.  Thus we copy
   in/out as close to the asm as possible.  */

/* Zero-argument syscall: v0 ($0) carries the number in and the result
   out; $19 (a3) carries the error flag back (cf. the bne a3 test in
   PSEUDO).  The unused argument registers are clobbered.
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall0(name, args...)			\
{							\
	register long _sc_19 __asm__("$19"); \
	register long _sc_0 = name; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2" \
	   : "+v"(_sc_0), "=r"(_sc_19) \
	   : : inline_syscall_clobbers, \
	       "$16", "$17", "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* One-argument syscall: the promoted argument goes in $16 (a0).
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall1(name,arg1)			\
{							\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3" \
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16) \
	   : : inline_syscall_clobbers, \
	       "$17", "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Two-argument syscall: arguments in $16, $17.
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall2(name,arg1,arg2)			\
{							\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4" \
	   : "+v"(_sc_0), "=r"(_sc_19), \
	     "+r"(_sc_16), "+r"(_sc_17) \
	   : : inline_syscall_clobbers, \
	       "$18", "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Three-argument syscall: arguments in $16-$18.
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall3(name,arg1,arg2,arg3)		\
{							\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19"); \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5" \
	   : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18) \
	   : : inline_syscall_clobbers, "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Four-argument syscall: arguments in $16-$19; $19 doubles as the
   fourth argument on input and the error flag on output, hence the
   "+r" constraint.
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall4(name,arg1,arg2,arg3,arg4)	\
{							\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18) \
	   : : inline_syscall_clobbers, "$20", "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Five-argument syscall: arguments in $16-$20.
   NOTE(review): the braces around the macro body were lost in
   extraction and have been restored -- confirm.  */
#define inline_syscall5(name,arg1,arg2,arg3,arg4,arg5)	\
{							\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _tmp_20 = syscall_promote (arg5); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	register long _sc_20 __asm__("$20") = _tmp_20; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20) \
	   : : inline_syscall_clobbers, "$21"); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Six-argument syscall: arguments in $16-$21; no free argument
   registers remain, so none are listed as extra clobbers.
   NOTE(review): the braces and the "+r"(_sc_21) operand line were
   lost in extraction and have been restored -- confirm.  */
#define inline_syscall6(name,arg1,arg2,arg3,arg4,arg5,arg6)	\
{								\
	register long _tmp_16 = syscall_promote (arg1); \
	register long _tmp_17 = syscall_promote (arg2); \
	register long _tmp_18 = syscall_promote (arg3); \
	register long _tmp_19 = syscall_promote (arg4); \
	register long _tmp_20 = syscall_promote (arg5); \
	register long _tmp_21 = syscall_promote (arg6); \
	register long _sc_0 = name; \
	register long _sc_16 __asm__("$16") = _tmp_16; \
	register long _sc_17 __asm__("$17") = _tmp_17; \
	register long _sc_18 __asm__("$18") = _tmp_18; \
	register long _sc_19 __asm__("$19") = _tmp_19; \
	register long _sc_20 __asm__("$20") = _tmp_20; \
	register long _sc_21 __asm__("$21") = _tmp_21; \
	__asm__ __volatile__ \
	  ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7 %8" \
	   : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
	     "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20), \
	     "+r"(_sc_21) \
	   : : inline_syscall_clobbers); \
	_sc_ret = _sc_0, _sc_err = _sc_19; \
}
/* Pointer mangling support.  Note that tls access is slow enough that
   we don't deoptimize things by placing the pointer check value there.  */

#if defined NOT_IN_libc && defined IS_IN_rtld
# ifdef __ASSEMBLER__
/* rtld assembler: load the private guard via gp-relative relocations,
   then xor it into the pointer.
   NOTE(review): the trailing "xor src, tmp, dst" lines were lost in
   extraction and have been restored -- confirm.  */
# define PTR_MANGLE(dst, src, tmp) \
	ldah tmp, __pointer_chk_guard_local($29) !gprelhigh; \
	ldq tmp, __pointer_chk_guard_local(tmp) !gprellow; \
	xor src, tmp, dst
/* Variant for when TMP already holds the guard value.  */
# define PTR_MANGLE2(dst, src, tmp) \
	xor src, tmp, dst
/* XOR is its own inverse, so demangling reuses the mangle macros.  */
# define PTR_DEMANGLE(dst, tmp) PTR_MANGLE(dst, dst, tmp)
# define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
# else
extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden;
# define PTR_MANGLE(var) \
(var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
# define PTR_DEMANGLE(var) PTR_MANGLE(var)
# endif
359
# ifdef __ASSEMBLER__
360
# define PTR_MANGLE(dst, src, tmp) \
361
ldq tmp, __pointer_chk_guard; \
363
# define PTR_MANGLE2(dst, src, tmp) \
365
# define PTR_DEMANGLE(dst, tmp) PTR_MANGLE(dst, dst, tmp)
366
# define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
368
extern const uintptr_t __pointer_chk_guard attribute_relro;
369
# define PTR_MANGLE(var) \
370
(var) = (__typeof(var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
371
# define PTR_DEMANGLE(var) PTR_MANGLE(var)
374
/* There exists generic C code that assumes that PTR_MANGLE is always
375
defined. When generating code for the static libc, we don't have
376
__pointer_chk_guard defined. Nor is there any place that would
377
initialize it if it were defined, so there's little point in doing
378
anything more than nothing. */
379
# ifndef __ASSEMBLER__
380
# define PTR_MANGLE(var)
381
# define PTR_DEMANGLE(var)
385
#endif /* ASSEMBLER */