4
* Copyright (c) 2003 Fabrice Bellard
6
* This program is free software; you can redistribute it and/or modify
7
* it under the terms of the GNU General Public License as published by
8
* the Free Software Foundation; either version 2 of the License, or
9
* (at your option) any later version.
11
* This program is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
* GNU General Public License for more details.
16
* You should have received a copy of the GNU General Public License
17
* along with this program; if not, see <http://www.gnu.org/licenses/>.
19
#define _ATFILE_SOURCE
32
#include <sys/types.h>
38
#include <sys/mount.h>
40
#include <sys/fsuid.h>
41
#include <sys/personality.h>
42
#include <sys/prctl.h>
43
#include <sys/resource.h>
49
int __clone2(int (*fn)(void *), void *child_stack_base,
50
size_t stack_size, int flags, void *arg, ...);
52
#include <sys/socket.h>
56
#include <sys/times.h>
59
#include <sys/statfs.h>
61
#include <sys/sysinfo.h>
62
#include <sys/utsname.h>
63
//#include <sys/user.h>
64
#include <netinet/ip.h>
65
#include <netinet/tcp.h>
66
#include <linux/wireless.h>
67
#include <linux/icmp.h>
68
#include "qemu-common.h"
73
#include <sys/eventfd.h>
76
#include <sys/epoll.h>
79
#include "qemu/xattr.h"
81
#ifdef CONFIG_SENDFILE
82
#include <sys/sendfile.h>
85
#define termios host_termios
86
#define winsize host_winsize
87
#define termio host_termio
88
#define sgttyb host_sgttyb /* same as target */
89
#define tchars host_tchars /* same as target */
90
#define ltchars host_ltchars /* same as target */
92
#include <linux/termios.h>
93
#include <linux/unistd.h>
94
#include <linux/utsname.h>
95
#include <linux/cdrom.h>
96
#include <linux/hdreg.h>
97
#include <linux/soundcard.h>
99
#include <linux/mtio.h>
100
#include <linux/fs.h>
101
#if defined(CONFIG_FIEMAP)
102
#include <linux/fiemap.h>
104
#include <linux/fb.h>
105
#include <linux/vt.h>
106
#include <linux/dm-ioctl.h>
107
#include <linux/reboot.h>
108
#include <linux/route.h>
109
#include <linux/filter.h>
110
#include "linux_loop.h"
111
#include "cpu-uname.h"
115
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116
CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
120
//#include <linux/msdos_fs.h>
121
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
122
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
133
/* Thin wrappers that each define a static function 'name' invoking the
 * raw host syscall __NR_<name> with 0..6 arguments via syscall(2).  The
 * return value follows the host libc syscall() convention (-1 + errno
 * on failure). */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
180
#define __NR_sys_uname __NR_uname
181
#define __NR_sys_getcwd1 __NR_getcwd
182
#define __NR_sys_getdents __NR_getdents
183
#define __NR_sys_getdents64 __NR_getdents64
184
#define __NR_sys_getpriority __NR_getpriority
185
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
186
#define __NR_sys_syslog __NR_syslog
187
#define __NR_sys_tgkill __NR_tgkill
188
#define __NR_sys_tkill __NR_tkill
189
#define __NR_sys_futex __NR_futex
190
#define __NR_sys_inotify_init __NR_inotify_init
191
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
192
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196
#define __NR__llseek __NR_lseek
200
_syscall0(int, gettid)
202
/* This is a replacement for the host gettid() and must return a host
204
static int gettid(void) {
209
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
211
#if !defined(__NR_getdents) || \
212
(defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
213
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
215
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
216
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
217
loff_t *, res, uint, wh);
219
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
220
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
221
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
222
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
224
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
225
_syscall2(int,sys_tkill,int,tid,int,sig)
227
#ifdef __NR_exit_group
228
_syscall1(int,exit_group,int,error_code)
230
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231
_syscall1(int,set_tid_address,int *,tidptr)
233
#if defined(TARGET_NR_futex) && defined(__NR_futex)
234
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235
const struct timespec *,timeout,int *,uaddr2,int,val3)
237
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239
unsigned long *, user_mask_ptr);
240
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242
unsigned long *, user_mask_ptr);
243
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
246
static bitmask_transtbl fcntl_flags_tbl[] = {
247
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
248
{ TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
249
{ TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
250
{ TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
251
{ TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
252
{ TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
253
{ TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
254
{ TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
255
{ TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
256
{ TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
257
{ TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
258
{ TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
259
{ TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
260
#if defined(O_DIRECT)
261
{ TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
263
#if defined(O_NOATIME)
264
{ TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
266
#if defined(O_CLOEXEC)
267
{ TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
270
{ TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
272
/* Don't terminate the list prematurely on 64-bit host+guest. */
273
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
274
{ TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
279
/* Copy one utsname field, truncating to __NEW_UTS_LEN and always
 * NUL-terminating the destination. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill in a Linux 'struct new_utsname' from the host's uname(2) data.
 * Returns 0 on success, -1 (with errno set by uname) on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is only present in the host struct with _GNU_SOURCE */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
313
/* getcwd(2)-style helper: fills 'buf' (capacity 'size') with the current
 * working directory.  Returns the byte count including the trailing NUL
 * on success (matching the kernel getcwd syscall), or -1 with errno set
 * by getcwd() on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
322
#ifdef TARGET_NR_openat
323
/* Wrapper for openat(2) that only passes 'mode' when the caller asked
 * for O_CREAT, matching the variadic open(2)/openat(2) contract.
 * Returns the new fd, or -1 with errno set by openat(). */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
336
#ifdef TARGET_NR_utimensat
337
#ifdef CONFIG_UTIMENSAT
338
/* utimensat(2) front-end: with a NULL pathname, change the timestamps of
 * the open file 'dirfd' itself via futimens(); otherwise resolve
 * 'pathname' relative to 'dirfd'.  Returns 0 or -1 with errno set by the
 * underlying call. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
346
#elif defined(__NR_utimensat)
347
#define __NR_sys_utimensat __NR_utimensat
348
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
349
const struct timespec *,tsp,int,flags)
351
static int sys_utimensat(int dirfd, const char *pathname,
352
const struct timespec times[2], int flags)
358
#endif /* TARGET_NR_utimensat */
360
#ifdef CONFIG_INOTIFY
361
#include <sys/inotify.h>
363
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
364
static int sys_inotify_init(void)
366
return (inotify_init());
369
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
370
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
372
return (inotify_add_watch(fd, pathname, mask));
375
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
376
static int sys_inotify_rm_watch(int fd, int32_t wd)
378
return (inotify_rm_watch(fd, wd));
381
#ifdef CONFIG_INOTIFY1
382
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
383
static int sys_inotify_init1(int flags)
385
return (inotify_init1(flags));
390
/* Userspace can usually survive runtime without inotify */
391
#undef TARGET_NR_inotify_init
392
#undef TARGET_NR_inotify_init1
393
#undef TARGET_NR_inotify_add_watch
394
#undef TARGET_NR_inotify_rm_watch
395
#endif /* CONFIG_INOTIFY */
397
#if defined(TARGET_NR_ppoll)
399
# define __NR_ppoll -1
401
#define __NR_sys_ppoll __NR_ppoll
402
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
403
struct timespec *, timeout, const __sigset_t *, sigmask,
407
#if defined(TARGET_NR_pselect6)
408
#ifndef __NR_pselect6
409
# define __NR_pselect6 -1
411
#define __NR_sys_pselect6 __NR_pselect6
412
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
413
fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
416
#if defined(TARGET_NR_prlimit64)
417
#ifndef __NR_prlimit64
418
# define __NR_prlimit64 -1
420
#define __NR_sys_prlimit64 __NR_prlimit64
421
/* The glibc rlimit structure may not be that used by the underlying syscall */
422
struct host_rlimit64 {
426
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
427
const struct host_rlimit64 *, new_limit,
428
struct host_rlimit64 *, old_limit)
431
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
433
static inline int regpairs_aligned(void *cpu_env) {
434
return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
436
#elif defined(TARGET_MIPS)
437
static inline int regpairs_aligned(void *cpu_env) { return 1; }
438
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
439
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
440
* of registers which translates to the same as ARM/MIPS, because we start with
442
static inline int regpairs_aligned(void *cpu_env) { return 1; }
444
static inline int regpairs_aligned(void *cpu_env) { return 0; }
447
#define ERRNO_TABLE_SIZE 1200
449
/* target_to_host_errno_table[] is initialized from
450
* host_to_target_errno_table[] in syscall_init(). */
451
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
455
* This list is the union of errno values overridden in asm-<arch>/errno.h
456
* minus the errnos that are not actually generic to all archs.
458
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
459
[EIDRM] = TARGET_EIDRM,
460
[ECHRNG] = TARGET_ECHRNG,
461
[EL2NSYNC] = TARGET_EL2NSYNC,
462
[EL3HLT] = TARGET_EL3HLT,
463
[EL3RST] = TARGET_EL3RST,
464
[ELNRNG] = TARGET_ELNRNG,
465
[EUNATCH] = TARGET_EUNATCH,
466
[ENOCSI] = TARGET_ENOCSI,
467
[EL2HLT] = TARGET_EL2HLT,
468
[EDEADLK] = TARGET_EDEADLK,
469
[ENOLCK] = TARGET_ENOLCK,
470
[EBADE] = TARGET_EBADE,
471
[EBADR] = TARGET_EBADR,
472
[EXFULL] = TARGET_EXFULL,
473
[ENOANO] = TARGET_ENOANO,
474
[EBADRQC] = TARGET_EBADRQC,
475
[EBADSLT] = TARGET_EBADSLT,
476
[EBFONT] = TARGET_EBFONT,
477
[ENOSTR] = TARGET_ENOSTR,
478
[ENODATA] = TARGET_ENODATA,
479
[ETIME] = TARGET_ETIME,
480
[ENOSR] = TARGET_ENOSR,
481
[ENONET] = TARGET_ENONET,
482
[ENOPKG] = TARGET_ENOPKG,
483
[EREMOTE] = TARGET_EREMOTE,
484
[ENOLINK] = TARGET_ENOLINK,
485
[EADV] = TARGET_EADV,
486
[ESRMNT] = TARGET_ESRMNT,
487
[ECOMM] = TARGET_ECOMM,
488
[EPROTO] = TARGET_EPROTO,
489
[EDOTDOT] = TARGET_EDOTDOT,
490
[EMULTIHOP] = TARGET_EMULTIHOP,
491
[EBADMSG] = TARGET_EBADMSG,
492
[ENAMETOOLONG] = TARGET_ENAMETOOLONG,
493
[EOVERFLOW] = TARGET_EOVERFLOW,
494
[ENOTUNIQ] = TARGET_ENOTUNIQ,
495
[EBADFD] = TARGET_EBADFD,
496
[EREMCHG] = TARGET_EREMCHG,
497
[ELIBACC] = TARGET_ELIBACC,
498
[ELIBBAD] = TARGET_ELIBBAD,
499
[ELIBSCN] = TARGET_ELIBSCN,
500
[ELIBMAX] = TARGET_ELIBMAX,
501
[ELIBEXEC] = TARGET_ELIBEXEC,
502
[EILSEQ] = TARGET_EILSEQ,
503
[ENOSYS] = TARGET_ENOSYS,
504
[ELOOP] = TARGET_ELOOP,
505
[ERESTART] = TARGET_ERESTART,
506
[ESTRPIPE] = TARGET_ESTRPIPE,
507
[ENOTEMPTY] = TARGET_ENOTEMPTY,
508
[EUSERS] = TARGET_EUSERS,
509
[ENOTSOCK] = TARGET_ENOTSOCK,
510
[EDESTADDRREQ] = TARGET_EDESTADDRREQ,
511
[EMSGSIZE] = TARGET_EMSGSIZE,
512
[EPROTOTYPE] = TARGET_EPROTOTYPE,
513
[ENOPROTOOPT] = TARGET_ENOPROTOOPT,
514
[EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
515
[ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
516
[EOPNOTSUPP] = TARGET_EOPNOTSUPP,
517
[EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
518
[EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
519
[EADDRINUSE] = TARGET_EADDRINUSE,
520
[EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
521
[ENETDOWN] = TARGET_ENETDOWN,
522
[ENETUNREACH] = TARGET_ENETUNREACH,
523
[ENETRESET] = TARGET_ENETRESET,
524
[ECONNABORTED] = TARGET_ECONNABORTED,
525
[ECONNRESET] = TARGET_ECONNRESET,
526
[ENOBUFS] = TARGET_ENOBUFS,
527
[EISCONN] = TARGET_EISCONN,
528
[ENOTCONN] = TARGET_ENOTCONN,
529
[EUCLEAN] = TARGET_EUCLEAN,
530
[ENOTNAM] = TARGET_ENOTNAM,
531
[ENAVAIL] = TARGET_ENAVAIL,
532
[EISNAM] = TARGET_EISNAM,
533
[EREMOTEIO] = TARGET_EREMOTEIO,
534
[ESHUTDOWN] = TARGET_ESHUTDOWN,
535
[ETOOMANYREFS] = TARGET_ETOOMANYREFS,
536
[ETIMEDOUT] = TARGET_ETIMEDOUT,
537
[ECONNREFUSED] = TARGET_ECONNREFUSED,
538
[EHOSTDOWN] = TARGET_EHOSTDOWN,
539
[EHOSTUNREACH] = TARGET_EHOSTUNREACH,
540
[EALREADY] = TARGET_EALREADY,
541
[EINPROGRESS] = TARGET_EINPROGRESS,
542
[ESTALE] = TARGET_ESTALE,
543
[ECANCELED] = TARGET_ECANCELED,
544
[ENOMEDIUM] = TARGET_ENOMEDIUM,
545
[EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
547
[ENOKEY] = TARGET_ENOKEY,
550
[EKEYEXPIRED] = TARGET_EKEYEXPIRED,
553
[EKEYREVOKED] = TARGET_EKEYREVOKED,
556
[EKEYREJECTED] = TARGET_EKEYREJECTED,
559
[EOWNERDEAD] = TARGET_EOWNERDEAD,
561
#ifdef ENOTRECOVERABLE
562
[ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
566
static inline int host_to_target_errno(int err)
568
if(host_to_target_errno_table[err])
569
return host_to_target_errno_table[err];
573
static inline int target_to_host_errno(int err)
575
if (target_to_host_errno_table[err])
576
return target_to_host_errno_table[err];
580
/* Convert a host syscall result to the target convention: on -1, return
 * the (negated) target errno derived from the host errno; otherwise
 * return the value unchanged.
 * NOTE(review): the -1 check was elided in the corrupted source; this is
 * the canonical qemu linux-user form — confirm against upstream. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
588
/* A target return value in the top 4095 addresses of the unsigned range
 * encodes a negated errno (same convention as the Linux kernel). */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
593
char *target_strerror(int err)
595
if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
598
return strerror(target_to_host_errno(err));
601
static abi_ulong target_brk;
602
static abi_ulong target_original_brk;
603
static abi_ulong brk_page;
605
/* Record the initial program break for the guest.  Both the original and
 * current break start at 'new_brk' rounded up to a host page boundary;
 * brk_page tracks the top of the host pages already reserved for the
 * guest heap (see do_brk). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
611
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
612
#define DEBUGF_BRK(message, args...)
614
/* do_brk() must return target values and target errnos. */
615
abi_long do_brk(abi_ulong new_brk)
617
abi_long mapped_addr;
620
DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
623
DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
626
if (new_brk < target_original_brk) {
627
DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
632
/* If the new brk is less than the highest page reserved to the
633
* target heap allocation, set it and we're almost done... */
634
if (new_brk <= brk_page) {
635
/* Heap contents are initialized to zero, as for anonymous
637
if (new_brk > target_brk) {
638
memset(g2h(target_brk), 0, new_brk - target_brk);
640
target_brk = new_brk;
641
DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
645
/* We need to allocate more memory after the brk... Note that
646
* we don't use MAP_FIXED because that will map over the top of
647
* any existing mapping (like the one with the host libc or qemu
648
* itself); instead we treat "mapped but at wrong address" as
649
* a failure and unmap again.
651
new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
652
mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
653
PROT_READ|PROT_WRITE,
654
MAP_ANON|MAP_PRIVATE, 0, 0));
656
if (mapped_addr == brk_page) {
657
/* Heap contents are initialized to zero, as for anonymous
658
* mapped pages. Technically the new pages are already
659
* initialized to zero since they *are* anonymous mapped
660
* pages, however we have to take care with the contents that
661
* come from the remaining part of the previous page: it may
662
* contains garbage data due to a previous heap usage (grown
664
memset(g2h(target_brk), 0, brk_page - target_brk);
666
target_brk = new_brk;
667
brk_page = HOST_PAGE_ALIGN(target_brk);
668
DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
671
} else if (mapped_addr != -1) {
672
/* Mapped but at wrong address, meaning there wasn't actually
673
* enough space for this brk.
675
target_munmap(mapped_addr, new_alloc_size);
677
DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
680
DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
683
#if defined(TARGET_ALPHA)
684
/* We (partially) emulate OSF/1 on Alpha, which requires we
685
return a proper errno, not an unchanged brk value. */
686
return -TARGET_ENOMEM;
688
/* For everything else, return the previous break. */
692
static inline abi_long copy_from_user_fdset(fd_set *fds,
693
abi_ulong target_fds_addr,
697
abi_ulong b, *target_fds;
699
nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
700
if (!(target_fds = lock_user(VERIFY_READ,
702
sizeof(abi_ulong) * nw,
704
return -TARGET_EFAULT;
708
for (i = 0; i < nw; i++) {
709
/* grab the abi_ulong */
710
__get_user(b, &target_fds[i]);
711
for (j = 0; j < TARGET_ABI_BITS; j++) {
712
/* check the bit inside the abi_ulong */
719
unlock_user(target_fds, target_fds_addr, 0);
724
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
725
abi_ulong target_fds_addr,
728
if (target_fds_addr) {
729
if (copy_from_user_fdset(fds, target_fds_addr, n))
730
return -TARGET_EFAULT;
738
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
744
abi_ulong *target_fds;
746
nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
747
if (!(target_fds = lock_user(VERIFY_WRITE,
749
sizeof(abi_ulong) * nw,
751
return -TARGET_EFAULT;
754
for (i = 0; i < nw; i++) {
756
for (j = 0; j < TARGET_ABI_BITS; j++) {
757
v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
760
__put_user(v, &target_fds[i]);
763
unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
768
#if defined(__alpha__)
774
/* Rescale a clock_t tick count from the host's HZ to the target's.
 * NOTE(review): the equal-HZ branch body was elided in the corrupted
 * source; 'return ticks;' is the canonical qemu form — confirm. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
783
/* Copy a host struct rusage into guest memory at target_addr, swapping
 * each field to target byte order.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be locked for writing. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
813
/* Convert a target rlimit value to the host rlim_t.  The target infinity
 * sentinel maps to RLIM_INFINITY, and any value that does not survive a
 * round-trip through rlim_t (host type too narrow) is also saturated to
 * RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
829
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
831
abi_ulong target_rlim_swap;
834
if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
835
target_rlim_swap = TARGET_RLIM_INFINITY;
837
target_rlim_swap = rlim;
838
result = tswapal(target_rlim_swap);
843
static inline int target_to_host_resource(int code)
846
case TARGET_RLIMIT_AS:
848
case TARGET_RLIMIT_CORE:
850
case TARGET_RLIMIT_CPU:
852
case TARGET_RLIMIT_DATA:
854
case TARGET_RLIMIT_FSIZE:
856
case TARGET_RLIMIT_LOCKS:
858
case TARGET_RLIMIT_MEMLOCK:
859
return RLIMIT_MEMLOCK;
860
case TARGET_RLIMIT_MSGQUEUE:
861
return RLIMIT_MSGQUEUE;
862
case TARGET_RLIMIT_NICE:
864
case TARGET_RLIMIT_NOFILE:
865
return RLIMIT_NOFILE;
866
case TARGET_RLIMIT_NPROC:
868
case TARGET_RLIMIT_RSS:
870
case TARGET_RLIMIT_RTPRIO:
871
return RLIMIT_RTPRIO;
872
case TARGET_RLIMIT_SIGPENDING:
873
return RLIMIT_SIGPENDING;
874
case TARGET_RLIMIT_STACK:
881
static inline abi_long copy_from_user_timeval(struct timeval *tv,
882
abi_ulong target_tv_addr)
884
struct target_timeval *target_tv;
886
if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
887
return -TARGET_EFAULT;
889
__get_user(tv->tv_sec, &target_tv->tv_sec);
890
__get_user(tv->tv_usec, &target_tv->tv_usec);
892
unlock_user_struct(target_tv, target_tv_addr, 0);
897
/* Write a host struct timeval into guest memory at target_tv_addr,
 * swapping fields via __put_user.  Returns 0 or -TARGET_EFAULT if the
 * guest address cannot be locked for writing. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
913
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
916
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
917
abi_ulong target_mq_attr_addr)
919
struct target_mq_attr *target_mq_attr;
921
if (!lock_user_struct(VERIFY_READ, target_mq_attr,
922
target_mq_attr_addr, 1))
923
return -TARGET_EFAULT;
925
__get_user(attr->mq_flags, &target_mq_attr->mq_flags);
926
__get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
927
__get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
928
__get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
930
unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
935
/* Write a host struct mq_attr into guest memory at target_mq_attr_addr.
 * Returns 0 or -TARGET_EFAULT if the guest address cannot be locked. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
955
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
956
/* do_select() must return target values and target errnos. */
957
static abi_long do_select(int n,
958
abi_ulong rfd_addr, abi_ulong wfd_addr,
959
abi_ulong efd_addr, abi_ulong target_tv_addr)
961
fd_set rfds, wfds, efds;
962
fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
963
struct timeval tv, *tv_ptr;
966
ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
970
ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
974
ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
979
if (target_tv_addr) {
980
if (copy_from_user_timeval(&tv, target_tv_addr))
981
return -TARGET_EFAULT;
987
ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
989
if (!is_error(ret)) {
990
if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
991
return -TARGET_EFAULT;
992
if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
993
return -TARGET_EFAULT;
994
if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
995
return -TARGET_EFAULT;
997
if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
998
return -TARGET_EFAULT;
1005
static abi_long do_pipe2(int host_pipe[], int flags)
1008
return pipe2(host_pipe, flags);
1014
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1015
int flags, int is_pipe2)
1019
ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1022
return get_errno(ret);
1024
/* Several targets have special calling conventions for the original
1025
pipe syscall, but didn't replicate this into the pipe2 syscall. */
1027
#if defined(TARGET_ALPHA)
1028
((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1029
return host_pipe[0];
1030
#elif defined(TARGET_MIPS)
1031
((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1032
return host_pipe[0];
1033
#elif defined(TARGET_SH4)
1034
((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1035
return host_pipe[0];
1036
#elif defined(TARGET_SPARC)
1037
((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1038
return host_pipe[0];
1042
if (put_user_s32(host_pipe[0], pipedes)
1043
|| put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1044
return -TARGET_EFAULT;
1045
return get_errno(ret);
1048
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1049
abi_ulong target_addr,
1052
struct target_ip_mreqn *target_smreqn;
1054
target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1056
return -TARGET_EFAULT;
1057
mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1058
mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1059
if (len == sizeof(struct target_ip_mreqn))
1060
mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1061
unlock_user(target_smreqn, target_addr, 0);
1066
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1067
abi_ulong target_addr,
1070
const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1071
sa_family_t sa_family;
1072
struct target_sockaddr *target_saddr;
1074
target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1076
return -TARGET_EFAULT;
1078
sa_family = tswap16(target_saddr->sa_family);
1080
/* Oops. The caller might send a incomplete sun_path; sun_path
1081
* must be terminated by \0 (see the manual page), but
1082
* unfortunately it is quite common to specify sockaddr_un
1083
* length as "strlen(x->sun_path)" while it should be
1084
* "strlen(...) + 1". We'll fix that here if needed.
1085
* Linux kernel has a similar feature.
1088
if (sa_family == AF_UNIX) {
1089
if (len < unix_maxlen && len > 0) {
1090
char *cp = (char*)target_saddr;
1092
if ( cp[len-1] && !cp[len] )
1095
if (len > unix_maxlen)
1099
memcpy(addr, target_saddr, len);
1100
addr->sa_family = sa_family;
1101
unlock_user(target_saddr, target_addr, 0);
1106
/* Copy a host struct sockaddr out to guest memory at target_addr,
 * swapping sa_family to target byte order (the remaining bytes are
 * copied verbatim).  Returns 0 or -TARGET_EFAULT if the guest address
 * cannot be locked for writing. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1122
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1123
struct target_msghdr *target_msgh)
1125
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1126
abi_long msg_controllen;
1127
abi_ulong target_cmsg_addr;
1128
struct target_cmsghdr *target_cmsg;
1129
socklen_t space = 0;
1131
msg_controllen = tswapal(target_msgh->msg_controllen);
1132
if (msg_controllen < sizeof (struct target_cmsghdr))
1134
target_cmsg_addr = tswapal(target_msgh->msg_control);
1135
target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1137
return -TARGET_EFAULT;
1139
while (cmsg && target_cmsg) {
1140
void *data = CMSG_DATA(cmsg);
1141
void *target_data = TARGET_CMSG_DATA(target_cmsg);
1143
int len = tswapal(target_cmsg->cmsg_len)
1144
- TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1146
space += CMSG_SPACE(len);
1147
if (space > msgh->msg_controllen) {
1148
space -= CMSG_SPACE(len);
1149
gemu_log("Host cmsg overflow\n");
1153
if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1154
cmsg->cmsg_level = SOL_SOCKET;
1156
cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1158
cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1159
cmsg->cmsg_len = CMSG_LEN(len);
1161
if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1162
gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1163
memcpy(data, target_data, len);
1165
int *fd = (int *)data;
1166
int *target_fd = (int *)target_data;
1167
int i, numfds = len / sizeof(int);
1169
for (i = 0; i < numfds; i++)
1170
fd[i] = tswap32(target_fd[i]);
1173
cmsg = CMSG_NXTHDR(msgh, cmsg);
1174
target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1176
unlock_user(target_cmsg, target_cmsg_addr, 0);
1178
msgh->msg_controllen = space;
1182
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1183
struct msghdr *msgh)
1185
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1186
abi_long msg_controllen;
1187
abi_ulong target_cmsg_addr;
1188
struct target_cmsghdr *target_cmsg;
1189
socklen_t space = 0;
1191
msg_controllen = tswapal(target_msgh->msg_controllen);
1192
if (msg_controllen < sizeof (struct target_cmsghdr))
1194
target_cmsg_addr = tswapal(target_msgh->msg_control);
1195
target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1197
return -TARGET_EFAULT;
1199
while (cmsg && target_cmsg) {
1200
void *data = CMSG_DATA(cmsg);
1201
void *target_data = TARGET_CMSG_DATA(target_cmsg);
1203
int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1205
space += TARGET_CMSG_SPACE(len);
1206
if (space > msg_controllen) {
1207
space -= TARGET_CMSG_SPACE(len);
1208
gemu_log("Target cmsg overflow\n");
1212
if (cmsg->cmsg_level == SOL_SOCKET) {
1213
target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1215
target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1217
target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1218
target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1220
if ((cmsg->cmsg_level == SOL_SOCKET) &&
1221
(cmsg->cmsg_type == SCM_RIGHTS)) {
1222
int *fd = (int *)data;
1223
int *target_fd = (int *)target_data;
1224
int i, numfds = len / sizeof(int);
1226
for (i = 0; i < numfds; i++)
1227
target_fd[i] = tswap32(fd[i]);
1228
} else if ((cmsg->cmsg_level == SOL_SOCKET) &&
1229
(cmsg->cmsg_type == SO_TIMESTAMP) &&
1230
(len == sizeof(struct timeval))) {
1231
/* copy struct timeval to target */
1232
struct timeval *tv = (struct timeval *)data;
1233
struct target_timeval *target_tv =
1234
(struct target_timeval *)target_data;
1236
target_tv->tv_sec = tswapal(tv->tv_sec);
1237
target_tv->tv_usec = tswapal(tv->tv_usec);
1239
gemu_log("Unsupported ancillary data: %d/%d\n",
1240
cmsg->cmsg_level, cmsg->cmsg_type);
1241
memcpy(target_data, data, len);
1244
cmsg = CMSG_NXTHDR(msgh, cmsg);
1245
target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1247
unlock_user(target_cmsg, target_cmsg_addr, space);
1249
target_msgh->msg_controllen = tswapal(space);
1253
/* do_setsockopt() Must return target values and target errnos. */
1254
static abi_long do_setsockopt(int sockfd, int level, int optname,
1255
abi_ulong optval_addr, socklen_t optlen)
1259
struct ip_mreqn *ip_mreq;
1260
struct ip_mreq_source *ip_mreq_source;
1264
/* TCP options all take an 'int' value. */
1265
if (optlen < sizeof(uint32_t))
1266
return -TARGET_EINVAL;
1268
if (get_user_u32(val, optval_addr))
1269
return -TARGET_EFAULT;
1270
ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1277
case IP_ROUTER_ALERT:
1281
case IP_MTU_DISCOVER:
1287
case IP_MULTICAST_TTL:
1288
case IP_MULTICAST_LOOP:
1290
if (optlen >= sizeof(uint32_t)) {
1291
if (get_user_u32(val, optval_addr))
1292
return -TARGET_EFAULT;
1293
} else if (optlen >= 1) {
1294
if (get_user_u8(val, optval_addr))
1295
return -TARGET_EFAULT;
1297
ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1299
case IP_ADD_MEMBERSHIP:
1300
case IP_DROP_MEMBERSHIP:
1301
if (optlen < sizeof (struct target_ip_mreq) ||
1302
optlen > sizeof (struct target_ip_mreqn))
1303
return -TARGET_EINVAL;
1305
ip_mreq = (struct ip_mreqn *) alloca(optlen);
1306
target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1307
ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1310
case IP_BLOCK_SOURCE:
1311
case IP_UNBLOCK_SOURCE:
1312
case IP_ADD_SOURCE_MEMBERSHIP:
1313
case IP_DROP_SOURCE_MEMBERSHIP:
1314
if (optlen != sizeof (struct target_ip_mreq_source))
1315
return -TARGET_EINVAL;
1317
ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1318
ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1319
unlock_user (ip_mreq_source, optval_addr, 0);
1328
case IPV6_MTU_DISCOVER:
1331
case IPV6_RECVPKTINFO:
1333
if (optlen < sizeof(uint32_t)) {
1334
return -TARGET_EINVAL;
1336
if (get_user_u32(val, optval_addr)) {
1337
return -TARGET_EFAULT;
1339
ret = get_errno(setsockopt(sockfd, level, optname,
1340
&val, sizeof(val)));
1349
/* struct icmp_filter takes an u32 value */
1350
if (optlen < sizeof(uint32_t)) {
1351
return -TARGET_EINVAL;
1354
if (get_user_u32(val, optval_addr)) {
1355
return -TARGET_EFAULT;
1357
ret = get_errno(setsockopt(sockfd, level, optname,
1358
&val, sizeof(val)));
1365
case TARGET_SOL_SOCKET:
1367
case TARGET_SO_RCVTIMEO:
1371
optname = SO_RCVTIMEO;
1374
if (optlen != sizeof(struct target_timeval)) {
1375
return -TARGET_EINVAL;
1378
if (copy_from_user_timeval(&tv, optval_addr)) {
1379
return -TARGET_EFAULT;
1382
ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1386
case TARGET_SO_SNDTIMEO:
1387
optname = SO_SNDTIMEO;
1389
case TARGET_SO_ATTACH_FILTER:
1391
struct target_sock_fprog *tfprog;
1392
struct target_sock_filter *tfilter;
1393
struct sock_fprog fprog;
1394
struct sock_filter *filter;
1397
if (optlen != sizeof(*tfprog)) {
1398
return -TARGET_EINVAL;
1400
if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1401
return -TARGET_EFAULT;
1403
if (!lock_user_struct(VERIFY_READ, tfilter,
1404
tswapal(tfprog->filter), 0)) {
1405
unlock_user_struct(tfprog, optval_addr, 1);
1406
return -TARGET_EFAULT;
1409
fprog.len = tswap16(tfprog->len);
1410
filter = malloc(fprog.len * sizeof(*filter));
1411
if (filter == NULL) {
1412
unlock_user_struct(tfilter, tfprog->filter, 1);
1413
unlock_user_struct(tfprog, optval_addr, 1);
1414
return -TARGET_ENOMEM;
1416
for (i = 0; i < fprog.len; i++) {
1417
filter[i].code = tswap16(tfilter[i].code);
1418
filter[i].jt = tfilter[i].jt;
1419
filter[i].jf = tfilter[i].jf;
1420
filter[i].k = tswap32(tfilter[i].k);
1422
fprog.filter = filter;
1424
ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1425
SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1428
unlock_user_struct(tfilter, tfprog->filter, 1);
1429
unlock_user_struct(tfprog, optval_addr, 1);
1432
/* Options with 'int' argument. */
1433
case TARGET_SO_DEBUG:
1436
case TARGET_SO_REUSEADDR:
1437
optname = SO_REUSEADDR;
1439
case TARGET_SO_TYPE:
1442
case TARGET_SO_ERROR:
1445
case TARGET_SO_DONTROUTE:
1446
optname = SO_DONTROUTE;
1448
case TARGET_SO_BROADCAST:
1449
optname = SO_BROADCAST;
1451
case TARGET_SO_SNDBUF:
1452
optname = SO_SNDBUF;
1454
case TARGET_SO_RCVBUF:
1455
optname = SO_RCVBUF;
1457
case TARGET_SO_KEEPALIVE:
1458
optname = SO_KEEPALIVE;
1460
case TARGET_SO_OOBINLINE:
1461
optname = SO_OOBINLINE;
1463
case TARGET_SO_NO_CHECK:
1464
optname = SO_NO_CHECK;
1466
case TARGET_SO_PRIORITY:
1467
optname = SO_PRIORITY;
1470
case TARGET_SO_BSDCOMPAT:
1471
optname = SO_BSDCOMPAT;
1474
case TARGET_SO_PASSCRED:
1475
optname = SO_PASSCRED;
1477
case TARGET_SO_TIMESTAMP:
1478
optname = SO_TIMESTAMP;
1480
case TARGET_SO_RCVLOWAT:
1481
optname = SO_RCVLOWAT;
1487
if (optlen < sizeof(uint32_t))
1488
return -TARGET_EINVAL;
1490
if (get_user_u32(val, optval_addr))
1491
return -TARGET_EFAULT;
1492
ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1496
gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1497
ret = -TARGET_ENOPROTOOPT;
1502
/* do_getsockopt() Must return target values and target errnos. */
1503
static abi_long do_getsockopt(int sockfd, int level, int optname,
1504
abi_ulong optval_addr, abi_ulong optlen)
1511
case TARGET_SOL_SOCKET:
1514
/* These don't just return a single integer */
1515
case TARGET_SO_LINGER:
1516
case TARGET_SO_RCVTIMEO:
1517
case TARGET_SO_SNDTIMEO:
1518
case TARGET_SO_PEERNAME:
1520
case TARGET_SO_PEERCRED: {
1523
struct target_ucred *tcr;
1525
if (get_user_u32(len, optlen)) {
1526
return -TARGET_EFAULT;
1529
return -TARGET_EINVAL;
1533
ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1541
if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1542
return -TARGET_EFAULT;
1544
__put_user(cr.pid, &tcr->pid);
1545
__put_user(cr.uid, &tcr->uid);
1546
__put_user(cr.gid, &tcr->gid);
1547
unlock_user_struct(tcr, optval_addr, 1);
1548
if (put_user_u32(len, optlen)) {
1549
return -TARGET_EFAULT;
1553
/* Options with 'int' argument. */
1554
case TARGET_SO_DEBUG:
1557
case TARGET_SO_REUSEADDR:
1558
optname = SO_REUSEADDR;
1560
case TARGET_SO_TYPE:
1563
case TARGET_SO_ERROR:
1566
case TARGET_SO_DONTROUTE:
1567
optname = SO_DONTROUTE;
1569
case TARGET_SO_BROADCAST:
1570
optname = SO_BROADCAST;
1572
case TARGET_SO_SNDBUF:
1573
optname = SO_SNDBUF;
1575
case TARGET_SO_RCVBUF:
1576
optname = SO_RCVBUF;
1578
case TARGET_SO_KEEPALIVE:
1579
optname = SO_KEEPALIVE;
1581
case TARGET_SO_OOBINLINE:
1582
optname = SO_OOBINLINE;
1584
case TARGET_SO_NO_CHECK:
1585
optname = SO_NO_CHECK;
1587
case TARGET_SO_PRIORITY:
1588
optname = SO_PRIORITY;
1591
case TARGET_SO_BSDCOMPAT:
1592
optname = SO_BSDCOMPAT;
1595
case TARGET_SO_PASSCRED:
1596
optname = SO_PASSCRED;
1598
case TARGET_SO_TIMESTAMP:
1599
optname = SO_TIMESTAMP;
1601
case TARGET_SO_RCVLOWAT:
1602
optname = SO_RCVLOWAT;
1609
/* TCP options all take an 'int' value. */
1611
if (get_user_u32(len, optlen))
1612
return -TARGET_EFAULT;
1614
return -TARGET_EINVAL;
1616
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1622
if (put_user_u32(val, optval_addr))
1623
return -TARGET_EFAULT;
1625
if (put_user_u8(val, optval_addr))
1626
return -TARGET_EFAULT;
1628
if (put_user_u32(len, optlen))
1629
return -TARGET_EFAULT;
1636
case IP_ROUTER_ALERT:
1640
case IP_MTU_DISCOVER:
1646
case IP_MULTICAST_TTL:
1647
case IP_MULTICAST_LOOP:
1648
if (get_user_u32(len, optlen))
1649
return -TARGET_EFAULT;
1651
return -TARGET_EINVAL;
1653
ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1656
if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1658
if (put_user_u32(len, optlen)
1659
|| put_user_u8(val, optval_addr))
1660
return -TARGET_EFAULT;
1662
if (len > sizeof(int))
1664
if (put_user_u32(len, optlen)
1665
|| put_user_u32(val, optval_addr))
1666
return -TARGET_EFAULT;
1670
ret = -TARGET_ENOPROTOOPT;
1676
gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1678
ret = -TARGET_EOPNOTSUPP;
1684
/* Lock a target iovec array into host memory and build the matching host
 * struct iovec array.  Total length is clamped to max_len so the summed
 * sizes cannot overflow what the syscall layer supports.
 * On failure returns NULL with errno set (EINVAL/ENOMEM/EFAULT); on a
 * zero count returns NULL with errno == 0, which callers must treat as
 * success.  The returned vector must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = calloc(count, sizeof(struct iovec));
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        errno = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            errno = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            if (!vec[i].iov_base) {
                errno = EFAULT;
                goto fail;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    free(vec);
 fail2:
    unlock_user(target_vec, target_addr, 0);
    return NULL;
}
1753
/* Release an iovec previously set up by lock_iovec(), writing back each
 * buffer to target memory when 'copy' is set, then free the host vector.
 * Fix: the element length must be read from iov_len; the corrupted code
 * read iov_base a second time, producing a bogus length.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    free(vec);
}
1776
static inline int target_to_host_sock_type(int *type)
1779
int target_type = *type;
1781
switch (target_type & TARGET_SOCK_TYPE_MASK) {
1782
case TARGET_SOCK_DGRAM:
1783
host_type = SOCK_DGRAM;
1785
case TARGET_SOCK_STREAM:
1786
host_type = SOCK_STREAM;
1789
host_type = target_type & TARGET_SOCK_TYPE_MASK;
1792
if (target_type & TARGET_SOCK_CLOEXEC) {
1793
#if defined(SOCK_CLOEXEC)
1794
host_type |= SOCK_CLOEXEC;
1796
return -TARGET_EINVAL;
1799
if (target_type & TARGET_SOCK_NONBLOCK) {
1800
#if defined(SOCK_NONBLOCK)
1801
host_type |= SOCK_NONBLOCK;
1802
#elif !defined(O_NONBLOCK)
1803
return -TARGET_EINVAL;
1810
/* Try to emulate socket type flags after socket creation. */
1811
static int sock_flags_fixup(int fd, int target_type)
1813
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1814
if (target_type & TARGET_SOCK_NONBLOCK) {
1815
int flags = fcntl(fd, F_GETFL);
1816
if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1818
return -TARGET_EINVAL;
1825
/* do_socket() Must return target values and target errnos. */
1826
static abi_long do_socket(int domain, int type, int protocol)
1828
int target_type = type;
1831
ret = target_to_host_sock_type(&type);
1836
if (domain == PF_NETLINK)
1837
return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
1838
ret = get_errno(socket(domain, type, protocol));
1840
ret = sock_flags_fixup(ret, target_type);
1845
/* do_bind() Must return target values and target errnos. */
1846
static abi_long do_bind(int sockfd, abi_ulong target_addr,
1852
if ((int)addrlen < 0) {
1853
return -TARGET_EINVAL;
1856
addr = alloca(addrlen+1);
1858
ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1862
return get_errno(bind(sockfd, addr, addrlen));
1865
/* do_connect() Must return target values and target errnos. */
1866
static abi_long do_connect(int sockfd, abi_ulong target_addr,
1872
if ((int)addrlen < 0) {
1873
return -TARGET_EINVAL;
1876
addr = alloca(addrlen);
1878
ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1882
return get_errno(connect(sockfd, addr, addrlen));
1885
/* do_sendrecvmsg() Must return target values and target errnos. */
1886
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1887
int flags, int send)
1890
struct target_msghdr *msgp;
1894
abi_ulong target_vec;
1897
if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1901
return -TARGET_EFAULT;
1902
if (msgp->msg_name) {
1903
msg.msg_namelen = tswap32(msgp->msg_namelen);
1904
msg.msg_name = alloca(msg.msg_namelen);
1905
ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1911
msg.msg_name = NULL;
1912
msg.msg_namelen = 0;
1914
msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1915
msg.msg_control = alloca(msg.msg_controllen);
1916
msg.msg_flags = tswap32(msgp->msg_flags);
1918
count = tswapal(msgp->msg_iovlen);
1919
target_vec = tswapal(msgp->msg_iov);
1920
vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1921
target_vec, count, send);
1923
ret = -host_to_target_errno(errno);
1926
msg.msg_iovlen = count;
1930
ret = target_to_host_cmsg(&msg, msgp);
1932
ret = get_errno(sendmsg(fd, &msg, flags));
1934
ret = get_errno(recvmsg(fd, &msg, flags));
1935
if (!is_error(ret)) {
1937
ret = host_to_target_cmsg(msgp, &msg);
1938
if (!is_error(ret)) {
1939
msgp->msg_namelen = tswap32(msg.msg_namelen);
1940
if (msg.msg_name != NULL) {
1941
ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1942
msg.msg_name, msg.msg_namelen);
1954
unlock_iovec(vec, target_vec, count, !send);
1956
unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1960
#ifdef TARGET_NR_sendmmsg
/* Emulate sendmmsg(2) by calling do_sendrecvmsg() once per entry of the
 * target mmsghdr vector, storing each per-message result in msg_len.
 * NOTE(review): a failing do_sendrecvmsg() stores its (negative) error
 * into msg_len rather than stopping the loop — preserved as-is; confirm
 * against sendmmsg(2) semantics.
 */
static abi_long do_sendmmsg(int fd, abi_ulong target_msgvec,
                            unsigned int vlen, unsigned int flags)
{
    struct target_mmsghdr *mmsgp;
    abi_ulong arg2 = target_msgvec;
    int i;

    if (!(mmsgp = lock_user(VERIFY_WRITE, target_msgvec,
                            sizeof(*mmsgp) * vlen, 1))) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        mmsgp[i].msg_len = tswap32(do_sendrecvmsg(fd, arg2, flags, 1));
        arg2 += sizeof(struct target_mmsghdr);
    }

    unlock_user(mmsgp, target_msgvec, 0);
    /* XXX need to handle nonblocking case too */
    return i;
}
#endif
1984
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
1997
/* do_accept4() Must return target values and target errnos. */
1998
static abi_long do_accept4(int fd, abi_ulong target_addr,
1999
abi_ulong target_addrlen_addr, int flags)
2005
if (target_addr == 0) {
2006
return get_errno(accept4(fd, NULL, NULL, flags));
2009
/* linux returns EINVAL if addrlen pointer is invalid */
2010
if (get_user_u32(addrlen, target_addrlen_addr))
2011
return -TARGET_EINVAL;
2013
if ((int)addrlen < 0) {
2014
return -TARGET_EINVAL;
2017
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2018
return -TARGET_EINVAL;
2020
addr = alloca(addrlen);
2022
ret = get_errno(accept4(fd, addr, &addrlen, flags));
2023
if (!is_error(ret)) {
2024
host_to_target_sockaddr(target_addr, addr, addrlen);
2025
if (put_user_u32(addrlen, target_addrlen_addr))
2026
ret = -TARGET_EFAULT;
2031
/* do_getpeername() Must return target values and target errnos. */
2032
static abi_long do_getpeername(int fd, abi_ulong target_addr,
2033
abi_ulong target_addrlen_addr)
2039
if (get_user_u32(addrlen, target_addrlen_addr))
2040
return -TARGET_EFAULT;
2042
if ((int)addrlen < 0) {
2043
return -TARGET_EINVAL;
2046
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2047
return -TARGET_EFAULT;
2049
addr = alloca(addrlen);
2051
ret = get_errno(getpeername(fd, addr, &addrlen));
2052
if (!is_error(ret)) {
2053
host_to_target_sockaddr(target_addr, addr, addrlen);
2054
if (put_user_u32(addrlen, target_addrlen_addr))
2055
ret = -TARGET_EFAULT;
2060
/* do_getsockname() Must return target values and target errnos. */
2061
static abi_long do_getsockname(int fd, abi_ulong target_addr,
2062
abi_ulong target_addrlen_addr)
2068
if (get_user_u32(addrlen, target_addrlen_addr))
2069
return -TARGET_EFAULT;
2071
if ((int)addrlen < 0) {
2072
return -TARGET_EINVAL;
2075
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2076
return -TARGET_EFAULT;
2078
addr = alloca(addrlen);
2080
ret = get_errno(getsockname(fd, addr, &addrlen));
2081
if (!is_error(ret)) {
2082
host_to_target_sockaddr(target_addr, addr, addrlen);
2083
if (put_user_u32(addrlen, target_addrlen_addr))
2084
ret = -TARGET_EFAULT;
2089
/* do_socketpair() Must return target values and target errnos. */
2090
static abi_long do_socketpair(int domain, int type, int protocol,
2091
abi_ulong target_tab_addr)
2096
target_to_host_sock_type(&type);
2098
ret = get_errno(socketpair(domain, type, protocol, tab));
2099
if (!is_error(ret)) {
2100
if (put_user_s32(tab[0], target_tab_addr)
2101
|| put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2102
ret = -TARGET_EFAULT;
2107
/* do_sendto() Must return target values and target errnos. */
2108
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2109
abi_ulong target_addr, socklen_t addrlen)
2115
if ((int)addrlen < 0) {
2116
return -TARGET_EINVAL;
2119
host_msg = lock_user(VERIFY_READ, msg, len, 1);
2121
return -TARGET_EFAULT;
2123
addr = alloca(addrlen);
2124
ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2126
unlock_user(host_msg, msg, 0);
2129
ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2131
ret = get_errno(send(fd, host_msg, len, flags));
2133
unlock_user(host_msg, msg, 0);
2137
/* do_recvfrom() Must return target values and target errnos. */
2138
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2139
abi_ulong target_addr,
2140
abi_ulong target_addrlen)
2147
host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2149
return -TARGET_EFAULT;
2151
if (get_user_u32(addrlen, target_addrlen)) {
2152
ret = -TARGET_EFAULT;
2155
if ((int)addrlen < 0) {
2156
ret = -TARGET_EINVAL;
2159
addr = alloca(addrlen);
2160
ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2162
addr = NULL; /* To keep compiler quiet. */
2163
ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2165
if (!is_error(ret)) {
2167
host_to_target_sockaddr(target_addr, addr, addrlen);
2168
if (put_user_u32(addrlen, target_addrlen)) {
2169
ret = -TARGET_EFAULT;
2173
unlock_user(host_msg, msg, len);
2176
unlock_user(host_msg, msg, 0);
2181
#ifdef TARGET_NR_socketcall
2182
/* do_socketcall() Must return target values and target errnos. */
2183
static abi_long do_socketcall(int num, abi_ulong vptr)
2186
const int n = sizeof(abi_ulong);
2191
abi_ulong domain, type, protocol;
2193
if (get_user_ual(domain, vptr)
2194
|| get_user_ual(type, vptr + n)
2195
|| get_user_ual(protocol, vptr + 2 * n))
2196
return -TARGET_EFAULT;
2198
ret = do_socket(domain, type, protocol);
2204
abi_ulong target_addr;
2207
if (get_user_ual(sockfd, vptr)
2208
|| get_user_ual(target_addr, vptr + n)
2209
|| get_user_ual(addrlen, vptr + 2 * n))
2210
return -TARGET_EFAULT;
2212
ret = do_bind(sockfd, target_addr, addrlen);
2215
case SOCKOP_connect:
2218
abi_ulong target_addr;
2221
if (get_user_ual(sockfd, vptr)
2222
|| get_user_ual(target_addr, vptr + n)
2223
|| get_user_ual(addrlen, vptr + 2 * n))
2224
return -TARGET_EFAULT;
2226
ret = do_connect(sockfd, target_addr, addrlen);
2231
abi_ulong sockfd, backlog;
2233
if (get_user_ual(sockfd, vptr)
2234
|| get_user_ual(backlog, vptr + n))
2235
return -TARGET_EFAULT;
2237
ret = get_errno(listen(sockfd, backlog));
2243
abi_ulong target_addr, target_addrlen;
2245
if (get_user_ual(sockfd, vptr)
2246
|| get_user_ual(target_addr, vptr + n)
2247
|| get_user_ual(target_addrlen, vptr + 2 * n))
2248
return -TARGET_EFAULT;
2250
ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
2253
case SOCKOP_getsockname:
2256
abi_ulong target_addr, target_addrlen;
2258
if (get_user_ual(sockfd, vptr)
2259
|| get_user_ual(target_addr, vptr + n)
2260
|| get_user_ual(target_addrlen, vptr + 2 * n))
2261
return -TARGET_EFAULT;
2263
ret = do_getsockname(sockfd, target_addr, target_addrlen);
2266
case SOCKOP_getpeername:
2269
abi_ulong target_addr, target_addrlen;
2271
if (get_user_ual(sockfd, vptr)
2272
|| get_user_ual(target_addr, vptr + n)
2273
|| get_user_ual(target_addrlen, vptr + 2 * n))
2274
return -TARGET_EFAULT;
2276
ret = do_getpeername(sockfd, target_addr, target_addrlen);
2279
case SOCKOP_socketpair:
2281
abi_ulong domain, type, protocol;
2284
if (get_user_ual(domain, vptr)
2285
|| get_user_ual(type, vptr + n)
2286
|| get_user_ual(protocol, vptr + 2 * n)
2287
|| get_user_ual(tab, vptr + 3 * n))
2288
return -TARGET_EFAULT;
2290
ret = do_socketpair(domain, type, protocol, tab);
2300
if (get_user_ual(sockfd, vptr)
2301
|| get_user_ual(msg, vptr + n)
2302
|| get_user_ual(len, vptr + 2 * n)
2303
|| get_user_ual(flags, vptr + 3 * n))
2304
return -TARGET_EFAULT;
2306
ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2316
if (get_user_ual(sockfd, vptr)
2317
|| get_user_ual(msg, vptr + n)
2318
|| get_user_ual(len, vptr + 2 * n)
2319
|| get_user_ual(flags, vptr + 3 * n))
2320
return -TARGET_EFAULT;
2322
ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2334
if (get_user_ual(sockfd, vptr)
2335
|| get_user_ual(msg, vptr + n)
2336
|| get_user_ual(len, vptr + 2 * n)
2337
|| get_user_ual(flags, vptr + 3 * n)
2338
|| get_user_ual(addr, vptr + 4 * n)
2339
|| get_user_ual(addrlen, vptr + 5 * n))
2340
return -TARGET_EFAULT;
2342
ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2345
case SOCKOP_recvfrom:
2354
if (get_user_ual(sockfd, vptr)
2355
|| get_user_ual(msg, vptr + n)
2356
|| get_user_ual(len, vptr + 2 * n)
2357
|| get_user_ual(flags, vptr + 3 * n)
2358
|| get_user_ual(addr, vptr + 4 * n)
2359
|| get_user_ual(addrlen, vptr + 5 * n))
2360
return -TARGET_EFAULT;
2362
ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2365
case SOCKOP_shutdown:
2367
abi_ulong sockfd, how;
2369
if (get_user_ual(sockfd, vptr)
2370
|| get_user_ual(how, vptr + n))
2371
return -TARGET_EFAULT;
2373
ret = get_errno(shutdown(sockfd, how));
2376
case SOCKOP_sendmsg:
2377
case SOCKOP_recvmsg:
2380
abi_ulong target_msg;
2383
if (get_user_ual(fd, vptr)
2384
|| get_user_ual(target_msg, vptr + n)
2385
|| get_user_ual(flags, vptr + 2 * n))
2386
return -TARGET_EFAULT;
2388
ret = do_sendrecvmsg(fd, target_msg, flags,
2389
(num == SOCKOP_sendmsg));
2392
case SOCKOP_setsockopt:
2400
if (get_user_ual(sockfd, vptr)
2401
|| get_user_ual(level, vptr + n)
2402
|| get_user_ual(optname, vptr + 2 * n)
2403
|| get_user_ual(optval, vptr + 3 * n)
2404
|| get_user_ual(optlen, vptr + 4 * n))
2405
return -TARGET_EFAULT;
2407
ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2410
case SOCKOP_getsockopt:
2418
if (get_user_ual(sockfd, vptr)
2419
|| get_user_ual(level, vptr + n)
2420
|| get_user_ual(optname, vptr + 2 * n)
2421
|| get_user_ual(optval, vptr + 3 * n)
2422
|| get_user_ual(optlen, vptr + 4 * n))
2423
return -TARGET_EFAULT;
2425
ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2429
gemu_log("Unsupported socketcall: %d\n", num);
2430
ret = -TARGET_ENOSYS;
2437
#define N_SHM_REGIONS 32
2439
static struct shm_region {
2442
} shm_regions[N_SHM_REGIONS];
2444
struct target_ipc_perm
2451
unsigned short int mode;
2452
unsigned short int __pad1;
2453
unsigned short int __seq;
2454
unsigned short int __pad2;
2455
abi_ulong __unused1;
2456
abi_ulong __unused2;
2459
struct target_semid_ds
2461
struct target_ipc_perm sem_perm;
2462
abi_ulong sem_otime;
2463
abi_ulong __unused1;
2464
abi_ulong sem_ctime;
2465
abi_ulong __unused2;
2466
abi_ulong sem_nsems;
2467
abi_ulong __unused3;
2468
abi_ulong __unused4;
2471
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2472
abi_ulong target_addr)
2474
struct target_ipc_perm *target_ip;
2475
struct target_semid_ds *target_sd;
2477
if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2478
return -TARGET_EFAULT;
2479
target_ip = &(target_sd->sem_perm);
2480
host_ip->__key = tswapal(target_ip->__key);
2481
host_ip->uid = tswapal(target_ip->uid);
2482
host_ip->gid = tswapal(target_ip->gid);
2483
host_ip->cuid = tswapal(target_ip->cuid);
2484
host_ip->cgid = tswapal(target_ip->cgid);
2485
host_ip->mode = tswap16(target_ip->mode);
2486
unlock_user_struct(target_sd, target_addr, 0);
2490
/* Copy host_ip into the ipc_perm embedded in the target semid_ds at
 * target_addr, byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2509
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2510
abi_ulong target_addr)
2512
struct target_semid_ds *target_sd;
2514
if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2515
return -TARGET_EFAULT;
2516
if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2517
return -TARGET_EFAULT;
2518
host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2519
host_sd->sem_otime = tswapal(target_sd->sem_otime);
2520
host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2521
unlock_user_struct(target_sd, target_addr, 0);
2525
/* Convert a host struct semid_ds to target form at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2541
/* Target-ABI layout of struct seminfo (fields mirror the host's). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2554
/* Copy a host struct seminfo to target memory at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
2576
/* Host semctl() argument union — glibc does not declare it. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
2581
union target_semun {
2588
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2589
abi_ulong target_addr)
2592
unsigned short *array;
2594
struct semid_ds semid_ds;
2597
semun.buf = &semid_ds;
2599
ret = semctl(semid, 0, IPC_STAT, semun);
2601
return get_errno(ret);
2603
nsems = semid_ds.sem_nsems;
2605
*host_array = malloc(nsems*sizeof(unsigned short));
2606
array = lock_user(VERIFY_READ, target_addr,
2607
nsems*sizeof(unsigned short), 1);
2609
return -TARGET_EFAULT;
2611
for(i=0; i<nsems; i++) {
2612
__get_user((*host_array)[i], &array[i]);
2614
unlock_user(array, target_addr, 0);
2619
/* Write the GETALL semaphore value array back to target memory and free
 * the host array allocated by target_to_host_semarray().
 * Fix: also free *host_array when locking target memory fails
 * (previously leaked on that path).
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array) {
        free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
2650
static inline abi_long do_semctl(int semid, int semnum, int cmd,
2651
union target_semun target_su)
2654
struct semid_ds dsarg;
2655
unsigned short *array = NULL;
2656
struct seminfo seminfo;
2657
abi_long ret = -TARGET_EINVAL;
2664
arg.val = tswap32(target_su.val);
2665
ret = get_errno(semctl(semid, semnum, cmd, arg));
2666
target_su.val = tswap32(arg.val);
2670
err = target_to_host_semarray(semid, &array, target_su.array);
2674
ret = get_errno(semctl(semid, semnum, cmd, arg));
2675
err = host_to_target_semarray(semid, target_su.array, &array);
2682
err = target_to_host_semid_ds(&dsarg, target_su.buf);
2686
ret = get_errno(semctl(semid, semnum, cmd, arg));
2687
err = host_to_target_semid_ds(target_su.buf, &dsarg);
2693
arg.__buf = &seminfo;
2694
ret = get_errno(semctl(semid, semnum, cmd, arg));
2695
err = host_to_target_seminfo(target_su.__buf, &seminfo);
2703
ret = get_errno(semctl(semid, semnum, cmd, NULL));
2710
/* Target-ABI layout of struct sembuf. */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in set */
    short sem_op;             /* semaphore operation */
    short sem_flg;            /* operation flags */
};
2716
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2717
abi_ulong target_addr,
2720
struct target_sembuf *target_sembuf;
2723
target_sembuf = lock_user(VERIFY_READ, target_addr,
2724
nsops*sizeof(struct target_sembuf), 1);
2726
return -TARGET_EFAULT;
2728
for(i=0; i<nsops; i++) {
2729
__get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2730
__get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2731
__get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2734
unlock_user(target_sembuf, target_addr, 0);
2739
/* Emulate semop(2): convert the target sembuf array and call the host. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
2749
struct target_msqid_ds
2751
struct target_ipc_perm msg_perm;
2752
abi_ulong msg_stime;
2753
#if TARGET_ABI_BITS == 32
2754
abi_ulong __unused1;
2756
abi_ulong msg_rtime;
2757
#if TARGET_ABI_BITS == 32
2758
abi_ulong __unused2;
2760
abi_ulong msg_ctime;
2761
#if TARGET_ABI_BITS == 32
2762
abi_ulong __unused3;
2764
abi_ulong __msg_cbytes;
2766
abi_ulong msg_qbytes;
2767
abi_ulong msg_lspid;
2768
abi_ulong msg_lrpid;
2769
abi_ulong __unused4;
2770
abi_ulong __unused5;
2773
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2774
abi_ulong target_addr)
2776
struct target_msqid_ds *target_md;
2778
if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2779
return -TARGET_EFAULT;
2780
if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2781
return -TARGET_EFAULT;
2782
host_md->msg_stime = tswapal(target_md->msg_stime);
2783
host_md->msg_rtime = tswapal(target_md->msg_rtime);
2784
host_md->msg_ctime = tswapal(target_md->msg_ctime);
2785
host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2786
host_md->msg_qnum = tswapal(target_md->msg_qnum);
2787
host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2788
host_md->msg_lspid = tswapal(target_md->msg_lspid);
2789
host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2790
unlock_user_struct(target_md, target_addr, 0);
2794
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2795
struct msqid_ds *host_md)
2797
struct target_msqid_ds *target_md;
2799
if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2800
return -TARGET_EFAULT;
2801
if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2802
return -TARGET_EFAULT;
2803
target_md->msg_stime = tswapal(host_md->msg_stime);
2804
target_md->msg_rtime = tswapal(host_md->msg_rtime);
2805
target_md->msg_ctime = tswapal(host_md->msg_ctime);
2806
target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2807
target_md->msg_qnum = tswapal(host_md->msg_qnum);
2808
target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2809
target_md->msg_lspid = tswapal(host_md->msg_lspid);
2810
target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2811
unlock_user_struct(target_md, target_addr, 1);
2815
struct target_msginfo {
2823
unsigned short int msgseg;
2826
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2827
struct msginfo *host_msginfo)
2829
struct target_msginfo *target_msginfo;
2830
if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2831
return -TARGET_EFAULT;
2832
__put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2833
__put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2834
__put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2835
__put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2836
__put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2837
__put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2838
__put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2839
__put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2840
unlock_user_struct(target_msginfo, target_addr, 1);
2844
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2846
struct msqid_ds dsarg;
2847
struct msginfo msginfo;
2848
abi_long ret = -TARGET_EINVAL;
2856
if (target_to_host_msqid_ds(&dsarg,ptr))
2857
return -TARGET_EFAULT;
2858
ret = get_errno(msgctl(msgid, cmd, &dsarg));
2859
if (host_to_target_msqid_ds(ptr,&dsarg))
2860
return -TARGET_EFAULT;
2863
ret = get_errno(msgctl(msgid, cmd, NULL));
2867
ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2868
if (host_to_target_msginfo(ptr, &msginfo))
2869
return -TARGET_EFAULT;
2876
struct target_msgbuf {
2881
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2882
unsigned int msgsz, int msgflg)
2884
struct target_msgbuf *target_mb;
2885
struct msgbuf *host_mb;
2888
if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2889
return -TARGET_EFAULT;
2890
host_mb = malloc(msgsz+sizeof(long));
2891
host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2892
memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2893
ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2895
unlock_user_struct(target_mb, msgp, 0);
2900
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2901
unsigned int msgsz, abi_long msgtyp,
2904
struct target_msgbuf *target_mb;
2906
struct msgbuf *host_mb;
2909
if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2910
return -TARGET_EFAULT;
2912
host_mb = g_malloc(msgsz+sizeof(long));
2913
ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2916
abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2917
target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2918
if (!target_mtext) {
2919
ret = -TARGET_EFAULT;
2922
memcpy(target_mb->mtext, host_mb->mtext, ret);
2923
unlock_user(target_mtext, target_mtext_addr, ret);
2926
target_mb->mtype = tswapal(host_mb->mtype);
2930
unlock_user_struct(target_mb, msgp, 1);
2935
struct target_shmid_ds
2937
struct target_ipc_perm shm_perm;
2938
abi_ulong shm_segsz;
2939
abi_ulong shm_atime;
2940
#if TARGET_ABI_BITS == 32
2941
abi_ulong __unused1;
2943
abi_ulong shm_dtime;
2944
#if TARGET_ABI_BITS == 32
2945
abi_ulong __unused2;
2947
abi_ulong shm_ctime;
2948
#if TARGET_ABI_BITS == 32
2949
abi_ulong __unused3;
2953
abi_ulong shm_nattch;
2954
unsigned long int __unused4;
2955
unsigned long int __unused5;
2958
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2959
abi_ulong target_addr)
2961
struct target_shmid_ds *target_sd;
2963
if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2964
return -TARGET_EFAULT;
2965
if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2966
return -TARGET_EFAULT;
2967
__get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2968
__get_user(host_sd->shm_atime, &target_sd->shm_atime);
2969
__get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2970
__get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2971
__get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2972
__get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2973
__get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2974
unlock_user_struct(target_sd, target_addr, 0);
2978
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2979
struct shmid_ds *host_sd)
2981
struct target_shmid_ds *target_sd;
2983
if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2984
return -TARGET_EFAULT;
2985
if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2986
return -TARGET_EFAULT;
2987
__put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2988
__put_user(host_sd->shm_atime, &target_sd->shm_atime);
2989
__put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2990
__put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2991
__put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2992
__put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2993
__put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2994
unlock_user_struct(target_sd, target_addr, 1);
2998
struct target_shminfo {
3006
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3007
struct shminfo *host_shminfo)
3009
struct target_shminfo *target_shminfo;
3010
if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3011
return -TARGET_EFAULT;
3012
__put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3013
__put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3014
__put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3015
__put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3016
__put_user(host_shminfo->shmall, &target_shminfo->shmall);
3017
unlock_user_struct(target_shminfo, target_addr, 1);
3021
struct target_shm_info {
3026
abi_ulong swap_attempts;
3027
abi_ulong swap_successes;
3030
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3031
struct shm_info *host_shm_info)
3033
struct target_shm_info *target_shm_info;
3034
if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3035
return -TARGET_EFAULT;
3036
__put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3037
__put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3038
__put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3039
__put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3040
__put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3041
__put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3042
unlock_user_struct(target_shm_info, target_addr, 1);
3046
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3048
struct shmid_ds dsarg;
3049
struct shminfo shminfo;
3050
struct shm_info shm_info;
3051
abi_long ret = -TARGET_EINVAL;
3059
if (target_to_host_shmid_ds(&dsarg, buf))
3060
return -TARGET_EFAULT;
3061
ret = get_errno(shmctl(shmid, cmd, &dsarg));
3062
if (host_to_target_shmid_ds(buf, &dsarg))
3063
return -TARGET_EFAULT;
3066
ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3067
if (host_to_target_shminfo(buf, &shminfo))
3068
return -TARGET_EFAULT;
3071
ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3072
if (host_to_target_shm_info(buf, &shm_info))
3073
return -TARGET_EFAULT;
3078
ret = get_errno(shmctl(shmid, cmd, NULL));
3085
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3089
struct shmid_ds shm_info;
3092
/* find out the length of the shared memory segment */
3093
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3094
if (is_error(ret)) {
3095
/* can't get length, bail out */
3102
host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3104
abi_ulong mmap_start;
3106
mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3108
if (mmap_start == -1) {
3110
host_raddr = (void *)-1;
3112
host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3115
if (host_raddr == (void *)-1) {
3117
return get_errno((long)host_raddr);
3119
raddr=h2g((unsigned long)host_raddr);
3121
page_set_flags(raddr, raddr + shm_info.shm_segsz,
3122
PAGE_VALID | PAGE_READ |
3123
((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3125
for (i = 0; i < N_SHM_REGIONS; i++) {
3126
if (shm_regions[i].start == 0) {
3127
shm_regions[i].start = raddr;
3128
shm_regions[i].size = shm_info.shm_segsz;
3138
static inline abi_long do_shmdt(abi_ulong shmaddr)
3142
for (i = 0; i < N_SHM_REGIONS; ++i) {
3143
if (shm_regions[i].start == shmaddr) {
3144
shm_regions[i].start = 0;
3145
page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3150
return get_errno(shmdt(g2h(shmaddr)));
3153
#ifdef TARGET_NR_ipc
3154
/* ??? This only works with linear mappings. */
3155
/* do_ipc() must return target values and target errnos. */
3156
static abi_long do_ipc(unsigned int call, int first,
3157
int second, int third,
3158
abi_long ptr, abi_long fifth)
3163
version = call >> 16;
3168
ret = do_semop(first, ptr, second);
3172
ret = get_errno(semget(first, second, third));
3176
ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3180
ret = get_errno(msgget(first, second));
3184
ret = do_msgsnd(first, ptr, second, third);
3188
ret = do_msgctl(first, second, ptr);
3195
struct target_ipc_kludge {
3200
if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3201
ret = -TARGET_EFAULT;
3205
ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3207
unlock_user_struct(tmp, ptr, 0);
3211
ret = do_msgrcv(first, ptr, second, fifth, third);
3220
raddr = do_shmat(first, ptr, second);
3221
if (is_error(raddr))
3222
return get_errno(raddr);
3223
if (put_user_ual(raddr, third))
3224
return -TARGET_EFAULT;
3228
ret = -TARGET_EINVAL;
3233
ret = do_shmdt(ptr);
3237
/* IPC_* flag values are the same on all linux platforms */
3238
ret = get_errno(shmget(first, second, third));
3241
/* IPC_* and SHM_* command values are the same on all linux platforms */
3243
ret = do_shmctl(first, second, third);
3246
gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3247
ret = -TARGET_ENOSYS;
3254
/* kernel structure types definitions */
3256
#define STRUCT(name, ...) STRUCT_ ## name,
3257
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
3259
#include "syscall_types.h"
3262
#undef STRUCT_SPECIAL
3264
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3265
#define STRUCT_SPECIAL(name)
3266
#include "syscall_types.h"
3268
#undef STRUCT_SPECIAL
3270
typedef struct IOCTLEntry IOCTLEntry;
3272
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3273
int fd, abi_long cmd, abi_long arg);
3276
unsigned int target_cmd;
3277
unsigned int host_cmd;
3280
do_ioctl_fn *do_ioctl;
3281
const argtype arg_type[5];
3284
#define IOC_R 0x0001
3285
#define IOC_W 0x0002
3286
#define IOC_RW (IOC_R | IOC_W)
3288
#define MAX_STRUCT_SIZE 4096
3290
#ifdef CONFIG_FIEMAP
3291
/* So fiemap access checks don't overflow on 32 bit systems.
3292
* This is very slightly smaller than the limit imposed by
3293
* the underlying kernel.
3295
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3296
/ sizeof(struct fiemap_extent))
3298
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3299
int fd, abi_long cmd, abi_long arg)
3301
/* The parameter for this ioctl is a struct fiemap followed
3302
* by an array of struct fiemap_extent whose size is set
3303
* in fiemap->fm_extent_count. The array is filled in by the
3306
int target_size_in, target_size_out;
3308
const argtype *arg_type = ie->arg_type;
3309
const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3312
int i, extent_size = thunk_type_size(extent_arg_type, 0);
3316
assert(arg_type[0] == TYPE_PTR);
3317
assert(ie->access == IOC_RW);
3319
target_size_in = thunk_type_size(arg_type, 0);
3320
argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3322
return -TARGET_EFAULT;
3324
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3325
unlock_user(argptr, arg, 0);
3326
fm = (struct fiemap *)buf_temp;
3327
if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3328
return -TARGET_EINVAL;
3331
outbufsz = sizeof (*fm) +
3332
(sizeof(struct fiemap_extent) * fm->fm_extent_count);
3334
if (outbufsz > MAX_STRUCT_SIZE) {
3335
/* We can't fit all the extents into the fixed size buffer.
3336
* Allocate one that is large enough and use it instead.
3338
fm = malloc(outbufsz);
3340
return -TARGET_ENOMEM;
3342
memcpy(fm, buf_temp, sizeof(struct fiemap));
3345
ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3346
if (!is_error(ret)) {
3347
target_size_out = target_size_in;
3348
/* An extent_count of 0 means we were only counting the extents
3349
* so there are no structs to copy
3351
if (fm->fm_extent_count != 0) {
3352
target_size_out += fm->fm_mapped_extents * extent_size;
3354
argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3356
ret = -TARGET_EFAULT;
3358
/* Convert the struct fiemap */
3359
thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3360
if (fm->fm_extent_count != 0) {
3361
p = argptr + target_size_in;
3362
/* ...and then all the struct fiemap_extents */
3363
for (i = 0; i < fm->fm_mapped_extents; i++) {
3364
thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3369
unlock_user(argptr, arg, target_size_out);
3379
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3380
int fd, abi_long cmd, abi_long arg)
3382
const argtype *arg_type = ie->arg_type;
3386
struct ifconf *host_ifconf;
3388
const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3389
int target_ifreq_size;
3394
abi_long target_ifc_buf;
3398
assert(arg_type[0] == TYPE_PTR);
3399
assert(ie->access == IOC_RW);
3402
target_size = thunk_type_size(arg_type, 0);
3404
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3406
return -TARGET_EFAULT;
3407
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3408
unlock_user(argptr, arg, 0);
3410
host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3411
target_ifc_len = host_ifconf->ifc_len;
3412
target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3414
target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3415
nb_ifreq = target_ifc_len / target_ifreq_size;
3416
host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3418
outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3419
if (outbufsz > MAX_STRUCT_SIZE) {
3420
/* We can't fit all the extents into the fixed size buffer.
3421
* Allocate one that is large enough and use it instead.
3423
host_ifconf = malloc(outbufsz);
3425
return -TARGET_ENOMEM;
3427
memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3430
host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3432
host_ifconf->ifc_len = host_ifc_len;
3433
host_ifconf->ifc_buf = host_ifc_buf;
3435
ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3436
if (!is_error(ret)) {
3437
/* convert host ifc_len to target ifc_len */
3439
nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3440
target_ifc_len = nb_ifreq * target_ifreq_size;
3441
host_ifconf->ifc_len = target_ifc_len;
3443
/* restore target ifc_buf */
3445
host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3447
/* copy struct ifconf to target user */
3449
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3451
return -TARGET_EFAULT;
3452
thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3453
unlock_user(argptr, arg, target_size);
3455
/* copy ifreq[] to target user */
3457
argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3458
for (i = 0; i < nb_ifreq ; i++) {
3459
thunk_convert(argptr + i * target_ifreq_size,
3460
host_ifc_buf + i * sizeof(struct ifreq),
3461
ifreq_arg_type, THUNK_TARGET);
3463
unlock_user(argptr, target_ifc_buf, target_ifc_len);
3473
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3474
abi_long cmd, abi_long arg)
3477
struct dm_ioctl *host_dm;
3478
abi_long guest_data;
3479
uint32_t guest_data_size;
3481
const argtype *arg_type = ie->arg_type;
3483
void *big_buf = NULL;
3487
target_size = thunk_type_size(arg_type, 0);
3488
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3490
ret = -TARGET_EFAULT;
3493
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3494
unlock_user(argptr, arg, 0);
3496
/* buf_temp is too small, so fetch things into a bigger buffer */
3497
big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3498
memcpy(big_buf, buf_temp, target_size);
3502
guest_data = arg + host_dm->data_start;
3503
if ((guest_data - arg) < 0) {
3507
guest_data_size = host_dm->data_size - host_dm->data_start;
3508
host_data = (char*)host_dm + host_dm->data_start;
3510
argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3511
switch (ie->host_cmd) {
3513
case DM_LIST_DEVICES:
3516
case DM_DEV_SUSPEND:
3519
case DM_TABLE_STATUS:
3520
case DM_TABLE_CLEAR:
3522
case DM_LIST_VERSIONS:
3526
case DM_DEV_SET_GEOMETRY:
3527
/* data contains only strings */
3528
memcpy(host_data, argptr, guest_data_size);
3531
memcpy(host_data, argptr, guest_data_size);
3532
*(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3536
void *gspec = argptr;
3537
void *cur_data = host_data;
3538
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3539
int spec_size = thunk_type_size(arg_type, 0);
3542
for (i = 0; i < host_dm->target_count; i++) {
3543
struct dm_target_spec *spec = cur_data;
3547
thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3548
slen = strlen((char*)gspec + spec_size) + 1;
3550
spec->next = sizeof(*spec) + slen;
3551
strcpy((char*)&spec[1], gspec + spec_size);
3553
cur_data += spec->next;
3558
ret = -TARGET_EINVAL;
3561
unlock_user(argptr, guest_data, 0);
3563
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3564
if (!is_error(ret)) {
3565
guest_data = arg + host_dm->data_start;
3566
guest_data_size = host_dm->data_size - host_dm->data_start;
3567
argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3568
switch (ie->host_cmd) {
3573
case DM_DEV_SUSPEND:
3576
case DM_TABLE_CLEAR:
3578
case DM_DEV_SET_GEOMETRY:
3579
/* no return data */
3581
case DM_LIST_DEVICES:
3583
struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3584
uint32_t remaining_data = guest_data_size;
3585
void *cur_data = argptr;
3586
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3587
int nl_size = 12; /* can't use thunk_size due to alignment */
3590
uint32_t next = nl->next;
3592
nl->next = nl_size + (strlen(nl->name) + 1);
3594
if (remaining_data < nl->next) {
3595
host_dm->flags |= DM_BUFFER_FULL_FLAG;
3598
thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3599
strcpy(cur_data + nl_size, nl->name);
3600
cur_data += nl->next;
3601
remaining_data -= nl->next;
3605
nl = (void*)nl + next;
3610
case DM_TABLE_STATUS:
3612
struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3613
void *cur_data = argptr;
3614
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3615
int spec_size = thunk_type_size(arg_type, 0);
3618
for (i = 0; i < host_dm->target_count; i++) {
3619
uint32_t next = spec->next;
3620
int slen = strlen((char*)&spec[1]) + 1;
3621
spec->next = (cur_data - argptr) + spec_size + slen;
3622
if (guest_data_size < spec->next) {
3623
host_dm->flags |= DM_BUFFER_FULL_FLAG;
3626
thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3627
strcpy(cur_data + spec_size, (char*)&spec[1]);
3628
cur_data = argptr + spec->next;
3629
spec = (void*)host_dm + host_dm->data_start + next;
3635
void *hdata = (void*)host_dm + host_dm->data_start;
3636
int count = *(uint32_t*)hdata;
3637
uint64_t *hdev = hdata + 8;
3638
uint64_t *gdev = argptr + 8;
3641
*(uint32_t*)argptr = tswap32(count);
3642
for (i = 0; i < count; i++) {
3643
*gdev = tswap64(*hdev);
3649
case DM_LIST_VERSIONS:
3651
struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3652
uint32_t remaining_data = guest_data_size;
3653
void *cur_data = argptr;
3654
const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3655
int vers_size = thunk_type_size(arg_type, 0);
3658
uint32_t next = vers->next;
3660
vers->next = vers_size + (strlen(vers->name) + 1);
3662
if (remaining_data < vers->next) {
3663
host_dm->flags |= DM_BUFFER_FULL_FLAG;
3666
thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3667
strcpy(cur_data + vers_size, vers->name);
3668
cur_data += vers->next;
3669
remaining_data -= vers->next;
3673
vers = (void*)vers + next;
3678
ret = -TARGET_EINVAL;
3681
unlock_user(argptr, guest_data, guest_data_size);
3683
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3685
ret = -TARGET_EFAULT;
3688
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3689
unlock_user(argptr, arg, target_size);
3696
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3697
int fd, abi_long cmd, abi_long arg)
3699
const argtype *arg_type = ie->arg_type;
3700
const StructEntry *se;
3701
const argtype *field_types;
3702
const int *dst_offsets, *src_offsets;
3705
abi_ulong *target_rt_dev_ptr;
3706
unsigned long *host_rt_dev_ptr;
3710
assert(ie->access == IOC_W);
3711
assert(*arg_type == TYPE_PTR);
3713
assert(*arg_type == TYPE_STRUCT);
3714
target_size = thunk_type_size(arg_type, 0);
3715
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3717
return -TARGET_EFAULT;
3720
assert(*arg_type == (int)STRUCT_rtentry);
3721
se = struct_entries + *arg_type++;
3722
assert(se->convert[0] == NULL);
3723
/* convert struct here to be able to catch rt_dev string */
3724
field_types = se->field_types;
3725
dst_offsets = se->field_offsets[THUNK_HOST];
3726
src_offsets = se->field_offsets[THUNK_TARGET];
3727
for (i = 0; i < se->nb_fields; i++) {
3728
if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3729
assert(*field_types == TYPE_PTRVOID);
3730
target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3731
host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3732
if (*target_rt_dev_ptr != 0) {
3733
*host_rt_dev_ptr = (unsigned long)lock_user_string(
3734
tswapal(*target_rt_dev_ptr));
3735
if (!*host_rt_dev_ptr) {
3736
unlock_user(argptr, arg, 0);
3737
return -TARGET_EFAULT;
3740
*host_rt_dev_ptr = 0;
3745
field_types = thunk_convert(buf_temp + dst_offsets[i],
3746
argptr + src_offsets[i],
3747
field_types, THUNK_HOST);
3749
unlock_user(argptr, arg, 0);
3751
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3752
if (*host_rt_dev_ptr != 0) {
3753
unlock_user((void *)*host_rt_dev_ptr,
3754
*target_rt_dev_ptr, 0);
3759
static IOCTLEntry ioctl_entries[] = {
3760
#define IOCTL(cmd, access, ...) \
3761
{ TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3762
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3763
{ TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3768
/* ??? Implement proper locking for ioctls. */
3769
/* do_ioctl() Must return target values and target errnos. */
3770
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3772
const IOCTLEntry *ie;
3773
const argtype *arg_type;
3775
uint8_t buf_temp[MAX_STRUCT_SIZE];
3781
if (ie->target_cmd == 0) {
3782
gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3783
return -TARGET_ENOSYS;
3785
if (ie->target_cmd == cmd)
3789
arg_type = ie->arg_type;
3791
gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3794
return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3797
switch(arg_type[0]) {
3800
ret = get_errno(ioctl(fd, ie->host_cmd));
3805
ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3809
target_size = thunk_type_size(arg_type, 0);
3810
switch(ie->access) {
3812
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3813
if (!is_error(ret)) {
3814
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3816
return -TARGET_EFAULT;
3817
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3818
unlock_user(argptr, arg, target_size);
3822
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3824
return -TARGET_EFAULT;
3825
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3826
unlock_user(argptr, arg, 0);
3827
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3831
argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3833
return -TARGET_EFAULT;
3834
thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3835
unlock_user(argptr, arg, 0);
3836
ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3837
if (!is_error(ret)) {
3838
argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3840
return -TARGET_EFAULT;
3841
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3842
unlock_user(argptr, arg, target_size);
3848
gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3849
(long)cmd, arg_type[0]);
3850
ret = -TARGET_ENOSYS;
3856
static const bitmask_transtbl iflag_tbl[] = {
3857
{ TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3858
{ TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3859
{ TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3860
{ TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3861
{ TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3862
{ TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3863
{ TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3864
{ TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3865
{ TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3866
{ TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3867
{ TARGET_IXON, TARGET_IXON, IXON, IXON },
3868
{ TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3869
{ TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3870
{ TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3874
static const bitmask_transtbl oflag_tbl[] = {
3875
{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3876
{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3877
{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3878
{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3879
{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3880
{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3881
{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3882
{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3883
{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3884
{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3885
{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3886
{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3887
{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3888
{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3889
{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3890
{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3891
{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3892
{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3893
{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3894
{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3895
{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3896
{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3897
{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3898
{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3902
static const bitmask_transtbl cflag_tbl[] = {
3903
{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3904
{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3905
{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3906
{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3907
{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3908
{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3909
{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3910
{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3911
{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3912
{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3913
{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3914
{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3915
{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3916
{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3917
{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3918
{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3919
{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3920
{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3921
{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3922
{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3923
{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3924
{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3925
{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3926
{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3927
{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3928
{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3929
{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3930
{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3931
{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3932
{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3933
{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3937
static const bitmask_transtbl lflag_tbl[] = {
3938
{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3939
{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3940
{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3941
{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3942
{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3943
{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3944
{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3945
{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3946
{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3947
{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3948
{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3949
{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3950
{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3951
{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3952
{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3956
static void target_to_host_termios (void *dst, const void *src)
3958
struct host_termios *host = dst;
3959
const struct target_termios *target = src;
3962
target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3964
target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3966
target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3968
target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3969
host->c_line = target->c_line;
3971
memset(host->c_cc, 0, sizeof(host->c_cc));
3972
host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3973
host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3974
host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3975
host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3976
host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3977
host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3978
host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3979
host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3980
host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3981
host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3982
host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3983
host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3984
host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3985
host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3986
host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3987
host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3988
host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3991
static void host_to_target_termios (void *dst, const void *src)
3993
struct target_termios *target = dst;
3994
const struct host_termios *host = src;
3997
tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3999
tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4001
tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4003
tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4004
target->c_line = host->c_line;
4006
memset(target->c_cc, 0, sizeof(target->c_cc));
4007
target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4008
target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4009
target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4010
target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4011
target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4012
target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4013
target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4014
target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4015
target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4016
target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4017
target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4018
target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4019
target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4020
target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4021
target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4022
target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4023
target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4026
static const StructEntry struct_termios_def = {
4027
.convert = { host_to_target_termios, target_to_host_termios },
4028
.size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4029
.align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4032
static bitmask_transtbl mmap_flags_tbl[] = {
4033
{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4034
{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4035
{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4036
{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4037
{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4038
{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4039
{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4040
{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4044
#if defined(TARGET_I386)
4046
/* NOTE: there is really one LDT for all the threads */
4047
static uint8_t *ldt_table;
4049
/* Read the emulated LDT back to guest memory (modify_ldt func 0).
   Returns the number of bytes copied, 0 if no LDT has been allocated yet,
   or -TARGET_EFAULT if the guest buffer cannot be locked. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
4069
static abi_long write_ldt(CPUX86State *env,
4070
abi_ulong ptr, unsigned long bytecount, int oldmode)
4072
struct target_modify_ldt_ldt_s ldt_info;
4073
struct target_modify_ldt_ldt_s *target_ldt_info;
4074
int seg_32bit, contents, read_exec_only, limit_in_pages;
4075
int seg_not_present, useable, lm;
4076
uint32_t *lp, entry_1, entry_2;
4078
if (bytecount != sizeof(ldt_info))
4079
return -TARGET_EINVAL;
4080
if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4081
return -TARGET_EFAULT;
4082
ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4083
ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4084
ldt_info.limit = tswap32(target_ldt_info->limit);
4085
ldt_info.flags = tswap32(target_ldt_info->flags);
4086
unlock_user_struct(target_ldt_info, ptr, 0);
4088
if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4089
return -TARGET_EINVAL;
4090
seg_32bit = ldt_info.flags & 1;
4091
contents = (ldt_info.flags >> 1) & 3;
4092
read_exec_only = (ldt_info.flags >> 3) & 1;
4093
limit_in_pages = (ldt_info.flags >> 4) & 1;
4094
seg_not_present = (ldt_info.flags >> 5) & 1;
4095
useable = (ldt_info.flags >> 6) & 1;
4099
lm = (ldt_info.flags >> 7) & 1;
4101
if (contents == 3) {
4103
return -TARGET_EINVAL;
4104
if (seg_not_present == 0)
4105
return -TARGET_EINVAL;
4107
/* allocate the LDT */
4109
env->ldt.base = target_mmap(0,
4110
TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4111
PROT_READ|PROT_WRITE,
4112
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4113
if (env->ldt.base == -1)
4114
return -TARGET_ENOMEM;
4115
memset(g2h(env->ldt.base), 0,
4116
TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4117
env->ldt.limit = 0xffff;
4118
ldt_table = g2h(env->ldt.base);
4121
/* NOTE: same code as Linux kernel */
4122
/* Allow LDTs to be cleared by the user. */
4123
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4126
read_exec_only == 1 &&
4128
limit_in_pages == 0 &&
4129
seg_not_present == 1 &&
4137
entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4138
(ldt_info.limit & 0x0ffff);
4139
entry_2 = (ldt_info.base_addr & 0xff000000) |
4140
((ldt_info.base_addr & 0x00ff0000) >> 16) |
4141
(ldt_info.limit & 0xf0000) |
4142
((read_exec_only ^ 1) << 9) |
4144
((seg_not_present ^ 1) << 15) |
4146
(limit_in_pages << 23) |
4150
entry_2 |= (useable << 20);
4152
/* Install the new entry ... */
4154
lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4155
lp[0] = tswap32(entry_1);
4156
lp[1] = tswap32(entry_2);
4160
/* specific and weird i386 syscalls */
4161
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4162
unsigned long bytecount)
4168
ret = read_ldt(ptr, bytecount);
4171
ret = write_ldt(env, ptr, bytecount, 1);
4174
ret = write_ldt(env, ptr, bytecount, 0);
4177
ret = -TARGET_ENOSYS;
4183
#if defined(TARGET_I386) && defined(TARGET_ABI32)
4184
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
   An entry_number of -1 asks for the first free TLS slot, which is written
   back to the guest's user_desc.  Encoding mirrors the Linux kernel. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2): decode the GDT descriptor selected by the guest's
   entry_number back into a struct user_desc (base/limit/flags). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Invert the encoding performed by do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
4317
#ifndef TARGET_ABI32
4318
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4325
case TARGET_ARCH_SET_GS:
4326
case TARGET_ARCH_SET_FS:
4327
if (code == TARGET_ARCH_SET_GS)
4331
cpu_x86_load_seg(env, idx, 0);
4332
env->segs[idx].base = addr;
4334
case TARGET_ARCH_GET_GS:
4335
case TARGET_ARCH_GET_FS:
4336
if (code == TARGET_ARCH_GET_GS)
4340
val = env->segs[idx].base;
4341
if (put_user(val, addr, abi_ulong))
4342
ret = -TARGET_EFAULT;
4345
ret = -TARGET_EINVAL;
4352
#endif /* defined(TARGET_I386) */
4354
#define NEW_STACK_SIZE 0x40000
4357
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4360
pthread_mutex_t mutex;
4361
pthread_cond_t cond;
4364
abi_ulong child_tidptr;
4365
abi_ulong parent_tidptr;
4369
static void *clone_func(void *arg)
4371
new_thread_info *info = arg;
4377
cpu = ENV_GET_CPU(env);
4379
ts = (TaskState *)env->opaque;
4380
info->tid = gettid();
4381
cpu->host_tid = info->tid;
4383
if (info->child_tidptr)
4384
put_user_u32(info->tid, info->child_tidptr);
4385
if (info->parent_tidptr)
4386
put_user_u32(info->tid, info->parent_tidptr);
4387
/* Enable signals. */
4388
sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4389
/* Signal to the parent that we're ready. */
4390
pthread_mutex_lock(&info->mutex);
4391
pthread_cond_broadcast(&info->cond);
4392
pthread_mutex_unlock(&info->mutex);
4393
/* Wait until the parent has finshed initializing the tls state. */
4394
pthread_mutex_lock(&clone_lock);
4395
pthread_mutex_unlock(&clone_lock);
4401
/* do_fork() Must return host values and target errnos (unlike most
4402
do_*() functions). */
4403
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4404
abi_ulong parent_tidptr, target_ulong newtls,
4405
abi_ulong child_tidptr)
4409
CPUArchState *new_env;
4410
unsigned int nptl_flags;
4413
/* Emulate vfork() with fork() */
4414
if (flags & CLONE_VFORK)
4415
flags &= ~(CLONE_VFORK | CLONE_VM);
4417
if (flags & CLONE_VM) {
4418
TaskState *parent_ts = (TaskState *)env->opaque;
4419
new_thread_info info;
4420
pthread_attr_t attr;
4422
ts = g_malloc0(sizeof(TaskState));
4423
init_task_state(ts);
4424
/* we create a new CPU instance. */
4425
new_env = cpu_copy(env);
4426
/* Init regs that differ from the parent. */
4427
cpu_clone_regs(new_env, newsp);
4428
new_env->opaque = ts;
4429
ts->bprm = parent_ts->bprm;
4430
ts->info = parent_ts->info;
4432
flags &= ~CLONE_NPTL_FLAGS2;
4434
if (nptl_flags & CLONE_CHILD_CLEARTID) {
4435
ts->child_tidptr = child_tidptr;
4438
if (nptl_flags & CLONE_SETTLS)
4439
cpu_set_tls (new_env, newtls);
4441
/* Grab a mutex so that thread setup appears atomic. */
4442
pthread_mutex_lock(&clone_lock);
4444
memset(&info, 0, sizeof(info));
4445
pthread_mutex_init(&info.mutex, NULL);
4446
pthread_mutex_lock(&info.mutex);
4447
pthread_cond_init(&info.cond, NULL);
4449
if (nptl_flags & CLONE_CHILD_SETTID)
4450
info.child_tidptr = child_tidptr;
4451
if (nptl_flags & CLONE_PARENT_SETTID)
4452
info.parent_tidptr = parent_tidptr;
4454
ret = pthread_attr_init(&attr);
4455
ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4456
ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4457
/* It is not safe to deliver signals until the child has finished
4458
initializing, so temporarily block all signals. */
4459
sigfillset(&sigmask);
4460
sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4462
ret = pthread_create(&info.thread, &attr, clone_func, &info);
4463
/* TODO: Free new CPU state if thread creation failed. */
4465
sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4466
pthread_attr_destroy(&attr);
4468
/* Wait for the child to initialize. */
4469
pthread_cond_wait(&info.cond, &info.mutex);
4471
if (flags & CLONE_PARENT_SETTID)
4472
put_user_u32(ret, parent_tidptr);
4476
pthread_mutex_unlock(&info.mutex);
4477
pthread_cond_destroy(&info.cond);
4478
pthread_mutex_destroy(&info.mutex);
4479
pthread_mutex_unlock(&clone_lock);
4481
/* if no CLONE_VM, we consider it is a fork */
4482
if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4487
/* Child Process. */
4488
cpu_clone_regs(env, newsp);
4490
/* There is a race condition here. The parent process could
4491
theoretically read the TID in the child process before the child
4492
tid is set. This would require using either ptrace
4493
(not implemented) or having *_tidptr to point at a shared memory
4494
mapping. We can't repeat the spinlock hack used above because
4495
the child process gets its own copy of the lock. */
4496
if (flags & CLONE_CHILD_SETTID)
4497
put_user_u32(gettid(), child_tidptr);
4498
if (flags & CLONE_PARENT_SETTID)
4499
put_user_u32(gettid(), parent_tidptr);
4500
ts = (TaskState *)env->opaque;
4501
if (flags & CLONE_SETTLS)
4502
cpu_set_tls (env, newtls);
4503
if (flags & CLONE_CHILD_CLEARTID)
4504
ts->child_tidptr = child_tidptr;
4512
/* warning : doesn't handle linux specific flags... */
4513
static int target_to_host_fcntl_cmd(int cmd)
4516
case TARGET_F_DUPFD:
4517
case TARGET_F_GETFD:
4518
case TARGET_F_SETFD:
4519
case TARGET_F_GETFL:
4520
case TARGET_F_SETFL:
4522
case TARGET_F_GETLK:
4524
case TARGET_F_SETLK:
4526
case TARGET_F_SETLKW:
4528
case TARGET_F_GETOWN:
4530
case TARGET_F_SETOWN:
4532
case TARGET_F_GETSIG:
4534
case TARGET_F_SETSIG:
4536
#if TARGET_ABI_BITS == 32
4537
case TARGET_F_GETLK64:
4539
case TARGET_F_SETLK64:
4541
case TARGET_F_SETLKW64:
4544
case TARGET_F_SETLEASE:
4546
case TARGET_F_GETLEASE:
4548
#ifdef F_DUPFD_CLOEXEC
4549
case TARGET_F_DUPFD_CLOEXEC:
4550
return F_DUPFD_CLOEXEC;
4552
case TARGET_F_NOTIFY:
4555
return -TARGET_EINVAL;
4557
return -TARGET_EINVAL;
4560
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4561
static const bitmask_transtbl flock_tbl[] = {
4562
TRANSTBL_CONVERT(F_RDLCK),
4563
TRANSTBL_CONVERT(F_WRLCK),
4564
TRANSTBL_CONVERT(F_UNLCK),
4565
TRANSTBL_CONVERT(F_EXLCK),
4566
TRANSTBL_CONVERT(F_SHLCK),
4570
/* Emulate fcntl(2): translate the command, marshal struct flock /
   struct flock64 and the GETFL/SETFL flag bitmask between guest and host.
   Note the flock64 l_type path shifts the translated bitmask right by one,
   matching the table encoding used for the 64-bit layout. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
static inline int high2lowuid(int uid)
4695
static inline int high2lowgid(int gid)
4703
static inline int low2highuid(int uid)
4705
if ((int16_t)uid == -1)
4711
static inline int low2highgid(int gid)
4713
if ((int16_t)gid == -1)
4718
static inline int tswapid(int id)
4722
#else /* !USE_UID16 */
4723
static inline int high2lowuid(int uid)
4727
static inline int high2lowgid(int gid)
4731
static inline int low2highuid(int uid)
4735
static inline int low2highgid(int gid)
4739
static inline int tswapid(int id)
4743
#endif /* USE_UID16 */
4745
void syscall_init(void)
4748
const argtype *arg_type;
4752
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4753
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4754
#include "syscall_types.h"
4756
#undef STRUCT_SPECIAL
4758
/* Build target_to_host_errno_table[] table from
4759
* host_to_target_errno_table[]. */
4760
for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4761
target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4764
/* we patch the ioctl size if necessary. We rely on the fact that
4765
no ioctl has all the bits at '1' in the size field */
4767
while (ie->target_cmd != 0) {
4768
if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4769
TARGET_IOC_SIZEMASK) {
4770
arg_type = ie->arg_type;
4771
if (arg_type[0] != TYPE_PTR) {
4772
fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4777
size = thunk_type_size(arg_type, 0);
4778
ie->target_cmd = (ie->target_cmd &
4779
~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4780
(size << TARGET_IOC_SIZESHIFT);
4783
/* automatic consistency check if same arch */
4784
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4785
(defined(__x86_64__) && defined(TARGET_X86_64))
4786
if (unlikely(ie->target_cmd != ie->host_cmd)) {
4787
fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4788
ie->name, ie->target_cmd, ie->host_cmd);
4795
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit syscall register halves into a 64-bit offset,
   respecting the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives whole in the first word. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs pass 64-bit register pairs aligned, skipping a
   slot, so shift the argument window before combining the halves. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4840
abi_ulong target_addr)
4842
struct target_timespec *target_ts;
4844
if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4845
return -TARGET_EFAULT;
4846
host_ts->tv_sec = tswapal(target_ts->tv_sec);
4847
host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4848
unlock_user_struct(target_ts, target_addr, 0);
4852
/* Copy *host_ts out to the guest struct timespec at target_addr,
   byte-swapping both fields.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Marshal a host struct stat into the guest's stat64 layout at target_addr.
   ARM EABI 32-bit guests use their own padded layout; everyone else uses
   target_stat64 (or target_stat when the target lacks a 64-bit variant). */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
/* ??? Using host futex calls even when target atomic operations
4931
are not really atomic probably breaks things. However implementing
4932
futexes locally would make futexes shared between multiple processes
4933
tricky. However they're probably useless because guest atomic
4934
operations won't work either. */
4935
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4936
target_ulong uaddr2, int val3)
4938
struct timespec ts, *pts;
4941
/* ??? We assume FUTEX_* constants are the same on both host
4943
#ifdef FUTEX_CMD_MASK
4944
base_op = op & FUTEX_CMD_MASK;
4950
case FUTEX_WAIT_BITSET:
4953
target_to_host_timespec(pts, timeout);
4957
return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4960
return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4962
return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4964
case FUTEX_CMP_REQUEUE:
4966
/* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4967
TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4968
But the prototype takes a `struct timespec *'; insert casts
4969
to satisfy the compiler. We do not need to tswap TIMEOUT
4970
since it's not compared to guest memory. */
4971
pts = (struct timespec *)(uintptr_t) timeout;
4972
return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4974
(base_op == FUTEX_CMP_REQUEUE
4978
return -TARGET_ENOSYS;
4982
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
static int relstr_to_int(const char *s)
{
    /* Convert a uname release string like "2.6.18" to an integer
     * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
     * Missing components (e.g. "3.10") contribute zero bytes.
     */
    int i, n, tmp = 0;

    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.') {
            s++;
        }
    }
    return tmp;
}
int get_osversion(void)
5021
static int osversion;
5022
struct new_utsname buf;
5027
if (qemu_uname_release && *qemu_uname_release) {
5028
s = qemu_uname_release;
5030
if (sys_uname(&buf))
5034
osversion = relstr_to_int(s);
5038
void init_qemu_uname_release(void)
{
    /* Initialize qemu_uname_release for later use.
     * If the host kernel is too old and the user hasn't asked for
     * a specific fake version number, we might want to fake a minimum
     * target kernel version.
     */
#ifdef UNAME_MINIMUM_RELEASE
    struct new_utsname buf;

    /* User already picked a release via -r; leave it alone. */
    if (qemu_uname_release && *qemu_uname_release) {
        return;
    }

    if (sys_uname(&buf)) {
        return;
    }

    if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
        qemu_uname_release = UNAME_MINIMUM_RELEASE;
    }
#endif
}
static int open_self_maps(void *cpu_env, int fd)
5064
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5065
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5072
fp = fopen("/proc/self/maps", "r");
5077
while ((read = getline(&line, &len, fp)) != -1) {
5078
int fields, dev_maj, dev_min, inode;
5079
uint64_t min, max, offset;
5080
char flag_r, flag_w, flag_x, flag_p;
5081
char path[512] = "";
5082
fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5083
" %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5084
&flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5086
if ((fields < 10) || (fields > 11)) {
5089
if (!strncmp(path, "[stack]", 7)) {
5092
if (h2g_valid(min) && h2g_valid(max)) {
5093
dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5094
" %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5095
h2g(min), h2g(max), flag_r, flag_w,
5096
flag_x, flag_p, offset, dev_maj, dev_min, inode,
5097
path[0] ? " " : "", path);
5104
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5105
dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5106
(unsigned long long)ts->info->stack_limit,
5107
(unsigned long long)(ts->info->start_stack +
5108
(TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5109
(unsigned long long)0);
5115
static int open_self_stat(void *cpu_env, int fd)
5117
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5118
abi_ulong start_stack = ts->info->start_stack;
5121
for (i = 0; i < 44; i++) {
5129
snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5130
} else if (i == 1) {
5132
snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5133
} else if (i == 27) {
5136
snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5138
/* for the rest, there is MasterCard */
5139
snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5143
if (write(fd, buf, len) != len) {
5151
static int open_self_auxv(void *cpu_env, int fd)
5153
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
5154
abi_ulong auxv = ts->info->saved_auxv;
5155
abi_ulong len = ts->info->auxv_len;
5159
* Auxiliary vector is stored in target process stack.
5160
* read in whole auxv vector and copy it to file
5162
ptr = lock_user(VERIFY_READ, auxv, len, 0);
5166
r = write(fd, ptr, len);
5173
lseek(fd, 0, SEEK_SET);
5174
unlock_user(ptr, auxv, len);
5180
/* Return 1 if filename names the given entry of this process's /proc
   directory: "/proc/self/<entry>" or "/proc/<our pid>/<entry>". */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake-open table when host and guest
   byte orders differ (so some /proc files need byte-swapped contents). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
static int open_net_route(void *cpu_env, int fd)
5217
fp = fopen("/proc/net/route", "r");
5224
read = getline(&line, &len, fp);
5225
dprintf(fd, "%s", line);
5229
while ((read = getline(&line, &len, fp)) != -1) {
5231
uint32_t dest, gw, mask;
5232
unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5233
sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5234
iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5235
&mask, &mtu, &window, &irtt);
5236
dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5237
iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5238
metric, tswap32(mask), mtu, window, irtt);
5248
/* open(2) front-end: intercept /proc files we must fake (maps, stat, auxv,
   and /proc/net/route on cross-endian builds).  A matched entry is rendered
   into an unlinked temp file whose fd is returned; everything else goes to
   the real host open() via path(). */
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}
/* do_syscall() should always have a single exit point at the end so
5301
that actions, such as logging of syscall results, can be performed.
5302
All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5303
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5304
abi_long arg2, abi_long arg3, abi_long arg4,
5305
abi_long arg5, abi_long arg6, abi_long arg7,
5308
CPUState *cpu = ENV_GET_CPU(cpu_env);
5315
gemu_log("syscall %d", num);
5318
print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5321
case TARGET_NR_exit:
5322
/* In old applications this may be used to implement _exit(2).
5323
However in threaded applictions it is used for thread termination,
5324
and _exit_group is used for application termination.
5325
Do thread termination if we have more then one thread. */
5326
/* FIXME: This probably breaks if a signal arrives. We should probably
5327
be disabling signals. */
5328
if (CPU_NEXT(first_cpu)) {
5332
/* Remove the CPU from the list. */
5333
QTAILQ_REMOVE(&cpus, cpu, node);
5335
ts = ((CPUArchState *)cpu_env)->opaque;
5336
if (ts->child_tidptr) {
5337
put_user_u32(0, ts->child_tidptr);
5338
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5342
object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
5349
gdb_exit(cpu_env, arg1);
5351
ret = 0; /* avoid warning */
5353
case TARGET_NR_read:
5357
if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5359
ret = get_errno(read(arg1, p, arg3));
5360
unlock_user(p, arg2, ret);
5363
case TARGET_NR_write:
5364
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5366
ret = get_errno(write(arg1, p, arg3));
5367
unlock_user(p, arg2, 0);
5369
case TARGET_NR_open:
5370
if (!(p = lock_user_string(arg1)))
5372
ret = get_errno(do_open(cpu_env, p,
5373
target_to_host_bitmask(arg2, fcntl_flags_tbl),
5375
unlock_user(p, arg1, 0);
5377
#if defined(TARGET_NR_openat) && defined(__NR_openat)
5378
case TARGET_NR_openat:
5379
if (!(p = lock_user_string(arg2)))
5381
ret = get_errno(sys_openat(arg1,
5383
target_to_host_bitmask(arg3, fcntl_flags_tbl),
5385
unlock_user(p, arg2, 0);
5388
case TARGET_NR_close:
5389
ret = get_errno(close(arg1));
5394
case TARGET_NR_fork:
5395
ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5397
#ifdef TARGET_NR_waitpid
5398
case TARGET_NR_waitpid:
5401
ret = get_errno(waitpid(arg1, &status, arg3));
5402
if (!is_error(ret) && arg2 && ret
5403
&& put_user_s32(host_to_target_waitstatus(status), arg2))
5408
#ifdef TARGET_NR_waitid
5409
case TARGET_NR_waitid:
5413
ret = get_errno(waitid(arg1, arg2, &info, arg4));
5414
if (!is_error(ret) && arg3 && info.si_pid != 0) {
5415
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5417
host_to_target_siginfo(p, &info);
5418
unlock_user(p, arg3, sizeof(target_siginfo_t));
5423
#ifdef TARGET_NR_creat /* not on alpha */
5424
case TARGET_NR_creat:
5425
if (!(p = lock_user_string(arg1)))
5427
ret = get_errno(creat(p, arg2));
5428
unlock_user(p, arg1, 0);
5431
case TARGET_NR_link:
5434
p = lock_user_string(arg1);
5435
p2 = lock_user_string(arg2);
5437
ret = -TARGET_EFAULT;
5439
ret = get_errno(link(p, p2));
5440
unlock_user(p2, arg2, 0);
5441
unlock_user(p, arg1, 0);
5444
#if defined(TARGET_NR_linkat)
5445
case TARGET_NR_linkat:
5450
p = lock_user_string(arg2);
5451
p2 = lock_user_string(arg4);
5453
ret = -TARGET_EFAULT;
5455
ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5456
unlock_user(p, arg2, 0);
5457
unlock_user(p2, arg4, 0);
5461
case TARGET_NR_unlink:
5462
if (!(p = lock_user_string(arg1)))
5464
ret = get_errno(unlink(p));
5465
unlock_user(p, arg1, 0);
5467
#if defined(TARGET_NR_unlinkat)
5468
case TARGET_NR_unlinkat:
5469
if (!(p = lock_user_string(arg2)))
5471
ret = get_errno(unlinkat(arg1, p, arg3));
5472
unlock_user(p, arg2, 0);
5475
case TARGET_NR_execve:
5477
char **argp, **envp;
5480
abi_ulong guest_argp;
5481
abi_ulong guest_envp;
5488
for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5489
if (get_user_ual(addr, gp))
5497
for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5498
if (get_user_ual(addr, gp))
5505
argp = alloca((argc + 1) * sizeof(void *));
5506
envp = alloca((envc + 1) * sizeof(void *));
5508
for (gp = guest_argp, q = argp; gp;
5509
gp += sizeof(abi_ulong), q++) {
5510
if (get_user_ual(addr, gp))
5514
if (!(*q = lock_user_string(addr)))
5516
total_size += strlen(*q) + 1;
5520
for (gp = guest_envp, q = envp; gp;
5521
gp += sizeof(abi_ulong), q++) {
5522
if (get_user_ual(addr, gp))
5526
if (!(*q = lock_user_string(addr)))
5528
total_size += strlen(*q) + 1;
5532
/* This case will not be caught by the host's execve() if its
5533
page size is bigger than the target's. */
5534
if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5535
ret = -TARGET_E2BIG;
5538
if (!(p = lock_user_string(arg1)))
5540
ret = get_errno(execve(p, argp, envp));
5541
unlock_user(p, arg1, 0);
5546
ret = -TARGET_EFAULT;
5549
for (gp = guest_argp, q = argp; *q;
5550
gp += sizeof(abi_ulong), q++) {
5551
if (get_user_ual(addr, gp)
5554
unlock_user(*q, addr, 0);
5556
for (gp = guest_envp, q = envp; *q;
5557
gp += sizeof(abi_ulong), q++) {
5558
if (get_user_ual(addr, gp)
5561
unlock_user(*q, addr, 0);
5565
case TARGET_NR_chdir:
5566
if (!(p = lock_user_string(arg1)))
5568
ret = get_errno(chdir(p));
5569
unlock_user(p, arg1, 0);
5571
#ifdef TARGET_NR_time
5572
case TARGET_NR_time:
5575
ret = get_errno(time(&host_time));
5578
&& put_user_sal(host_time, arg1))
5583
case TARGET_NR_mknod:
5584
if (!(p = lock_user_string(arg1)))
5586
ret = get_errno(mknod(p, arg2, arg3));
5587
unlock_user(p, arg1, 0);
5589
#if defined(TARGET_NR_mknodat)
5590
case TARGET_NR_mknodat:
5591
if (!(p = lock_user_string(arg2)))
5593
ret = get_errno(mknodat(arg1, p, arg3, arg4));
5594
unlock_user(p, arg2, 0);
5597
case TARGET_NR_chmod:
5598
if (!(p = lock_user_string(arg1)))
5600
ret = get_errno(chmod(p, arg2));
5601
unlock_user(p, arg1, 0);
5603
#ifdef TARGET_NR_break
5604
case TARGET_NR_break:
5607
#ifdef TARGET_NR_oldstat
5608
case TARGET_NR_oldstat:
5611
case TARGET_NR_lseek:
5612
ret = get_errno(lseek(arg1, arg2, arg3));
5614
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5615
/* Alpha specific */
5616
case TARGET_NR_getxpid:
5617
((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5618
ret = get_errno(getpid());
5621
#ifdef TARGET_NR_getpid
5622
case TARGET_NR_getpid:
5623
ret = get_errno(getpid());
5626
case TARGET_NR_mount:
5628
/* need to look at the data field */
5630
p = lock_user_string(arg1);
5631
p2 = lock_user_string(arg2);
5632
p3 = lock_user_string(arg3);
5633
if (!p || !p2 || !p3)
5634
ret = -TARGET_EFAULT;
5636
/* FIXME - arg5 should be locked, but it isn't clear how to
5637
* do that since it's not guaranteed to be a NULL-terminated
5641
ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5643
ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5645
unlock_user(p, arg1, 0);
5646
unlock_user(p2, arg2, 0);
5647
unlock_user(p3, arg3, 0);
5650
#ifdef TARGET_NR_umount
5651
case TARGET_NR_umount:
5652
if (!(p = lock_user_string(arg1)))
5654
ret = get_errno(umount(p));
5655
unlock_user(p, arg1, 0);
5658
#ifdef TARGET_NR_stime /* not on alpha */
5659
case TARGET_NR_stime:
5662
if (get_user_sal(host_time, arg1))
5664
ret = get_errno(stime(&host_time));
5668
case TARGET_NR_ptrace:
5670
#ifdef TARGET_NR_alarm /* not on alpha */
5671
case TARGET_NR_alarm:
5675
#ifdef TARGET_NR_oldfstat
5676
case TARGET_NR_oldfstat:
5679
#ifdef TARGET_NR_pause /* not on alpha */
5680
case TARGET_NR_pause:
5681
ret = get_errno(pause());
5684
#ifdef TARGET_NR_utime
5685
case TARGET_NR_utime:
5687
struct utimbuf tbuf, *host_tbuf;
5688
struct target_utimbuf *target_tbuf;
5690
if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5692
tbuf.actime = tswapal(target_tbuf->actime);
5693
tbuf.modtime = tswapal(target_tbuf->modtime);
5694
unlock_user_struct(target_tbuf, arg2, 0);
5699
if (!(p = lock_user_string(arg1)))
5701
ret = get_errno(utime(p, host_tbuf));
5702
unlock_user(p, arg1, 0);
5706
case TARGET_NR_utimes:
5708
struct timeval *tvp, tv[2];
5710
if (copy_from_user_timeval(&tv[0], arg2)
5711
|| copy_from_user_timeval(&tv[1],
5712
arg2 + sizeof(struct target_timeval)))
5718
if (!(p = lock_user_string(arg1)))
5720
ret = get_errno(utimes(p, tvp));
5721
unlock_user(p, arg1, 0);
5724
#if defined(TARGET_NR_futimesat)
5725
case TARGET_NR_futimesat:
5727
struct timeval *tvp, tv[2];
5729
if (copy_from_user_timeval(&tv[0], arg3)
5730
|| copy_from_user_timeval(&tv[1],
5731
arg3 + sizeof(struct target_timeval)))
5737
if (!(p = lock_user_string(arg2)))
5739
ret = get_errno(futimesat(arg1, path(p), tvp));
5740
unlock_user(p, arg2, 0);
5744
#ifdef TARGET_NR_stty
5745
case TARGET_NR_stty:
5748
#ifdef TARGET_NR_gtty
5749
case TARGET_NR_gtty:
5752
case TARGET_NR_access:
5753
if (!(p = lock_user_string(arg1)))
5755
ret = get_errno(access(path(p), arg2));
5756
unlock_user(p, arg1, 0);
5758
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5759
case TARGET_NR_faccessat:
5760
if (!(p = lock_user_string(arg2)))
5762
ret = get_errno(faccessat(arg1, p, arg3, 0));
5763
unlock_user(p, arg2, 0);
5766
#ifdef TARGET_NR_nice /* not on alpha */
5767
case TARGET_NR_nice:
5768
ret = get_errno(nice(arg1));
5771
#ifdef TARGET_NR_ftime
5772
case TARGET_NR_ftime:
5775
case TARGET_NR_sync:
5779
case TARGET_NR_kill:
5780
ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5782
case TARGET_NR_rename:
5785
p = lock_user_string(arg1);
5786
p2 = lock_user_string(arg2);
5788
ret = -TARGET_EFAULT;
5790
ret = get_errno(rename(p, p2));
5791
unlock_user(p2, arg2, 0);
5792
unlock_user(p, arg1, 0);
5795
#if defined(TARGET_NR_renameat)
5796
case TARGET_NR_renameat:
5799
p = lock_user_string(arg2);
5800
p2 = lock_user_string(arg4);
5802
ret = -TARGET_EFAULT;
5804
ret = get_errno(renameat(arg1, p, arg3, p2));
5805
unlock_user(p2, arg4, 0);
5806
unlock_user(p, arg2, 0);
5810
case TARGET_NR_mkdir:
5811
if (!(p = lock_user_string(arg1)))
5813
ret = get_errno(mkdir(p, arg2));
5814
unlock_user(p, arg1, 0);
5816
#if defined(TARGET_NR_mkdirat)
5817
case TARGET_NR_mkdirat:
5818
if (!(p = lock_user_string(arg2)))
5820
ret = get_errno(mkdirat(arg1, p, arg3));
5821
unlock_user(p, arg2, 0);
5824
case TARGET_NR_rmdir:
5825
if (!(p = lock_user_string(arg1)))
5827
ret = get_errno(rmdir(p));
5828
unlock_user(p, arg1, 0);
5831
ret = get_errno(dup(arg1));
5833
case TARGET_NR_pipe:
5834
ret = do_pipe(cpu_env, arg1, 0, 0);
5836
#ifdef TARGET_NR_pipe2
5837
case TARGET_NR_pipe2:
5838
ret = do_pipe(cpu_env, arg1,
5839
target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5842
case TARGET_NR_times:
5844
struct target_tms *tmsp;
5846
ret = get_errno(times(&tms));
5848
tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5851
tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5852
tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5853
tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5854
tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5857
ret = host_to_target_clock_t(ret);
5860
#ifdef TARGET_NR_prof
5861
case TARGET_NR_prof:
5864
#ifdef TARGET_NR_signal
5865
case TARGET_NR_signal:
5868
case TARGET_NR_acct:
5870
ret = get_errno(acct(NULL));
5872
if (!(p = lock_user_string(arg1)))
5874
ret = get_errno(acct(path(p)));
5875
unlock_user(p, arg1, 0);
5878
#ifdef TARGET_NR_umount2
5879
case TARGET_NR_umount2:
5880
if (!(p = lock_user_string(arg1)))
5882
ret = get_errno(umount2(p, arg2));
5883
unlock_user(p, arg1, 0);
5886
#ifdef TARGET_NR_lock
5887
case TARGET_NR_lock:
5890
case TARGET_NR_ioctl:
5891
ret = do_ioctl(arg1, arg2, arg3);
5893
case TARGET_NR_fcntl:
5894
ret = do_fcntl(arg1, arg2, arg3);
5896
#ifdef TARGET_NR_mpx
5900
case TARGET_NR_setpgid:
5901
ret = get_errno(setpgid(arg1, arg2));
5903
#ifdef TARGET_NR_ulimit
5904
case TARGET_NR_ulimit:
5907
#ifdef TARGET_NR_oldolduname
5908
case TARGET_NR_oldolduname:
5911
case TARGET_NR_umask:
5912
ret = get_errno(umask(arg1));
5914
case TARGET_NR_chroot:
5915
if (!(p = lock_user_string(arg1)))
5917
ret = get_errno(chroot(p));
5918
unlock_user(p, arg1, 0);
5920
case TARGET_NR_ustat:
5922
case TARGET_NR_dup2:
5923
ret = get_errno(dup2(arg1, arg2));
5925
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5926
case TARGET_NR_dup3:
5927
ret = get_errno(dup3(arg1, arg2, arg3));
5930
#ifdef TARGET_NR_getppid /* not on alpha */
5931
case TARGET_NR_getppid:
5932
ret = get_errno(getppid());
5935
case TARGET_NR_getpgrp:
5936
ret = get_errno(getpgrp());
5938
case TARGET_NR_setsid:
5939
ret = get_errno(setsid());
5941
#ifdef TARGET_NR_sigaction
5942
case TARGET_NR_sigaction:
5944
#if defined(TARGET_ALPHA)
5945
struct target_sigaction act, oact, *pact = 0;
5946
struct target_old_sigaction *old_act;
5948
if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5950
act._sa_handler = old_act->_sa_handler;
5951
target_siginitset(&act.sa_mask, old_act->sa_mask);
5952
act.sa_flags = old_act->sa_flags;
5953
act.sa_restorer = 0;
5954
unlock_user_struct(old_act, arg2, 0);
5957
ret = get_errno(do_sigaction(arg1, pact, &oact));
5958
if (!is_error(ret) && arg3) {
5959
if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5961
old_act->_sa_handler = oact._sa_handler;
5962
old_act->sa_mask = oact.sa_mask.sig[0];
5963
old_act->sa_flags = oact.sa_flags;
5964
unlock_user_struct(old_act, arg3, 1);
5966
#elif defined(TARGET_MIPS)
5967
struct target_sigaction act, oact, *pact, *old_act;
5970
if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5972
act._sa_handler = old_act->_sa_handler;
5973
target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5974
act.sa_flags = old_act->sa_flags;
5975
unlock_user_struct(old_act, arg2, 0);
5981
ret = get_errno(do_sigaction(arg1, pact, &oact));
5983
if (!is_error(ret) && arg3) {
5984
if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5986
old_act->_sa_handler = oact._sa_handler;
5987
old_act->sa_flags = oact.sa_flags;
5988
old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5989
old_act->sa_mask.sig[1] = 0;
5990
old_act->sa_mask.sig[2] = 0;
5991
old_act->sa_mask.sig[3] = 0;
5992
unlock_user_struct(old_act, arg3, 1);
5995
struct target_old_sigaction *old_act;
5996
struct target_sigaction act, oact, *pact;
5998
if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6000
act._sa_handler = old_act->_sa_handler;
6001
target_siginitset(&act.sa_mask, old_act->sa_mask);
6002
act.sa_flags = old_act->sa_flags;
6003
act.sa_restorer = old_act->sa_restorer;
6004
unlock_user_struct(old_act, arg2, 0);
6009
ret = get_errno(do_sigaction(arg1, pact, &oact));
6010
if (!is_error(ret) && arg3) {
6011
if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6013
old_act->_sa_handler = oact._sa_handler;
6014
old_act->sa_mask = oact.sa_mask.sig[0];
6015
old_act->sa_flags = oact.sa_flags;
6016
old_act->sa_restorer = oact.sa_restorer;
6017
unlock_user_struct(old_act, arg3, 1);
6023
case TARGET_NR_rt_sigaction:
6025
#if defined(TARGET_ALPHA)
6026
struct target_sigaction act, oact, *pact = 0;
6027
struct target_rt_sigaction *rt_act;
6028
/* ??? arg4 == sizeof(sigset_t). */
6030
if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6032
act._sa_handler = rt_act->_sa_handler;
6033
act.sa_mask = rt_act->sa_mask;
6034
act.sa_flags = rt_act->sa_flags;
6035
act.sa_restorer = arg5;
6036
unlock_user_struct(rt_act, arg2, 0);
6039
ret = get_errno(do_sigaction(arg1, pact, &oact));
6040
if (!is_error(ret) && arg3) {
6041
if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6043
rt_act->_sa_handler = oact._sa_handler;
6044
rt_act->sa_mask = oact.sa_mask;
6045
rt_act->sa_flags = oact.sa_flags;
6046
unlock_user_struct(rt_act, arg3, 1);
6049
struct target_sigaction *act;
6050
struct target_sigaction *oact;
6053
if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6058
if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6059
ret = -TARGET_EFAULT;
6060
goto rt_sigaction_fail;
6064
ret = get_errno(do_sigaction(arg1, act, oact));
6067
unlock_user_struct(act, arg2, 0);
6069
unlock_user_struct(oact, arg3, 1);
6073
#ifdef TARGET_NR_sgetmask /* not on alpha */
6074
case TARGET_NR_sgetmask:
6077
abi_ulong target_set;
6078
sigprocmask(0, NULL, &cur_set);
6079
host_to_target_old_sigset(&target_set, &cur_set);
6084
#ifdef TARGET_NR_ssetmask /* not on alpha */
6085
case TARGET_NR_ssetmask:
6087
sigset_t set, oset, cur_set;
6088
abi_ulong target_set = arg1;
6089
sigprocmask(0, NULL, &cur_set);
6090
target_to_host_old_sigset(&set, &target_set);
6091
sigorset(&set, &set, &cur_set);
6092
sigprocmask(SIG_SETMASK, &set, &oset);
6093
host_to_target_old_sigset(&target_set, &oset);
6098
#ifdef TARGET_NR_sigprocmask
6099
case TARGET_NR_sigprocmask:
6101
#if defined(TARGET_ALPHA)
6102
sigset_t set, oldset;
6107
case TARGET_SIG_BLOCK:
6110
case TARGET_SIG_UNBLOCK:
6113
case TARGET_SIG_SETMASK:
6117
ret = -TARGET_EINVAL;
6121
target_to_host_old_sigset(&set, &mask);
6123
ret = get_errno(sigprocmask(how, &set, &oldset));
6124
if (!is_error(ret)) {
6125
host_to_target_old_sigset(&mask, &oldset);
6127
((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6130
sigset_t set, oldset, *set_ptr;
6135
case TARGET_SIG_BLOCK:
6138
case TARGET_SIG_UNBLOCK:
6141
case TARGET_SIG_SETMASK:
6145
ret = -TARGET_EINVAL;
6148
if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6150
target_to_host_old_sigset(&set, p);
6151
unlock_user(p, arg2, 0);
6157
ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6158
if (!is_error(ret) && arg3) {
6159
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6161
host_to_target_old_sigset(p, &oldset);
6162
unlock_user(p, arg3, sizeof(target_sigset_t));
6168
case TARGET_NR_rt_sigprocmask:
6171
sigset_t set, oldset, *set_ptr;
6175
case TARGET_SIG_BLOCK:
6178
case TARGET_SIG_UNBLOCK:
6181
case TARGET_SIG_SETMASK:
6185
ret = -TARGET_EINVAL;
6188
if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6190
target_to_host_sigset(&set, p);
6191
unlock_user(p, arg2, 0);
6197
ret = get_errno(sigprocmask(how, set_ptr, &oldset));
6198
if (!is_error(ret) && arg3) {
6199
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6201
host_to_target_sigset(p, &oldset);
6202
unlock_user(p, arg3, sizeof(target_sigset_t));
6206
#ifdef TARGET_NR_sigpending
6207
case TARGET_NR_sigpending:
6210
ret = get_errno(sigpending(&set));
6211
if (!is_error(ret)) {
6212
if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6214
host_to_target_old_sigset(p, &set);
6215
unlock_user(p, arg1, sizeof(target_sigset_t));
6220
case TARGET_NR_rt_sigpending:
6223
ret = get_errno(sigpending(&set));
6224
if (!is_error(ret)) {
6225
if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6227
host_to_target_sigset(p, &set);
6228
unlock_user(p, arg1, sizeof(target_sigset_t));
6232
#ifdef TARGET_NR_sigsuspend
6233
case TARGET_NR_sigsuspend:
6236
#if defined(TARGET_ALPHA)
6237
abi_ulong mask = arg1;
6238
target_to_host_old_sigset(&set, &mask);
6240
if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6242
target_to_host_old_sigset(&set, p);
6243
unlock_user(p, arg1, 0);
6245
ret = get_errno(sigsuspend(&set));
6249
case TARGET_NR_rt_sigsuspend:
6252
if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6254
target_to_host_sigset(&set, p);
6255
unlock_user(p, arg1, 0);
6256
ret = get_errno(sigsuspend(&set));
6259
case TARGET_NR_rt_sigtimedwait:
6262
struct timespec uts, *puts;
6265
if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6267
target_to_host_sigset(&set, p);
6268
unlock_user(p, arg1, 0);
6271
target_to_host_timespec(puts, arg3);
6275
ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6276
if (!is_error(ret) && arg2) {
6277
if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
6279
host_to_target_siginfo(p, &uinfo);
6280
unlock_user(p, arg2, sizeof(target_siginfo_t));
6284
case TARGET_NR_rt_sigqueueinfo:
6287
if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6289
target_to_host_siginfo(&uinfo, p);
6290
unlock_user(p, arg1, 0);
6291
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6294
#ifdef TARGET_NR_sigreturn
6295
case TARGET_NR_sigreturn:
6296
/* NOTE: ret is eax, so not transcoding must be done */
6297
ret = do_sigreturn(cpu_env);
6300
case TARGET_NR_rt_sigreturn:
6301
/* NOTE: ret is eax, so not transcoding must be done */
6302
ret = do_rt_sigreturn(cpu_env);
6304
case TARGET_NR_sethostname:
6305
if (!(p = lock_user_string(arg1)))
6307
ret = get_errno(sethostname(p, arg2));
6308
unlock_user(p, arg1, 0);
6310
case TARGET_NR_setrlimit:
6312
int resource = target_to_host_resource(arg1);
6313
struct target_rlimit *target_rlim;
6315
if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6317
rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6318
rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6319
unlock_user_struct(target_rlim, arg2, 0);
6320
ret = get_errno(setrlimit(resource, &rlim));
6323
case TARGET_NR_getrlimit:
6325
int resource = target_to_host_resource(arg1);
6326
struct target_rlimit *target_rlim;
6329
ret = get_errno(getrlimit(resource, &rlim));
6330
if (!is_error(ret)) {
6331
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6333
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6334
target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6335
unlock_user_struct(target_rlim, arg2, 1);
6339
case TARGET_NR_getrusage:
6341
struct rusage rusage;
6342
ret = get_errno(getrusage(arg1, &rusage));
6343
if (!is_error(ret)) {
6344
host_to_target_rusage(arg2, &rusage);
6348
case TARGET_NR_gettimeofday:
6351
ret = get_errno(gettimeofday(&tv, NULL));
6352
if (!is_error(ret)) {
6353
if (copy_to_user_timeval(arg1, &tv))
6358
case TARGET_NR_settimeofday:
6361
if (copy_from_user_timeval(&tv, arg1))
6363
ret = get_errno(settimeofday(&tv, NULL));
6366
#if defined(TARGET_NR_select)
6367
case TARGET_NR_select:
6368
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6369
ret = do_select(arg1, arg2, arg3, arg4, arg5);
6372
struct target_sel_arg_struct *sel;
6373
abi_ulong inp, outp, exp, tvp;
6376
if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6378
nsel = tswapal(sel->n);
6379
inp = tswapal(sel->inp);
6380
outp = tswapal(sel->outp);
6381
exp = tswapal(sel->exp);
6382
tvp = tswapal(sel->tvp);
6383
unlock_user_struct(sel, arg1, 0);
6384
ret = do_select(nsel, inp, outp, exp, tvp);
6389
#ifdef TARGET_NR_pselect6
6390
case TARGET_NR_pselect6:
6392
abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6393
fd_set rfds, wfds, efds;
6394
fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6395
struct timespec ts, *ts_ptr;
6398
* The 6th arg is actually two args smashed together,
6399
* so we cannot use the C library.
6407
abi_ulong arg_sigset, arg_sigsize, *arg7;
6408
target_sigset_t *target_sigset;
6416
ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6420
ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6424
ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6430
* This takes a timespec, and not a timeval, so we cannot
6431
* use the do_select() helper ...
6434
if (target_to_host_timespec(&ts, ts_addr)) {
6442
/* Extract the two packed args for the sigset */
6445
sig.size = _NSIG / 8;
6447
arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6451
arg_sigset = tswapal(arg7[0]);
6452
arg_sigsize = tswapal(arg7[1]);
6453
unlock_user(arg7, arg6, 0);
6457
if (arg_sigsize != sizeof(*target_sigset)) {
6458
/* Like the kernel, we enforce correct size sigsets */
6459
ret = -TARGET_EINVAL;
6462
target_sigset = lock_user(VERIFY_READ, arg_sigset,
6463
sizeof(*target_sigset), 1);
6464
if (!target_sigset) {
6467
target_to_host_sigset(&set, target_sigset);
6468
unlock_user(target_sigset, arg_sigset, 0);
6476
ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6479
if (!is_error(ret)) {
6480
if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6482
if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6484
if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6487
if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6493
case TARGET_NR_symlink:
6496
p = lock_user_string(arg1);
6497
p2 = lock_user_string(arg2);
6499
ret = -TARGET_EFAULT;
6501
ret = get_errno(symlink(p, p2));
6502
unlock_user(p2, arg2, 0);
6503
unlock_user(p, arg1, 0);
6506
#if defined(TARGET_NR_symlinkat)
6507
case TARGET_NR_symlinkat:
6510
p = lock_user_string(arg1);
6511
p2 = lock_user_string(arg3);
6513
ret = -TARGET_EFAULT;
6515
ret = get_errno(symlinkat(p, arg2, p2));
6516
unlock_user(p2, arg3, 0);
6517
unlock_user(p, arg1, 0);
6521
#ifdef TARGET_NR_oldlstat
6522
case TARGET_NR_oldlstat:
6525
case TARGET_NR_readlink:
6528
p = lock_user_string(arg1);
6529
p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6531
ret = -TARGET_EFAULT;
6532
} else if (is_proc_myself((const char *)p, "exe")) {
6533
char real[PATH_MAX], *temp;
6534
temp = realpath(exec_path, real);
6535
ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6536
snprintf((char *)p2, arg3, "%s", real);
6538
ret = get_errno(readlink(path(p), p2, arg3));
6540
unlock_user(p2, arg2, ret);
6541
unlock_user(p, arg1, 0);
6544
#if defined(TARGET_NR_readlinkat)
6545
case TARGET_NR_readlinkat:
6548
p = lock_user_string(arg2);
6549
p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6551
ret = -TARGET_EFAULT;
6552
} else if (is_proc_myself((const char *)p, "exe")) {
6553
char real[PATH_MAX], *temp;
6554
temp = realpath(exec_path, real);
6555
ret = temp == NULL ? get_errno(-1) : strlen(real) ;
6556
snprintf((char *)p2, arg4, "%s", real);
6558
ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6560
unlock_user(p2, arg3, ret);
6561
unlock_user(p, arg2, 0);
6565
#ifdef TARGET_NR_uselib
6566
case TARGET_NR_uselib:
6569
#ifdef TARGET_NR_swapon
6570
case TARGET_NR_swapon:
6571
if (!(p = lock_user_string(arg1)))
6573
ret = get_errno(swapon(p, arg2));
6574
unlock_user(p, arg1, 0);
6577
case TARGET_NR_reboot:
6578
if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6579
/* arg4 must be ignored in all other cases */
6580
p = lock_user_string(arg4);
6584
ret = get_errno(reboot(arg1, arg2, arg3, p));
6585
unlock_user(p, arg4, 0);
6587
ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6590
#ifdef TARGET_NR_readdir
6591
case TARGET_NR_readdir:
6594
#ifdef TARGET_NR_mmap
6595
case TARGET_NR_mmap:
6596
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6597
(defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6598
defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6599
|| defined(TARGET_S390X)
6602
abi_ulong v1, v2, v3, v4, v5, v6;
6603
if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6611
unlock_user(v, arg1, 0);
6612
ret = get_errno(target_mmap(v1, v2, v3,
6613
target_to_host_bitmask(v4, mmap_flags_tbl),
6617
ret = get_errno(target_mmap(arg1, arg2, arg3,
6618
target_to_host_bitmask(arg4, mmap_flags_tbl),
6624
#ifdef TARGET_NR_mmap2
6625
case TARGET_NR_mmap2:
6627
#define MMAP_SHIFT 12
6629
ret = get_errno(target_mmap(arg1, arg2, arg3,
6630
target_to_host_bitmask(arg4, mmap_flags_tbl),
6632
arg6 << MMAP_SHIFT));
6635
case TARGET_NR_munmap:
6636
ret = get_errno(target_munmap(arg1, arg2));
6638
case TARGET_NR_mprotect:
6640
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
6641
/* Special hack to detect libc making the stack executable. */
6642
if ((arg3 & PROT_GROWSDOWN)
6643
&& arg1 >= ts->info->stack_limit
6644
&& arg1 <= ts->info->start_stack) {
6645
arg3 &= ~PROT_GROWSDOWN;
6646
arg2 = arg2 + arg1 - ts->info->stack_limit;
6647
arg1 = ts->info->stack_limit;
6650
ret = get_errno(target_mprotect(arg1, arg2, arg3));
6652
#ifdef TARGET_NR_mremap
6653
case TARGET_NR_mremap:
6654
ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6657
/* ??? msync/mlock/munlock are broken for softmmu. */
6658
#ifdef TARGET_NR_msync
6659
case TARGET_NR_msync:
6660
ret = get_errno(msync(g2h(arg1), arg2, arg3));
6663
#ifdef TARGET_NR_mlock
6664
case TARGET_NR_mlock:
6665
ret = get_errno(mlock(g2h(arg1), arg2));
6668
#ifdef TARGET_NR_munlock
6669
case TARGET_NR_munlock:
6670
ret = get_errno(munlock(g2h(arg1), arg2));
6673
#ifdef TARGET_NR_mlockall
6674
case TARGET_NR_mlockall:
6675
ret = get_errno(mlockall(arg1));
6678
#ifdef TARGET_NR_munlockall
6679
case TARGET_NR_munlockall:
6680
ret = get_errno(munlockall());
6683
case TARGET_NR_truncate:
6684
if (!(p = lock_user_string(arg1)))
6686
ret = get_errno(truncate(p, arg2));
6687
unlock_user(p, arg1, 0);
6689
case TARGET_NR_ftruncate:
6690
ret = get_errno(ftruncate(arg1, arg2));
6692
case TARGET_NR_fchmod:
6693
ret = get_errno(fchmod(arg1, arg2));
6695
#if defined(TARGET_NR_fchmodat)
6696
case TARGET_NR_fchmodat:
6697
if (!(p = lock_user_string(arg2)))
6699
ret = get_errno(fchmodat(arg1, p, arg3, 0));
6700
unlock_user(p, arg2, 0);
6703
case TARGET_NR_getpriority:
6704
/* Note that negative values are valid for getpriority, so we must
6705
differentiate based on errno settings. */
6707
ret = getpriority(arg1, arg2);
6708
if (ret == -1 && errno != 0) {
6709
ret = -host_to_target_errno(errno);
6713
/* Return value is the unbiased priority. Signal no error. */
6714
((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6716
/* Return value is a biased priority to avoid negative numbers. */
6720
case TARGET_NR_setpriority:
6721
ret = get_errno(setpriority(arg1, arg2, arg3));
6723
#ifdef TARGET_NR_profil
6724
case TARGET_NR_profil:
6727
case TARGET_NR_statfs:
6728
if (!(p = lock_user_string(arg1)))
6730
ret = get_errno(statfs(path(p), &stfs));
6731
unlock_user(p, arg1, 0);
6733
if (!is_error(ret)) {
6734
struct target_statfs *target_stfs;
6736
if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6738
__put_user(stfs.f_type, &target_stfs->f_type);
6739
__put_user(stfs.f_bsize, &target_stfs->f_bsize);
6740
__put_user(stfs.f_blocks, &target_stfs->f_blocks);
6741
__put_user(stfs.f_bfree, &target_stfs->f_bfree);
6742
__put_user(stfs.f_bavail, &target_stfs->f_bavail);
6743
__put_user(stfs.f_files, &target_stfs->f_files);
6744
__put_user(stfs.f_ffree, &target_stfs->f_ffree);
6745
__put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6746
__put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6747
__put_user(stfs.f_namelen, &target_stfs->f_namelen);
6748
__put_user(stfs.f_frsize, &target_stfs->f_frsize);
6749
memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6750
unlock_user_struct(target_stfs, arg2, 1);
6753
case TARGET_NR_fstatfs:
6754
ret = get_errno(fstatfs(arg1, &stfs));
6755
goto convert_statfs;
6756
#ifdef TARGET_NR_statfs64
6757
case TARGET_NR_statfs64:
6758
if (!(p = lock_user_string(arg1)))
6760
ret = get_errno(statfs(path(p), &stfs));
6761
unlock_user(p, arg1, 0);
6763
if (!is_error(ret)) {
6764
struct target_statfs64 *target_stfs;
6766
if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6768
__put_user(stfs.f_type, &target_stfs->f_type);
6769
__put_user(stfs.f_bsize, &target_stfs->f_bsize);
6770
__put_user(stfs.f_blocks, &target_stfs->f_blocks);
6771
__put_user(stfs.f_bfree, &target_stfs->f_bfree);
6772
__put_user(stfs.f_bavail, &target_stfs->f_bavail);
6773
__put_user(stfs.f_files, &target_stfs->f_files);
6774
__put_user(stfs.f_ffree, &target_stfs->f_ffree);
6775
__put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6776
__put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6777
__put_user(stfs.f_namelen, &target_stfs->f_namelen);
6778
__put_user(stfs.f_frsize, &target_stfs->f_frsize);
6779
memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6780
unlock_user_struct(target_stfs, arg3, 1);
6783
case TARGET_NR_fstatfs64:
6784
ret = get_errno(fstatfs(arg1, &stfs));
6785
goto convert_statfs64;
6787
#ifdef TARGET_NR_ioperm
6788
case TARGET_NR_ioperm:
6791
#ifdef TARGET_NR_socketcall
6792
case TARGET_NR_socketcall:
6793
ret = do_socketcall(arg1, arg2);
6796
#ifdef TARGET_NR_accept
6797
case TARGET_NR_accept:
6798
ret = do_accept4(arg1, arg2, arg3, 0);
6801
#ifdef TARGET_NR_accept4
6802
case TARGET_NR_accept4:
6803
#ifdef CONFIG_ACCEPT4
6804
ret = do_accept4(arg1, arg2, arg3, arg4);
6810
#ifdef TARGET_NR_bind
6811
case TARGET_NR_bind:
6812
ret = do_bind(arg1, arg2, arg3);
6815
#ifdef TARGET_NR_connect
6816
case TARGET_NR_connect:
6817
ret = do_connect(arg1, arg2, arg3);
6820
#ifdef TARGET_NR_getpeername
6821
case TARGET_NR_getpeername:
6822
ret = do_getpeername(arg1, arg2, arg3);
6825
#ifdef TARGET_NR_getsockname
6826
case TARGET_NR_getsockname:
6827
ret = do_getsockname(arg1, arg2, arg3);
6830
#ifdef TARGET_NR_getsockopt
6831
case TARGET_NR_getsockopt:
6832
ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6835
#ifdef TARGET_NR_listen
6836
case TARGET_NR_listen:
6837
ret = get_errno(listen(arg1, arg2));
6840
#ifdef TARGET_NR_recv
6841
case TARGET_NR_recv:
6842
ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6845
#ifdef TARGET_NR_recvfrom
6846
case TARGET_NR_recvfrom:
6847
ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6850
#ifdef TARGET_NR_recvmsg
6851
case TARGET_NR_recvmsg:
6852
ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6855
#ifdef TARGET_NR_send
6856
case TARGET_NR_send:
6857
ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6860
#ifdef TARGET_NR_sendmsg
6861
case TARGET_NR_sendmsg:
6862
ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6865
#ifdef TARGET_NR_sendmmsg
6866
case TARGET_NR_sendmmsg:
6867
ret = do_sendmmsg(arg1, arg2, arg3, arg4);
6870
#ifdef TARGET_NR_sendto
6871
case TARGET_NR_sendto:
6872
ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6875
#ifdef TARGET_NR_shutdown
6876
case TARGET_NR_shutdown:
6877
ret = get_errno(shutdown(arg1, arg2));
6880
#ifdef TARGET_NR_socket
6881
case TARGET_NR_socket:
6882
ret = do_socket(arg1, arg2, arg3);
6885
#ifdef TARGET_NR_socketpair
6886
case TARGET_NR_socketpair:
6887
ret = do_socketpair(arg1, arg2, arg3, arg4);
6890
#ifdef TARGET_NR_setsockopt
6891
case TARGET_NR_setsockopt:
6892
ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6896
case TARGET_NR_syslog:
6897
if (!(p = lock_user_string(arg2)))
6899
ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6900
unlock_user(p, arg2, 0);
6903
case TARGET_NR_setitimer:
6905
struct itimerval value, ovalue, *pvalue;
6909
if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6910
|| copy_from_user_timeval(&pvalue->it_value,
6911
arg2 + sizeof(struct target_timeval)))
6916
ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6917
if (!is_error(ret) && arg3) {
6918
if (copy_to_user_timeval(arg3,
6919
&ovalue.it_interval)
6920
|| copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6926
case TARGET_NR_getitimer:
6928
struct itimerval value;
6930
ret = get_errno(getitimer(arg1, &value));
6931
if (!is_error(ret) && arg2) {
6932
if (copy_to_user_timeval(arg2,
6934
|| copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6940
case TARGET_NR_stat:
6941
if (!(p = lock_user_string(arg1)))
6943
ret = get_errno(stat(path(p), &st));
6944
unlock_user(p, arg1, 0);
6946
case TARGET_NR_lstat:
6947
if (!(p = lock_user_string(arg1)))
6949
ret = get_errno(lstat(path(p), &st));
6950
unlock_user(p, arg1, 0);
6952
case TARGET_NR_fstat:
6954
ret = get_errno(fstat(arg1, &st));
6956
if (!is_error(ret)) {
6957
struct target_stat *target_st;
6959
if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6961
memset(target_st, 0, sizeof(*target_st));
6962
__put_user(st.st_dev, &target_st->st_dev);
6963
__put_user(st.st_ino, &target_st->st_ino);
6964
__put_user(st.st_mode, &target_st->st_mode);
6965
__put_user(st.st_uid, &target_st->st_uid);
6966
__put_user(st.st_gid, &target_st->st_gid);
6967
__put_user(st.st_nlink, &target_st->st_nlink);
6968
__put_user(st.st_rdev, &target_st->st_rdev);
6969
__put_user(st.st_size, &target_st->st_size);
6970
__put_user(st.st_blksize, &target_st->st_blksize);
6971
__put_user(st.st_blocks, &target_st->st_blocks);
6972
__put_user(st.st_atime, &target_st->target_st_atime);
6973
__put_user(st.st_mtime, &target_st->target_st_mtime);
6974
__put_user(st.st_ctime, &target_st->target_st_ctime);
6975
unlock_user_struct(target_st, arg2, 1);
6979
#ifdef TARGET_NR_olduname
6980
case TARGET_NR_olduname:
6983
#ifdef TARGET_NR_iopl
6984
case TARGET_NR_iopl:
6987
case TARGET_NR_vhangup:
6988
ret = get_errno(vhangup());
6990
#ifdef TARGET_NR_idle
6991
case TARGET_NR_idle:
6994
#ifdef TARGET_NR_syscall
6995
case TARGET_NR_syscall:
6996
ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6997
arg6, arg7, arg8, 0);
7000
case TARGET_NR_wait4:
7003
abi_long status_ptr = arg2;
7004
struct rusage rusage, *rusage_ptr;
7005
abi_ulong target_rusage = arg4;
7007
rusage_ptr = &rusage;
7010
ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7011
if (!is_error(ret)) {
7012
if (status_ptr && ret) {
7013
status = host_to_target_waitstatus(status);
7014
if (put_user_s32(status, status_ptr))
7018
host_to_target_rusage(target_rusage, &rusage);
7022
#ifdef TARGET_NR_swapoff
7023
case TARGET_NR_swapoff:
7024
if (!(p = lock_user_string(arg1)))
7026
ret = get_errno(swapoff(p));
7027
unlock_user(p, arg1, 0);
7030
case TARGET_NR_sysinfo:
7032
struct target_sysinfo *target_value;
7033
struct sysinfo value;
7034
ret = get_errno(sysinfo(&value));
7035
if (!is_error(ret) && arg1)
7037
if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7039
__put_user(value.uptime, &target_value->uptime);
7040
__put_user(value.loads[0], &target_value->loads[0]);
7041
__put_user(value.loads[1], &target_value->loads[1]);
7042
__put_user(value.loads[2], &target_value->loads[2]);
7043
__put_user(value.totalram, &target_value->totalram);
7044
__put_user(value.freeram, &target_value->freeram);
7045
__put_user(value.sharedram, &target_value->sharedram);
7046
__put_user(value.bufferram, &target_value->bufferram);
7047
__put_user(value.totalswap, &target_value->totalswap);
7048
__put_user(value.freeswap, &target_value->freeswap);
7049
__put_user(value.procs, &target_value->procs);
7050
__put_user(value.totalhigh, &target_value->totalhigh);
7051
__put_user(value.freehigh, &target_value->freehigh);
7052
__put_user(value.mem_unit, &target_value->mem_unit);
7053
unlock_user_struct(target_value, arg1, 1);
7057
#ifdef TARGET_NR_ipc
7059
ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7062
#ifdef TARGET_NR_semget
7063
case TARGET_NR_semget:
7064
ret = get_errno(semget(arg1, arg2, arg3));
7067
#ifdef TARGET_NR_semop
7068
case TARGET_NR_semop:
7069
ret = do_semop(arg1, arg2, arg3);
7072
#ifdef TARGET_NR_semctl
7073
case TARGET_NR_semctl:
7074
ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7077
#ifdef TARGET_NR_msgctl
7078
case TARGET_NR_msgctl:
7079
ret = do_msgctl(arg1, arg2, arg3);
7082
#ifdef TARGET_NR_msgget
7083
case TARGET_NR_msgget:
7084
ret = get_errno(msgget(arg1, arg2));
7087
#ifdef TARGET_NR_msgrcv
7088
case TARGET_NR_msgrcv:
7089
ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7092
#ifdef TARGET_NR_msgsnd
7093
case TARGET_NR_msgsnd:
7094
ret = do_msgsnd(arg1, arg2, arg3, arg4);
7097
#ifdef TARGET_NR_shmget
7098
case TARGET_NR_shmget:
7099
ret = get_errno(shmget(arg1, arg2, arg3));
7102
#ifdef TARGET_NR_shmctl
7103
case TARGET_NR_shmctl:
7104
ret = do_shmctl(arg1, arg2, arg3);
7107
#ifdef TARGET_NR_shmat
7108
case TARGET_NR_shmat:
7109
ret = do_shmat(arg1, arg2, arg3);
7112
#ifdef TARGET_NR_shmdt
7113
case TARGET_NR_shmdt:
7114
ret = do_shmdt(arg1);
7117
case TARGET_NR_fsync:
7118
ret = get_errno(fsync(arg1));
7120
case TARGET_NR_clone:
7121
/* Linux manages to have three different orderings for its
7122
* arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7123
* match the kernel's CONFIG_CLONE_* settings.
7124
* Microblaze is further special in that it uses a sixth
7125
* implicit argument to clone for the TLS pointer.
7127
#if defined(TARGET_MICROBLAZE)
7128
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7129
#elif defined(TARGET_CLONE_BACKWARDS)
7130
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7131
#elif defined(TARGET_CLONE_BACKWARDS2)
7132
ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7134
ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7137
#ifdef __NR_exit_group
7138
/* new thread calls */
7139
case TARGET_NR_exit_group:
7143
gdb_exit(cpu_env, arg1);
7144
ret = get_errno(exit_group(arg1));
7147
case TARGET_NR_setdomainname:
7148
if (!(p = lock_user_string(arg1)))
7150
ret = get_errno(setdomainname(p, arg2));
7151
unlock_user(p, arg1, 0);
7153
case TARGET_NR_uname:
7154
/* no need to transcode because we use the linux syscall */
7156
struct new_utsname * buf;
7158
if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7160
ret = get_errno(sys_uname(buf));
7161
if (!is_error(ret)) {
7162
/* Overrite the native machine name with whatever is being
7164
strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7165
/* Allow the user to override the reported release. */
7166
if (qemu_uname_release && *qemu_uname_release)
7167
strcpy (buf->release, qemu_uname_release);
7169
unlock_user_struct(buf, arg1, 1);
7173
case TARGET_NR_modify_ldt:
7174
ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7176
#if !defined(TARGET_X86_64)
7177
case TARGET_NR_vm86old:
7179
case TARGET_NR_vm86:
7180
ret = do_vm86(cpu_env, arg1, arg2);
7184
case TARGET_NR_adjtimex:
7186
#ifdef TARGET_NR_create_module
7187
case TARGET_NR_create_module:
7189
case TARGET_NR_init_module:
7190
case TARGET_NR_delete_module:
7191
#ifdef TARGET_NR_get_kernel_syms
7192
case TARGET_NR_get_kernel_syms:
7195
case TARGET_NR_quotactl:
7197
case TARGET_NR_getpgid:
7198
ret = get_errno(getpgid(arg1));
7200
case TARGET_NR_fchdir:
7201
ret = get_errno(fchdir(arg1));
7203
#ifdef TARGET_NR_bdflush /* not on x86_64 */
7204
case TARGET_NR_bdflush:
7207
#ifdef TARGET_NR_sysfs
7208
case TARGET_NR_sysfs:
7211
case TARGET_NR_personality:
7212
ret = get_errno(personality(arg1));
7214
#ifdef TARGET_NR_afs_syscall
7215
case TARGET_NR_afs_syscall:
7218
#ifdef TARGET_NR__llseek /* Not on alpha */
7219
case TARGET_NR__llseek:
7222
#if !defined(__NR_llseek)
7223
res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7225
ret = get_errno(res);
7230
ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7232
if ((ret == 0) && put_user_s64(res, arg4)) {
7238
case TARGET_NR_getdents:
7239
#ifdef __NR_getdents
7240
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7242
struct target_dirent *target_dirp;
7243
struct linux_dirent *dirp;
7244
abi_long count = arg3;
7246
dirp = malloc(count);
7248
ret = -TARGET_ENOMEM;
7252
ret = get_errno(sys_getdents(arg1, dirp, count));
7253
if (!is_error(ret)) {
7254
struct linux_dirent *de;
7255
struct target_dirent *tde;
7257
int reclen, treclen;
7258
int count1, tnamelen;
7262
if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7266
reclen = de->d_reclen;
7267
tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7268
assert(tnamelen >= 0);
7269
treclen = tnamelen + offsetof(struct target_dirent, d_name);
7270
assert(count1 + treclen <= count);
7271
tde->d_reclen = tswap16(treclen);
7272
tde->d_ino = tswapal(de->d_ino);
7273
tde->d_off = tswapal(de->d_off);
7274
memcpy(tde->d_name, de->d_name, tnamelen);
7275
de = (struct linux_dirent *)((char *)de + reclen);
7277
tde = (struct target_dirent *)((char *)tde + treclen);
7281
unlock_user(target_dirp, arg2, ret);
7287
struct linux_dirent *dirp;
7288
abi_long count = arg3;
7290
if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7292
ret = get_errno(sys_getdents(arg1, dirp, count));
7293
if (!is_error(ret)) {
7294
struct linux_dirent *de;
7299
reclen = de->d_reclen;
7302
de->d_reclen = tswap16(reclen);
7303
tswapls(&de->d_ino);
7304
tswapls(&de->d_off);
7305
de = (struct linux_dirent *)((char *)de + reclen);
7309
unlock_user(dirp, arg2, ret);
7313
/* Implement getdents in terms of getdents64 */
7315
struct linux_dirent64 *dirp;
7316
abi_long count = arg3;
7318
dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7322
ret = get_errno(sys_getdents64(arg1, dirp, count));
7323
if (!is_error(ret)) {
7324
/* Convert the dirent64 structs to target dirent. We do this
7325
* in-place, since we can guarantee that a target_dirent is no
7326
* larger than a dirent64; however this means we have to be
7327
* careful to read everything before writing in the new format.
7329
struct linux_dirent64 *de;
7330
struct target_dirent *tde;
7335
tde = (struct target_dirent *)dirp;
7337
int namelen, treclen;
7338
int reclen = de->d_reclen;
7339
uint64_t ino = de->d_ino;
7340
int64_t off = de->d_off;
7341
uint8_t type = de->d_type;
7343
namelen = strlen(de->d_name);
7344
treclen = offsetof(struct target_dirent, d_name)
7346
treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7348
memmove(tde->d_name, de->d_name, namelen + 1);
7349
tde->d_ino = tswapal(ino);
7350
tde->d_off = tswapal(off);
7351
tde->d_reclen = tswap16(treclen);
7352
/* The target_dirent type is in what was formerly a padding
7353
* byte at the end of the structure:
7355
*(((char *)tde) + treclen - 1) = type;
7357
de = (struct linux_dirent64 *)((char *)de + reclen);
7358
tde = (struct target_dirent *)((char *)tde + treclen);
7364
unlock_user(dirp, arg2, ret);
7368
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7369
case TARGET_NR_getdents64:
7371
struct linux_dirent64 *dirp;
7372
abi_long count = arg3;
7373
if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7375
ret = get_errno(sys_getdents64(arg1, dirp, count));
7376
if (!is_error(ret)) {
7377
struct linux_dirent64 *de;
7382
reclen = de->d_reclen;
7385
de->d_reclen = tswap16(reclen);
7386
tswap64s((uint64_t *)&de->d_ino);
7387
tswap64s((uint64_t *)&de->d_off);
7388
de = (struct linux_dirent64 *)((char *)de + reclen);
7392
unlock_user(dirp, arg2, ret);
7395
#endif /* TARGET_NR_getdents64 */
7396
#if defined(TARGET_NR__newselect)
7397
case TARGET_NR__newselect:
7398
ret = do_select(arg1, arg2, arg3, arg4, arg5);
7401
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7402
# ifdef TARGET_NR_poll
7403
case TARGET_NR_poll:
7405
# ifdef TARGET_NR_ppoll
7406
case TARGET_NR_ppoll:
7409
struct target_pollfd *target_pfd;
7410
unsigned int nfds = arg2;
7415
target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7419
pfd = alloca(sizeof(struct pollfd) * nfds);
7420
for(i = 0; i < nfds; i++) {
7421
pfd[i].fd = tswap32(target_pfd[i].fd);
7422
pfd[i].events = tswap16(target_pfd[i].events);
7425
# ifdef TARGET_NR_ppoll
7426
if (num == TARGET_NR_ppoll) {
7427
struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7428
target_sigset_t *target_set;
7429
sigset_t _set, *set = &_set;
7432
if (target_to_host_timespec(timeout_ts, arg3)) {
7433
unlock_user(target_pfd, arg1, 0);
7441
target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7443
unlock_user(target_pfd, arg1, 0);
7446
target_to_host_sigset(set, target_set);
7451
ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7453
if (!is_error(ret) && arg3) {
7454
host_to_target_timespec(arg3, timeout_ts);
7457
unlock_user(target_set, arg4, 0);
7461
ret = get_errno(poll(pfd, nfds, timeout));
7463
if (!is_error(ret)) {
7464
for(i = 0; i < nfds; i++) {
7465
target_pfd[i].revents = tswap16(pfd[i].revents);
7468
unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7472
case TARGET_NR_flock:
7473
/* NOTE: the flock constant seems to be the same for every
7475
ret = get_errno(flock(arg1, arg2));
7477
case TARGET_NR_readv:
7479
struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7481
ret = get_errno(readv(arg1, vec, arg3));
7482
unlock_iovec(vec, arg2, arg3, 1);
7484
ret = -host_to_target_errno(errno);
7488
case TARGET_NR_writev:
7490
struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7492
ret = get_errno(writev(arg1, vec, arg3));
7493
unlock_iovec(vec, arg2, arg3, 0);
7495
ret = -host_to_target_errno(errno);
7499
case TARGET_NR_getsid:
7500
ret = get_errno(getsid(arg1));
7502
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7503
case TARGET_NR_fdatasync:
7504
ret = get_errno(fdatasync(arg1));
7507
case TARGET_NR__sysctl:
7508
/* We don't implement this, but ENOTDIR is always a safe
7510
ret = -TARGET_ENOTDIR;
7512
case TARGET_NR_sched_getaffinity:
7514
unsigned int mask_size;
7515
unsigned long *mask;
7518
* sched_getaffinity needs multiples of ulong, so need to take
7519
* care of mismatches between target ulong and host ulong sizes.
7521
if (arg2 & (sizeof(abi_ulong) - 1)) {
7522
ret = -TARGET_EINVAL;
7525
mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7527
mask = alloca(mask_size);
7528
ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7530
if (!is_error(ret)) {
7531
if (copy_to_user(arg3, mask, ret)) {
7537
case TARGET_NR_sched_setaffinity:
7539
unsigned int mask_size;
7540
unsigned long *mask;
7543
* sched_setaffinity needs multiples of ulong, so need to take
7544
* care of mismatches between target ulong and host ulong sizes.
7546
if (arg2 & (sizeof(abi_ulong) - 1)) {
7547
ret = -TARGET_EINVAL;
7550
mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7552
mask = alloca(mask_size);
7553
if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7556
memcpy(mask, p, arg2);
7557
unlock_user_struct(p, arg2, 0);
7559
ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7562
case TARGET_NR_sched_setparam:
7564
struct sched_param *target_schp;
7565
struct sched_param schp;
7567
if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7569
schp.sched_priority = tswap32(target_schp->sched_priority);
7570
unlock_user_struct(target_schp, arg2, 0);
7571
ret = get_errno(sched_setparam(arg1, &schp));
7574
case TARGET_NR_sched_getparam:
7576
struct sched_param *target_schp;
7577
struct sched_param schp;
7578
ret = get_errno(sched_getparam(arg1, &schp));
7579
if (!is_error(ret)) {
7580
if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7582
target_schp->sched_priority = tswap32(schp.sched_priority);
7583
unlock_user_struct(target_schp, arg2, 1);
7587
case TARGET_NR_sched_setscheduler:
7589
struct sched_param *target_schp;
7590
struct sched_param schp;
7591
if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7593
schp.sched_priority = tswap32(target_schp->sched_priority);
7594
unlock_user_struct(target_schp, arg3, 0);
7595
ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7598
case TARGET_NR_sched_getscheduler:
7599
ret = get_errno(sched_getscheduler(arg1));
7601
case TARGET_NR_sched_yield:
7602
ret = get_errno(sched_yield());
7604
case TARGET_NR_sched_get_priority_max:
7605
ret = get_errno(sched_get_priority_max(arg1));
7607
case TARGET_NR_sched_get_priority_min:
7608
ret = get_errno(sched_get_priority_min(arg1));
7610
case TARGET_NR_sched_rr_get_interval:
7613
ret = get_errno(sched_rr_get_interval(arg1, &ts));
7614
if (!is_error(ret)) {
7615
host_to_target_timespec(arg2, &ts);
7619
case TARGET_NR_nanosleep:
7621
struct timespec req, rem;
7622
target_to_host_timespec(&req, arg1);
7623
ret = get_errno(nanosleep(&req, &rem));
7624
if (is_error(ret) && arg2) {
7625
host_to_target_timespec(arg2, &rem);
7629
#ifdef TARGET_NR_query_module
7630
case TARGET_NR_query_module:
7633
#ifdef TARGET_NR_nfsservctl
7634
case TARGET_NR_nfsservctl:
7637
case TARGET_NR_prctl:
7639
case PR_GET_PDEATHSIG:
7642
ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7643
if (!is_error(ret) && arg2
7644
&& put_user_ual(deathsig, arg2)) {
7652
void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7656
ret = get_errno(prctl(arg1, (unsigned long)name,
7658
unlock_user(name, arg2, 16);
7663
void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7667
ret = get_errno(prctl(arg1, (unsigned long)name,
7669
unlock_user(name, arg2, 0);
7674
/* Most prctl options have no pointer arguments */
7675
ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7679
#ifdef TARGET_NR_arch_prctl
7680
case TARGET_NR_arch_prctl:
7681
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
7682
ret = do_arch_prctl(cpu_env, arg1, arg2);
7688
#ifdef TARGET_NR_pread64
7689
case TARGET_NR_pread64:
7690
if (regpairs_aligned(cpu_env)) {
7694
if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7696
ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7697
unlock_user(p, arg2, ret);
7699
case TARGET_NR_pwrite64:
7700
if (regpairs_aligned(cpu_env)) {
7704
if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7706
ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7707
unlock_user(p, arg2, 0);
7710
case TARGET_NR_getcwd:
7711
if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7713
ret = get_errno(sys_getcwd1(p, arg2));
7714
unlock_user(p, arg1, ret);
7716
case TARGET_NR_capget:
7718
case TARGET_NR_capset:
7720
case TARGET_NR_sigaltstack:
7721
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7722
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7723
defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7724
ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7730
#ifdef CONFIG_SENDFILE
7731
case TARGET_NR_sendfile:
7736
ret = get_user_sal(off, arg3);
7737
if (is_error(ret)) {
7742
ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7743
if (!is_error(ret) && arg3) {
7744
abi_long ret2 = put_user_sal(off, arg3);
7745
if (is_error(ret2)) {
7751
#ifdef TARGET_NR_sendfile64
7752
case TARGET_NR_sendfile64:
7757
ret = get_user_s64(off, arg3);
7758
if (is_error(ret)) {
7763
ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7764
if (!is_error(ret) && arg3) {
7765
abi_long ret2 = put_user_s64(off, arg3);
7766
if (is_error(ret2)) {
7774
case TARGET_NR_sendfile:
7775
#ifdef TARGET_NR_sendfile64
7776
case TARGET_NR_sendfile64:
7781
#ifdef TARGET_NR_getpmsg
7782
case TARGET_NR_getpmsg:
7785
#ifdef TARGET_NR_putpmsg
7786
case TARGET_NR_putpmsg:
7789
#ifdef TARGET_NR_vfork
7790
case TARGET_NR_vfork:
7791
ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7795
#ifdef TARGET_NR_ugetrlimit
7796
case TARGET_NR_ugetrlimit:
7799
int resource = target_to_host_resource(arg1);
7800
ret = get_errno(getrlimit(resource, &rlim));
7801
if (!is_error(ret)) {
7802
struct target_rlimit *target_rlim;
7803
if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7805
target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7806
target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7807
unlock_user_struct(target_rlim, arg2, 1);
7812
#ifdef TARGET_NR_truncate64
7813
case TARGET_NR_truncate64:
7814
if (!(p = lock_user_string(arg1)))
7816
ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7817
unlock_user(p, arg1, 0);
7820
#ifdef TARGET_NR_ftruncate64
7821
case TARGET_NR_ftruncate64:
7822
ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7825
#ifdef TARGET_NR_stat64
7826
case TARGET_NR_stat64:
7827
if (!(p = lock_user_string(arg1)))
7829
ret = get_errno(stat(path(p), &st));
7830
unlock_user(p, arg1, 0);
7832
ret = host_to_target_stat64(cpu_env, arg2, &st);
7835
#ifdef TARGET_NR_lstat64
7836
case TARGET_NR_lstat64:
7837
if (!(p = lock_user_string(arg1)))
7839
ret = get_errno(lstat(path(p), &st));
7840
unlock_user(p, arg1, 0);
7842
ret = host_to_target_stat64(cpu_env, arg2, &st);
7845
#ifdef TARGET_NR_fstat64
7846
case TARGET_NR_fstat64:
7847
ret = get_errno(fstat(arg1, &st));
7849
ret = host_to_target_stat64(cpu_env, arg2, &st);
7852
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7853
#ifdef TARGET_NR_fstatat64
7854
case TARGET_NR_fstatat64:
7856
#ifdef TARGET_NR_newfstatat
7857
case TARGET_NR_newfstatat:
7859
if (!(p = lock_user_string(arg2)))
7861
ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7863
ret = host_to_target_stat64(cpu_env, arg3, &st);
7866
case TARGET_NR_lchown:
7867
if (!(p = lock_user_string(arg1)))
7869
ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7870
unlock_user(p, arg1, 0);
7872
#ifdef TARGET_NR_getuid
7873
case TARGET_NR_getuid:
7874
ret = get_errno(high2lowuid(getuid()));
7877
#ifdef TARGET_NR_getgid
7878
case TARGET_NR_getgid:
7879
ret = get_errno(high2lowgid(getgid()));
7882
#ifdef TARGET_NR_geteuid
7883
case TARGET_NR_geteuid:
7884
ret = get_errno(high2lowuid(geteuid()));
7887
#ifdef TARGET_NR_getegid
7888
case TARGET_NR_getegid:
7889
ret = get_errno(high2lowgid(getegid()));
7892
case TARGET_NR_setreuid:
7893
ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7895
case TARGET_NR_setregid:
7896
ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7898
case TARGET_NR_getgroups:
7900
int gidsetsize = arg1;
7901
target_id *target_grouplist;
7905
grouplist = alloca(gidsetsize * sizeof(gid_t));
7906
ret = get_errno(getgroups(gidsetsize, grouplist));
7907
if (gidsetsize == 0)
7909
if (!is_error(ret)) {
7910
target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7911
if (!target_grouplist)
7913
for(i = 0;i < ret; i++)
7914
target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7915
unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7919
case TARGET_NR_setgroups:
7921
int gidsetsize = arg1;
7922
target_id *target_grouplist;
7923
gid_t *grouplist = NULL;
7926
grouplist = alloca(gidsetsize * sizeof(gid_t));
7927
target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7928
if (!target_grouplist) {
7929
ret = -TARGET_EFAULT;
7932
for (i = 0; i < gidsetsize; i++) {
7933
grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7935
unlock_user(target_grouplist, arg2, 0);
7937
ret = get_errno(setgroups(gidsetsize, grouplist));
7940
case TARGET_NR_fchown:
7941
ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7943
#if defined(TARGET_NR_fchownat)
7944
case TARGET_NR_fchownat:
7945
if (!(p = lock_user_string(arg2)))
7947
ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7948
low2highgid(arg4), arg5));
7949
unlock_user(p, arg2, 0);
7952
#ifdef TARGET_NR_setresuid
7953
case TARGET_NR_setresuid:
7954
ret = get_errno(setresuid(low2highuid(arg1),
7956
low2highuid(arg3)));
7959
#ifdef TARGET_NR_getresuid
7960
case TARGET_NR_getresuid:
7962
uid_t ruid, euid, suid;
7963
ret = get_errno(getresuid(&ruid, &euid, &suid));
7964
if (!is_error(ret)) {
7965
if (put_user_u16(high2lowuid(ruid), arg1)
7966
|| put_user_u16(high2lowuid(euid), arg2)
7967
|| put_user_u16(high2lowuid(suid), arg3))
7973
#ifdef TARGET_NR_getresgid
7974
case TARGET_NR_setresgid:
7975
ret = get_errno(setresgid(low2highgid(arg1),
7977
low2highgid(arg3)));
7980
#ifdef TARGET_NR_getresgid
7981
case TARGET_NR_getresgid:
7983
gid_t rgid, egid, sgid;
7984
ret = get_errno(getresgid(&rgid, &egid, &sgid));
7985
if (!is_error(ret)) {
7986
if (put_user_u16(high2lowgid(rgid), arg1)
7987
|| put_user_u16(high2lowgid(egid), arg2)
7988
|| put_user_u16(high2lowgid(sgid), arg3))
7994
case TARGET_NR_chown:
7995
if (!(p = lock_user_string(arg1)))
7997
ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7998
unlock_user(p, arg1, 0);
8000
case TARGET_NR_setuid:
8001
ret = get_errno(setuid(low2highuid(arg1)));
8003
case TARGET_NR_setgid:
8004
ret = get_errno(setgid(low2highgid(arg1)));
8006
case TARGET_NR_setfsuid:
8007
ret = get_errno(setfsuid(arg1));
8009
case TARGET_NR_setfsgid:
8010
ret = get_errno(setfsgid(arg1));
8013
#ifdef TARGET_NR_lchown32
8014
case TARGET_NR_lchown32:
8015
if (!(p = lock_user_string(arg1)))
8017
ret = get_errno(lchown(p, arg2, arg3));
8018
unlock_user(p, arg1, 0);
8021
#ifdef TARGET_NR_getuid32
8022
case TARGET_NR_getuid32:
8023
ret = get_errno(getuid());
8027
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8028
/* Alpha specific */
8029
case TARGET_NR_getxuid:
8033
((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8035
ret = get_errno(getuid());
8038
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8039
/* Alpha specific */
8040
case TARGET_NR_getxgid:
8044
((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8046
ret = get_errno(getgid());
8049
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8050
/* Alpha specific */
8051
case TARGET_NR_osf_getsysinfo:
8052
ret = -TARGET_EOPNOTSUPP;
8054
case TARGET_GSI_IEEE_FP_CONTROL:
8056
uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8058
/* Copied from linux ieee_fpcr_to_swcr. */
8059
swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8060
swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8061
swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8062
| SWCR_TRAP_ENABLE_DZE
8063
| SWCR_TRAP_ENABLE_OVF);
8064
swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8065
| SWCR_TRAP_ENABLE_INE);
8066
swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8067
swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8069
if (put_user_u64 (swcr, arg2))
8075
/* case GSI_IEEE_STATE_AT_SIGNAL:
8076
-- Not implemented in linux kernel.
8078
-- Retrieves current unaligned access state; not much used.
8080
-- Retrieves implver information; surely not used.
8082
-- Grabs a copy of the HWRPB; surely not used.
8087
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8088
/* Alpha specific */
8089
case TARGET_NR_osf_setsysinfo:
8090
ret = -TARGET_EOPNOTSUPP;
8092
case TARGET_SSI_IEEE_FP_CONTROL:
8094
uint64_t swcr, fpcr, orig_fpcr;
8096
if (get_user_u64 (swcr, arg2)) {
8099
orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8100
fpcr = orig_fpcr & FPCR_DYN_MASK;
8102
/* Copied from linux ieee_swcr_to_fpcr. */
8103
fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8104
fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8105
fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8106
| SWCR_TRAP_ENABLE_DZE
8107
| SWCR_TRAP_ENABLE_OVF)) << 48;
8108
fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8109
| SWCR_TRAP_ENABLE_INE)) << 57;
8110
fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8111
fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8113
cpu_alpha_store_fpcr(cpu_env, fpcr);
8118
case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8120
uint64_t exc, fpcr, orig_fpcr;
8123
if (get_user_u64(exc, arg2)) {
8127
orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8129
/* We only add to the exception status here. */
8130
fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8132
cpu_alpha_store_fpcr(cpu_env, fpcr);
8135
/* Old exceptions are not signaled. */
8136
fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8138
/* If any exceptions set by this call,
8139
and are unmasked, send a signal. */
8141
if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8142
si_code = TARGET_FPE_FLTRES;
8144
if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8145
si_code = TARGET_FPE_FLTUND;
8147
if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8148
si_code = TARGET_FPE_FLTOVF;
8150
if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8151
si_code = TARGET_FPE_FLTDIV;
8153
if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8154
si_code = TARGET_FPE_FLTINV;
8157
target_siginfo_t info;
8158
info.si_signo = SIGFPE;
8160
info.si_code = si_code;
8161
info._sifields._sigfault._addr
8162
= ((CPUArchState *)cpu_env)->pc;
8163
queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8168
/* case SSI_NVPAIRS:
8169
-- Used with SSIN_UACPROC to enable unaligned accesses.
8170
case SSI_IEEE_STATE_AT_SIGNAL:
8171
case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8172
-- Not implemented in linux kernel
8177
#ifdef TARGET_NR_osf_sigprocmask
8178
/* Alpha specific. */
8179
case TARGET_NR_osf_sigprocmask:
8183
sigset_t set, oldset;
8186
case TARGET_SIG_BLOCK:
8189
case TARGET_SIG_UNBLOCK:
8192
case TARGET_SIG_SETMASK:
8196
ret = -TARGET_EINVAL;
8200
target_to_host_old_sigset(&set, &mask);
8201
sigprocmask(how, &set, &oldset);
8202
host_to_target_old_sigset(&mask, &oldset);
8208
#ifdef TARGET_NR_getgid32
8209
case TARGET_NR_getgid32:
8210
ret = get_errno(getgid());
8213
#ifdef TARGET_NR_geteuid32
8214
case TARGET_NR_geteuid32:
8215
ret = get_errno(geteuid());
8218
#ifdef TARGET_NR_getegid32
8219
case TARGET_NR_getegid32:
8220
ret = get_errno(getegid());
8223
#ifdef TARGET_NR_setreuid32
8224
case TARGET_NR_setreuid32:
8225
ret = get_errno(setreuid(arg1, arg2));
8228
#ifdef TARGET_NR_setregid32
8229
case TARGET_NR_setregid32:
8230
ret = get_errno(setregid(arg1, arg2));
8233
#ifdef TARGET_NR_getgroups32
8234
case TARGET_NR_getgroups32:
8236
int gidsetsize = arg1;
8237
uint32_t *target_grouplist;
8241
grouplist = alloca(gidsetsize * sizeof(gid_t));
8242
ret = get_errno(getgroups(gidsetsize, grouplist));
8243
if (gidsetsize == 0)
8245
if (!is_error(ret)) {
8246
target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8247
if (!target_grouplist) {
8248
ret = -TARGET_EFAULT;
8251
for(i = 0;i < ret; i++)
8252
target_grouplist[i] = tswap32(grouplist[i]);
8253
unlock_user(target_grouplist, arg2, gidsetsize * 4);
8258
#ifdef TARGET_NR_setgroups32
8259
case TARGET_NR_setgroups32:
8261
int gidsetsize = arg1;
8262
uint32_t *target_grouplist;
8266
grouplist = alloca(gidsetsize * sizeof(gid_t));
8267
target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8268
if (!target_grouplist) {
8269
ret = -TARGET_EFAULT;
8272
for(i = 0;i < gidsetsize; i++)
8273
grouplist[i] = tswap32(target_grouplist[i]);
8274
unlock_user(target_grouplist, arg2, 0);
8275
ret = get_errno(setgroups(gidsetsize, grouplist));
8279
#ifdef TARGET_NR_fchown32
8280
case TARGET_NR_fchown32:
8281
ret = get_errno(fchown(arg1, arg2, arg3));
8284
#ifdef TARGET_NR_setresuid32
8285
case TARGET_NR_setresuid32:
8286
ret = get_errno(setresuid(arg1, arg2, arg3));
8289
#ifdef TARGET_NR_getresuid32
8290
case TARGET_NR_getresuid32:
8292
uid_t ruid, euid, suid;
8293
ret = get_errno(getresuid(&ruid, &euid, &suid));
8294
if (!is_error(ret)) {
8295
if (put_user_u32(ruid, arg1)
8296
|| put_user_u32(euid, arg2)
8297
|| put_user_u32(suid, arg3))
8303
#ifdef TARGET_NR_setresgid32
8304
case TARGET_NR_setresgid32:
8305
ret = get_errno(setresgid(arg1, arg2, arg3));
8308
#ifdef TARGET_NR_getresgid32
8309
case TARGET_NR_getresgid32:
8311
gid_t rgid, egid, sgid;
8312
ret = get_errno(getresgid(&rgid, &egid, &sgid));
8313
if (!is_error(ret)) {
8314
if (put_user_u32(rgid, arg1)
8315
|| put_user_u32(egid, arg2)
8316
|| put_user_u32(sgid, arg3))
8322
#ifdef TARGET_NR_chown32
8323
case TARGET_NR_chown32:
8324
if (!(p = lock_user_string(arg1)))
8326
ret = get_errno(chown(p, arg2, arg3));
8327
unlock_user(p, arg1, 0);
8330
#ifdef TARGET_NR_setuid32
8331
case TARGET_NR_setuid32:
8332
ret = get_errno(setuid(arg1));
8335
#ifdef TARGET_NR_setgid32
8336
case TARGET_NR_setgid32:
8337
ret = get_errno(setgid(arg1));
8340
#ifdef TARGET_NR_setfsuid32
8341
case TARGET_NR_setfsuid32:
8342
ret = get_errno(setfsuid(arg1));
8345
#ifdef TARGET_NR_setfsgid32
8346
case TARGET_NR_setfsgid32:
8347
ret = get_errno(setfsgid(arg1));
8351
case TARGET_NR_pivot_root:
8353
#ifdef TARGET_NR_mincore
8354
case TARGET_NR_mincore:
8357
ret = -TARGET_EFAULT;
8358
if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8360
if (!(p = lock_user_string(arg3)))
8362
ret = get_errno(mincore(a, arg2, p));
8363
unlock_user(p, arg3, ret);
8365
unlock_user(a, arg1, 0);
8369
#ifdef TARGET_NR_arm_fadvise64_64
8370
case TARGET_NR_arm_fadvise64_64:
8373
* arm_fadvise64_64 looks like fadvise64_64 but
8374
* with different argument order
8382
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8383
#ifdef TARGET_NR_fadvise64_64
8384
case TARGET_NR_fadvise64_64:
8386
#ifdef TARGET_NR_fadvise64
8387
case TARGET_NR_fadvise64:
8391
case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8392
case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8393
case 6: arg4 = POSIX_FADV_DONTNEED; break;
8394
case 7: arg4 = POSIX_FADV_NOREUSE; break;
8398
ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8401
#ifdef TARGET_NR_madvise
8402
case TARGET_NR_madvise:
8403
/* A straight passthrough may not be safe because qemu sometimes
8404
turns private file-backed mappings into anonymous mappings.
8405
This will break MADV_DONTNEED.
8406
This is a hint, so ignoring and returning success is ok. */
8410
#if TARGET_ABI_BITS == 32
8411
case TARGET_NR_fcntl64:
8415
struct target_flock64 *target_fl;
8417
struct target_eabi_flock64 *target_efl;
8420
cmd = target_to_host_fcntl_cmd(arg2);
8421
if (cmd == -TARGET_EINVAL) {
8427
case TARGET_F_GETLK64:
8429
if (((CPUARMState *)cpu_env)->eabi) {
8430
if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8432
fl.l_type = tswap16(target_efl->l_type);
8433
fl.l_whence = tswap16(target_efl->l_whence);
8434
fl.l_start = tswap64(target_efl->l_start);
8435
fl.l_len = tswap64(target_efl->l_len);
8436
fl.l_pid = tswap32(target_efl->l_pid);
8437
unlock_user_struct(target_efl, arg3, 0);
8441
if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8443
fl.l_type = tswap16(target_fl->l_type);
8444
fl.l_whence = tswap16(target_fl->l_whence);
8445
fl.l_start = tswap64(target_fl->l_start);
8446
fl.l_len = tswap64(target_fl->l_len);
8447
fl.l_pid = tswap32(target_fl->l_pid);
8448
unlock_user_struct(target_fl, arg3, 0);
8450
ret = get_errno(fcntl(arg1, cmd, &fl));
8453
if (((CPUARMState *)cpu_env)->eabi) {
8454
if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8456
target_efl->l_type = tswap16(fl.l_type);
8457
target_efl->l_whence = tswap16(fl.l_whence);
8458
target_efl->l_start = tswap64(fl.l_start);
8459
target_efl->l_len = tswap64(fl.l_len);
8460
target_efl->l_pid = tswap32(fl.l_pid);
8461
unlock_user_struct(target_efl, arg3, 1);
8465
if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8467
target_fl->l_type = tswap16(fl.l_type);
8468
target_fl->l_whence = tswap16(fl.l_whence);
8469
target_fl->l_start = tswap64(fl.l_start);
8470
target_fl->l_len = tswap64(fl.l_len);
8471
target_fl->l_pid = tswap32(fl.l_pid);
8472
unlock_user_struct(target_fl, arg3, 1);
8477
case TARGET_F_SETLK64:
8478
case TARGET_F_SETLKW64:
8480
if (((CPUARMState *)cpu_env)->eabi) {
8481
if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8483
fl.l_type = tswap16(target_efl->l_type);
8484
fl.l_whence = tswap16(target_efl->l_whence);
8485
fl.l_start = tswap64(target_efl->l_start);
8486
fl.l_len = tswap64(target_efl->l_len);
8487
fl.l_pid = tswap32(target_efl->l_pid);
8488
unlock_user_struct(target_efl, arg3, 0);
8492
if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8494
fl.l_type = tswap16(target_fl->l_type);
8495
fl.l_whence = tswap16(target_fl->l_whence);
8496
fl.l_start = tswap64(target_fl->l_start);
8497
fl.l_len = tswap64(target_fl->l_len);
8498
fl.l_pid = tswap32(target_fl->l_pid);
8499
unlock_user_struct(target_fl, arg3, 0);
8501
ret = get_errno(fcntl(arg1, cmd, &fl));
8504
ret = do_fcntl(arg1, arg2, arg3);
8510
#ifdef TARGET_NR_cacheflush
8511
case TARGET_NR_cacheflush:
8512
/* self-modifying code is handled automatically, so nothing needed */
8516
#ifdef TARGET_NR_security
8517
case TARGET_NR_security:
8520
#ifdef TARGET_NR_getpagesize
8521
case TARGET_NR_getpagesize:
8522
ret = TARGET_PAGE_SIZE;
8525
case TARGET_NR_gettid:
8526
ret = get_errno(gettid());
8528
#ifdef TARGET_NR_readahead
8529
case TARGET_NR_readahead:
8530
#if TARGET_ABI_BITS == 32
8531
if (regpairs_aligned(cpu_env)) {
8536
ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8538
ret = get_errno(readahead(arg1, arg2, arg3));
8543
#ifdef TARGET_NR_setxattr
8544
case TARGET_NR_listxattr:
8545
case TARGET_NR_llistxattr:
8549
b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8551
ret = -TARGET_EFAULT;
8555
p = lock_user_string(arg1);
8557
if (num == TARGET_NR_listxattr) {
8558
ret = get_errno(listxattr(p, b, arg3));
8560
ret = get_errno(llistxattr(p, b, arg3));
8563
ret = -TARGET_EFAULT;
8565
unlock_user(p, arg1, 0);
8566
unlock_user(b, arg2, arg3);
8569
case TARGET_NR_flistxattr:
8573
b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8575
ret = -TARGET_EFAULT;
8579
ret = get_errno(flistxattr(arg1, b, arg3));
8580
unlock_user(b, arg2, arg3);
8583
case TARGET_NR_setxattr:
8584
case TARGET_NR_lsetxattr:
8586
void *p, *n, *v = 0;
8588
v = lock_user(VERIFY_READ, arg3, arg4, 1);
8590
ret = -TARGET_EFAULT;
8594
p = lock_user_string(arg1);
8595
n = lock_user_string(arg2);
8597
if (num == TARGET_NR_setxattr) {
8598
ret = get_errno(setxattr(p, n, v, arg4, arg5));
8600
ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8603
ret = -TARGET_EFAULT;
8605
unlock_user(p, arg1, 0);
8606
unlock_user(n, arg2, 0);
8607
unlock_user(v, arg3, 0);
8610
case TARGET_NR_fsetxattr:
8614
v = lock_user(VERIFY_READ, arg3, arg4, 1);
8616
ret = -TARGET_EFAULT;
8620
n = lock_user_string(arg2);
8622
ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8624
ret = -TARGET_EFAULT;
8626
unlock_user(n, arg2, 0);
8627
unlock_user(v, arg3, 0);
8630
case TARGET_NR_getxattr:
8631
case TARGET_NR_lgetxattr:
8633
void *p, *n, *v = 0;
8635
v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8637
ret = -TARGET_EFAULT;
8641
p = lock_user_string(arg1);
8642
n = lock_user_string(arg2);
8644
if (num == TARGET_NR_getxattr) {
8645
ret = get_errno(getxattr(p, n, v, arg4));
8647
ret = get_errno(lgetxattr(p, n, v, arg4));
8650
ret = -TARGET_EFAULT;
8652
unlock_user(p, arg1, 0);
8653
unlock_user(n, arg2, 0);
8654
unlock_user(v, arg3, arg4);
8657
case TARGET_NR_fgetxattr:
8661
v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8663
ret = -TARGET_EFAULT;
8667
n = lock_user_string(arg2);
8669
ret = get_errno(fgetxattr(arg1, n, v, arg4));
8671
ret = -TARGET_EFAULT;
8673
unlock_user(n, arg2, 0);
8674
unlock_user(v, arg3, arg4);
8677
case TARGET_NR_removexattr:
8678
case TARGET_NR_lremovexattr:
8681
p = lock_user_string(arg1);
8682
n = lock_user_string(arg2);
8684
if (num == TARGET_NR_removexattr) {
8685
ret = get_errno(removexattr(p, n));
8687
ret = get_errno(lremovexattr(p, n));
8690
ret = -TARGET_EFAULT;
8692
unlock_user(p, arg1, 0);
8693
unlock_user(n, arg2, 0);
8696
case TARGET_NR_fremovexattr:
8699
n = lock_user_string(arg2);
8701
ret = get_errno(fremovexattr(arg1, n));
8703
ret = -TARGET_EFAULT;
8705
unlock_user(n, arg2, 0);
8709
#endif /* CONFIG_ATTR */
8710
#ifdef TARGET_NR_set_thread_area
8711
case TARGET_NR_set_thread_area:
8712
#if defined(TARGET_MIPS)
8713
((CPUMIPSState *) cpu_env)->tls_value = arg1;
8716
#elif defined(TARGET_CRIS)
8718
ret = -TARGET_EINVAL;
8720
((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8724
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
8725
ret = do_set_thread_area(cpu_env, arg1);
8727
#elif defined(TARGET_M68K)
8729
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8730
ts->tp_value = arg1;
8735
goto unimplemented_nowarn;
8738
#ifdef TARGET_NR_get_thread_area
8739
case TARGET_NR_get_thread_area:
8740
#if defined(TARGET_I386) && defined(TARGET_ABI32)
8741
ret = do_get_thread_area(cpu_env, arg1);
8743
#elif defined(TARGET_M68K)
8745
TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
8750
goto unimplemented_nowarn;
8753
#ifdef TARGET_NR_getdomainname
8754
case TARGET_NR_getdomainname:
8755
goto unimplemented_nowarn;
8758
#ifdef TARGET_NR_clock_gettime
8759
case TARGET_NR_clock_gettime:
8762
ret = get_errno(clock_gettime(arg1, &ts));
8763
if (!is_error(ret)) {
8764
host_to_target_timespec(arg2, &ts);
8769
#ifdef TARGET_NR_clock_getres
8770
case TARGET_NR_clock_getres:
8773
ret = get_errno(clock_getres(arg1, &ts));
8774
if (!is_error(ret)) {
8775
host_to_target_timespec(arg2, &ts);
8780
#ifdef TARGET_NR_clock_nanosleep
8781
case TARGET_NR_clock_nanosleep:
8784
target_to_host_timespec(&ts, arg3);
8785
ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8787
host_to_target_timespec(arg4, &ts);
8792
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8793
case TARGET_NR_set_tid_address:
8794
ret = get_errno(set_tid_address((int *)g2h(arg1)));
8798
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8799
case TARGET_NR_tkill:
8800
ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8804
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8805
case TARGET_NR_tgkill:
8806
ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8807
target_to_host_signal(arg3)));
8811
#ifdef TARGET_NR_set_robust_list
8812
case TARGET_NR_set_robust_list:
8813
case TARGET_NR_get_robust_list:
8814
/* The ABI for supporting robust futexes has userspace pass
8815
* the kernel a pointer to a linked list which is updated by
8816
* userspace after the syscall; the list is walked by the kernel
8817
* when the thread exits. Since the linked list in QEMU guest
8818
* memory isn't a valid linked list for the host and we have
8819
* no way to reliably intercept the thread-death event, we can't
8820
* support these. Silently return ENOSYS so that guest userspace
8821
* falls back to a non-robust futex implementation (which should
8822
* be OK except in the corner case of the guest crashing while
8823
* holding a mutex that is shared with another process via
8826
goto unimplemented_nowarn;
8829
#if defined(TARGET_NR_utimensat)
8830
case TARGET_NR_utimensat:
8832
struct timespec *tsp, ts[2];
8836
target_to_host_timespec(ts, arg3);
8837
target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8841
ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8843
if (!(p = lock_user_string(arg2))) {
8844
ret = -TARGET_EFAULT;
8847
ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8848
unlock_user(p, arg2, 0);
8853
case TARGET_NR_futex:
8854
ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8856
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8857
case TARGET_NR_inotify_init:
8858
ret = get_errno(sys_inotify_init());
8861
#ifdef CONFIG_INOTIFY1
8862
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8863
case TARGET_NR_inotify_init1:
8864
ret = get_errno(sys_inotify_init1(arg1));
8868
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8869
case TARGET_NR_inotify_add_watch:
8870
p = lock_user_string(arg2);
8871
ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8872
unlock_user(p, arg2, 0);
8875
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8876
case TARGET_NR_inotify_rm_watch:
8877
ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8881
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8882
case TARGET_NR_mq_open:
8884
struct mq_attr posix_mq_attr;
8886
p = lock_user_string(arg1 - 1);
8888
copy_from_user_mq_attr (&posix_mq_attr, arg4);
8889
ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8890
unlock_user (p, arg1, 0);
8894
case TARGET_NR_mq_unlink:
8895
p = lock_user_string(arg1 - 1);
8896
ret = get_errno(mq_unlink(p));
8897
unlock_user (p, arg1, 0);
8900
case TARGET_NR_mq_timedsend:
8904
p = lock_user (VERIFY_READ, arg2, arg3, 1);
8906
target_to_host_timespec(&ts, arg5);
8907
ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8908
host_to_target_timespec(arg5, &ts);
8911
ret = get_errno(mq_send(arg1, p, arg3, arg4));
8912
unlock_user (p, arg2, arg3);
8916
case TARGET_NR_mq_timedreceive:
8921
p = lock_user (VERIFY_READ, arg2, arg3, 1);
8923
target_to_host_timespec(&ts, arg5);
8924
ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8925
host_to_target_timespec(arg5, &ts);
8928
ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8929
unlock_user (p, arg2, arg3);
8931
put_user_u32(prio, arg4);
8935
/* Not implemented for now... */
8936
/* case TARGET_NR_mq_notify: */
8939
case TARGET_NR_mq_getsetattr:
8941
struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8944
ret = mq_getattr(arg1, &posix_mq_attr_out);
8945
copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8948
copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8949
ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8956
#ifdef CONFIG_SPLICE
8957
#ifdef TARGET_NR_tee
8960
ret = get_errno(tee(arg1,arg2,arg3,arg4));
8964
#ifdef TARGET_NR_splice
8965
case TARGET_NR_splice:
8967
loff_t loff_in, loff_out;
8968
loff_t *ploff_in = NULL, *ploff_out = NULL;
8970
get_user_u64(loff_in, arg2);
8971
ploff_in = &loff_in;
8974
get_user_u64(loff_out, arg2);
8975
ploff_out = &loff_out;
8977
ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
8981
#ifdef TARGET_NR_vmsplice
8982
case TARGET_NR_vmsplice:
8984
struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8986
ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
8987
unlock_iovec(vec, arg2, arg3, 0);
8989
ret = -host_to_target_errno(errno);
8994
#endif /* CONFIG_SPLICE */
8995
#ifdef CONFIG_EVENTFD
8996
#if defined(TARGET_NR_eventfd)
8997
case TARGET_NR_eventfd:
8998
ret = get_errno(eventfd(arg1, 0));
9001
#if defined(TARGET_NR_eventfd2)
9002
case TARGET_NR_eventfd2:
9004
int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9005
if (arg2 & TARGET_O_NONBLOCK) {
9006
host_flags |= O_NONBLOCK;
9008
if (arg2 & TARGET_O_CLOEXEC) {
9009
host_flags |= O_CLOEXEC;
9011
ret = get_errno(eventfd(arg1, host_flags));
9015
#endif /* CONFIG_EVENTFD */
9016
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9017
case TARGET_NR_fallocate:
9018
#if TARGET_ABI_BITS == 32
9019
ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9020
target_offset64(arg5, arg6)));
9022
ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9026
#if defined(CONFIG_SYNC_FILE_RANGE)
9027
#if defined(TARGET_NR_sync_file_range)
9028
case TARGET_NR_sync_file_range:
9029
#if TARGET_ABI_BITS == 32
9030
#if defined(TARGET_MIPS)
9031
ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9032
target_offset64(arg5, arg6), arg7));
9034
ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9035
target_offset64(arg4, arg5), arg6));
9036
#endif /* !TARGET_MIPS */
9038
ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9042
#if defined(TARGET_NR_sync_file_range2)
9043
case TARGET_NR_sync_file_range2:
9044
/* This is like sync_file_range but the arguments are reordered */
9045
#if TARGET_ABI_BITS == 32
9046
ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9047
target_offset64(arg5, arg6), arg2));
9049
ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9054
#if defined(CONFIG_EPOLL)
9055
#if defined(TARGET_NR_epoll_create)
9056
case TARGET_NR_epoll_create:
9057
ret = get_errno(epoll_create(arg1));
9060
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9061
case TARGET_NR_epoll_create1:
9062
ret = get_errno(epoll_create1(arg1));
9065
#if defined(TARGET_NR_epoll_ctl)
9066
case TARGET_NR_epoll_ctl:
9068
struct epoll_event ep;
9069
struct epoll_event *epp = 0;
9071
struct target_epoll_event *target_ep;
9072
if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9075
ep.events = tswap32(target_ep->events);
9076
/* The epoll_data_t union is just opaque data to the kernel,
9077
* so we transfer all 64 bits across and need not worry what
9078
* actual data type it is.
9080
ep.data.u64 = tswap64(target_ep->data.u64);
9081
unlock_user_struct(target_ep, arg4, 0);
9084
ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9089
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9090
#define IMPLEMENT_EPOLL_PWAIT
9092
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9093
#if defined(TARGET_NR_epoll_wait)
9094
case TARGET_NR_epoll_wait:
9096
#if defined(IMPLEMENT_EPOLL_PWAIT)
9097
case TARGET_NR_epoll_pwait:
9100
struct target_epoll_event *target_ep;
9101
struct epoll_event *ep;
9103
int maxevents = arg3;
9106
target_ep = lock_user(VERIFY_WRITE, arg2,
9107
maxevents * sizeof(struct target_epoll_event), 1);
9112
ep = alloca(maxevents * sizeof(struct epoll_event));
9115
#if defined(IMPLEMENT_EPOLL_PWAIT)
9116
case TARGET_NR_epoll_pwait:
9118
target_sigset_t *target_set;
9119
sigset_t _set, *set = &_set;
9122
target_set = lock_user(VERIFY_READ, arg5,
9123
sizeof(target_sigset_t), 1);
9125
unlock_user(target_ep, arg2, 0);
9128
target_to_host_sigset(set, target_set);
9129
unlock_user(target_set, arg5, 0);
9134
ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9138
#if defined(TARGET_NR_epoll_wait)
9139
case TARGET_NR_epoll_wait:
9140
ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9144
ret = -TARGET_ENOSYS;
9146
if (!is_error(ret)) {
9148
for (i = 0; i < ret; i++) {
9149
target_ep[i].events = tswap32(ep[i].events);
9150
target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9153
unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9158
#ifdef TARGET_NR_prlimit64
9159
case TARGET_NR_prlimit64:
9161
/* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9162
struct target_rlimit64 *target_rnew, *target_rold;
9163
struct host_rlimit64 rnew, rold, *rnewp = 0;
9165
if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9168
rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9169
rnew.rlim_max = tswap64(target_rnew->rlim_max);
9170
unlock_user_struct(target_rnew, arg3, 0);
9174
ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9175
if (!is_error(ret) && arg4) {
9176
if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9179
target_rold->rlim_cur = tswap64(rold.rlim_cur);
9180
target_rold->rlim_max = tswap64(rold.rlim_max);
9181
unlock_user_struct(target_rold, arg4, 1);
9186
#ifdef TARGET_NR_gethostname
9187
case TARGET_NR_gethostname:
9189
char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9191
ret = get_errno(gethostname(name, arg2));
9192
unlock_user(name, arg1, arg2);
9194
ret = -TARGET_EFAULT;
9199
#ifdef TARGET_NR_atomic_cmpxchg_32
9200
case TARGET_NR_atomic_cmpxchg_32:
9202
/* should use start_exclusive from main.c */
9203
abi_ulong mem_value;
9204
if (get_user_u32(mem_value, arg6)) {
9205
target_siginfo_t info;
9206
info.si_signo = SIGSEGV;
9208
info.si_code = TARGET_SEGV_MAPERR;
9209
info._sifields._sigfault._addr = arg6;
9210
queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9214
if (mem_value == arg2)
9215
put_user_u32(arg1, arg6);
9220
#ifdef TARGET_NR_atomic_barrier
9221
case TARGET_NR_atomic_barrier:
9223
/* Like the kernel implementation and the qemu arm barrier, no-op this? */
9229
gemu_log("qemu: Unsupported syscall: %d\n", num);
9230
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9231
unimplemented_nowarn:
9233
ret = -TARGET_ENOSYS;
9238
gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9241
print_syscall_ret(num, ret);
9244
ret = -TARGET_EFAULT;