xref: /openbmc/qemu/linux-user/syscall.c (revision 24894f39)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
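
/*
 * Illustrative example (informational comment only): glibc's pthread_create()
 * typically passes roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which contains every bit of CLONE_THREAD_FLAGS, only extra bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, and nothing in CLONE_INVALID_THREAD_FLAGS,
 * so it is handled as thread creation.  A plain fork()-style clone (e.g. just
 * SIGCHLD in the CSIGNAL field) contains none of CLONE_THREAD_FLAGS and
 * nothing in CLONE_INVALID_FORK_FLAGS, so it is handled as fork.
 */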
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
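/*
 * Illustrative expansion (comment only): a use such as
 *     _syscall0(int, gettid)
 * defines
 *     static int gettid(void) { return syscall(__NR_gettid); }
 * i.e. a thin static wrapper that invokes the raw host syscall number
 * directly, bypassing whatever wrapper (if any) the host libc provides.
 */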
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we implement guest getdents using the host getdents if the host has it.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
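
/*
 * Example of the case the check above excludes (comment only): a 64-bit
 * guest on a 32-bit host has HOST_LONG_BITS (32) < TARGET_ABI_BITS (64),
 * so each host struct linux_dirent record would grow when converted to the
 * guest's wider layout and the converted records might overflow the
 * guest-supplied buffer; in that configuration guest getdents is instead
 * implemented on top of the host getdents64 below.
 */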
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
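
/*
 * Illustrative use (comment only): each row pairs a (mask, bits) match for
 * the target flag word with the corresponding (mask, bits) to set on the
 * host side.  For example a guest open() flag word with TARGET_O_CREAT and
 * TARGET_O_NONBLOCK set translates to host O_CREAT | O_NONBLOCK via the
 * target_to_host_bitmask()/host_to_target_bitmask() helpers, which matters
 * on targets whose numeric O_* values differ from the host's.
 */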
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be the one used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
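
/*
 * Rationale sketch (comment only): the prlimit64 syscall always exchanges
 * 64-bit rlim_cur/rlim_max values, whereas glibc's struct rlimit may use
 * narrower (long-sized) fields on 32-bit hosts, so the fixed-layout
 * struct host_rlimit64 above is what gets passed to the raw syscall rather
 * than the libc type.
 */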
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461 
462 static inline int next_free_host_timer(void)
463 {
464     int k ;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
474 #endif
475 
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
486  * of registers which translates to the same as ARM/MIPS, because we start with
487  * r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
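
/*
 * Illustrative effect (comment only): for a guest call such as
 * pread64(fd, buf, count, offset) the 64-bit offset arrives as two abi_ulong
 * halves.  On ABIs where regpairs_aligned() returns 1 those halves must sit
 * in an even/odd register pair, so an unused padding slot may precede them;
 * the syscall dispatch code then shifts the remaining arguments down by one
 * before recombining the low and high words.
 */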
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERFKILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
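
/*
 * Example (comment only): if a host call fails with errno == ENOSYS,
 * get_errno() returns -TARGET_ENOSYS via the table above, i.e. the negative
 * target errno that the guest-visible syscall return path expects;
 * non-negative results are passed through unchanged.
 */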
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
725 
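/*
 * Broad sketch of the contract (comment only; the safe_syscall()
 * implementation carries the authoritative description): these wrappers
 * behave like the raw syscall, but cooperate with the signal handling code
 * so that a guest signal arriving before the host syscall has really started
 * makes the call fail with errno set to TARGET_ERESTARTSYS; get_errno() then
 * turns that into -TARGET_ERESTARTSYS and the guest syscall can be restarted
 * after the signal is delivered, instead of the signal being lost while we
 * block in the host kernel.
 */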
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  - use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
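
/*
 * Illustrative use following the rule above (comment only):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * i.e. always the 64-bit command constant together with struct flock64, so
 * the same code gets 64-bit file offsets whichever of fcntl64/fcntl the
 * host actually provides.
 */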
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903         target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905         return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925      * contain garbage data due to a previous heap usage (grown
926          * then shrunken).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     } else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
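
/*
 * Layout sketch (comment only): the guest fd_set is treated as an array of
 * abi_ulong words, so descriptor k lives at bit (k % TARGET_ABI_BITS) of
 * word (k / TARGET_ABI_BITS); with TARGET_ABI_BITS == 32, fd 33 is bit 1 of
 * the second word.  The loops above use __get_user()/__put_user() so each
 * word is byte-swapped as needed while the individual bits are mirrored
 * into or out of the host fd_set with FD_SET()/FD_ISSET().
 */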
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
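
/*
 * Worked example (comment only): on an Alpha host (HOST_HZ 1024) reporting
 * 2048 host ticks to a 100 Hz target, the conversion above yields
 * 2048 * 100 / 1024 = 200 target ticks, i.e. the same two seconds of CPU
 * time expressed in the target's tick unit.
 */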
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send an incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * Linux kernel has a similar feature.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424         struct target_sockaddr_ll *lladdr;
1425 
1426         lladdr = (struct target_sockaddr_ll *)addr;
1427         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
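/*
 * Convert host ancillary data in msgh back into the guest control buffer
 * described by target_msgh.  Payloads whose size differs on the target
 * (e.g. SO_TIMESTAMP's struct timeval) are resized, and truncation is
 * reported to the guest by setting MSG_CTRUNC.
 */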
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* These take a u32 value.  */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
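        /*
         * SO_ATTACH_FILTER passes a sock_fprog whose filter array lives in
         * guest memory; copy it and byte-swap each BPF instruction before
         * handing it to the host kernel.
         */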
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019         case TARGET_SO_BINDTODEVICE:
2020         {
2021                 char *dev_ifname, *addr_ifname;
2022 
2023                 if (optlen > IFNAMSIZ - 1) {
2024                     optlen = IFNAMSIZ - 1;
2025                 }
2026                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027                 if (!dev_ifname) {
2028                     return -TARGET_EFAULT;
2029                 }
2030                 optname = SO_BINDTODEVICE;
2031                 addr_ifname = alloca(IFNAMSIZ);
2032                 memcpy(addr_ifname, dev_ifname, optlen);
2033                 addr_ifname[optlen] = 0;
2034                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036                 unlock_user(dev_ifname, optval_addr, 0);
2037                 return ret;
2038         }
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057             /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059                 optname = SO_DEBUG;
2060                 break;
2061         case TARGET_SO_REUSEADDR:
2062                 optname = SO_REUSEADDR;
2063                 break;
2064 #ifdef SO_REUSEPORT
2065         case TARGET_SO_REUSEPORT:
2066                 optname = SO_REUSEPORT;
2067                 break;
2068 #endif
2069         case TARGET_SO_TYPE:
2070                 optname = SO_TYPE;
2071                 break;
2072         case TARGET_SO_ERROR:
2073                 optname = SO_ERROR;
2074                 break;
2075         case TARGET_SO_DONTROUTE:
2076                 optname = SO_DONTROUTE;
2077                 break;
2078         case TARGET_SO_BROADCAST:
2079                 optname = SO_BROADCAST;
2080                 break;
2081         case TARGET_SO_SNDBUF:
2082                 optname = SO_SNDBUF;
2083                 break;
2084         case TARGET_SO_SNDBUFFORCE:
2085                 optname = SO_SNDBUFFORCE;
2086                 break;
2087         case TARGET_SO_RCVBUF:
2088                 optname = SO_RCVBUF;
2089                 break;
2090         case TARGET_SO_RCVBUFFORCE:
2091                 optname = SO_RCVBUFFORCE;
2092                 break;
2093         case TARGET_SO_KEEPALIVE:
2094                 optname = SO_KEEPALIVE;
2095                 break;
2096         case TARGET_SO_OOBINLINE:
2097                 optname = SO_OOBINLINE;
2098                 break;
2099         case TARGET_SO_NO_CHECK:
2100                 optname = SO_NO_CHECK;
2101                 break;
2102         case TARGET_SO_PRIORITY:
2103                 optname = SO_PRIORITY;
2104                 break;
2105 #ifdef SO_BSDCOMPAT
2106         case TARGET_SO_BSDCOMPAT:
2107                 optname = SO_BSDCOMPAT;
2108                 break;
2109 #endif
2110         case TARGET_SO_PASSCRED:
2111                 optname = SO_PASSCRED;
2112                 break;
2113         case TARGET_SO_PASSSEC:
2114                 optname = SO_PASSSEC;
2115                 break;
2116         case TARGET_SO_TIMESTAMP:
2117                 optname = SO_TIMESTAMP;
2118                 break;
2119         case TARGET_SO_RCVLOWAT:
2120                 optname = SO_RCVLOWAT;
2121                 break;
2122         default:
2123             goto unimplemented;
2124         }
2125         if (optlen < sizeof(uint32_t))
2126             return -TARGET_EINVAL;
2127 
2128         if (get_user_u32(val, optval_addr))
2129             return -TARGET_EFAULT;
2130         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131         break;
2132     default:
2133     unimplemented:
2134         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135         ret = -TARGET_ENOPROTOOPT;
2136     }
2137     return ret;
2138 }
2139 
2140 /* do_getsockopt() Must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142                               abi_ulong optval_addr, abi_ulong optlen)
2143 {
2144     abi_long ret;
2145     int len, val;
2146     socklen_t lv;
2147 
2148     switch(level) {
2149     case TARGET_SOL_SOCKET:
2150         level = SOL_SOCKET;
2151         switch (optname) {
2152         /* These don't just return a single integer */
2153         case TARGET_SO_RCVTIMEO:
2154         case TARGET_SO_SNDTIMEO:
2155         case TARGET_SO_PEERNAME:
2156             goto unimplemented;
2157         case TARGET_SO_PEERCRED: {
2158             struct ucred cr;
2159             socklen_t crlen;
2160             struct target_ucred *tcr;
2161 
2162             if (get_user_u32(len, optlen)) {
2163                 return -TARGET_EFAULT;
2164             }
2165             if (len < 0) {
2166                 return -TARGET_EINVAL;
2167             }
2168 
2169             crlen = sizeof(cr);
2170             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2171                                        &cr, &crlen));
2172             if (ret < 0) {
2173                 return ret;
2174             }
2175             if (len > crlen) {
2176                 len = crlen;
2177             }
2178             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             __put_user(cr.pid, &tcr->pid);
2182             __put_user(cr.uid, &tcr->uid);
2183             __put_user(cr.gid, &tcr->gid);
2184             unlock_user_struct(tcr, optval_addr, 1);
2185             if (put_user_u32(len, optlen)) {
2186                 return -TARGET_EFAULT;
2187             }
2188             break;
2189         }
2190         case TARGET_SO_LINGER:
2191         {
2192             struct linger lg;
2193             socklen_t lglen;
2194             struct target_linger *tlg;
2195 
2196             if (get_user_u32(len, optlen)) {
2197                 return -TARGET_EFAULT;
2198             }
2199             if (len < 0) {
2200                 return -TARGET_EINVAL;
2201             }
2202 
2203             lglen = sizeof(lg);
2204             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2205                                        &lg, &lglen));
2206             if (ret < 0) {
2207                 return ret;
2208             }
2209             if (len > lglen) {
2210                 len = lglen;
2211             }
2212             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             __put_user(lg.l_onoff, &tlg->l_onoff);
2216             __put_user(lg.l_linger, &tlg->l_linger);
2217             unlock_user_struct(tlg, optval_addr, 1);
2218             if (put_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             break;
2222         }
2223         /* Options with 'int' argument.  */
2224         case TARGET_SO_DEBUG:
2225             optname = SO_DEBUG;
2226             goto int_case;
2227         case TARGET_SO_REUSEADDR:
2228             optname = SO_REUSEADDR;
2229             goto int_case;
2230 #ifdef SO_REUSEPORT
2231         case TARGET_SO_REUSEPORT:
2232             optname = SO_REUSEPORT;
2233             goto int_case;
2234 #endif
2235         case TARGET_SO_TYPE:
2236             optname = SO_TYPE;
2237             goto int_case;
2238         case TARGET_SO_ERROR:
2239             optname = SO_ERROR;
2240             goto int_case;
2241         case TARGET_SO_DONTROUTE:
2242             optname = SO_DONTROUTE;
2243             goto int_case;
2244         case TARGET_SO_BROADCAST:
2245             optname = SO_BROADCAST;
2246             goto int_case;
2247         case TARGET_SO_SNDBUF:
2248             optname = SO_SNDBUF;
2249             goto int_case;
2250         case TARGET_SO_RCVBUF:
2251             optname = SO_RCVBUF;
2252             goto int_case;
2253         case TARGET_SO_KEEPALIVE:
2254             optname = SO_KEEPALIVE;
2255             goto int_case;
2256         case TARGET_SO_OOBINLINE:
2257             optname = SO_OOBINLINE;
2258             goto int_case;
2259         case TARGET_SO_NO_CHECK:
2260             optname = SO_NO_CHECK;
2261             goto int_case;
2262         case TARGET_SO_PRIORITY:
2263             optname = SO_PRIORITY;
2264             goto int_case;
2265 #ifdef SO_BSDCOMPAT
2266         case TARGET_SO_BSDCOMPAT:
2267             optname = SO_BSDCOMPAT;
2268             goto int_case;
2269 #endif
2270         case TARGET_SO_PASSCRED:
2271             optname = SO_PASSCRED;
2272             goto int_case;
2273         case TARGET_SO_TIMESTAMP:
2274             optname = SO_TIMESTAMP;
2275             goto int_case;
2276         case TARGET_SO_RCVLOWAT:
2277             optname = SO_RCVLOWAT;
2278             goto int_case;
2279         case TARGET_SO_ACCEPTCONN:
2280             optname = SO_ACCEPTCONN;
2281             goto int_case;
2282         default:
2283             goto int_case;
2284         }
2285         break;
2286     case SOL_TCP:
2287         /* TCP options all take an 'int' value.  */
2288     int_case:
2289         if (get_user_u32(len, optlen))
2290             return -TARGET_EFAULT;
2291         if (len < 0)
2292             return -TARGET_EINVAL;
2293         lv = sizeof(lv);
2294         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2295         if (ret < 0)
2296             return ret;
2297         if (optname == SO_TYPE) {
2298             val = host_to_target_sock_type(val);
2299         }
2300         if (len > lv)
2301             len = lv;
2302         if (len == 4) {
2303             if (put_user_u32(val, optval_addr))
2304                 return -TARGET_EFAULT;
2305         } else {
2306             if (put_user_u8(val, optval_addr))
2307                 return -TARGET_EFAULT;
2308         }
2309         if (put_user_u32(len, optlen))
2310             return -TARGET_EFAULT;
2311         break;
2312     case SOL_IP:
2313         switch(optname) {
2314         case IP_TOS:
2315         case IP_TTL:
2316         case IP_HDRINCL:
2317         case IP_ROUTER_ALERT:
2318         case IP_RECVOPTS:
2319         case IP_RETOPTS:
2320         case IP_PKTINFO:
2321         case IP_MTU_DISCOVER:
2322         case IP_RECVERR:
2323         case IP_RECVTOS:
2324 #ifdef IP_FREEBIND
2325         case IP_FREEBIND:
2326 #endif
2327         case IP_MULTICAST_TTL:
2328         case IP_MULTICAST_LOOP:
2329             if (get_user_u32(len, optlen))
2330                 return -TARGET_EFAULT;
2331             if (len < 0)
2332                 return -TARGET_EINVAL;
2333             lv = sizeof(lv);
2334             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2335             if (ret < 0)
2336                 return ret;
2337             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2338                 len = 1;
2339                 if (put_user_u32(len, optlen)
2340                     || put_user_u8(val, optval_addr))
2341                     return -TARGET_EFAULT;
2342             } else {
2343                 if (len > sizeof(int))
2344                     len = sizeof(int);
2345                 if (put_user_u32(len, optlen)
2346                     || put_user_u32(val, optval_addr))
2347                     return -TARGET_EFAULT;
2348             }
2349             break;
2350         default:
2351             ret = -TARGET_ENOPROTOOPT;
2352             break;
2353         }
2354         break;
2355     case SOL_IPV6:
2356         switch (optname) {
2357         case IPV6_MTU_DISCOVER:
2358         case IPV6_MTU:
2359         case IPV6_V6ONLY:
2360         case IPV6_RECVPKTINFO:
2361         case IPV6_UNICAST_HOPS:
2362         case IPV6_MULTICAST_HOPS:
2363         case IPV6_MULTICAST_LOOP:
2364         case IPV6_RECVERR:
2365         case IPV6_RECVHOPLIMIT:
2366         case IPV6_2292HOPLIMIT:
2367         case IPV6_CHECKSUM:
2368             if (get_user_u32(len, optlen))
2369                 return -TARGET_EFAULT;
2370             if (len < 0)
2371                 return -TARGET_EINVAL;
2372             lv = sizeof(lv);
2373             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2374             if (ret < 0)
2375                 return ret;
2376             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2377                 len = 1;
2378                 if (put_user_u32(len, optlen)
2379                     || put_user_u8(val, optval_addr))
2380                     return -TARGET_EFAULT;
2381             } else {
2382                 if (len > sizeof(int))
2383                     len = sizeof(int);
2384                 if (put_user_u32(len, optlen)
2385                     || put_user_u32(val, optval_addr))
2386                     return -TARGET_EFAULT;
2387             }
2388             break;
2389         default:
2390             ret = -TARGET_ENOPROTOOPT;
2391             break;
2392         }
2393         break;
2394     default:
2395     unimplemented:
2396         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2397                  level, optname);
2398         ret = -TARGET_EOPNOTSUPP;
2399         break;
2400     }
2401     return ret;
2402 }
2403 
2404 /* Convert target low/high pair representing file offset into the host
2405  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2406  * as the kernel doesn't handle them either.
2407  */
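/*
 * Illustrative example: with a 32-bit guest, tlow = 0x1000 and thigh = 0x1
 * combine into the 64-bit offset 0x100001000.  A 64-bit host then gets
 * hlow = 0x100001000 and hhigh = 0, while a 32-bit host gets hlow = 0x1000
 * and hhigh = 0x1.
 */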
2408 static void target_to_host_low_high(abi_ulong tlow,
2409                                     abi_ulong thigh,
2410                                     unsigned long *hlow,
2411                                     unsigned long *hhigh)
2412 {
2413     uint64_t off = tlow |
2414         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2415         TARGET_LONG_BITS / 2;
2416 
2417     *hlow = off;
2418     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2419 }
2420 
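/*
 * Lock a guest iovec array into host memory as a host struct iovec array.
 * On failure this returns NULL with errno set (EINVAL, EFAULT or ENOMEM);
 * a zero count also returns NULL but with errno cleared.  The result must
 * be released with unlock_iovec().
 */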
2421 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2422                                 abi_ulong count, int copy)
2423 {
2424     struct target_iovec *target_vec;
2425     struct iovec *vec;
2426     abi_ulong total_len, max_len;
2427     int i;
2428     int err = 0;
2429     bool bad_address = false;
2430 
2431     if (count == 0) {
2432         errno = 0;
2433         return NULL;
2434     }
2435     if (count > IOV_MAX) {
2436         errno = EINVAL;
2437         return NULL;
2438     }
2439 
2440     vec = g_try_new0(struct iovec, count);
2441     if (vec == NULL) {
2442         errno = ENOMEM;
2443         return NULL;
2444     }
2445 
2446     target_vec = lock_user(VERIFY_READ, target_addr,
2447                            count * sizeof(struct target_iovec), 1);
2448     if (target_vec == NULL) {
2449         err = EFAULT;
2450         goto fail2;
2451     }
2452 
2453     /* ??? If host page size > target page size, this will result in a
2454        value larger than what we can actually support.  */
2455     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2456     total_len = 0;
2457 
2458     for (i = 0; i < count; i++) {
2459         abi_ulong base = tswapal(target_vec[i].iov_base);
2460         abi_long len = tswapal(target_vec[i].iov_len);
2461 
2462         if (len < 0) {
2463             err = EINVAL;
2464             goto fail;
2465         } else if (len == 0) {
2466             /* Zero length pointer is ignored.  */
2467             vec[i].iov_base = 0;
2468         } else {
2469             vec[i].iov_base = lock_user(type, base, len, copy);
2470             /* If the first buffer pointer is bad, this is a fault.  But
2471              * subsequent bad buffers will result in a partial write; this
2472              * is realized by filling the vector with null pointers and
2473              * zero lengths. */
2474             if (!vec[i].iov_base) {
2475                 if (i == 0) {
2476                     err = EFAULT;
2477                     goto fail;
2478                 } else {
2479                     bad_address = true;
2480                 }
2481             }
2482             if (bad_address) {
2483                 len = 0;
2484             }
2485             if (len > max_len - total_len) {
2486                 len = max_len - total_len;
2487             }
2488         }
2489         vec[i].iov_len = len;
2490         total_len += len;
2491     }
2492 
2493     unlock_user(target_vec, target_addr, 0);
2494     return vec;
2495 
2496  fail:
2497     while (--i >= 0) {
2498         if (tswapal(target_vec[i].iov_len) > 0) {
2499             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2500         }
2501     }
2502     unlock_user(target_vec, target_addr, 0);
2503  fail2:
2504     g_free(vec);
2505     errno = err;
2506     return NULL;
2507 }
2508 
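/*
 * Undo lock_iovec(): unlock every guest buffer (copying data back to the
 * guest when 'copy' is set) and free the host vector.
 */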
2509 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2510                          abi_ulong count, int copy)
2511 {
2512     struct target_iovec *target_vec;
2513     int i;
2514 
2515     target_vec = lock_user(VERIFY_READ, target_addr,
2516                            count * sizeof(struct target_iovec), 1);
2517     if (target_vec) {
2518         for (i = 0; i < count; i++) {
2519             abi_ulong base = tswapal(target_vec[i].iov_base);
2520             abi_long len = tswapal(target_vec[i].iov_len);
2521             if (len < 0) {
2522                 break;
2523             }
2524             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2525         }
2526         unlock_user(target_vec, target_addr, 0);
2527     }
2528 
2529     g_free(vec);
2530 }
2531 
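/*
 * Translate the guest SOCK_* type and its SOCK_CLOEXEC/SOCK_NONBLOCK flags
 * into host values; returns -TARGET_EINVAL when the host cannot express a
 * requested flag.
 */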
2532 static inline int target_to_host_sock_type(int *type)
2533 {
2534     int host_type = 0;
2535     int target_type = *type;
2536 
2537     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2538     case TARGET_SOCK_DGRAM:
2539         host_type = SOCK_DGRAM;
2540         break;
2541     case TARGET_SOCK_STREAM:
2542         host_type = SOCK_STREAM;
2543         break;
2544     default:
2545         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2546         break;
2547     }
2548     if (target_type & TARGET_SOCK_CLOEXEC) {
2549 #if defined(SOCK_CLOEXEC)
2550         host_type |= SOCK_CLOEXEC;
2551 #else
2552         return -TARGET_EINVAL;
2553 #endif
2554     }
2555     if (target_type & TARGET_SOCK_NONBLOCK) {
2556 #if defined(SOCK_NONBLOCK)
2557         host_type |= SOCK_NONBLOCK;
2558 #elif !defined(O_NONBLOCK)
2559         return -TARGET_EINVAL;
2560 #endif
2561     }
2562     *type = host_type;
2563     return 0;
2564 }
2565 
2566 /* Try to emulate socket type flags after socket creation.  */
2567 static int sock_flags_fixup(int fd, int target_type)
2568 {
2569 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2570     if (target_type & TARGET_SOCK_NONBLOCK) {
2571         int flags = fcntl(fd, F_GETFL);
2572         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2573             close(fd);
2574             return -TARGET_EINVAL;
2575         }
2576     }
2577 #endif
2578     return fd;
2579 }
2580 
2581 /* do_socket() Must return target values and target errnos. */
2582 static abi_long do_socket(int domain, int type, int protocol)
2583 {
2584     int target_type = type;
2585     int ret;
2586 
2587     ret = target_to_host_sock_type(&type);
2588     if (ret) {
2589         return ret;
2590     }
2591 
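    /* Only netlink protocols we know how to translate are accepted. */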
2592     if (domain == PF_NETLINK && !(
2593 #ifdef CONFIG_RTNETLINK
2594          protocol == NETLINK_ROUTE ||
2595 #endif
2596          protocol == NETLINK_KOBJECT_UEVENT ||
2597          protocol == NETLINK_AUDIT)) {
2598         return -EPFNOSUPPORT;
2599     }
2600 
2601     if (domain == AF_PACKET ||
2602         (domain == AF_INET && type == SOCK_PACKET)) {
2603         protocol = tswap16(protocol);
2604     }
2605 
2606     ret = get_errno(socket(domain, type, protocol));
2607     if (ret >= 0) {
2608         ret = sock_flags_fixup(ret, target_type);
2609         if (type == SOCK_PACKET) {
2610             /* Handle an obsolete case: if the socket type is
2611              * SOCK_PACKET, bind by name.
2612              */
2613             fd_trans_register(ret, &target_packet_trans);
2614         } else if (domain == PF_NETLINK) {
2615             switch (protocol) {
2616 #ifdef CONFIG_RTNETLINK
2617             case NETLINK_ROUTE:
2618                 fd_trans_register(ret, &target_netlink_route_trans);
2619                 break;
2620 #endif
2621             case NETLINK_KOBJECT_UEVENT:
2622                 /* nothing to do: messages are strings */
2623                 break;
2624             case NETLINK_AUDIT:
2625                 fd_trans_register(ret, &target_netlink_audit_trans);
2626                 break;
2627             default:
2628                 g_assert_not_reached();
2629             }
2630         }
2631     }
2632     return ret;
2633 }
2634 
2635 /* do_bind() Must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2637                         socklen_t addrlen)
2638 {
2639     void *addr;
2640     abi_long ret;
2641 
2642     if ((int)addrlen < 0) {
2643         return -TARGET_EINVAL;
2644     }
2645 
2646     addr = alloca(addrlen+1);
2647 
2648     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2649     if (ret)
2650         return ret;
2651 
2652     return get_errno(bind(sockfd, addr, addrlen));
2653 }
2654 
2655 /* do_connect() Must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2657                            socklen_t addrlen)
2658 {
2659     void *addr;
2660     abi_long ret;
2661 
2662     if ((int)addrlen < 0) {
2663         return -TARGET_EINVAL;
2664     }
2665 
2666     addr = alloca(addrlen+1);
2667 
2668     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2669     if (ret)
2670         return ret;
2671 
2672     return get_errno(safe_connect(sockfd, addr, addrlen));
2673 }
2674 
2675 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2676 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2677                                       int flags, int send)
2678 {
2679     abi_long ret, len;
2680     struct msghdr msg;
2681     abi_ulong count;
2682     struct iovec *vec;
2683     abi_ulong target_vec;
2684 
2685     if (msgp->msg_name) {
2686         msg.msg_namelen = tswap32(msgp->msg_namelen);
2687         msg.msg_name = alloca(msg.msg_namelen+1);
2688         ret = target_to_host_sockaddr(fd, msg.msg_name,
2689                                       tswapal(msgp->msg_name),
2690                                       msg.msg_namelen);
2691         if (ret == -TARGET_EFAULT) {
2692             /* For connected sockets msg_name and msg_namelen must
2693              * be ignored, so returning EFAULT immediately is wrong.
2694              * Instead, pass a bad msg_name to the host kernel, and
2695              * let it decide whether to return EFAULT or not.
2696              */
2697             msg.msg_name = (void *)-1;
2698         } else if (ret) {
2699             goto out2;
2700         }
2701     } else {
2702         msg.msg_name = NULL;
2703         msg.msg_namelen = 0;
2704     }
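    /*
     * Allocate twice the guest-supplied control buffer size: host cmsg
     * headers and alignment may be larger than the target's (see the
     * overflow note in target_to_host_cmsg()).
     */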
2705     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2706     msg.msg_control = alloca(msg.msg_controllen);
2707     memset(msg.msg_control, 0, msg.msg_controllen);
2708 
2709     msg.msg_flags = tswap32(msgp->msg_flags);
2710 
2711     count = tswapal(msgp->msg_iovlen);
2712     target_vec = tswapal(msgp->msg_iov);
2713 
2714     if (count > IOV_MAX) {
2715         /* sendmsg/recvmsg return a different errno for this condition than
2716          * readv/writev, so we must catch it here before lock_iovec() does.
2717          */
2718         ret = -TARGET_EMSGSIZE;
2719         goto out2;
2720     }
2721 
2722     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2723                      target_vec, count, send);
2724     if (vec == NULL) {
2725         ret = -host_to_target_errno(errno);
2726         goto out2;
2727     }
2728     msg.msg_iovlen = count;
2729     msg.msg_iov = vec;
2730 
2731     if (send) {
2732         if (fd_trans_target_to_host_data(fd)) {
2733             void *host_msg;
2734 
2735             host_msg = g_malloc(msg.msg_iov->iov_len);
2736             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2737             ret = fd_trans_target_to_host_data(fd)(host_msg,
2738                                                    msg.msg_iov->iov_len);
2739             if (ret >= 0) {
2740                 msg.msg_iov->iov_base = host_msg;
2741                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2742             }
2743             g_free(host_msg);
2744         } else {
2745             ret = target_to_host_cmsg(&msg, msgp);
2746             if (ret == 0) {
2747                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2748             }
2749         }
2750     } else {
2751         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2752         if (!is_error(ret)) {
2753             len = ret;
2754             if (fd_trans_host_to_target_data(fd)) {
2755                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2756                                                MIN(msg.msg_iov->iov_len, len));
2757             } else {
2758                 ret = host_to_target_cmsg(msgp, &msg);
2759             }
2760             if (!is_error(ret)) {
2761                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2762                 msgp->msg_flags = tswap32(msg.msg_flags);
2763                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2764                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2765                                     msg.msg_name, msg.msg_namelen);
2766                     if (ret) {
2767                         goto out;
2768                     }
2769                 }
2770 
2771                 ret = len;
2772             }
2773         }
2774     }
2775 
2776 out:
2777     unlock_iovec(vec, target_vec, count, !send);
2778 out2:
2779     return ret;
2780 }
2781 
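/* do_sendrecvmsg() Must return target values and target errnos. */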
2782 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2783                                int flags, int send)
2784 {
2785     abi_long ret;
2786     struct target_msghdr *msgp;
2787 
2788     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2789                           msgp,
2790                           target_msg,
2791                           send ? 1 : 0)) {
2792         return -TARGET_EFAULT;
2793     }
2794     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2795     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2796     return ret;
2797 }
2798 
2799 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2800  * so it might not have this *mmsg-specific flag either.
2801  */
2802 #ifndef MSG_WAITFORONE
2803 #define MSG_WAITFORONE 0x10000
2804 #endif
2805 
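/* do_sendrecvmmsg() Must return target values and target errnos. */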
2806 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2807                                 unsigned int vlen, unsigned int flags,
2808                                 int send)
2809 {
2810     struct target_mmsghdr *mmsgp;
2811     abi_long ret = 0;
2812     int i;
2813 
2814     if (vlen > UIO_MAXIOV) {
2815         vlen = UIO_MAXIOV;
2816     }
2817 
2818     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2819     if (!mmsgp) {
2820         return -TARGET_EFAULT;
2821     }
2822 
2823     for (i = 0; i < vlen; i++) {
2824         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2825         if (is_error(ret)) {
2826             break;
2827         }
2828         mmsgp[i].msg_len = tswap32(ret);
2829         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2830         if (flags & MSG_WAITFORONE) {
2831             flags |= MSG_DONTWAIT;
2832         }
2833     }
2834 
2835     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2836 
2837     /* Return number of datagrams sent if we sent any at all;
2838      * otherwise return the error.
2839      */
2840     if (i) {
2841         return i;
2842     }
2843     return ret;
2844 }
2845 
2846 /* do_accept4() Must return target values and target errnos. */
2847 static abi_long do_accept4(int fd, abi_ulong target_addr,
2848                            abi_ulong target_addrlen_addr, int flags)
2849 {
2850     socklen_t addrlen;
2851     void *addr;
2852     abi_long ret;
2853     int host_flags;
2854 
2855     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2856 
2857     if (target_addr == 0) {
2858         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2859     }
2860 
2861     /* Linux returns EINVAL if the addrlen pointer is invalid */
2862     if (get_user_u32(addrlen, target_addrlen_addr))
2863         return -TARGET_EINVAL;
2864 
2865     if ((int)addrlen < 0) {
2866         return -TARGET_EINVAL;
2867     }
2868 
2869     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2870         return -TARGET_EINVAL;
2871 
2872     addr = alloca(addrlen);
2873 
2874     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2875     if (!is_error(ret)) {
2876         host_to_target_sockaddr(target_addr, addr, addrlen);
2877         if (put_user_u32(addrlen, target_addrlen_addr))
2878             ret = -TARGET_EFAULT;
2879     }
2880     return ret;
2881 }
2882 
2883 /* do_getpeername() Must return target values and target errnos. */
2884 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2885                                abi_ulong target_addrlen_addr)
2886 {
2887     socklen_t addrlen;
2888     void *addr;
2889     abi_long ret;
2890 
2891     if (get_user_u32(addrlen, target_addrlen_addr))
2892         return -TARGET_EFAULT;
2893 
2894     if ((int)addrlen < 0) {
2895         return -TARGET_EINVAL;
2896     }
2897 
2898     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2899         return -TARGET_EFAULT;
2900 
2901     addr = alloca(addrlen);
2902 
2903     ret = get_errno(getpeername(fd, addr, &addrlen));
2904     if (!is_error(ret)) {
2905         host_to_target_sockaddr(target_addr, addr, addrlen);
2906         if (put_user_u32(addrlen, target_addrlen_addr))
2907             ret = -TARGET_EFAULT;
2908     }
2909     return ret;
2910 }
2911 
2912 /* do_getsockname() Must return target values and target errnos. */
2913 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2914                                abi_ulong target_addrlen_addr)
2915 {
2916     socklen_t addrlen;
2917     void *addr;
2918     abi_long ret;
2919 
2920     if (get_user_u32(addrlen, target_addrlen_addr))
2921         return -TARGET_EFAULT;
2922 
2923     if ((int)addrlen < 0) {
2924         return -TARGET_EINVAL;
2925     }
2926 
2927     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2928         return -TARGET_EFAULT;
2929 
2930     addr = alloca(addrlen);
2931 
2932     ret = get_errno(getsockname(fd, addr, &addrlen));
2933     if (!is_error(ret)) {
2934         host_to_target_sockaddr(target_addr, addr, addrlen);
2935         if (put_user_u32(addrlen, target_addrlen_addr))
2936             ret = -TARGET_EFAULT;
2937     }
2938     return ret;
2939 }
2940 
2941 /* do_socketpair() Must return target values and target errnos. */
2942 static abi_long do_socketpair(int domain, int type, int protocol,
2943                               abi_ulong target_tab_addr)
2944 {
2945     int tab[2];
2946     abi_long ret;
2947 
2948     target_to_host_sock_type(&type);
2949 
2950     ret = get_errno(socketpair(domain, type, protocol, tab));
2951     if (!is_error(ret)) {
2952         if (put_user_s32(tab[0], target_tab_addr)
2953             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2954             ret = -TARGET_EFAULT;
2955     }
2956     return ret;
2957 }
2958 
2959 /* do_sendto() Must return target values and target errnos. */
2960 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2961                           abi_ulong target_addr, socklen_t addrlen)
2962 {
2963     void *addr;
2964     void *host_msg;
2965     void *copy_msg = NULL;
2966     abi_long ret;
2967 
2968     if ((int)addrlen < 0) {
2969         return -TARGET_EINVAL;
2970     }
2971 
2972     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2973     if (!host_msg)
2974         return -TARGET_EFAULT;
2975     if (fd_trans_target_to_host_data(fd)) {
2976         copy_msg = host_msg;
2977         host_msg = g_malloc(len);
2978         memcpy(host_msg, copy_msg, len);
2979         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2980         if (ret < 0) {
2981             goto fail;
2982         }
2983     }
2984     if (target_addr) {
2985         addr = alloca(addrlen+1);
2986         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2987         if (ret) {
2988             goto fail;
2989         }
2990         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2991     } else {
2992         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2993     }
2994 fail:
2995     if (copy_msg) {
2996         g_free(host_msg);
2997         host_msg = copy_msg;
2998     }
2999     unlock_user(host_msg, msg, 0);
3000     return ret;
3001 }
3002 
3003 /* do_recvfrom() Must return target values and target errnos. */
3004 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3005                             abi_ulong target_addr,
3006                             abi_ulong target_addrlen)
3007 {
3008     socklen_t addrlen;
3009     void *addr;
3010     void *host_msg;
3011     abi_long ret;
3012 
3013     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3014     if (!host_msg)
3015         return -TARGET_EFAULT;
3016     if (target_addr) {
3017         if (get_user_u32(addrlen, target_addrlen)) {
3018             ret = -TARGET_EFAULT;
3019             goto fail;
3020         }
3021         if ((int)addrlen < 0) {
3022             ret = -TARGET_EINVAL;
3023             goto fail;
3024         }
3025         addr = alloca(addrlen);
3026         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3027                                       addr, &addrlen));
3028     } else {
3029         addr = NULL; /* To keep compiler quiet.  */
3030         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3031     }
3032     if (!is_error(ret)) {
3033         if (fd_trans_host_to_target_data(fd)) {
3034             abi_long trans;
3035             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3036             if (is_error(trans)) {
3037                 ret = trans;
3038                 goto fail;
3039             }
3040         }
3041         if (target_addr) {
3042             host_to_target_sockaddr(target_addr, addr, addrlen);
3043             if (put_user_u32(addrlen, target_addrlen)) {
3044                 ret = -TARGET_EFAULT;
3045                 goto fail;
3046             }
3047         }
3048         unlock_user(host_msg, msg, len);
3049     } else {
3050 fail:
3051         unlock_user(host_msg, msg, 0);
3052     }
3053     return ret;
3054 }
3055 
3056 #ifdef TARGET_NR_socketcall
3057 /* do_socketcall() must return target values and target errnos. */
3058 static abi_long do_socketcall(int num, abi_ulong vptr)
3059 {
3060     static const unsigned nargs[] = { /* number of arguments per operation */
3061         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3062         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3063         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3064         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3065         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3066         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3067         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3068         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3069         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3070         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3071         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3072         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3073         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3074         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3075         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3076         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3077         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3078         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3079         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3080         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3081     };
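    /*
     * Worked example (sketch only): a guest that calls
     * connect(sockfd, addr, addrlen) through the socketcall multiplexer
     * arrives here with num == TARGET_SYS_CONNECT and vptr pointing at a
     * guest array of nargs[num] == 3 abi_long slots holding
     * { sockfd, addr, addrlen }; the loop below copies them into a[]
     * before dispatching to do_connect().
     */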
3082     abi_long a[6]; /* max 6 args */
3083     unsigned i;
3084 
3085     /* check the range of the first argument num */
3086     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3087     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3088         return -TARGET_EINVAL;
3089     }
3090     /* ensure we have space for args */
3091     if (nargs[num] > ARRAY_SIZE(a)) {
3092         return -TARGET_EINVAL;
3093     }
3094     /* collect the arguments in a[] according to nargs[] */
3095     for (i = 0; i < nargs[num]; ++i) {
3096         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3097             return -TARGET_EFAULT;
3098         }
3099     }
3100     /* now when we have the args, invoke the appropriate underlying function */
3101     switch (num) {
3102     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3103         return do_socket(a[0], a[1], a[2]);
3104     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3105         return do_bind(a[0], a[1], a[2]);
3106     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3107         return do_connect(a[0], a[1], a[2]);
3108     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3109         return get_errno(listen(a[0], a[1]));
3110     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3111         return do_accept4(a[0], a[1], a[2], 0);
3112     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3113         return do_getsockname(a[0], a[1], a[2]);
3114     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3115         return do_getpeername(a[0], a[1], a[2]);
3116     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3117         return do_socketpair(a[0], a[1], a[2], a[3]);
3118     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3119         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3120     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3121         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3122     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3123         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3124     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3125         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3126     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3127         return get_errno(shutdown(a[0], a[1]));
3128     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3129         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3130     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3131         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3132     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3133         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3134     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3135         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3136     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3137         return do_accept4(a[0], a[1], a[2], a[3]);
3138     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3139         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3140     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3141         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3142     default:
3143         gemu_log("Unsupported socketcall: %d\n", num);
3144         return -TARGET_EINVAL;
3145     }
3146 }
3147 #endif
3148 
3149 #define N_SHM_REGIONS	32
3150 
3151 static struct shm_region {
3152     abi_ulong start;
3153     abi_ulong size;
3154     bool in_use;
3155 } shm_regions[N_SHM_REGIONS];
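/*
 * do_shmat() records each segment it attaches in this table so that
 * do_shmdt() can later clear the guest page flags over the right length;
 * at most N_SHM_REGIONS simultaneous attachments are tracked.
 */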
3156 
3157 #ifndef TARGET_SEMID64_DS
3158 /* asm-generic version of this struct */
3159 struct target_semid64_ds
3160 {
3161   struct target_ipc_perm sem_perm;
3162   abi_ulong sem_otime;
3163 #if TARGET_ABI_BITS == 32
3164   abi_ulong __unused1;
3165 #endif
3166   abi_ulong sem_ctime;
3167 #if TARGET_ABI_BITS == 32
3168   abi_ulong __unused2;
3169 #endif
3170   abi_ulong sem_nsems;
3171   abi_ulong __unused3;
3172   abi_ulong __unused4;
3173 };
3174 #endif
3175 
3176 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3177                                                abi_ulong target_addr)
3178 {
3179     struct target_ipc_perm *target_ip;
3180     struct target_semid64_ds *target_sd;
3181 
3182     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3183         return -TARGET_EFAULT;
3184     target_ip = &(target_sd->sem_perm);
3185     host_ip->__key = tswap32(target_ip->__key);
3186     host_ip->uid = tswap32(target_ip->uid);
3187     host_ip->gid = tswap32(target_ip->gid);
3188     host_ip->cuid = tswap32(target_ip->cuid);
3189     host_ip->cgid = tswap32(target_ip->cgid);
3190 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3191     host_ip->mode = tswap32(target_ip->mode);
3192 #else
3193     host_ip->mode = tswap16(target_ip->mode);
3194 #endif
3195 #if defined(TARGET_PPC)
3196     host_ip->__seq = tswap32(target_ip->__seq);
3197 #else
3198     host_ip->__seq = tswap16(target_ip->__seq);
3199 #endif
3200     unlock_user_struct(target_sd, target_addr, 0);
3201     return 0;
3202 }
3203 
3204 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3205                                                struct ipc_perm *host_ip)
3206 {
3207     struct target_ipc_perm *target_ip;
3208     struct target_semid64_ds *target_sd;
3209 
3210     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3211         return -TARGET_EFAULT;
3212     target_ip = &(target_sd->sem_perm);
3213     target_ip->__key = tswap32(host_ip->__key);
3214     target_ip->uid = tswap32(host_ip->uid);
3215     target_ip->gid = tswap32(host_ip->gid);
3216     target_ip->cuid = tswap32(host_ip->cuid);
3217     target_ip->cgid = tswap32(host_ip->cgid);
3218 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3219     target_ip->mode = tswap32(host_ip->mode);
3220 #else
3221     target_ip->mode = tswap16(host_ip->mode);
3222 #endif
3223 #if defined(TARGET_PPC)
3224     target_ip->__seq = tswap32(host_ip->__seq);
3225 #else
3226     target_ip->__seq = tswap16(host_ip->__seq);
3227 #endif
3228     unlock_user_struct(target_sd, target_addr, 1);
3229     return 0;
3230 }
3231 
3232 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3233                                                abi_ulong target_addr)
3234 {
3235     struct target_semid64_ds *target_sd;
3236 
3237     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3238         return -TARGET_EFAULT;
3239     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3240         return -TARGET_EFAULT;
3241     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3242     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3243     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3244     unlock_user_struct(target_sd, target_addr, 0);
3245     return 0;
3246 }
3247 
3248 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3249                                                struct semid_ds *host_sd)
3250 {
3251     struct target_semid64_ds *target_sd;
3252 
3253     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3254         return -TARGET_EFAULT;
3255     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3256         return -TARGET_EFAULT;
3257     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3258     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3259     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3260     unlock_user_struct(target_sd, target_addr, 1);
3261     return 0;
3262 }
3263 
3264 struct target_seminfo {
3265     int semmap;
3266     int semmni;
3267     int semmns;
3268     int semmnu;
3269     int semmsl;
3270     int semopm;
3271     int semume;
3272     int semusz;
3273     int semvmx;
3274     int semaem;
3275 };
3276 
3277 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3278                                               struct seminfo *host_seminfo)
3279 {
3280     struct target_seminfo *target_seminfo;
3281     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3282         return -TARGET_EFAULT;
3283     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3284     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3285     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3286     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3287     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3288     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3289     __put_user(host_seminfo->semume, &target_seminfo->semume);
3290     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3291     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3292     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3293     unlock_user_struct(target_seminfo, target_addr, 1);
3294     return 0;
3295 }
3296 
3297 union semun {
3298 	int val;
3299 	struct semid_ds *buf;
3300 	unsigned short *array;
3301 	struct seminfo *__buf;
3302 };
3303 
3304 union target_semun {
3305 	int val;
3306 	abi_ulong buf;
3307 	abi_ulong array;
3308 	abi_ulong __buf;
3309 };
3310 
3311 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3312                                                abi_ulong target_addr)
3313 {
3314     int nsems;
3315     unsigned short *array;
3316     union semun semun;
3317     struct semid_ds semid_ds;
3318     int i, ret;
3319 
3320     semun.buf = &semid_ds;
3321 
3322     ret = semctl(semid, 0, IPC_STAT, semun);
3323     if (ret == -1)
3324         return get_errno(ret);
3325 
3326     nsems = semid_ds.sem_nsems;
3327 
3328     *host_array = g_try_new(unsigned short, nsems);
3329     if (!*host_array) {
3330         return -TARGET_ENOMEM;
3331     }
3332     array = lock_user(VERIFY_READ, target_addr,
3333                       nsems*sizeof(unsigned short), 1);
3334     if (!array) {
3335         g_free(*host_array);
3336         return -TARGET_EFAULT;
3337     }
3338 
3339     for(i=0; i<nsems; i++) {
3340         __get_user((*host_array)[i], &array[i]);
3341     }
3342     unlock_user(array, target_addr, 0);
3343 
3344     return 0;
3345 }
3346 
3347 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3348                                                unsigned short **host_array)
3349 {
3350     int nsems;
3351     unsigned short *array;
3352     union semun semun;
3353     struct semid_ds semid_ds;
3354     int i, ret;
3355 
3356     semun.buf = &semid_ds;
3357 
3358     ret = semctl(semid, 0, IPC_STAT, semun);
3359     if (ret == -1)
3360         return get_errno(ret);
3361 
3362     nsems = semid_ds.sem_nsems;
3363 
3364     array = lock_user(VERIFY_WRITE, target_addr,
3365                       nsems*sizeof(unsigned short), 0);
3366     if (!array)
3367         return -TARGET_EFAULT;
3368 
3369     for(i=0; i<nsems; i++) {
3370         __put_user((*host_array)[i], &array[i]);
3371     }
3372     g_free(*host_array);
3373     unlock_user(array, target_addr, 1);
3374 
3375     return 0;
3376 }
3377 
3378 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3379                                  abi_ulong target_arg)
3380 {
3381     union target_semun target_su = { .buf = target_arg };
3382     union semun arg;
3383     struct semid_ds dsarg;
3384     unsigned short *array = NULL;
3385     struct seminfo seminfo;
3386     abi_long ret = -TARGET_EINVAL;
3387     abi_long err;
3388     cmd &= 0xff;
3389 
3390     switch (cmd) {
3391         case GETVAL:
3392         case SETVAL:
3393             /* In 64 bit cross-endian situations, we will erroneously pick up
3394              * the wrong half of the union for the "val" element.  To rectify
3395              * this, the entire 8-byte structure is byteswapped, followed by
3396              * a swap of the 4 byte val field. In other cases, the data is
3397              * already in proper host byte order. */
3398             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3399                 target_su.buf = tswapal(target_su.buf);
3400                 arg.val = tswap32(target_su.val);
3401             } else {
3402                 arg.val = target_su.val;
3403             }
3404             ret = get_errno(semctl(semid, semnum, cmd, arg));
3405             break;
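            /*
             * Worked example of the GETVAL/SETVAL swap above, assuming a
             * 64-bit big-endian guest on a little-endian host: the guest
             * stores val == 0x11223344 in the first four bytes of the
             * 8-byte union, so once that word has been fetched and
             * converted to host byte order those bytes sit in the high
             * half of target_su.buf and target_su.val would read the
             * unrelated other half.  tswapal() moves them back into the
             * low half (as 0x44332211) and tswap32() then recovers
             * 0x11223344.
             */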
3406         case GETALL:
3407         case SETALL:
3408             err = target_to_host_semarray(semid, &array, target_su.array);
3409             if (err)
3410                 return err;
3411             arg.array = array;
3412             ret = get_errno(semctl(semid, semnum, cmd, arg));
3413             err = host_to_target_semarray(semid, target_su.array, &array);
3414             if (err)
3415                 return err;
3416             break;
3417         case IPC_STAT:
3418         case IPC_SET:
3419         case SEM_STAT:
3420             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3421             if (err)
3422                 return err;
3423             arg.buf = &dsarg;
3424             ret = get_errno(semctl(semid, semnum, cmd, arg));
3425             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3426             if (err)
3427                 return err;
3428             break;
3429         case IPC_INFO:
3430         case SEM_INFO:
3431             arg.__buf = &seminfo;
3432             ret = get_errno(semctl(semid, semnum, cmd, arg));
3433             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3434             if (err)
3435                 return err;
3436             break;
3437         case IPC_RMID:
3438         case GETPID:
3439         case GETNCNT:
3440         case GETZCNT:
3441             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3442             break;
3443     }
3444 
3445     return ret;
3446 }
3447 
3448 struct target_sembuf {
3449     unsigned short sem_num;
3450     short sem_op;
3451     short sem_flg;
3452 };
3453 
3454 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3455                                              abi_ulong target_addr,
3456                                              unsigned nsops)
3457 {
3458     struct target_sembuf *target_sembuf;
3459     int i;
3460 
3461     target_sembuf = lock_user(VERIFY_READ, target_addr,
3462                               nsops*sizeof(struct target_sembuf), 1);
3463     if (!target_sembuf)
3464         return -TARGET_EFAULT;
3465 
3466     for(i=0; i<nsops; i++) {
3467         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3468         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3469         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3470     }
3471 
3472     unlock_user(target_sembuf, target_addr, 0);
3473 
3474     return 0;
3475 }
3476 
3477 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3478 {
3479     struct sembuf sops[nsops];
3480 
3481     if (target_to_host_sembuf(sops, ptr, nsops))
3482         return -TARGET_EFAULT;
3483 
3484     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3485 }
3486 
3487 struct target_msqid_ds
3488 {
3489     struct target_ipc_perm msg_perm;
3490     abi_ulong msg_stime;
3491 #if TARGET_ABI_BITS == 32
3492     abi_ulong __unused1;
3493 #endif
3494     abi_ulong msg_rtime;
3495 #if TARGET_ABI_BITS == 32
3496     abi_ulong __unused2;
3497 #endif
3498     abi_ulong msg_ctime;
3499 #if TARGET_ABI_BITS == 32
3500     abi_ulong __unused3;
3501 #endif
3502     abi_ulong __msg_cbytes;
3503     abi_ulong msg_qnum;
3504     abi_ulong msg_qbytes;
3505     abi_ulong msg_lspid;
3506     abi_ulong msg_lrpid;
3507     abi_ulong __unused4;
3508     abi_ulong __unused5;
3509 };
3510 
3511 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3512                                                abi_ulong target_addr)
3513 {
3514     struct target_msqid_ds *target_md;
3515 
3516     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3517         return -TARGET_EFAULT;
3518     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3519         return -TARGET_EFAULT;
3520     host_md->msg_stime = tswapal(target_md->msg_stime);
3521     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3522     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3523     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3524     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3525     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3526     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3527     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3528     unlock_user_struct(target_md, target_addr, 0);
3529     return 0;
3530 }
3531 
3532 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3533                                                struct msqid_ds *host_md)
3534 {
3535     struct target_msqid_ds *target_md;
3536 
3537     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3538         return -TARGET_EFAULT;
3539     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3540         return -TARGET_EFAULT;
3541     target_md->msg_stime = tswapal(host_md->msg_stime);
3542     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3543     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3544     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3545     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3546     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3547     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3548     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3549     unlock_user_struct(target_md, target_addr, 1);
3550     return 0;
3551 }
3552 
3553 struct target_msginfo {
3554     int msgpool;
3555     int msgmap;
3556     int msgmax;
3557     int msgmnb;
3558     int msgmni;
3559     int msgssz;
3560     int msgtql;
3561     unsigned short int msgseg;
3562 };
3563 
3564 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3565                                               struct msginfo *host_msginfo)
3566 {
3567     struct target_msginfo *target_msginfo;
3568     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3569         return -TARGET_EFAULT;
3570     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3571     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3572     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3573     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3574     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3575     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3576     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3577     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3578     unlock_user_struct(target_msginfo, target_addr, 1);
3579     return 0;
3580 }
3581 
3582 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3583 {
3584     struct msqid_ds dsarg;
3585     struct msginfo msginfo;
3586     abi_long ret = -TARGET_EINVAL;
3587 
3588     cmd &= 0xff;
3589 
3590     switch (cmd) {
3591     case IPC_STAT:
3592     case IPC_SET:
3593     case MSG_STAT:
3594         if (target_to_host_msqid_ds(&dsarg,ptr))
3595             return -TARGET_EFAULT;
3596         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3597         if (host_to_target_msqid_ds(ptr,&dsarg))
3598             return -TARGET_EFAULT;
3599         break;
3600     case IPC_RMID:
3601         ret = get_errno(msgctl(msgid, cmd, NULL));
3602         break;
3603     case IPC_INFO:
3604     case MSG_INFO:
3605         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3606         if (host_to_target_msginfo(ptr, &msginfo))
3607             return -TARGET_EFAULT;
3608         break;
3609     }
3610 
3611     return ret;
3612 }
3613 
3614 struct target_msgbuf {
3615     abi_long mtype;
3616     char	mtext[1];
3617 };
3618 
3619 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3620                                  ssize_t msgsz, int msgflg)
3621 {
3622     struct target_msgbuf *target_mb;
3623     struct msgbuf *host_mb;
3624     abi_long ret = 0;
3625 
3626     if (msgsz < 0) {
3627         return -TARGET_EINVAL;
3628     }
3629 
3630     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3631         return -TARGET_EFAULT;
3632     host_mb = g_try_malloc(msgsz + sizeof(long));
3633     if (!host_mb) {
3634         unlock_user_struct(target_mb, msgp, 0);
3635         return -TARGET_ENOMEM;
3636     }
3637     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3638     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3639     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3640     g_free(host_mb);
3641     unlock_user_struct(target_mb, msgp, 0);
3642 
3643     return ret;
3644 }
3645 
3646 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3647                                  ssize_t msgsz, abi_long msgtyp,
3648                                  int msgflg)
3649 {
3650     struct target_msgbuf *target_mb;
3651     char *target_mtext;
3652     struct msgbuf *host_mb;
3653     abi_long ret = 0;
3654 
3655     if (msgsz < 0) {
3656         return -TARGET_EINVAL;
3657     }
3658 
3659     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3660         return -TARGET_EFAULT;
3661 
3662     host_mb = g_try_malloc(msgsz + sizeof(long));
3663     if (!host_mb) {
3664         ret = -TARGET_ENOMEM;
3665         goto end;
3666     }
3667     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3668 
3669     if (ret > 0) {
3670         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3671         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3672         if (!target_mtext) {
3673             ret = -TARGET_EFAULT;
3674             goto end;
3675         }
3676         memcpy(target_mb->mtext, host_mb->mtext, ret);
3677         unlock_user(target_mtext, target_mtext_addr, ret);
3678     }
3679 
3680     target_mb->mtype = tswapal(host_mb->mtype);
3681 
3682 end:
3683     if (target_mb)
3684         unlock_user_struct(target_mb, msgp, 1);
3685     g_free(host_mb);
3686     return ret;
3687 }
3688 
3689 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3690                                                abi_ulong target_addr)
3691 {
3692     struct target_shmid_ds *target_sd;
3693 
3694     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3695         return -TARGET_EFAULT;
3696     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3697         return -TARGET_EFAULT;
3698     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3699     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3700     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3701     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3702     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3703     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3704     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3705     unlock_user_struct(target_sd, target_addr, 0);
3706     return 0;
3707 }
3708 
3709 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3710                                                struct shmid_ds *host_sd)
3711 {
3712     struct target_shmid_ds *target_sd;
3713 
3714     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3715         return -TARGET_EFAULT;
3716     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3717         return -TARGET_EFAULT;
3718     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3719     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3720     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3721     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3722     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3723     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3724     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3725     unlock_user_struct(target_sd, target_addr, 1);
3726     return 0;
3727 }
3728 
3729 struct  target_shminfo {
3730     abi_ulong shmmax;
3731     abi_ulong shmmin;
3732     abi_ulong shmmni;
3733     abi_ulong shmseg;
3734     abi_ulong shmall;
3735 };
3736 
3737 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3738                                               struct shminfo *host_shminfo)
3739 {
3740     struct target_shminfo *target_shminfo;
3741     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3742         return -TARGET_EFAULT;
3743     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3744     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3745     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3746     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3747     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3748     unlock_user_struct(target_shminfo, target_addr, 1);
3749     return 0;
3750 }
3751 
3752 struct target_shm_info {
3753     int used_ids;
3754     abi_ulong shm_tot;
3755     abi_ulong shm_rss;
3756     abi_ulong shm_swp;
3757     abi_ulong swap_attempts;
3758     abi_ulong swap_successes;
3759 };
3760 
3761 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3762                                                struct shm_info *host_shm_info)
3763 {
3764     struct target_shm_info *target_shm_info;
3765     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3766         return -TARGET_EFAULT;
3767     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3768     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3769     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3770     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3771     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3772     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3773     unlock_user_struct(target_shm_info, target_addr, 1);
3774     return 0;
3775 }
3776 
3777 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3778 {
3779     struct shmid_ds dsarg;
3780     struct shminfo shminfo;
3781     struct shm_info shm_info;
3782     abi_long ret = -TARGET_EINVAL;
3783 
3784     cmd &= 0xff;
3785 
3786     switch(cmd) {
3787     case IPC_STAT:
3788     case IPC_SET:
3789     case SHM_STAT:
3790         if (target_to_host_shmid_ds(&dsarg, buf))
3791             return -TARGET_EFAULT;
3792         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3793         if (host_to_target_shmid_ds(buf, &dsarg))
3794             return -TARGET_EFAULT;
3795         break;
3796     case IPC_INFO:
3797         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3798         if (host_to_target_shminfo(buf, &shminfo))
3799             return -TARGET_EFAULT;
3800         break;
3801     case SHM_INFO:
3802         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3803         if (host_to_target_shm_info(buf, &shm_info))
3804             return -TARGET_EFAULT;
3805         break;
3806     case IPC_RMID:
3807     case SHM_LOCK:
3808     case SHM_UNLOCK:
3809         ret = get_errno(shmctl(shmid, cmd, NULL));
3810         break;
3811     }
3812 
3813     return ret;
3814 }
3815 
3816 #ifndef TARGET_FORCE_SHMLBA
3817 /* For most architectures, SHMLBA is the same as the page size;
3818  * some architectures have larger values, in which case they should
3819  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3820  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3821  * and defining its own value for SHMLBA.
3822  *
3823  * The kernel also permits SHMLBA to be set by the architecture to a
3824  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3825  * this means that addresses are rounded to the large size if
3826  * SHM_RND is set but addresses not aligned to that size are not rejected
3827  * as long as they are at least page-aligned. Since the only architecture
3828  * which uses this is ia64 this code doesn't provide for that oddity.
3829  */
3830 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3831 {
3832     return TARGET_PAGE_SIZE;
3833 }
3834 #endif
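/*
 * Minimal sketch (illustrative only, not compiled) of what an architecture
 * with a larger SHMLBA would provide in its target headers instead of the
 * generic helper above; the alignment value below is made up.
 */
#if 0
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;    /* hypothetical SHMLBA for this target */
}
#endif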
3835 
3836 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3837                                  int shmid, abi_ulong shmaddr, int shmflg)
3838 {
3839     abi_long raddr;
3840     void *host_raddr;
3841     struct shmid_ds shm_info;
3842     int i, ret;
3843     abi_ulong shmlba;
3844 
3845     /* find out the length of the shared memory segment */
3846     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3847     if (is_error(ret)) {
3848         /* can't get length, bail out */
3849         return ret;
3850     }
3851 
3852     shmlba = target_shmlba(cpu_env);
3853 
3854     if (shmaddr & (shmlba - 1)) {
3855         if (shmflg & SHM_RND) {
3856             shmaddr &= ~(shmlba - 1);
3857         } else {
3858             return -TARGET_EINVAL;
3859         }
3860     }
3861     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3862         return -TARGET_EINVAL;
3863     }
3864 
3865     mmap_lock();
3866 
3867     if (shmaddr)
3868         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3869     else {
3870         abi_ulong mmap_start;
3871 
3872         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3873 
3874         if (mmap_start == -1) {
3875             errno = ENOMEM;
3876             host_raddr = (void *)-1;
3877         } else
3878             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3879     }
3880 
3881     if (host_raddr == (void *)-1) {
3882         mmap_unlock();
3883         return get_errno((long)host_raddr);
3884     }
3885     raddr = h2g((unsigned long)host_raddr);
3886 
3887     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3888                    PAGE_VALID | PAGE_READ |
3889                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3890 
3891     for (i = 0; i < N_SHM_REGIONS; i++) {
3892         if (!shm_regions[i].in_use) {
3893             shm_regions[i].in_use = true;
3894             shm_regions[i].start = raddr;
3895             shm_regions[i].size = shm_info.shm_segsz;
3896             break;
3897         }
3898     }
3899 
3900     mmap_unlock();
3901     return raddr;
3902 
3903 }
3904 
3905 static inline abi_long do_shmdt(abi_ulong shmaddr)
3906 {
3907     int i;
3908     abi_long rv;
3909 
3910     mmap_lock();
3911 
3912     for (i = 0; i < N_SHM_REGIONS; ++i) {
3913         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3914             shm_regions[i].in_use = false;
3915             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3916             break;
3917         }
3918     }
3919     rv = get_errno(shmdt(g2h(shmaddr)));
3920 
3921     mmap_unlock();
3922 
3923     return rv;
3924 }
3925 
3926 #ifdef TARGET_NR_ipc
3927 /* ??? This only works with linear mappings.  */
3928 /* do_ipc() must return target values and target errnos. */
3929 static abi_long do_ipc(CPUArchState *cpu_env,
3930                        unsigned int call, abi_long first,
3931                        abi_long second, abi_long third,
3932                        abi_long ptr, abi_long fifth)
3933 {
3934     int version;
3935     abi_long ret = 0;
3936 
3937     version = call >> 16;
3938     call &= 0xffff;
3939 
3940     switch (call) {
3941     case IPCOP_semop:
3942         ret = do_semop(first, ptr, second);
3943         break;
3944 
3945     case IPCOP_semget:
3946         ret = get_errno(semget(first, second, third));
3947         break;
3948 
3949     case IPCOP_semctl: {
3950         /* The semun argument to semctl is passed by value, so dereference the
3951          * ptr argument. */
3952         abi_ulong atptr;
3953         get_user_ual(atptr, ptr);
3954         ret = do_semctl(first, second, third, atptr);
3955         break;
3956     }
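        /*
         * Concrete shape of that indirection: the guest passes the address
         * of its union semun in ptr, so get_user_ual() above loads the
         * pointer-sized member stored in that union, and do_semctl()
         * receives the guest address of the semid_ds buffer or semval
         * array (or, for GETVAL/SETVAL, the raw value occupying the same
         * slot) rather than the address of the union itself.
         */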
3957 
3958     case IPCOP_msgget:
3959         ret = get_errno(msgget(first, second));
3960         break;
3961 
3962     case IPCOP_msgsnd:
3963         ret = do_msgsnd(first, ptr, second, third);
3964         break;
3965 
3966     case IPCOP_msgctl:
3967         ret = do_msgctl(first, second, ptr);
3968         break;
3969 
3970     case IPCOP_msgrcv:
3971         switch (version) {
3972         case 0:
3973             {
3974                 struct target_ipc_kludge {
3975                     abi_long msgp;
3976                     abi_long msgtyp;
3977                 } *tmp;
3978 
3979                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3980                     ret = -TARGET_EFAULT;
3981                     break;
3982                 }
3983 
3984                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3985 
3986                 unlock_user_struct(tmp, ptr, 0);
3987                 break;
3988             }
3989         default:
3990             ret = do_msgrcv(first, ptr, second, fifth, third);
3991         }
3992         break;
3993 
3994     case IPCOP_shmat:
3995         switch (version) {
3996         default:
3997         {
3998             abi_ulong raddr;
3999             raddr = do_shmat(cpu_env, first, ptr, second);
4000             if (is_error(raddr))
4001                 return get_errno(raddr);
4002             if (put_user_ual(raddr, third))
4003                 return -TARGET_EFAULT;
4004             break;
4005         }
4006         case 1:
4007             ret = -TARGET_EINVAL;
4008             break;
4009         }
4010         break;
4011     case IPCOP_shmdt:
4012         ret = do_shmdt(ptr);
4013         break;
4014 
4015     case IPCOP_shmget:
4016         /* IPC_* flag values are the same on all linux platforms */
4017         ret = get_errno(shmget(first, second, third));
4018         break;
4019 
4020     /* IPC_* and SHM_* command values are the same on all linux platforms */
4021     case IPCOP_shmctl:
4022         ret = do_shmctl(first, second, ptr);
4023         break;
4024     default:
4025         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4026         ret = -TARGET_ENOSYS;
4027         break;
4028     }
4029     return ret;
4030 }
4031 #endif
4032 
4033 /* kernel structure types definitions */
4034 
4035 #define STRUCT(name, ...) STRUCT_ ## name,
4036 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4037 enum {
4038 #include "syscall_types.h"
4039 STRUCT_MAX
4040 };
4041 #undef STRUCT
4042 #undef STRUCT_SPECIAL
4043 
4044 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4045 #define STRUCT_SPECIAL(name)
4046 #include "syscall_types.h"
4047 #undef STRUCT
4048 #undef STRUCT_SPECIAL
4049 
4050 typedef struct IOCTLEntry IOCTLEntry;
4051 
4052 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4053                              int fd, int cmd, abi_long arg);
4054 
4055 struct IOCTLEntry {
4056     int target_cmd;
4057     unsigned int host_cmd;
4058     const char *name;
4059     int access;
4060     do_ioctl_fn *do_ioctl;
4061     const argtype arg_type[5];
4062 };
4063 
4064 #define IOC_R 0x0001
4065 #define IOC_W 0x0002
4066 #define IOC_RW (IOC_R | IOC_W)
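/*
 * Summary of the dispatch these definitions drive: each IOCTLEntry pairs a
 * target_cmd number with the matching host_cmd and either describes the
 * argument layout in arg_type[] (with IOC_R/IOC_W saying which direction
 * needs converting) so a generic thunk conversion can be applied, or names
 * a custom do_ioctl callback, such as do_ioctl_fs_ioc_fiemap() below, for
 * requests whose payload cannot be converted mechanically.
 */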
4067 
4068 #define MAX_STRUCT_SIZE 4096
4069 
4070 #ifdef CONFIG_FIEMAP
4071 /* So fiemap access checks don't overflow on 32 bit systems.
4072  * This is very slightly smaller than the limit imposed by
4073  * the underlying kernel.
4074  */
4075 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4076                             / sizeof(struct fiemap_extent))
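/*
 * Worked example of that bound (a sketch; the exact struct sizes depend on
 * the host's kernel headers): with a 32-byte struct fiemap and a 56-byte
 * struct fiemap_extent the limit is roughly (0xffffffff - 32) / 56
 * extents, which keeps the later computation
 * sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent)
 * from wrapping around a 32-bit size in do_ioctl_fs_ioc_fiemap().
 */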
4077 
4078 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4079                                        int fd, int cmd, abi_long arg)
4080 {
4081     /* The parameter for this ioctl is a struct fiemap followed
4082      * by an array of struct fiemap_extent whose size is set
4083      * in fiemap->fm_extent_count. The array is filled in by the
4084      * ioctl.
4085      */
4086     int target_size_in, target_size_out;
4087     struct fiemap *fm;
4088     const argtype *arg_type = ie->arg_type;
4089     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4090     void *argptr, *p;
4091     abi_long ret;
4092     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4093     uint32_t outbufsz;
4094     int free_fm = 0;
4095 
4096     assert(arg_type[0] == TYPE_PTR);
4097     assert(ie->access == IOC_RW);
4098     arg_type++;
4099     target_size_in = thunk_type_size(arg_type, 0);
4100     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4101     if (!argptr) {
4102         return -TARGET_EFAULT;
4103     }
4104     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4105     unlock_user(argptr, arg, 0);
4106     fm = (struct fiemap *)buf_temp;
4107     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4108         return -TARGET_EINVAL;
4109     }
4110 
4111     outbufsz = sizeof (*fm) +
4112         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4113 
4114     if (outbufsz > MAX_STRUCT_SIZE) {
4115         /* We can't fit all the extents into the fixed size buffer.
4116          * Allocate one that is large enough and use it instead.
4117          */
4118         fm = g_try_malloc(outbufsz);
4119         if (!fm) {
4120             return -TARGET_ENOMEM;
4121         }
4122         memcpy(fm, buf_temp, sizeof(struct fiemap));
4123         free_fm = 1;
4124     }
4125     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4126     if (!is_error(ret)) {
4127         target_size_out = target_size_in;
4128         /* An extent_count of 0 means we were only counting the extents
4129          * so there are no structs to copy
4130          */
4131         if (fm->fm_extent_count != 0) {
4132             target_size_out += fm->fm_mapped_extents * extent_size;
4133         }
4134         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4135         if (!argptr) {
4136             ret = -TARGET_EFAULT;
4137         } else {
4138             /* Convert the struct fiemap */
4139             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4140             if (fm->fm_extent_count != 0) {
4141                 p = argptr + target_size_in;
4142                 /* ...and then all the struct fiemap_extents */
4143                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4144                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4145                                   THUNK_TARGET);
4146                     p += extent_size;
4147                 }
4148             }
4149             unlock_user(argptr, arg, target_size_out);
4150         }
4151     }
4152     if (free_fm) {
4153         g_free(fm);
4154     }
4155     return ret;
4156 }
4157 #endif
4158 
4159 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4160                                 int fd, int cmd, abi_long arg)
4161 {
4162     const argtype *arg_type = ie->arg_type;
4163     int target_size;
4164     void *argptr;
4165     int ret;
4166     struct ifconf *host_ifconf;
4167     uint32_t outbufsz;
4168     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4169     int target_ifreq_size;
4170     int nb_ifreq;
4171     int free_buf = 0;
4172     int i;
4173     int target_ifc_len;
4174     abi_long target_ifc_buf;
4175     int host_ifc_len;
4176     char *host_ifc_buf;
4177 
4178     assert(arg_type[0] == TYPE_PTR);
4179     assert(ie->access == IOC_RW);
4180 
4181     arg_type++;
4182     target_size = thunk_type_size(arg_type, 0);
4183 
4184     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4185     if (!argptr)
4186         return -TARGET_EFAULT;
4187     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4188     unlock_user(argptr, arg, 0);
4189 
4190     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4191     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4192     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4193 
4194     if (target_ifc_buf != 0) {
4195         target_ifc_len = host_ifconf->ifc_len;
4196         nb_ifreq = target_ifc_len / target_ifreq_size;
4197         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4198 
4199         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4200         if (outbufsz > MAX_STRUCT_SIZE) {
4201             /*
4202              * We can't fit all the ifreq entries into the fixed size buffer.
4203              * Allocate one that is large enough and use it instead.
4204              */
4205             host_ifconf = malloc(outbufsz);
4206             if (!host_ifconf) {
4207                 return -TARGET_ENOMEM;
4208             }
4209             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4210             free_buf = 1;
4211         }
4212         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4213 
4214         host_ifconf->ifc_len = host_ifc_len;
4215     } else {
4216         host_ifc_buf = NULL;
4217     }
4218     host_ifconf->ifc_buf = host_ifc_buf;
4219 
4220     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4221     if (!is_error(ret)) {
4222         /* convert host ifc_len to target ifc_len */
4223 
4224         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4225         target_ifc_len = nb_ifreq * target_ifreq_size;
4226         host_ifconf->ifc_len = target_ifc_len;
4227 
4228         /* restore target ifc_buf */
4229 
4230         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4231 
4232         /* copy struct ifconf to target user */
4233 
4234         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4235         if (!argptr)
4236             return -TARGET_EFAULT;
4237         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4238         unlock_user(argptr, arg, target_size);
4239 
4240         if (target_ifc_buf != 0) {
4241             /* copy ifreq[] to target user */
4242             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4243             for (i = 0; i < nb_ifreq ; i++) {
4244                 thunk_convert(argptr + i * target_ifreq_size,
4245                               host_ifc_buf + i * sizeof(struct ifreq),
4246                               ifreq_arg_type, THUNK_TARGET);
4247             }
4248             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4249         }
4250     }
4251 
4252     if (free_buf) {
4253         free(host_ifconf);
4254     }
4255 
4256     return ret;
4257 }
4258 
4259 #if defined(CONFIG_USBFS)
4260 #if HOST_LONG_BITS > 64
4261 #error USBDEVFS thunks do not support >64 bit hosts yet.
4262 #endif
4263 struct live_urb {
4264     uint64_t target_urb_adr;
4265     uint64_t target_buf_adr;
4266     char *target_buf_ptr;
4267     struct usbdevfs_urb host_urb;
4268 };
4269 
4270 static GHashTable *usbdevfs_urb_hashtable(void)
4271 {
4272     static GHashTable *urb_hashtable;
4273 
4274     if (!urb_hashtable) {
4275         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4276     }
4277     return urb_hashtable;
4278 }
4279 
4280 static void urb_hashtable_insert(struct live_urb *urb)
4281 {
4282     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4283     g_hash_table_insert(urb_hashtable, urb, urb);
4284 }
4285 
4286 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4287 {
4288     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4289     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4290 }
4291 
4292 static void urb_hashtable_remove(struct live_urb *urb)
4293 {
4294     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4295     g_hash_table_remove(urb_hashtable, urb);
4296 }
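/*
 * How the pieces above fit together: USBDEVFS_SUBMITURB allocates a
 * struct live_urb and inserts it keyed by its first member,
 * target_urb_adr, which is what g_int64_hash() reads through the struct
 * pointer; USBDEVFS_DISCARDURB looks the URB up again by that guest
 * address; and USBDEVFS_REAPURB receives the host_urb pointer back from
 * the kernel and recovers the containing live_urb via offsetof(), so the
 * guest's original urb pointer can be written back to userspace.
 */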
4297 
4298 static abi_long
4299 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4300                           int fd, int cmd, abi_long arg)
4301 {
4302     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4303     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4304     struct live_urb *lurb;
4305     void *argptr;
4306     uint64_t hurb;
4307     int target_size;
4308     uintptr_t target_urb_adr;
4309     abi_long ret;
4310 
4311     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4312 
4313     memset(buf_temp, 0, sizeof(uint64_t));
4314     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4315     if (is_error(ret)) {
4316         return ret;
4317     }
4318 
4319     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4320     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4321     if (!lurb->target_urb_adr) {
4322         return -TARGET_EFAULT;
4323     }
4324     urb_hashtable_remove(lurb);
4325     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4326         lurb->host_urb.buffer_length);
4327     lurb->target_buf_ptr = NULL;
4328 
4329     /* restore the guest buffer pointer */
4330     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4331 
4332     /* update the guest urb struct */
4333     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4334     if (!argptr) {
4335         g_free(lurb);
4336         return -TARGET_EFAULT;
4337     }
4338     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4339     unlock_user(argptr, lurb->target_urb_adr, target_size);
4340 
4341     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4342     /* write back the urb handle */
4343     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4344     if (!argptr) {
4345         g_free(lurb);
4346         return -TARGET_EFAULT;
4347     }
4348 
4349     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4350     target_urb_adr = lurb->target_urb_adr;
4351     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4352     unlock_user(argptr, arg, target_size);
4353 
4354     g_free(lurb);
4355     return ret;
4356 }
4357 
4358 static abi_long
4359 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4360                              uint8_t *buf_temp __attribute__((unused)),
4361                              int fd, int cmd, abi_long arg)
4362 {
4363     struct live_urb *lurb;
4364 
4365     /* map target address back to host URB with metadata. */
4366     lurb = urb_hashtable_lookup(arg);
4367     if (!lurb) {
4368         return -TARGET_EFAULT;
4369     }
4370     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4371 }
4372 
4373 static abi_long
4374 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4375                             int fd, int cmd, abi_long arg)
4376 {
4377     const argtype *arg_type = ie->arg_type;
4378     int target_size;
4379     abi_long ret;
4380     void *argptr;
4381     int rw_dir;
4382     struct live_urb *lurb;
4383 
4384     /*
4385      * Each submitted URB needs to map to a unique ID for the
4386      * kernel, and that unique ID needs to be a pointer to
4387      * host memory.  Hence, we need to malloc for each URB.
4388      * Isochronous transfers have a variable length struct.
4389      */
4390     arg_type++;
4391     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4392 
4393     /* construct host copy of urb and metadata */
4394     lurb = g_try_malloc0(sizeof(struct live_urb));
4395     if (!lurb) {
4396         return -TARGET_ENOMEM;
4397     }
4398 
4399     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4400     if (!argptr) {
4401         g_free(lurb);
4402         return -TARGET_EFAULT;
4403     }
4404     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4405     unlock_user(argptr, arg, 0);
4406 
4407     lurb->target_urb_adr = arg;
4408     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4409 
4410     /* buffer space used depends on endpoint type so lock the entire buffer */
4411     /* control type urbs should check the buffer contents for true direction */
4412     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4413     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4414         lurb->host_urb.buffer_length, 1);
4415     if (lurb->target_buf_ptr == NULL) {
4416         g_free(lurb);
4417         return -TARGET_EFAULT;
4418     }
4419 
4420     /* update buffer pointer in host copy */
4421     lurb->host_urb.buffer = lurb->target_buf_ptr;
4422 
4423     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4424     if (is_error(ret)) {
4425         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4426         g_free(lurb);
4427     } else {
4428         urb_hashtable_insert(lurb);
4429     }
4430 
4431     return ret;
4432 }
4433 #endif /* CONFIG_USBFS */
4434 
4435 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4436                             int cmd, abi_long arg)
4437 {
4438     void *argptr;
4439     struct dm_ioctl *host_dm;
4440     abi_long guest_data;
4441     uint32_t guest_data_size;
4442     int target_size;
4443     const argtype *arg_type = ie->arg_type;
4444     abi_long ret;
4445     void *big_buf = NULL;
4446     char *host_data;
4447 
4448     arg_type++;
4449     target_size = thunk_type_size(arg_type, 0);
4450     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4451     if (!argptr) {
4452         ret = -TARGET_EFAULT;
4453         goto out;
4454     }
4455     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4456     unlock_user(argptr, arg, 0);
4457 
4458     /* buf_temp is too small, so fetch things into a bigger buffer */
4459     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4460     memcpy(big_buf, buf_temp, target_size);
4461     buf_temp = big_buf;
4462     host_dm = big_buf;
4463 
4464     guest_data = arg + host_dm->data_start;
4465     if ((guest_data - arg) < 0) {
4466         ret = -TARGET_EINVAL;
4467         goto out;
4468     }
4469     guest_data_size = host_dm->data_size - host_dm->data_start;
4470     host_data = (char*)host_dm + host_dm->data_start;
4471 
4472     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4473     if (!argptr) {
4474         ret = -TARGET_EFAULT;
4475         goto out;
4476     }
4477 
4478     switch (ie->host_cmd) {
4479     case DM_REMOVE_ALL:
4480     case DM_LIST_DEVICES:
4481     case DM_DEV_CREATE:
4482     case DM_DEV_REMOVE:
4483     case DM_DEV_SUSPEND:
4484     case DM_DEV_STATUS:
4485     case DM_DEV_WAIT:
4486     case DM_TABLE_STATUS:
4487     case DM_TABLE_CLEAR:
4488     case DM_TABLE_DEPS:
4489     case DM_LIST_VERSIONS:
4490         /* no input data */
4491         break;
4492     case DM_DEV_RENAME:
4493     case DM_DEV_SET_GEOMETRY:
4494         /* data contains only strings */
4495         memcpy(host_data, argptr, guest_data_size);
4496         break;
4497     case DM_TARGET_MSG:
4498         memcpy(host_data, argptr, guest_data_size);
4499         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4500         break;
4501     case DM_TABLE_LOAD:
4502     {
4503         void *gspec = argptr;
4504         void *cur_data = host_data;
4505         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4506         int spec_size = thunk_type_size(arg_type, 0);
4507         int i;
4508 
4509         for (i = 0; i < host_dm->target_count; i++) {
4510             struct dm_target_spec *spec = cur_data;
4511             uint32_t next;
4512             int slen;
4513 
4514             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4515             slen = strlen((char*)gspec + spec_size) + 1;
4516             next = spec->next;
4517             spec->next = sizeof(*spec) + slen;
4518             strcpy((char*)&spec[1], gspec + spec_size);
4519             gspec += next;
4520             cur_data += spec->next;
4521         }
4522         break;
4523     }
4524     default:
4525         ret = -TARGET_EINVAL;
4526         unlock_user(argptr, guest_data, 0);
4527         goto out;
4528     }
4529     unlock_user(argptr, guest_data, 0);
4530 
4531     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4532     if (!is_error(ret)) {
4533         guest_data = arg + host_dm->data_start;
4534         guest_data_size = host_dm->data_size - host_dm->data_start;
4535         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4536         switch (ie->host_cmd) {
4537         case DM_REMOVE_ALL:
4538         case DM_DEV_CREATE:
4539         case DM_DEV_REMOVE:
4540         case DM_DEV_RENAME:
4541         case DM_DEV_SUSPEND:
4542         case DM_DEV_STATUS:
4543         case DM_TABLE_LOAD:
4544         case DM_TABLE_CLEAR:
4545         case DM_TARGET_MSG:
4546         case DM_DEV_SET_GEOMETRY:
4547             /* no return data */
4548             break;
4549         case DM_LIST_DEVICES:
4550         {
4551             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4552             uint32_t remaining_data = guest_data_size;
4553             void *cur_data = argptr;
4554             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4555             int nl_size = 12; /* can't use thunk_size due to alignment */
4556 
4557             while (1) {
4558                 uint32_t next = nl->next;
4559                 if (next) {
4560                     nl->next = nl_size + (strlen(nl->name) + 1);
4561                 }
4562                 if (remaining_data < nl->next) {
4563                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4564                     break;
4565                 }
4566                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4567                 strcpy(cur_data + nl_size, nl->name);
4568                 cur_data += nl->next;
4569                 remaining_data -= nl->next;
4570                 if (!next) {
4571                     break;
4572                 }
4573                 nl = (void*)nl + next;
4574             }
4575             break;
4576         }
4577         case DM_DEV_WAIT:
4578         case DM_TABLE_STATUS:
4579         {
4580             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4581             void *cur_data = argptr;
4582             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4583             int spec_size = thunk_type_size(arg_type, 0);
4584             int i;
4585 
4586             for (i = 0; i < host_dm->target_count; i++) {
4587                 uint32_t next = spec->next;
4588                 int slen = strlen((char*)&spec[1]) + 1;
4589                 spec->next = (cur_data - argptr) + spec_size + slen;
4590                 if (guest_data_size < spec->next) {
4591                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4592                     break;
4593                 }
4594                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4595                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4596                 cur_data = argptr + spec->next;
4597                 spec = (void*)host_dm + host_dm->data_start + next;
4598             }
4599             break;
4600         }
4601         case DM_TABLE_DEPS:
4602         {
4603             void *hdata = (void*)host_dm + host_dm->data_start;
4604             int count = *(uint32_t*)hdata;
4605             uint64_t *hdev = hdata + 8;
4606             uint64_t *gdev = argptr + 8;
4607             int i;
4608 
4609             *(uint32_t*)argptr = tswap32(count);
4610             for (i = 0; i < count; i++) {
4611                 *gdev = tswap64(*hdev);
4612                 gdev++;
4613                 hdev++;
4614             }
4615             break;
4616         }
4617         case DM_LIST_VERSIONS:
4618         {
4619             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4620             uint32_t remaining_data = guest_data_size;
4621             void *cur_data = argptr;
4622             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4623             int vers_size = thunk_type_size(arg_type, 0);
4624 
4625             while (1) {
4626                 uint32_t next = vers->next;
4627                 if (next) {
4628                     vers->next = vers_size + (strlen(vers->name) + 1);
4629                 }
4630                 if (remaining_data < vers->next) {
4631                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4632                     break;
4633                 }
4634                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4635                 strcpy(cur_data + vers_size, vers->name);
4636                 cur_data += vers->next;
4637                 remaining_data -= vers->next;
4638                 if (!next) {
4639                     break;
4640                 }
4641                 vers = (void*)vers + next;
4642             }
4643             break;
4644         }
4645         default:
4646             unlock_user(argptr, guest_data, 0);
4647             ret = -TARGET_EINVAL;
4648             goto out;
4649         }
4650         unlock_user(argptr, guest_data, guest_data_size);
4651 
4652         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4653         if (!argptr) {
4654             ret = -TARGET_EFAULT;
4655             goto out;
4656         }
4657         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4658         unlock_user(argptr, arg, target_size);
4659     }
4660 out:
4661     g_free(big_buf);
4662     return ret;
4663 }
4664 
4665 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4666                                int cmd, abi_long arg)
4667 {
4668     void *argptr;
4669     int target_size;
4670     const argtype *arg_type = ie->arg_type;
4671     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4672     abi_long ret;
4673 
4674     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4675     struct blkpg_partition host_part;
4676 
4677     /* Read and convert blkpg */
4678     arg_type++;
4679     target_size = thunk_type_size(arg_type, 0);
4680     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4681     if (!argptr) {
4682         ret = -TARGET_EFAULT;
4683         goto out;
4684     }
4685     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4686     unlock_user(argptr, arg, 0);
4687 
4688     switch (host_blkpg->op) {
4689     case BLKPG_ADD_PARTITION:
4690     case BLKPG_DEL_PARTITION:
4691         /* payload is struct blkpg_partition */
4692         break;
4693     default:
4694         /* Unknown opcode */
4695         ret = -TARGET_EINVAL;
4696         goto out;
4697     }
4698 
4699     /* Read and convert blkpg->data */
4700     arg = (abi_long)(uintptr_t)host_blkpg->data;
4701     target_size = thunk_type_size(part_arg_type, 0);
4702     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4703     if (!argptr) {
4704         ret = -TARGET_EFAULT;
4705         goto out;
4706     }
4707     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4708     unlock_user(argptr, arg, 0);
4709 
4710     /* Swizzle the data pointer to our local copy and call! */
4711     host_blkpg->data = &host_part;
4712     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4713 
4714 out:
4715     return ret;
4716 }
4717 
4718 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4719                                 int fd, int cmd, abi_long arg)
4720 {
4721     const argtype *arg_type = ie->arg_type;
4722     const StructEntry *se;
4723     const argtype *field_types;
4724     const int *dst_offsets, *src_offsets;
4725     int target_size;
4726     void *argptr;
4727     abi_ulong *target_rt_dev_ptr;
4728     unsigned long *host_rt_dev_ptr;
4729     abi_long ret;
4730     int i;
4731 
4732     assert(ie->access == IOC_W);
4733     assert(*arg_type == TYPE_PTR);
4734     arg_type++;
4735     assert(*arg_type == TYPE_STRUCT);
4736     target_size = thunk_type_size(arg_type, 0);
4737     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4738     if (!argptr) {
4739         return -TARGET_EFAULT;
4740     }
4741     arg_type++;
4742     assert(*arg_type == (int)STRUCT_rtentry);
4743     se = struct_entries + *arg_type++;
4744     assert(se->convert[0] == NULL);
4745     /* convert struct here to be able to catch rt_dev string */
4746     field_types = se->field_types;
4747     dst_offsets = se->field_offsets[THUNK_HOST];
4748     src_offsets = se->field_offsets[THUNK_TARGET];
4749     for (i = 0; i < se->nb_fields; i++) {
4750         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4751             assert(*field_types == TYPE_PTRVOID);
4752             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4753             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4754             if (*target_rt_dev_ptr != 0) {
4755                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4756                                                   tswapal(*target_rt_dev_ptr));
4757                 if (!*host_rt_dev_ptr) {
4758                     unlock_user(argptr, arg, 0);
4759                     return -TARGET_EFAULT;
4760                 }
4761             } else {
4762                 *host_rt_dev_ptr = 0;
4763             }
4764             field_types++;
4765             continue;
4766         }
4767         field_types = thunk_convert(buf_temp + dst_offsets[i],
4768                                     argptr + src_offsets[i],
4769                                     field_types, THUNK_HOST);
4770     }
4771     unlock_user(argptr, arg, 0);
4772 
4773     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4774     if (*host_rt_dev_ptr != 0) {
4775         unlock_user((void *)*host_rt_dev_ptr,
4776                     *target_rt_dev_ptr, 0);
4777     }
4778     return ret;
4779 }
4780 
4781 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4782                                      int fd, int cmd, abi_long arg)
4783 {
4784     int sig = target_to_host_signal(arg);
4785     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4786 }
4787 
4788 #ifdef TIOCGPTPEER
4789 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4790                                      int fd, int cmd, abi_long arg)
4791 {
4792     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4793     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4794 }
4795 #endif
4796 
4797 static IOCTLEntry ioctl_entries[] = {
4798 #define IOCTL(cmd, access, ...) \
4799     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4800 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4801     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4802 #define IOCTL_IGNORE(cmd) \
4803     { TARGET_ ## cmd, 0, #cmd },
4804 #include "ioctls.h"
4805     { 0, 0, },
4806 };
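/*
 * Each IOCTL() line in "ioctls.h" expands to one table entry.  As an
 * illustration (hypothetical ioctl name, not taken from ioctls.h):
 *
 *     IOCTL(FOOIOC, IOC_R, MK_PTR(TYPE_INT))
 *
 * becomes
 *
 *     { TARGET_FOOIOC, FOOIOC, "FOOIOC", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 *
 * i.e. target command number, host command number, name for logging,
 * access direction, optional do_ioctl hook (0 here) and the argument
 * type description consumed by the thunk code below.
 */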
4807 
4808 /* ??? Implement proper locking for ioctls.  */
4809 /* do_ioctl() must return target values and target errnos. */
4810 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4811 {
4812     const IOCTLEntry *ie;
4813     const argtype *arg_type;
4814     abi_long ret;
4815     uint8_t buf_temp[MAX_STRUCT_SIZE];
4816     int target_size;
4817     void *argptr;
4818 
4819     ie = ioctl_entries;
4820     for(;;) {
4821         if (ie->target_cmd == 0) {
4822             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4823             return -TARGET_ENOSYS;
4824         }
4825         if (ie->target_cmd == cmd)
4826             break;
4827         ie++;
4828     }
4829     arg_type = ie->arg_type;
4830     if (ie->do_ioctl) {
4831         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4832     } else if (!ie->host_cmd) {
4833         /* Some architectures define BSD ioctls in their headers
4834            that are not implemented in Linux.  */
4835         return -TARGET_ENOSYS;
4836     }
4837 
4838     switch(arg_type[0]) {
4839     case TYPE_NULL:
4840         /* no argument */
4841         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4842         break;
4843     case TYPE_PTRVOID:
4844     case TYPE_INT:
4845         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4846         break;
4847     case TYPE_PTR:
4848         arg_type++;
4849         target_size = thunk_type_size(arg_type, 0);
4850         switch(ie->access) {
4851         case IOC_R:
4852             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4853             if (!is_error(ret)) {
4854                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4855                 if (!argptr)
4856                     return -TARGET_EFAULT;
4857                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4858                 unlock_user(argptr, arg, target_size);
4859             }
4860             break;
4861         case IOC_W:
4862             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4863             if (!argptr)
4864                 return -TARGET_EFAULT;
4865             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4866             unlock_user(argptr, arg, 0);
4867             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4868             break;
4869         default:
4870         case IOC_RW:
4871             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4872             if (!argptr)
4873                 return -TARGET_EFAULT;
4874             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4875             unlock_user(argptr, arg, 0);
4876             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4877             if (!is_error(ret)) {
4878                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4879                 if (!argptr)
4880                     return -TARGET_EFAULT;
4881                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4882                 unlock_user(argptr, arg, target_size);
4883             }
4884             break;
4885         }
4886         break;
4887     default:
4888         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4889                  (long)cmd, arg_type[0]);
4890         ret = -TARGET_ENOSYS;
4891         break;
4892     }
4893     return ret;
4894 }
4895 
4896 static const bitmask_transtbl iflag_tbl[] = {
4897         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4898         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4899         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4900         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4901         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4902         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4903         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4904         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4905         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4906         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4907         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4908         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4909         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4910         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4911         { 0, 0, 0, 0 }
4912 };
4913 
4914 static const bitmask_transtbl oflag_tbl[] = {
4915 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4916 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4917 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4918 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4919 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4920 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4921 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4922 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4923 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4924 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4925 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4926 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4927 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4928 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4929 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4930 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4931 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4932 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4933 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4934 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4935 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4936 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4937 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4938 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4939 	{ 0, 0, 0, 0 }
4940 };
4941 
4942 static const bitmask_transtbl cflag_tbl[] = {
4943 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4944 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4945 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4946 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4947 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4948 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4949 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4950 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4951 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4952 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4953 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4954 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4955 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4956 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4957 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4958 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4959 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4960 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4961 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4962 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4963 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4964 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4965 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4966 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4967 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4968 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4969 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4970 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4971 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4972 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4973 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4974 	{ 0, 0, 0, 0 }
4975 };
4976 
4977 static const bitmask_transtbl lflag_tbl[] = {
4978 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4979 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4980 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4981 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4982 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4983 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4984 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4985 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4986 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4987 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4988 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4989 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4990 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4991 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4992 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4993 	{ 0, 0, 0, 0 }
4994 };
4995 
4996 static void target_to_host_termios (void *dst, const void *src)
4997 {
4998     struct host_termios *host = dst;
4999     const struct target_termios *target = src;
5000 
5001     host->c_iflag =
5002         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5003     host->c_oflag =
5004         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5005     host->c_cflag =
5006         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5007     host->c_lflag =
5008         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5009     host->c_line = target->c_line;
5010 
5011     memset(host->c_cc, 0, sizeof(host->c_cc));
5012     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5013     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5014     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5015     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5016     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5017     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5018     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5019     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5020     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5021     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5022     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5023     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5024     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5025     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5026     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5027     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5028     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5029 }
5030 
5031 static void host_to_target_termios (void *dst, const void *src)
5032 {
5033     struct target_termios *target = dst;
5034     const struct host_termios *host = src;
5035 
5036     target->c_iflag =
5037         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5038     target->c_oflag =
5039         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5040     target->c_cflag =
5041         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5042     target->c_lflag =
5043         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5044     target->c_line = host->c_line;
5045 
5046     memset(target->c_cc, 0, sizeof(target->c_cc));
5047     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5048     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5049     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5050     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5051     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5052     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5053     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5054     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5055     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5056     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5057     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5058     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5059     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5060     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5061     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5062     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5063     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5064 }
5065 
5066 static const StructEntry struct_termios_def = {
5067     .convert = { host_to_target_termios, target_to_host_termios },
5068     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5069     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5070 };
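/*
 * A sketch of how this entry is used (the direction indexing is an
 * assumption based on the initializer order above): the thunk layer
 * dispatches struct termios conversions through .convert[], so
 * host_to_target_termios() and target_to_host_termios() perform the
 * flag translation via the bitmask tables above instead of a plain
 * field-by-field copy.
 */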
5071 
5072 static bitmask_transtbl mmap_flags_tbl[] = {
5073     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5074     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5075     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5076     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5077       MAP_ANONYMOUS, MAP_ANONYMOUS },
5078     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5079       MAP_GROWSDOWN, MAP_GROWSDOWN },
5080     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5081       MAP_DENYWRITE, MAP_DENYWRITE },
5082     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5083       MAP_EXECUTABLE, MAP_EXECUTABLE },
5084     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5085     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5086       MAP_NORESERVE, MAP_NORESERVE },
5087     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5088     /* MAP_STACK has been ignored by the kernel for quite some time.
5089        Recognize it for the target, but do not pass it through to
5090        the host.  */
5091     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5092     { 0, 0, 0, 0 }
5093 };
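/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: roughly, if (value & target_mask) == target_bits the
 * corresponding host_bits are ORed into the result (and vice versa for
 * the host-to-target direction).  The TARGET_MAP_STACK row above uses
 * host_mask == host_bits == 0, so the flag is accepted from the guest
 * but dropped before reaching the host mmap().
 */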
5094 
5095 #if defined(TARGET_I386)
5096 
5097 /* NOTE: there is really only one LDT, shared by all threads */
5098 static uint8_t *ldt_table;
5099 
5100 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5101 {
5102     int size;
5103     void *p;
5104 
5105     if (!ldt_table)
5106         return 0;
5107     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5108     if (size > bytecount)
5109         size = bytecount;
5110     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5111     if (!p)
5112         return -TARGET_EFAULT;
5113     /* ??? Should this be byteswapped?  */
5114     memcpy(p, ldt_table, size);
5115     unlock_user(p, ptr, size);
5116     return size;
5117 }
5118 
5119 /* XXX: add locking support */
5120 static abi_long write_ldt(CPUX86State *env,
5121                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5122 {
5123     struct target_modify_ldt_ldt_s ldt_info;
5124     struct target_modify_ldt_ldt_s *target_ldt_info;
5125     int seg_32bit, contents, read_exec_only, limit_in_pages;
5126     int seg_not_present, useable, lm;
5127     uint32_t *lp, entry_1, entry_2;
5128 
5129     if (bytecount != sizeof(ldt_info))
5130         return -TARGET_EINVAL;
5131     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5132         return -TARGET_EFAULT;
5133     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5134     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5135     ldt_info.limit = tswap32(target_ldt_info->limit);
5136     ldt_info.flags = tswap32(target_ldt_info->flags);
5137     unlock_user_struct(target_ldt_info, ptr, 0);
5138 
5139     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5140         return -TARGET_EINVAL;
5141     seg_32bit = ldt_info.flags & 1;
5142     contents = (ldt_info.flags >> 1) & 3;
5143     read_exec_only = (ldt_info.flags >> 3) & 1;
5144     limit_in_pages = (ldt_info.flags >> 4) & 1;
5145     seg_not_present = (ldt_info.flags >> 5) & 1;
5146     useable = (ldt_info.flags >> 6) & 1;
5147 #ifdef TARGET_ABI32
5148     lm = 0;
5149 #else
5150     lm = (ldt_info.flags >> 7) & 1;
5151 #endif
5152     if (contents == 3) {
5153         if (oldmode)
5154             return -TARGET_EINVAL;
5155         if (seg_not_present == 0)
5156             return -TARGET_EINVAL;
5157     }
5158     /* allocate the LDT */
5159     if (!ldt_table) {
5160         env->ldt.base = target_mmap(0,
5161                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5162                                     PROT_READ|PROT_WRITE,
5163                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5164         if (env->ldt.base == -1)
5165             return -TARGET_ENOMEM;
5166         memset(g2h(env->ldt.base), 0,
5167                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5168         env->ldt.limit = 0xffff;
5169         ldt_table = g2h(env->ldt.base);
5170     }
5171 
5172     /* NOTE: same code as Linux kernel */
5173     /* Allow LDTs to be cleared by the user. */
5174     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5175         if (oldmode ||
5176             (contents == 0		&&
5177              read_exec_only == 1	&&
5178              seg_32bit == 0		&&
5179              limit_in_pages == 0	&&
5180              seg_not_present == 1	&&
5181              useable == 0 )) {
5182             entry_1 = 0;
5183             entry_2 = 0;
5184             goto install;
5185         }
5186     }
5187 
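    /* Pack the fields into a standard x86 descriptor: entry_1 carries
       base[15:0] in its upper half and limit[15:0] in its lower half,
       while entry_2 carries base[31:24], base[23:16], limit[19:16], the
       access bits derived from the flags, and 0x7000 (S=1, DPL=3).  */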
5188     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5189         (ldt_info.limit & 0x0ffff);
5190     entry_2 = (ldt_info.base_addr & 0xff000000) |
5191         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5192         (ldt_info.limit & 0xf0000) |
5193         ((read_exec_only ^ 1) << 9) |
5194         (contents << 10) |
5195         ((seg_not_present ^ 1) << 15) |
5196         (seg_32bit << 22) |
5197         (limit_in_pages << 23) |
5198         (lm << 21) |
5199         0x7000;
5200     if (!oldmode)
5201         entry_2 |= (useable << 20);
5202 
5203     /* Install the new entry ...  */
5204 install:
5205     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5206     lp[0] = tswap32(entry_1);
5207     lp[1] = tswap32(entry_2);
5208     return 0;
5209 }
5210 
5211 /* specific and weird i386 syscalls */
5212 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5213                               unsigned long bytecount)
5214 {
5215     abi_long ret;
5216 
5217     switch (func) {
5218     case 0:
5219         ret = read_ldt(ptr, bytecount);
5220         break;
5221     case 1:
5222         ret = write_ldt(env, ptr, bytecount, 1);
5223         break;
5224     case 0x11:
5225         ret = write_ldt(env, ptr, bytecount, 0);
5226         break;
5227     default:
5228         ret = -TARGET_ENOSYS;
5229         break;
5230     }
5231     return ret;
5232 }
5233 
5234 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5235 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5236 {
5237     uint64_t *gdt_table = g2h(env->gdt.base);
5238     struct target_modify_ldt_ldt_s ldt_info;
5239     struct target_modify_ldt_ldt_s *target_ldt_info;
5240     int seg_32bit, contents, read_exec_only, limit_in_pages;
5241     int seg_not_present, useable, lm;
5242     uint32_t *lp, entry_1, entry_2;
5243     int i;
5244 
5245     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5246     if (!target_ldt_info)
5247         return -TARGET_EFAULT;
5248     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5249     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5250     ldt_info.limit = tswap32(target_ldt_info->limit);
5251     ldt_info.flags = tswap32(target_ldt_info->flags);
5252     if (ldt_info.entry_number == -1) {
5253         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5254             if (gdt_table[i] == 0) {
5255                 ldt_info.entry_number = i;
5256                 target_ldt_info->entry_number = tswap32(i);
5257                 break;
5258             }
5259         }
5260     }
5261     unlock_user_struct(target_ldt_info, ptr, 1);
5262 
5263     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5264         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5265            return -TARGET_EINVAL;
5266     seg_32bit = ldt_info.flags & 1;
5267     contents = (ldt_info.flags >> 1) & 3;
5268     read_exec_only = (ldt_info.flags >> 3) & 1;
5269     limit_in_pages = (ldt_info.flags >> 4) & 1;
5270     seg_not_present = (ldt_info.flags >> 5) & 1;
5271     useable = (ldt_info.flags >> 6) & 1;
5272 #ifdef TARGET_ABI32
5273     lm = 0;
5274 #else
5275     lm = (ldt_info.flags >> 7) & 1;
5276 #endif
5277 
5278     if (contents == 3) {
5279         if (seg_not_present == 0)
5280             return -TARGET_EINVAL;
5281     }
5282 
5283     /* NOTE: same code as Linux kernel */
5284     /* Allow LDTs to be cleared by the user. */
5285     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5286         if ((contents == 0             &&
5287              read_exec_only == 1       &&
5288              seg_32bit == 0            &&
5289              limit_in_pages == 0       &&
5290              seg_not_present == 1      &&
5291              useable == 0 )) {
5292             entry_1 = 0;
5293             entry_2 = 0;
5294             goto install;
5295         }
5296     }
5297 
5298     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5299         (ldt_info.limit & 0x0ffff);
5300     entry_2 = (ldt_info.base_addr & 0xff000000) |
5301         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5302         (ldt_info.limit & 0xf0000) |
5303         ((read_exec_only ^ 1) << 9) |
5304         (contents << 10) |
5305         ((seg_not_present ^ 1) << 15) |
5306         (seg_32bit << 22) |
5307         (limit_in_pages << 23) |
5308         (useable << 20) |
5309         (lm << 21) |
5310         0x7000;
5311 
5312     /* Install the new entry ...  */
5313 install:
5314     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5315     lp[0] = tswap32(entry_1);
5316     lp[1] = tswap32(entry_2);
5317     return 0;
5318 }
5319 
5320 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5321 {
5322     struct target_modify_ldt_ldt_s *target_ldt_info;
5323     uint64_t *gdt_table = g2h(env->gdt.base);
5324     uint32_t base_addr, limit, flags;
5325     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5326     int seg_not_present, useable, lm;
5327     uint32_t *lp, entry_1, entry_2;
5328 
5329     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5330     if (!target_ldt_info)
5331         return -TARGET_EFAULT;
5332     idx = tswap32(target_ldt_info->entry_number);
5333     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5334         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5335         unlock_user_struct(target_ldt_info, ptr, 1);
5336         return -TARGET_EINVAL;
5337     }
5338     lp = (uint32_t *)(gdt_table + idx);
5339     entry_1 = tswap32(lp[0]);
5340     entry_2 = tswap32(lp[1]);
5341 
5342     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5343     contents = (entry_2 >> 10) & 3;
5344     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5345     seg_32bit = (entry_2 >> 22) & 1;
5346     limit_in_pages = (entry_2 >> 23) & 1;
5347     useable = (entry_2 >> 20) & 1;
5348 #ifdef TARGET_ABI32
5349     lm = 0;
5350 #else
5351     lm = (entry_2 >> 21) & 1;
5352 #endif
5353     flags = (seg_32bit << 0) | (contents << 1) |
5354         (read_exec_only << 3) | (limit_in_pages << 4) |
5355         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5356     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5357     base_addr = (entry_1 >> 16) |
5358         (entry_2 & 0xff000000) |
5359         ((entry_2 & 0xff) << 16);
5360     target_ldt_info->base_addr = tswapal(base_addr);
5361     target_ldt_info->limit = tswap32(limit);
5362     target_ldt_info->flags = tswap32(flags);
5363     unlock_user_struct(target_ldt_info, ptr, 1);
5364     return 0;
5365 }
5366 #endif /* TARGET_I386 && TARGET_ABI32 */
5367 
5368 #ifndef TARGET_ABI32
5369 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5370 {
5371     abi_long ret = 0;
5372     abi_ulong val;
5373     int idx;
5374 
5375     switch(code) {
5376     case TARGET_ARCH_SET_GS:
5377     case TARGET_ARCH_SET_FS:
5378         if (code == TARGET_ARCH_SET_GS)
5379             idx = R_GS;
5380         else
5381             idx = R_FS;
5382         cpu_x86_load_seg(env, idx, 0);
5383         env->segs[idx].base = addr;
5384         break;
5385     case TARGET_ARCH_GET_GS:
5386     case TARGET_ARCH_GET_FS:
5387         if (code == TARGET_ARCH_GET_GS)
5388             idx = R_GS;
5389         else
5390             idx = R_FS;
5391         val = env->segs[idx].base;
5392         if (put_user(val, addr, abi_ulong))
5393             ret = -TARGET_EFAULT;
5394         break;
5395     default:
5396         ret = -TARGET_EINVAL;
5397         break;
5398     }
5399     return ret;
5400 }
5401 #endif
5402 
5403 #endif /* defined(TARGET_I386) */
5404 
5405 #define NEW_STACK_SIZE 0x40000
5406 
5407 
5408 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5409 typedef struct {
5410     CPUArchState *env;
5411     pthread_mutex_t mutex;
5412     pthread_cond_t cond;
5413     pthread_t thread;
5414     uint32_t tid;
5415     abi_ulong child_tidptr;
5416     abi_ulong parent_tidptr;
5417     sigset_t sigmask;
5418 } new_thread_info;
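/*
 * Sketch of the handshake implemented below (derived from the code in
 * clone_func() and do_fork()): the parent takes clone_lock, creates the
 * child thread and waits on info.cond; the child fills in info.tid,
 * signals info.cond, then briefly takes clone_lock itself so it cannot
 * enter cpu_loop() before the parent has finished the remaining setup
 * and released the lock.
 */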
5419 
5420 static void *clone_func(void *arg)
5421 {
5422     new_thread_info *info = arg;
5423     CPUArchState *env;
5424     CPUState *cpu;
5425     TaskState *ts;
5426 
5427     rcu_register_thread();
5428     tcg_register_thread();
5429     env = info->env;
5430     cpu = ENV_GET_CPU(env);
5431     thread_cpu = cpu;
5432     ts = (TaskState *)cpu->opaque;
5433     info->tid = gettid();
5434     task_settid(ts);
5435     if (info->child_tidptr)
5436         put_user_u32(info->tid, info->child_tidptr);
5437     if (info->parent_tidptr)
5438         put_user_u32(info->tid, info->parent_tidptr);
5439     /* Enable signals.  */
5440     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5441     /* Signal to the parent that we're ready.  */
5442     pthread_mutex_lock(&info->mutex);
5443     pthread_cond_broadcast(&info->cond);
5444     pthread_mutex_unlock(&info->mutex);
5445     /* Wait until the parent has finished initializing the tls state.  */
5446     pthread_mutex_lock(&clone_lock);
5447     pthread_mutex_unlock(&clone_lock);
5448     cpu_loop(env);
5449     /* never exits */
5450     return NULL;
5451 }
5452 
5453 /* do_fork() must return host values and target errnos (unlike most
5454    do_*() functions). */
5455 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5456                    abi_ulong parent_tidptr, target_ulong newtls,
5457                    abi_ulong child_tidptr)
5458 {
5459     CPUState *cpu = ENV_GET_CPU(env);
5460     int ret;
5461     TaskState *ts;
5462     CPUState *new_cpu;
5463     CPUArchState *new_env;
5464     sigset_t sigmask;
5465 
5466     flags &= ~CLONE_IGNORED_FLAGS;
5467 
5468     /* Emulate vfork() with fork() */
5469     if (flags & CLONE_VFORK)
5470         flags &= ~(CLONE_VFORK | CLONE_VM);
5471 
5472     if (flags & CLONE_VM) {
5473         TaskState *parent_ts = (TaskState *)cpu->opaque;
5474         new_thread_info info;
5475         pthread_attr_t attr;
5476 
5477         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5478             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5479             return -TARGET_EINVAL;
5480         }
5481 
5482         ts = g_new0(TaskState, 1);
5483         init_task_state(ts);
5484 
5485         /* Grab a mutex so that thread setup appears atomic.  */
5486         pthread_mutex_lock(&clone_lock);
5487 
5488         /* we create a new CPU instance. */
5489         new_env = cpu_copy(env);
5490         /* Init regs that differ from the parent.  */
5491         cpu_clone_regs(new_env, newsp);
5492         new_cpu = ENV_GET_CPU(new_env);
5493         new_cpu->opaque = ts;
5494         ts->bprm = parent_ts->bprm;
5495         ts->info = parent_ts->info;
5496         ts->signal_mask = parent_ts->signal_mask;
5497 
5498         if (flags & CLONE_CHILD_CLEARTID) {
5499             ts->child_tidptr = child_tidptr;
5500         }
5501 
5502         if (flags & CLONE_SETTLS) {
5503             cpu_set_tls (new_env, newtls);
5504         }
5505 
5506         memset(&info, 0, sizeof(info));
5507         pthread_mutex_init(&info.mutex, NULL);
5508         pthread_mutex_lock(&info.mutex);
5509         pthread_cond_init(&info.cond, NULL);
5510         info.env = new_env;
5511         if (flags & CLONE_CHILD_SETTID) {
5512             info.child_tidptr = child_tidptr;
5513         }
5514         if (flags & CLONE_PARENT_SETTID) {
5515             info.parent_tidptr = parent_tidptr;
5516         }
5517 
5518         ret = pthread_attr_init(&attr);
5519         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5520         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5521         /* It is not safe to deliver signals until the child has finished
5522            initializing, so temporarily block all signals.  */
5523         sigfillset(&sigmask);
5524         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5525 
5526         /* If this is our first additional thread, we need to ensure we
5527          * generate code for parallel execution and flush old translations.
5528          */
5529         if (!parallel_cpus) {
5530             parallel_cpus = true;
5531             tb_flush(cpu);
5532         }
5533 
5534         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5535         /* TODO: Free new CPU state if thread creation failed.  */
5536 
5537         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5538         pthread_attr_destroy(&attr);
5539         if (ret == 0) {
5540             /* Wait for the child to initialize.  */
5541             pthread_cond_wait(&info.cond, &info.mutex);
5542             ret = info.tid;
5543         } else {
5544             ret = -1;
5545         }
5546         pthread_mutex_unlock(&info.mutex);
5547         pthread_cond_destroy(&info.cond);
5548         pthread_mutex_destroy(&info.mutex);
5549         pthread_mutex_unlock(&clone_lock);
5550     } else {
5551         /* if CLONE_VM is not set, we consider this a fork */
5552         if (flags & CLONE_INVALID_FORK_FLAGS) {
5553             return -TARGET_EINVAL;
5554         }
5555 
5556         /* We can't support custom termination signals */
5557         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5558             return -TARGET_EINVAL;
5559         }
5560 
5561         if (block_signals()) {
5562             return -TARGET_ERESTARTSYS;
5563         }
5564 
5565         fork_start();
5566         ret = fork();
5567         if (ret == 0) {
5568             /* Child Process.  */
5569             cpu_clone_regs(env, newsp);
5570             fork_end(1);
5571             /* There is a race condition here.  The parent process could
5572                theoretically read the TID in the child process before the child
5573                tid is set.  Avoiding it would require using either ptrace
5574                (not implemented) or having *_tidptr point at a shared memory
5575                mapping.  We can't repeat the spinlock hack used above because
5576                the child process gets its own copy of the lock.  */
5577             if (flags & CLONE_CHILD_SETTID)
5578                 put_user_u32(gettid(), child_tidptr);
5579             if (flags & CLONE_PARENT_SETTID)
5580                 put_user_u32(gettid(), parent_tidptr);
5581             ts = (TaskState *)cpu->opaque;
5582             if (flags & CLONE_SETTLS)
5583                 cpu_set_tls (env, newtls);
5584             if (flags & CLONE_CHILD_CLEARTID)
5585                 ts->child_tidptr = child_tidptr;
5586         } else {
5587             fork_end(0);
5588         }
5589     }
5590     return ret;
5591 }
5592 
5593 /* warning: doesn't handle Linux-specific flags... */
5594 static int target_to_host_fcntl_cmd(int cmd)
5595 {
5596     int ret;
5597 
5598     switch(cmd) {
5599     case TARGET_F_DUPFD:
5600     case TARGET_F_GETFD:
5601     case TARGET_F_SETFD:
5602     case TARGET_F_GETFL:
5603     case TARGET_F_SETFL:
5604         ret = cmd;
5605         break;
5606     case TARGET_F_GETLK:
5607         ret = F_GETLK64;
5608         break;
5609     case TARGET_F_SETLK:
5610         ret = F_SETLK64;
5611         break;
5612     case TARGET_F_SETLKW:
5613         ret = F_SETLKW64;
5614         break;
5615     case TARGET_F_GETOWN:
5616         ret = F_GETOWN;
5617         break;
5618     case TARGET_F_SETOWN:
5619         ret = F_SETOWN;
5620         break;
5621     case TARGET_F_GETSIG:
5622         ret = F_GETSIG;
5623         break;
5624     case TARGET_F_SETSIG:
5625         ret = F_SETSIG;
5626         break;
5627 #if TARGET_ABI_BITS == 32
5628     case TARGET_F_GETLK64:
5629         ret = F_GETLK64;
5630         break;
5631     case TARGET_F_SETLK64:
5632         ret = F_SETLK64;
5633         break;
5634     case TARGET_F_SETLKW64:
5635         ret = F_SETLKW64;
5636         break;
5637 #endif
5638     case TARGET_F_SETLEASE:
5639         ret = F_SETLEASE;
5640         break;
5641     case TARGET_F_GETLEASE:
5642         ret = F_GETLEASE;
5643         break;
5644 #ifdef F_DUPFD_CLOEXEC
5645     case TARGET_F_DUPFD_CLOEXEC:
5646         ret = F_DUPFD_CLOEXEC;
5647         break;
5648 #endif
5649     case TARGET_F_NOTIFY:
5650         ret = F_NOTIFY;
5651         break;
5652 #ifdef F_GETOWN_EX
5653     case TARGET_F_GETOWN_EX:
5654         ret = F_GETOWN_EX;
5655         break;
5656 #endif
5657 #ifdef F_SETOWN_EX
5658     case TARGET_F_SETOWN_EX:
5659         ret = F_SETOWN_EX;
5660         break;
5661 #endif
5662 #ifdef F_SETPIPE_SZ
5663     case TARGET_F_SETPIPE_SZ:
5664         ret = F_SETPIPE_SZ;
5665         break;
5666     case TARGET_F_GETPIPE_SZ:
5667         ret = F_GETPIPE_SZ;
5668         break;
5669 #endif
5670     default:
5671         ret = -TARGET_EINVAL;
5672         break;
5673     }
5674 
5675 #if defined(__powerpc64__)
5676     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
5677      * the kernel does not support. The glibc fcntl wrapper adjusts them
5678      * to 5, 6 and 7 before making the syscall(). Since we make the syscall
5679      * directly, adjust them here (12, 13, 14 -> 5, 6, 7) likewise.
5680      */
5681     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5682         ret -= F_GETLK64 - 5;
5683     }
5684 #endif
5685 
5686     return ret;
5687 }
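/*
 * Note that TARGET_F_GETLK/SETLK/SETLKW above are mapped to the host's
 * 64-bit variants: do_fcntl() below always works with a host
 * struct flock64, converting from the target's 32- or 64-bit flock
 * layout as appropriate, so the 64-bit host commands are the ones that
 * match the data actually passed to the kernel.
 */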
5688 
5689 #define FLOCK_TRANSTBL \
5690     switch (type) { \
5691     TRANSTBL_CONVERT(F_RDLCK); \
5692     TRANSTBL_CONVERT(F_WRLCK); \
5693     TRANSTBL_CONVERT(F_UNLCK); \
5694     TRANSTBL_CONVERT(F_EXLCK); \
5695     TRANSTBL_CONVERT(F_SHLCK); \
5696     }
5697 
5698 static int target_to_host_flock(int type)
5699 {
5700 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5701     FLOCK_TRANSTBL
5702 #undef  TRANSTBL_CONVERT
5703     return -TARGET_EINVAL;
5704 }
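/*
 * For illustration, with the TRANSTBL_CONVERT definition above, each
 * FLOCK_TRANSTBL line such as TRANSTBL_CONVERT(F_RDLCK) expands to
 * "case TARGET_F_RDLCK: return F_RDLCK;", so the same switch body can
 * be reused in both directions by redefining the macro.
 */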
5705 
5706 static int host_to_target_flock(int type)
5707 {
5708 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5709     FLOCK_TRANSTBL
5710 #undef  TRANSTBL_CONVERT
5711     /* if we don't know how to convert the value coming
5712      * from the host, we copy it to the target field as-is
5713      */
5714     return type;
5715 }
5716 
5717 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5718                                             abi_ulong target_flock_addr)
5719 {
5720     struct target_flock *target_fl;
5721     int l_type;
5722 
5723     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5724         return -TARGET_EFAULT;
5725     }
5726 
5727     __get_user(l_type, &target_fl->l_type);
5728     l_type = target_to_host_flock(l_type);
5729     if (l_type < 0) {
5730         return l_type;
5731     }
5732     fl->l_type = l_type;
5733     __get_user(fl->l_whence, &target_fl->l_whence);
5734     __get_user(fl->l_start, &target_fl->l_start);
5735     __get_user(fl->l_len, &target_fl->l_len);
5736     __get_user(fl->l_pid, &target_fl->l_pid);
5737     unlock_user_struct(target_fl, target_flock_addr, 0);
5738     return 0;
5739 }
5740 
5741 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5742                                           const struct flock64 *fl)
5743 {
5744     struct target_flock *target_fl;
5745     short l_type;
5746 
5747     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5748         return -TARGET_EFAULT;
5749     }
5750 
5751     l_type = host_to_target_flock(fl->l_type);
5752     __put_user(l_type, &target_fl->l_type);
5753     __put_user(fl->l_whence, &target_fl->l_whence);
5754     __put_user(fl->l_start, &target_fl->l_start);
5755     __put_user(fl->l_len, &target_fl->l_len);
5756     __put_user(fl->l_pid, &target_fl->l_pid);
5757     unlock_user_struct(target_fl, target_flock_addr, 1);
5758     return 0;
5759 }
5760 
5761 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5762 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5763 
5764 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5765 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5766                                                    abi_ulong target_flock_addr)
5767 {
5768     struct target_oabi_flock64 *target_fl;
5769     int l_type;
5770 
5771     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5772         return -TARGET_EFAULT;
5773     }
5774 
5775     __get_user(l_type, &target_fl->l_type);
5776     l_type = target_to_host_flock(l_type);
5777     if (l_type < 0) {
5778         return l_type;
5779     }
5780     fl->l_type = l_type;
5781     __get_user(fl->l_whence, &target_fl->l_whence);
5782     __get_user(fl->l_start, &target_fl->l_start);
5783     __get_user(fl->l_len, &target_fl->l_len);
5784     __get_user(fl->l_pid, &target_fl->l_pid);
5785     unlock_user_struct(target_fl, target_flock_addr, 0);
5786     return 0;
5787 }
5788 
5789 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5790                                                  const struct flock64 *fl)
5791 {
5792     struct target_oabi_flock64 *target_fl;
5793     short l_type;
5794 
5795     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5796         return -TARGET_EFAULT;
5797     }
5798 
5799     l_type = host_to_target_flock(fl->l_type);
5800     __put_user(l_type, &target_fl->l_type);
5801     __put_user(fl->l_whence, &target_fl->l_whence);
5802     __put_user(fl->l_start, &target_fl->l_start);
5803     __put_user(fl->l_len, &target_fl->l_len);
5804     __put_user(fl->l_pid, &target_fl->l_pid);
5805     unlock_user_struct(target_fl, target_flock_addr, 1);
5806     return 0;
5807 }
5808 #endif
5809 
5810 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5811                                               abi_ulong target_flock_addr)
5812 {
5813     struct target_flock64 *target_fl;
5814     int l_type;
5815 
5816     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5817         return -TARGET_EFAULT;
5818     }
5819 
5820     __get_user(l_type, &target_fl->l_type);
5821     l_type = target_to_host_flock(l_type);
5822     if (l_type < 0) {
5823         return l_type;
5824     }
5825     fl->l_type = l_type;
5826     __get_user(fl->l_whence, &target_fl->l_whence);
5827     __get_user(fl->l_start, &target_fl->l_start);
5828     __get_user(fl->l_len, &target_fl->l_len);
5829     __get_user(fl->l_pid, &target_fl->l_pid);
5830     unlock_user_struct(target_fl, target_flock_addr, 0);
5831     return 0;
5832 }
5833 
5834 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5835                                             const struct flock64 *fl)
5836 {
5837     struct target_flock64 *target_fl;
5838     short l_type;
5839 
5840     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5841         return -TARGET_EFAULT;
5842     }
5843 
5844     l_type = host_to_target_flock(fl->l_type);
5845     __put_user(l_type, &target_fl->l_type);
5846     __put_user(fl->l_whence, &target_fl->l_whence);
5847     __put_user(fl->l_start, &target_fl->l_start);
5848     __put_user(fl->l_len, &target_fl->l_len);
5849     __put_user(fl->l_pid, &target_fl->l_pid);
5850     unlock_user_struct(target_fl, target_flock_addr, 1);
5851     return 0;
5852 }
5853 
5854 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5855 {
5856     struct flock64 fl64;
5857 #ifdef F_GETOWN_EX
5858     struct f_owner_ex fox;
5859     struct target_f_owner_ex *target_fox;
5860 #endif
5861     abi_long ret;
5862     int host_cmd = target_to_host_fcntl_cmd(cmd);
5863 
5864     if (host_cmd == -TARGET_EINVAL)
5865 	    return host_cmd;
5866 
5867     switch(cmd) {
5868     case TARGET_F_GETLK:
5869         ret = copy_from_user_flock(&fl64, arg);
5870         if (ret) {
5871             return ret;
5872         }
5873         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5874         if (ret == 0) {
5875             ret = copy_to_user_flock(arg, &fl64);
5876         }
5877         break;
5878 
5879     case TARGET_F_SETLK:
5880     case TARGET_F_SETLKW:
5881         ret = copy_from_user_flock(&fl64, arg);
5882         if (ret) {
5883             return ret;
5884         }
5885         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5886         break;
5887 
5888     case TARGET_F_GETLK64:
5889         ret = copy_from_user_flock64(&fl64, arg);
5890         if (ret) {
5891             return ret;
5892         }
5893         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5894         if (ret == 0) {
5895             ret = copy_to_user_flock64(arg, &fl64);
5896         }
5897         break;
5898     case TARGET_F_SETLK64:
5899     case TARGET_F_SETLKW64:
5900         ret = copy_from_user_flock64(&fl64, arg);
5901         if (ret) {
5902             return ret;
5903         }
5904         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5905         break;
5906 
5907     case TARGET_F_GETFL:
5908         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5909         if (ret >= 0) {
5910             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5911         }
5912         break;
5913 
5914     case TARGET_F_SETFL:
5915         ret = get_errno(safe_fcntl(fd, host_cmd,
5916                                    target_to_host_bitmask(arg,
5917                                                           fcntl_flags_tbl)));
5918         break;
5919 
5920 #ifdef F_GETOWN_EX
5921     case TARGET_F_GETOWN_EX:
5922         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5923         if (ret >= 0) {
5924             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5925                 return -TARGET_EFAULT;
5926             target_fox->type = tswap32(fox.type);
5927             target_fox->pid = tswap32(fox.pid);
5928             unlock_user_struct(target_fox, arg, 1);
5929         }
5930         break;
5931 #endif
5932 
5933 #ifdef F_SETOWN_EX
5934     case TARGET_F_SETOWN_EX:
5935         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5936             return -TARGET_EFAULT;
5937         fox.type = tswap32(target_fox->type);
5938         fox.pid = tswap32(target_fox->pid);
5939         unlock_user_struct(target_fox, arg, 0);
5940         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5941         break;
5942 #endif
5943 
5944     case TARGET_F_SETOWN:
5945     case TARGET_F_GETOWN:
5946     case TARGET_F_SETSIG:
5947     case TARGET_F_GETSIG:
5948     case TARGET_F_SETLEASE:
5949     case TARGET_F_GETLEASE:
5950     case TARGET_F_SETPIPE_SZ:
5951     case TARGET_F_GETPIPE_SZ:
5952         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5953         break;
5954 
5955     default:
5956         ret = get_errno(safe_fcntl(fd, cmd, arg));
5957         break;
5958     }
5959     return ret;
5960 }
5961 
5962 #ifdef USE_UID16
5963 
5964 static inline int high2lowuid(int uid)
5965 {
5966     if (uid > 65535)
5967         return 65534;
5968     else
5969         return uid;
5970 }
5971 
5972 static inline int high2lowgid(int gid)
5973 {
5974     if (gid > 65535)
5975         return 65534;
5976     else
5977         return gid;
5978 }
5979 
5980 static inline int low2highuid(int uid)
5981 {
5982     if ((int16_t)uid == -1)
5983         return -1;
5984     else
5985         return uid;
5986 }
5987 
5988 static inline int low2highgid(int gid)
5989 {
5990     if ((int16_t)gid == -1)
5991         return -1;
5992     else
5993         return gid;
5994 }
5995 static inline int tswapid(int id)
5996 {
5997     return tswap16(id);
5998 }
5999 
6000 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6001 
6002 #else /* !USE_UID16 */
6003 static inline int high2lowuid(int uid)
6004 {
6005     return uid;
6006 }
6007 static inline int high2lowgid(int gid)
6008 {
6009     return gid;
6010 }
6011 static inline int low2highuid(int uid)
6012 {
6013     return uid;
6014 }
6015 static inline int low2highgid(int gid)
6016 {
6017     return gid;
6018 }
6019 static inline int tswapid(int id)
6020 {
6021     return tswap32(id);
6022 }
6023 
6024 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6025 
6026 #endif /* USE_UID16 */
6027 
6028 /* We must do direct syscalls for setting UID/GID, because we want to
6029  * implement the Linux system call semantics of "change only for this thread",
6030  * not the libc/POSIX semantics of "change for all threads in process".
6031  * (See http://ewontfix.com/17/ for more details.)
6032  * We use the 32-bit version of the syscalls if present; if it is not
6033  * then either the host architecture supports 32-bit UIDs natively with
6034  * the standard syscall, or the 16-bit UID is the best we can do.
6035  */
6036 #ifdef __NR_setuid32
6037 #define __NR_sys_setuid __NR_setuid32
6038 #else
6039 #define __NR_sys_setuid __NR_setuid
6040 #endif
6041 #ifdef __NR_setgid32
6042 #define __NR_sys_setgid __NR_setgid32
6043 #else
6044 #define __NR_sys_setgid __NR_setgid
6045 #endif
6046 #ifdef __NR_setresuid32
6047 #define __NR_sys_setresuid __NR_setresuid32
6048 #else
6049 #define __NR_sys_setresuid __NR_setresuid
6050 #endif
6051 #ifdef __NR_setresgid32
6052 #define __NR_sys_setresgid __NR_setresgid32
6053 #else
6054 #define __NR_sys_setresgid __NR_setresgid
6055 #endif
6056 
6057 _syscall1(int, sys_setuid, uid_t, uid)
6058 _syscall1(int, sys_setgid, gid_t, gid)
6059 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6060 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
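/*
 * The _syscallN() macros (defined earlier in this file) generate thin
 * static wrappers around the raw host syscall, so, roughly, a call like
 * sys_setresuid(ruid, euid, suid) goes straight to the kernel using the
 * __NR_sys_setresuid number selected above, without any glibc set*id()
 * all-threads synchronisation machinery getting in the way.
 */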
6061 
6062 void syscall_init(void)
6063 {
6064     IOCTLEntry *ie;
6065     const argtype *arg_type;
6066     int size;
6067     int i;
6068 
6069     thunk_init(STRUCT_MAX);
6070 
6071 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6072 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6073 #include "syscall_types.h"
6074 #undef STRUCT
6075 #undef STRUCT_SPECIAL
6076 
6077     /* Build target_to_host_errno_table[] table from
6078      * host_to_target_errno_table[]. */
6079     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6080         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6081     }
6082 
6083     /* We patch the ioctl size if necessary, relying on the fact that
6084        no ioctl has all bits set to '1' in its size field. */
6085     ie = ioctl_entries;
6086     while (ie->target_cmd != 0) {
6087         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6088             TARGET_IOC_SIZEMASK) {
6089             arg_type = ie->arg_type;
6090             if (arg_type[0] != TYPE_PTR) {
6091                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6092                         ie->target_cmd);
6093                 exit(1);
6094             }
6095             arg_type++;
6096             size = thunk_type_size(arg_type, 0);
6097             ie->target_cmd = (ie->target_cmd &
6098                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6099                 (size << TARGET_IOC_SIZESHIFT);
6100         }
6101 
6102         /* automatic consistency check when host and target arch match */
6103 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6104     (defined(__x86_64__) && defined(TARGET_X86_64))
6105         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6106             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6107                     ie->name, ie->target_cmd, ie->host_cmd);
6108         }
6109 #endif
6110         ie++;
6111     }
6112 }
6113 
6114 #if TARGET_ABI_BITS == 32
6115 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6116 {
6117 #ifdef TARGET_WORDS_BIGENDIAN
6118     return ((uint64_t)word0 << 32) | word1;
6119 #else
6120     return ((uint64_t)word1 << 32) | word0;
6121 #endif
6122 }
6123 #else /* TARGET_ABI_BITS == 32 */
6124 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6125 {
6126     return word0;
6127 }
6128 #endif /* TARGET_ABI_BITS != 32 */
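/*
 * Illustration for the 32-bit ABI case: a guest passing the 64-bit
 * offset 0x0000000123456789 in a register pair supplies, on a
 * little-endian target, word0 = 0x23456789 (low half) and
 * word1 = 0x00000001 (high half); target_offset64() reassembles the
 * original value (the big-endian case simply swaps the roles).
 */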
6129 
6130 #ifdef TARGET_NR_truncate64
6131 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6132                                          abi_long arg2,
6133                                          abi_long arg3,
6134                                          abi_long arg4)
6135 {
6136     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6137         arg2 = arg3;
6138         arg3 = arg4;
6139     }
6140     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6141 }
6142 #endif
6143 
6144 #ifdef TARGET_NR_ftruncate64
6145 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6146                                           abi_long arg2,
6147                                           abi_long arg3,
6148                                           abi_long arg4)
6149 {
6150     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6151         arg2 = arg3;
6152         arg3 = arg4;
6153     }
6154     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6155 }
6156 #endif
6157 
6158 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6159                                                abi_ulong target_addr)
6160 {
6161     struct target_timespec *target_ts;
6162 
6163     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6164         return -TARGET_EFAULT;
6165     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6166     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6167     unlock_user_struct(target_ts, target_addr, 0);
6168     return 0;
6169 }
6170 
6171 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6172                                                struct timespec *host_ts)
6173 {
6174     struct target_timespec *target_ts;
6175 
6176     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6177         return -TARGET_EFAULT;
6178     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6179     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6180     unlock_user_struct(target_ts, target_addr, 1);
6181     return 0;
6182 }
6183 
6184 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6185                                                  abi_ulong target_addr)
6186 {
6187     struct target_itimerspec *target_itspec;
6188 
6189     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6190         return -TARGET_EFAULT;
6191     }
6192 
6193     host_itspec->it_interval.tv_sec =
6194                             tswapal(target_itspec->it_interval.tv_sec);
6195     host_itspec->it_interval.tv_nsec =
6196                             tswapal(target_itspec->it_interval.tv_nsec);
6197     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6198     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6199 
6200     unlock_user_struct(target_itspec, target_addr, 1);
6201     return 0;
6202 }
6203 
6204 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6205                                                struct itimerspec *host_its)
6206 {
6207     struct target_itimerspec *target_itspec;
6208 
6209     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6210         return -TARGET_EFAULT;
6211     }
6212 
6213     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6214     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6215 
6216     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6217     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6218 
6219     unlock_user_struct(target_itspec, target_addr, 0);
6220     return 0;
6221 }
6222 
6223 static inline abi_long target_to_host_timex(struct timex *host_tx,
6224                                             abi_long target_addr)
6225 {
6226     struct target_timex *target_tx;
6227 
6228     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6229         return -TARGET_EFAULT;
6230     }
6231 
6232     __get_user(host_tx->modes, &target_tx->modes);
6233     __get_user(host_tx->offset, &target_tx->offset);
6234     __get_user(host_tx->freq, &target_tx->freq);
6235     __get_user(host_tx->maxerror, &target_tx->maxerror);
6236     __get_user(host_tx->esterror, &target_tx->esterror);
6237     __get_user(host_tx->status, &target_tx->status);
6238     __get_user(host_tx->constant, &target_tx->constant);
6239     __get_user(host_tx->precision, &target_tx->precision);
6240     __get_user(host_tx->tolerance, &target_tx->tolerance);
6241     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6242     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6243     __get_user(host_tx->tick, &target_tx->tick);
6244     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6245     __get_user(host_tx->jitter, &target_tx->jitter);
6246     __get_user(host_tx->shift, &target_tx->shift);
6247     __get_user(host_tx->stabil, &target_tx->stabil);
6248     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6249     __get_user(host_tx->calcnt, &target_tx->calcnt);
6250     __get_user(host_tx->errcnt, &target_tx->errcnt);
6251     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6252     __get_user(host_tx->tai, &target_tx->tai);
6253 
6254     unlock_user_struct(target_tx, target_addr, 0);
6255     return 0;
6256 }
6257 
6258 static inline abi_long host_to_target_timex(abi_long target_addr,
6259                                             struct timex *host_tx)
6260 {
6261     struct target_timex *target_tx;
6262 
6263     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6264         return -TARGET_EFAULT;
6265     }
6266 
6267     __put_user(host_tx->modes, &target_tx->modes);
6268     __put_user(host_tx->offset, &target_tx->offset);
6269     __put_user(host_tx->freq, &target_tx->freq);
6270     __put_user(host_tx->maxerror, &target_tx->maxerror);
6271     __put_user(host_tx->esterror, &target_tx->esterror);
6272     __put_user(host_tx->status, &target_tx->status);
6273     __put_user(host_tx->constant, &target_tx->constant);
6274     __put_user(host_tx->precision, &target_tx->precision);
6275     __put_user(host_tx->tolerance, &target_tx->tolerance);
6276     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6277     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6278     __put_user(host_tx->tick, &target_tx->tick);
6279     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6280     __put_user(host_tx->jitter, &target_tx->jitter);
6281     __put_user(host_tx->shift, &target_tx->shift);
6282     __put_user(host_tx->stabil, &target_tx->stabil);
6283     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6284     __put_user(host_tx->calcnt, &target_tx->calcnt);
6285     __put_user(host_tx->errcnt, &target_tx->errcnt);
6286     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6287     __put_user(host_tx->tai, &target_tx->tai);
6288 
6289     unlock_user_struct(target_tx, target_addr, 1);
6290     return 0;
6291 }
6292 
6293 
6294 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6295                                                abi_ulong target_addr)
6296 {
6297     struct target_sigevent *target_sevp;
6298 
6299     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6300         return -TARGET_EFAULT;
6301     }
6302 
6303     /* This union is awkward on 64 bit systems because it has a 32 bit
6304      * integer and a pointer in it; we follow the conversion approach
6305      * used for handling sigval types in signal.c so the guest should get
6306      * the correct value back even if we did a 64 bit byteswap and it's
6307      * using the 32 bit integer.
6308      */
6309     host_sevp->sigev_value.sival_ptr =
6310         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6311     host_sevp->sigev_signo =
6312         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6313     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6314     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6315 
6316     unlock_user_struct(target_sevp, target_addr, 1);
6317     return 0;
6318 }
6319 
6320 #if defined(TARGET_NR_mlockall)
6321 static inline int target_to_host_mlockall_arg(int arg)
6322 {
6323     int result = 0;
6324 
6325     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6326         result |= MCL_CURRENT;
6327     }
6328     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6329         result |= MCL_FUTURE;
6330     }
6331     return result;
6332 }
6333 #endif
6334 
6335 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6336      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6337      defined(TARGET_NR_newfstatat))
6338 static inline abi_long host_to_target_stat64(void *cpu_env,
6339                                              abi_ulong target_addr,
6340                                              struct stat *host_st)
6341 {
6342 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6343     if (((CPUARMState *)cpu_env)->eabi) {
6344         struct target_eabi_stat64 *target_st;
6345 
6346         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6347             return -TARGET_EFAULT;
6348         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6349         __put_user(host_st->st_dev, &target_st->st_dev);
6350         __put_user(host_st->st_ino, &target_st->st_ino);
6351 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6352         __put_user(host_st->st_ino, &target_st->__st_ino);
6353 #endif
6354         __put_user(host_st->st_mode, &target_st->st_mode);
6355         __put_user(host_st->st_nlink, &target_st->st_nlink);
6356         __put_user(host_st->st_uid, &target_st->st_uid);
6357         __put_user(host_st->st_gid, &target_st->st_gid);
6358         __put_user(host_st->st_rdev, &target_st->st_rdev);
6359         __put_user(host_st->st_size, &target_st->st_size);
6360         __put_user(host_st->st_blksize, &target_st->st_blksize);
6361         __put_user(host_st->st_blocks, &target_st->st_blocks);
6362         __put_user(host_st->st_atime, &target_st->target_st_atime);
6363         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6364         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6365         unlock_user_struct(target_st, target_addr, 1);
6366     } else
6367 #endif
6368     {
6369 #if defined(TARGET_HAS_STRUCT_STAT64)
6370         struct target_stat64 *target_st;
6371 #else
6372         struct target_stat *target_st;
6373 #endif
6374 
6375         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6376             return -TARGET_EFAULT;
6377         memset(target_st, 0, sizeof(*target_st));
6378         __put_user(host_st->st_dev, &target_st->st_dev);
6379         __put_user(host_st->st_ino, &target_st->st_ino);
6380 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6381         __put_user(host_st->st_ino, &target_st->__st_ino);
6382 #endif
6383         __put_user(host_st->st_mode, &target_st->st_mode);
6384         __put_user(host_st->st_nlink, &target_st->st_nlink);
6385         __put_user(host_st->st_uid, &target_st->st_uid);
6386         __put_user(host_st->st_gid, &target_st->st_gid);
6387         __put_user(host_st->st_rdev, &target_st->st_rdev);
6388         /* XXX: better use of kernel struct */
6389         __put_user(host_st->st_size, &target_st->st_size);
6390         __put_user(host_st->st_blksize, &target_st->st_blksize);
6391         __put_user(host_st->st_blocks, &target_st->st_blocks);
6392         __put_user(host_st->st_atime, &target_st->target_st_atime);
6393         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6394         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6395         unlock_user_struct(target_st, target_addr, 1);
6396     }
6397 
6398     return 0;
6399 }
6400 #endif
6401 
6402 /* ??? Using host futex calls even when target atomic operations
6403    are not really atomic probably breaks things.  However, implementing
6404    futexes locally would make it tricky to share futexes between multiple
6405    processes; and such futexes are probably useless anyway, because guest
6406    atomic operations won't work either.  */
6407 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6408                     target_ulong uaddr2, int val3)
6409 {
6410     struct timespec ts, *pts;
6411     int base_op;
6412 
6413     /* ??? We assume FUTEX_* constants are the same on both host
6414        and target.  */
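    /* Masking with FUTEX_CMD_MASK strips modifier bits such as
       FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME, so the switch below
       matches the bare command while the unmodified op (flags included)
       is still what gets passed through to the host futex call.  */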
6415 #ifdef FUTEX_CMD_MASK
6416     base_op = op & FUTEX_CMD_MASK;
6417 #else
6418     base_op = op;
6419 #endif
6420     switch (base_op) {
6421     case FUTEX_WAIT:
6422     case FUTEX_WAIT_BITSET:
6423         if (timeout) {
6424             pts = &ts;
6425             target_to_host_timespec(pts, timeout);
6426         } else {
6427             pts = NULL;
6428         }
6429         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6430                          pts, NULL, val3));
6431     case FUTEX_WAKE:
6432         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6433     case FUTEX_FD:
6434         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6435     case FUTEX_REQUEUE:
6436     case FUTEX_CMP_REQUEUE:
6437     case FUTEX_WAKE_OP:
6438         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6439            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6440            But the prototype takes a `struct timespec *'; insert casts
6441            to satisfy the compiler.  We do not need to tswap TIMEOUT
6442            since it's not compared to guest memory.  */
6443         pts = (struct timespec *)(uintptr_t) timeout;
6444         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6445                                     g2h(uaddr2),
6446                                     (base_op == FUTEX_CMP_REQUEUE
6447                                      ? tswap32(val3)
6448                                      : val3)));
6449     default:
6450         return -TARGET_ENOSYS;
6451     }
6452 }
6453 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6454 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6455                                      abi_long handle, abi_long mount_id,
6456                                      abi_long flags)
6457 {
6458     struct file_handle *target_fh;
6459     struct file_handle *fh;
6460     int mid = 0;
6461     abi_long ret;
6462     char *name;
6463     unsigned int size, total_size;
6464 
6465     if (get_user_s32(size, handle)) {
6466         return -TARGET_EFAULT;
6467     }
6468 
6469     name = lock_user_string(pathname);
6470     if (!name) {
6471         return -TARGET_EFAULT;
6472     }
6473 
6474     total_size = sizeof(struct file_handle) + size;
6475     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6476     if (!target_fh) {
6477         unlock_user(name, pathname, 0);
6478         return -TARGET_EFAULT;
6479     }
6480 
6481     fh = g_malloc0(total_size);
6482     fh->handle_bytes = size;
6483 
6484     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6485     unlock_user(name, pathname, 0);
6486 
6487     /* man name_to_handle_at(2):
6488      * Other than the use of the handle_bytes field, the caller should treat
6489      * the file_handle structure as an opaque data type
6490      */
6491 
6492     memcpy(target_fh, fh, total_size);
6493     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6494     target_fh->handle_type = tswap32(fh->handle_type);
6495     g_free(fh);
6496     unlock_user(target_fh, handle, total_size);
6497 
6498     if (put_user_s32(mid, mount_id)) {
6499         return -TARGET_EFAULT;
6500     }
6501 
6502     return ret;
6503 
6504 }
6505 #endif
6506 
6507 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6508 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6509                                      abi_long flags)
6510 {
6511     struct file_handle *target_fh;
6512     struct file_handle *fh;
6513     unsigned int size, total_size;
6514     abi_long ret;
6515 
6516     if (get_user_s32(size, handle)) {
6517         return -TARGET_EFAULT;
6518     }
6519 
6520     total_size = sizeof(struct file_handle) + size;
6521     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6522     if (!target_fh) {
6523         return -TARGET_EFAULT;
6524     }
6525 
6526     fh = g_memdup(target_fh, total_size);
6527     fh->handle_bytes = size;
6528     fh->handle_type = tswap32(target_fh->handle_type);
6529 
6530     ret = get_errno(open_by_handle_at(mount_fd, fh,
6531                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6532 
6533     g_free(fh);
6534 
6535     unlock_user(target_fh, handle, total_size);
6536 
6537     return ret;
6538 }
6539 #endif
6540 
6541 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6542 
6543 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6544 {
6545     int host_flags;
6546     target_sigset_t *target_mask;
6547     sigset_t host_mask;
6548     abi_long ret;
6549 
6550     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6551         return -TARGET_EINVAL;
6552     }
6553     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6554         return -TARGET_EFAULT;
6555     }
6556 
6557     target_to_host_sigset(&host_mask, target_mask);
6558 
6559     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6560 
6561     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6562     if (ret >= 0) {
6563         fd_trans_register(ret, &target_signalfd_trans);
6564     }
6565 
6566     unlock_user_struct(target_mask, mask, 0);
6567 
6568     return ret;
6569 }
6570 #endif
6571 
6572 /* Map host to target signal numbers for the wait family of syscalls.
6573    Assume all other status bits are the same.  */
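/* Example: a child killed by the host's SIGUSR1 has the signal number in
   the low 7 bits of the status; only those bits are rewritten to the
   target's SIGUSR1 value (the numbers differ on e.g. Alpha and SPARC),
   while the core-dump flag and any exit-status bits pass through
   untouched.  A stopped child keeps 0x7f in its low byte and has the
   stop signal remapped in bits 8-15 instead.  */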
6574 int host_to_target_waitstatus(int status)
6575 {
6576     if (WIFSIGNALED(status)) {
6577         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6578     }
6579     if (WIFSTOPPED(status)) {
6580         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6581                | (status & 0xff);
6582     }
6583     return status;
6584 }
6585 
6586 static int open_self_cmdline(void *cpu_env, int fd)
6587 {
6588     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6589     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6590     int i;
6591 
6592     for (i = 0; i < bprm->argc; i++) {
6593         size_t len = strlen(bprm->argv[i]) + 1;
6594 
6595         if (write(fd, bprm->argv[i], len) != len) {
6596             return -1;
6597         }
6598     }
6599 
6600     return 0;
6601 }
6602 
6603 static int open_self_maps(void *cpu_env, int fd)
6604 {
6605     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6606     TaskState *ts = cpu->opaque;
6607     FILE *fp;
6608     char *line = NULL;
6609     size_t len = 0;
6610     ssize_t read;
6611 
6612     fp = fopen("/proc/self/maps", "r");
6613     if (fp == NULL) {
6614         return -1;
6615     }
6616 
6617     while ((read = getline(&line, &len, fp)) != -1) {
6618         int fields, dev_maj, dev_min, inode;
6619         uint64_t min, max, offset;
6620         char flag_r, flag_w, flag_x, flag_p;
6621         char path[512] = "";
6622         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6623                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6624                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6625 
6626         if ((fields < 10) || (fields > 11)) {
6627             continue;
6628         }
6629         if (h2g_valid(min)) {
6630             int flags = page_get_flags(h2g(min));
6631             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6632             if (page_check_range(h2g(min), max - min, flags) == -1) {
6633                 continue;
6634             }
6635             if (h2g(min) == ts->info->stack_limit) {
6636                 pstrcpy(path, sizeof(path), "      [stack]");
6637             }
6638             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6639                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6640                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6641                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6642                     path[0] ? "         " : "", path);
6643         }
6644     }
6645 
6646     free(line);
6647     fclose(fp);
6648 
6649     return 0;
6650 }
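/*
 * Each host /proc/self/maps line is only re-emitted if its start
 * address maps back into the guest address space; the endpoints are
 * then converted with h2g() so the guest sees guest-virtual addresses,
 * while host-only mappings (QEMU's own code, heap, etc.) are dropped.
 */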
6651 
6652 static int open_self_stat(void *cpu_env, int fd)
6653 {
6654     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6655     TaskState *ts = cpu->opaque;
6656     abi_ulong start_stack = ts->info->start_stack;
6657     int i;
6658 
6659     for (i = 0; i < 44; i++) {
6660       char buf[128];
6661       int len;
6662       uint64_t val = 0;
6663 
6664       if (i == 0) {
6665         /* pid */
6666         val = getpid();
6667         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6668       } else if (i == 1) {
6669         /* app name */
6670         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6671       } else if (i == 27) {
6672         /* stack bottom */
6673         val = start_stack;
6674         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6675       } else {
6676         /* all remaining fields are reported as zero */
6677         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6678       }
6679 
6680       len = strlen(buf);
6681       if (write(fd, buf, len) != len) {
6682           return -1;
6683       }
6684     }
6685 
6686     return 0;
6687 }
6688 
6689 static int open_self_auxv(void *cpu_env, int fd)
6690 {
6691     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6692     TaskState *ts = cpu->opaque;
6693     abi_ulong auxv = ts->info->saved_auxv;
6694     abi_ulong len = ts->info->auxv_len;
6695     char *ptr;
6696 
6697     /*
6698      * The auxiliary vector is stored on the target process's stack;
6699      * read in the whole auxv vector and copy it to the file.
6700      */
6701     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6702     if (ptr != NULL) {
6703         while (len > 0) {
6704             ssize_t r;
6705             r = write(fd, ptr, len);
6706             if (r <= 0) {
6707                 break;
6708             }
6709             len -= r;
6710             ptr += r;
6711         }
6712         lseek(fd, 0, SEEK_SET);
6713         unlock_user(ptr, auxv, len);
6714     }
6715 
6716     return 0;
6717 }
6718 
6719 static int is_proc_myself(const char *filename, const char *entry)
6720 {
6721     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6722         filename += strlen("/proc/");
6723         if (!strncmp(filename, "self/", strlen("self/"))) {
6724             filename += strlen("self/");
6725         } else if (*filename >= '1' && *filename <= '9') {
6726             char myself[80];
6727             snprintf(myself, sizeof(myself), "%d/", getpid());
6728             if (!strncmp(filename, myself, strlen(myself))) {
6729                 filename += strlen(myself);
6730             } else {
6731                 return 0;
6732             }
6733         } else {
6734             return 0;
6735         }
6736         if (!strcmp(filename, entry)) {
6737             return 1;
6738         }
6739     }
6740     return 0;
6741 }
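/*
 * Example: with QEMU running as pid 1234, both "/proc/self/maps" and
 * "/proc/1234/maps" match entry "maps" and return 1, while paths for
 * other pids or for other /proc entries return 0.
 */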
6742 
6743 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6744 static int is_proc(const char *filename, const char *entry)
6745 {
6746     return strcmp(filename, entry) == 0;
6747 }
6748 
6749 static int open_net_route(void *cpu_env, int fd)
6750 {
6751     FILE *fp;
6752     char *line = NULL;
6753     size_t len = 0;
6754     ssize_t read;
6755 
6756     fp = fopen("/proc/net/route", "r");
6757     if (fp == NULL) {
6758         return -1;
6759     }
6760 
6761     /* read header */
6762 
6763     read = getline(&line, &len, fp);
6764     dprintf(fd, "%s", line);
6765 
6766     /* read routes */
6767 
6768     while ((read = getline(&line, &len, fp)) != -1) {
6769         char iface[16];
6770         uint32_t dest, gw, mask;
6771         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6772         int fields;
6773 
6774         fields = sscanf(line,
6775                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6776                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6777                         &mask, &mtu, &window, &irtt);
6778         if (fields != 11) {
6779             continue;
6780         }
6781         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6782                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6783                 metric, tswap32(mask), mtu, window, irtt);
6784     }
6785 
6786     free(line);
6787     fclose(fp);
6788 
6789     return 0;
6790 }
6791 #endif
6792 
6793 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6794 {
6795     struct fake_open {
6796         const char *filename;
6797         int (*fill)(void *cpu_env, int fd);
6798         int (*cmp)(const char *s1, const char *s2);
6799     };
6800     const struct fake_open *fake_open;
6801     static const struct fake_open fakes[] = {
6802         { "maps", open_self_maps, is_proc_myself },
6803         { "stat", open_self_stat, is_proc_myself },
6804         { "auxv", open_self_auxv, is_proc_myself },
6805         { "cmdline", open_self_cmdline, is_proc_myself },
6806 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6807         { "/proc/net/route", open_net_route, is_proc },
6808 #endif
6809         { NULL, NULL, NULL }
6810     };
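    /*
     * Opens of the paths above cannot simply be forwarded to the host:
     * the host's files describe the QEMU process itself and use host
     * layout/byte order.  A matching entry is instead emulated below by
     * filling an unlinked temporary file with target-formatted contents
     * and returning that descriptor to the guest.
     */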
6811 
6812     if (is_proc_myself(pathname, "exe")) {
6813         int execfd = qemu_getauxval(AT_EXECFD);
6814         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6815     }
6816 
6817     for (fake_open = fakes; fake_open->filename; fake_open++) {
6818         if (fake_open->cmp(pathname, fake_open->filename)) {
6819             break;
6820         }
6821     }
6822 
6823     if (fake_open->filename) {
6824         const char *tmpdir;
6825         char filename[PATH_MAX];
6826         int fd, r;
6827 
6828         /* create a temporary file to hold the emulated contents */
6829         tmpdir = getenv("TMPDIR");
6830         if (!tmpdir)
6831             tmpdir = "/tmp";
6832         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6833         fd = mkstemp(filename);
6834         if (fd < 0) {
6835             return fd;
6836         }
6837         unlink(filename);
6838 
6839         if ((r = fake_open->fill(cpu_env, fd))) {
6840             int e = errno;
6841             close(fd);
6842             errno = e;
6843             return r;
6844         }
6845         lseek(fd, 0, SEEK_SET);
6846 
6847         return fd;
6848     }
6849 
6850     return safe_openat(dirfd, path(pathname), flags, mode);
6851 }
6852 
6853 #define TIMER_MAGIC 0x0caf0000
6854 #define TIMER_MAGIC_MASK 0xffff0000
6855 
6856 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6857 static target_timer_t get_timer_id(abi_long arg)
6858 {
6859     target_timer_t timerid = arg;
6860 
6861     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6862         return -TARGET_EINVAL;
6863     }
6864 
6865     timerid &= 0xffff;
6866 
6867     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6868         return -TARGET_EINVAL;
6869     }
6870 
6871     return timerid;
6872 }
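/*
 * Example: a guest-visible timer ID of 0x0caf0003 passes the magic
 * check and maps to internal index 3; any value whose upper 16 bits
 * are not TIMER_MAGIC, or whose index is outside g_posix_timers,
 * yields -TARGET_EINVAL.
 */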
6873 
6874 static int target_to_host_cpu_mask(unsigned long *host_mask,
6875                                    size_t host_size,
6876                                    abi_ulong target_addr,
6877                                    size_t target_size)
6878 {
6879     unsigned target_bits = sizeof(abi_ulong) * 8;
6880     unsigned host_bits = sizeof(*host_mask) * 8;
6881     abi_ulong *target_mask;
6882     unsigned i, j;
6883 
6884     assert(host_size >= target_size);
6885 
6886     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6887     if (!target_mask) {
6888         return -TARGET_EFAULT;
6889     }
6890     memset(host_mask, 0, host_size);
6891 
6892     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6893         unsigned bit = i * target_bits;
6894         abi_ulong val;
6895 
6896         __get_user(val, &target_mask[i]);
6897         for (j = 0; j < target_bits; j++, bit++) {
6898             if (val & (1UL << j)) {
6899                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6900             }
6901         }
6902     }
6903 
6904     unlock_user(target_mask, target_addr, 0);
6905     return 0;
6906 }
6907 
6908 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6909                                    size_t host_size,
6910                                    abi_ulong target_addr,
6911                                    size_t target_size)
6912 {
6913     unsigned target_bits = sizeof(abi_ulong) * 8;
6914     unsigned host_bits = sizeof(*host_mask) * 8;
6915     abi_ulong *target_mask;
6916     unsigned i, j;
6917 
6918     assert(host_size >= target_size);
6919 
6920     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6921     if (!target_mask) {
6922         return -TARGET_EFAULT;
6923     }
6924 
6925     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6926         unsigned bit = i * target_bits;
6927         abi_ulong val = 0;
6928 
6929         for (j = 0; j < target_bits; j++, bit++) {
6930             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6931                 val |= 1UL << j;
6932             }
6933         }
6934         __put_user(val, &target_mask[i]);
6935     }
6936 
6937     unlock_user(target_mask, target_addr, target_size);
6938     return 0;
6939 }
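/*
 * Worked example for the two helpers above: with a 32-bit target
 * (abi_ulong) and a 64-bit host (unsigned long), guest CPU 40 lives in
 * target word 1, bit 8; both conversions place it at host word 0,
 * bit 40, so the affinity mask stays consistent despite the differing
 * word sizes.
 */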
6940 
6941 /* This is an internal helper for do_syscall so that it is easier
6942  * to have a single return point, which in turn allows actions such as
6943  * logging of syscall results to be performed in one place.
6944  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6945  */
6946 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6947                             abi_long arg2, abi_long arg3, abi_long arg4,
6948                             abi_long arg5, abi_long arg6, abi_long arg7,
6949                             abi_long arg8)
6950 {
6951     CPUState *cpu = ENV_GET_CPU(cpu_env);
6952     abi_long ret;
6953 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6954     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6955     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6956     struct stat st;
6957 #endif
6958 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6959     || defined(TARGET_NR_fstatfs)
6960     struct statfs stfs;
6961 #endif
6962     void *p;
6963 
6964     switch(num) {
6965     case TARGET_NR_exit:
6966         /* In old applications this may be used to implement _exit(2).
6967            However in threaded applications it is used for thread termination,
6968            and _exit_group is used for application termination.
6969            Do thread termination if we have more than one thread.  */
6970 
6971         if (block_signals()) {
6972             return -TARGET_ERESTARTSYS;
6973         }
6974 
6975         cpu_list_lock();
6976 
6977         if (CPU_NEXT(first_cpu)) {
6978             TaskState *ts;
6979 
6980             /* Remove the CPU from the list.  */
6981             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6982 
6983             cpu_list_unlock();
6984 
6985             ts = cpu->opaque;
6986             if (ts->child_tidptr) {
6987                 put_user_u32(0, ts->child_tidptr);
6988                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6989                           NULL, NULL, 0);
6990             }
6991             thread_cpu = NULL;
6992             object_unref(OBJECT(cpu));
6993             g_free(ts);
6994             rcu_unregister_thread();
6995             pthread_exit(NULL);
6996         }
6997 
6998         cpu_list_unlock();
6999         preexit_cleanup(cpu_env, arg1);
7000         _exit(arg1);
7001         return 0; /* avoid warning */
7002     case TARGET_NR_read:
7003         if (arg3 == 0) {
7004             return 0;
7005         } else {
7006             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7007                 return -TARGET_EFAULT;
7008             ret = get_errno(safe_read(arg1, p, arg3));
7009             if (ret >= 0 &&
7010                 fd_trans_host_to_target_data(arg1)) {
7011                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7012             }
7013             unlock_user(p, arg2, ret);
7014         }
7015         return ret;
7016     case TARGET_NR_write:
7017         if (arg2 == 0 && arg3 == 0) {
7018             return get_errno(safe_write(arg1, 0, 0));
7019         }
7020         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7021             return -TARGET_EFAULT;
7022         if (fd_trans_target_to_host_data(arg1)) {
7023             void *copy = g_malloc(arg3);
7024             memcpy(copy, p, arg3);
7025             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7026             if (ret >= 0) {
7027                 ret = get_errno(safe_write(arg1, copy, ret));
7028             }
7029             g_free(copy);
7030         } else {
7031             ret = get_errno(safe_write(arg1, p, arg3));
7032         }
7033         unlock_user(p, arg2, 0);
7034         return ret;
7035 
7036 #ifdef TARGET_NR_open
7037     case TARGET_NR_open:
7038         if (!(p = lock_user_string(arg1)))
7039             return -TARGET_EFAULT;
7040         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7041                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7042                                   arg3));
7043         fd_trans_unregister(ret);
7044         unlock_user(p, arg1, 0);
7045         return ret;
7046 #endif
7047     case TARGET_NR_openat:
7048         if (!(p = lock_user_string(arg2)))
7049             return -TARGET_EFAULT;
7050         ret = get_errno(do_openat(cpu_env, arg1, p,
7051                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7052                                   arg4));
7053         fd_trans_unregister(ret);
7054         unlock_user(p, arg2, 0);
7055         return ret;
7056 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7057     case TARGET_NR_name_to_handle_at:
7058         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7059         return ret;
7060 #endif
7061 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7062     case TARGET_NR_open_by_handle_at:
7063         ret = do_open_by_handle_at(arg1, arg2, arg3);
7064         fd_trans_unregister(ret);
7065         return ret;
7066 #endif
7067     case TARGET_NR_close:
7068         fd_trans_unregister(arg1);
7069         return get_errno(close(arg1));
7070 
7071     case TARGET_NR_brk:
7072         return do_brk(arg1);
7073 #ifdef TARGET_NR_fork
7074     case TARGET_NR_fork:
7075         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7076 #endif
7077 #ifdef TARGET_NR_waitpid
7078     case TARGET_NR_waitpid:
7079         {
7080             int status;
7081             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7082             if (!is_error(ret) && arg2 && ret
7083                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7084                 return -TARGET_EFAULT;
7085         }
7086         return ret;
7087 #endif
7088 #ifdef TARGET_NR_waitid
7089     case TARGET_NR_waitid:
7090         {
7091             siginfo_t info;
7092             info.si_pid = 0;
7093             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7094             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7095                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7096                     return -TARGET_EFAULT;
7097                 host_to_target_siginfo(p, &info);
7098                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7099             }
7100         }
7101         return ret;
7102 #endif
7103 #ifdef TARGET_NR_creat /* not on alpha */
7104     case TARGET_NR_creat:
7105         if (!(p = lock_user_string(arg1)))
7106             return -TARGET_EFAULT;
7107         ret = get_errno(creat(p, arg2));
7108         fd_trans_unregister(ret);
7109         unlock_user(p, arg1, 0);
7110         return ret;
7111 #endif
7112 #ifdef TARGET_NR_link
7113     case TARGET_NR_link:
7114         {
7115             void * p2;
7116             p = lock_user_string(arg1);
7117             p2 = lock_user_string(arg2);
7118             if (!p || !p2)
7119                 ret = -TARGET_EFAULT;
7120             else
7121                 ret = get_errno(link(p, p2));
7122             unlock_user(p2, arg2, 0);
7123             unlock_user(p, arg1, 0);
7124         }
7125         return ret;
7126 #endif
7127 #if defined(TARGET_NR_linkat)
7128     case TARGET_NR_linkat:
7129         {
7130             void * p2 = NULL;
7131             if (!arg2 || !arg4)
7132                 return -TARGET_EFAULT;
7133             p  = lock_user_string(arg2);
7134             p2 = lock_user_string(arg4);
7135             if (!p || !p2)
7136                 ret = -TARGET_EFAULT;
7137             else
7138                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7139             unlock_user(p, arg2, 0);
7140             unlock_user(p2, arg4, 0);
7141         }
7142         return ret;
7143 #endif
7144 #ifdef TARGET_NR_unlink
7145     case TARGET_NR_unlink:
7146         if (!(p = lock_user_string(arg1)))
7147             return -TARGET_EFAULT;
7148         ret = get_errno(unlink(p));
7149         unlock_user(p, arg1, 0);
7150         return ret;
7151 #endif
7152 #if defined(TARGET_NR_unlinkat)
7153     case TARGET_NR_unlinkat:
7154         if (!(p = lock_user_string(arg2)))
7155             return -TARGET_EFAULT;
7156         ret = get_errno(unlinkat(arg1, p, arg3));
7157         unlock_user(p, arg2, 0);
7158         return ret;
7159 #endif
7160     case TARGET_NR_execve:
7161         {
7162             char **argp, **envp;
7163             int argc, envc;
7164             abi_ulong gp;
7165             abi_ulong guest_argp;
7166             abi_ulong guest_envp;
7167             abi_ulong addr;
7168             char **q;
7169             int total_size = 0;
7170 
7171             argc = 0;
7172             guest_argp = arg2;
7173             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7174                 if (get_user_ual(addr, gp))
7175                     return -TARGET_EFAULT;
7176                 if (!addr)
7177                     break;
7178                 argc++;
7179             }
7180             envc = 0;
7181             guest_envp = arg3;
7182             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7183                 if (get_user_ual(addr, gp))
7184                     return -TARGET_EFAULT;
7185                 if (!addr)
7186                     break;
7187                 envc++;
7188             }
7189 
7190             argp = g_new0(char *, argc + 1);
7191             envp = g_new0(char *, envc + 1);
7192 
7193             for (gp = guest_argp, q = argp; gp;
7194                   gp += sizeof(abi_ulong), q++) {
7195                 if (get_user_ual(addr, gp))
7196                     goto execve_efault;
7197                 if (!addr)
7198                     break;
7199                 if (!(*q = lock_user_string(addr)))
7200                     goto execve_efault;
7201                 total_size += strlen(*q) + 1;
7202             }
7203             *q = NULL;
7204 
7205             for (gp = guest_envp, q = envp; gp;
7206                   gp += sizeof(abi_ulong), q++) {
7207                 if (get_user_ual(addr, gp))
7208                     goto execve_efault;
7209                 if (!addr)
7210                     break;
7211                 if (!(*q = lock_user_string(addr)))
7212                     goto execve_efault;
7213                 total_size += strlen(*q) + 1;
7214             }
7215             *q = NULL;
7216 
7217             if (!(p = lock_user_string(arg1)))
7218                 goto execve_efault;
7219             /* Although execve() is not an interruptible syscall it is
7220              * a special case where we must use the safe_syscall wrapper:
7221              * if we allow a signal to happen before we make the host
7222              * syscall then we will 'lose' it, because at the point of
7223              * execve the process leaves QEMU's control. So we use the
7224              * safe syscall wrapper to ensure that we either take the
7225              * signal as a guest signal, or else it does not happen
7226              * before the execve completes and makes it the other
7227              * program's problem.
7228              */
7229             ret = get_errno(safe_execve(p, argp, envp));
7230             unlock_user(p, arg1, 0);
7231 
7232             goto execve_end;
7233 
7234         execve_efault:
7235             ret = -TARGET_EFAULT;
7236 
7237         execve_end:
7238             for (gp = guest_argp, q = argp; *q;
7239                   gp += sizeof(abi_ulong), q++) {
7240                 if (get_user_ual(addr, gp)
7241                     || !addr)
7242                     break;
7243                 unlock_user(*q, addr, 0);
7244             }
7245             for (gp = guest_envp, q = envp; *q;
7246                   gp += sizeof(abi_ulong), q++) {
7247                 if (get_user_ual(addr, gp)
7248                     || !addr)
7249                     break;
7250                 unlock_user(*q, addr, 0);
7251             }
7252 
7253             g_free(argp);
7254             g_free(envp);
7255         }
7256         return ret;
7257     case TARGET_NR_chdir:
7258         if (!(p = lock_user_string(arg1)))
7259             return -TARGET_EFAULT;
7260         ret = get_errno(chdir(p));
7261         unlock_user(p, arg1, 0);
7262         return ret;
7263 #ifdef TARGET_NR_time
7264     case TARGET_NR_time:
7265         {
7266             time_t host_time;
7267             ret = get_errno(time(&host_time));
7268             if (!is_error(ret)
7269                 && arg1
7270                 && put_user_sal(host_time, arg1))
7271                 return -TARGET_EFAULT;
7272         }
7273         return ret;
7274 #endif
7275 #ifdef TARGET_NR_mknod
7276     case TARGET_NR_mknod:
7277         if (!(p = lock_user_string(arg1)))
7278             return -TARGET_EFAULT;
7279         ret = get_errno(mknod(p, arg2, arg3));
7280         unlock_user(p, arg1, 0);
7281         return ret;
7282 #endif
7283 #if defined(TARGET_NR_mknodat)
7284     case TARGET_NR_mknodat:
7285         if (!(p = lock_user_string(arg2)))
7286             return -TARGET_EFAULT;
7287         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7288         unlock_user(p, arg2, 0);
7289         return ret;
7290 #endif
7291 #ifdef TARGET_NR_chmod
7292     case TARGET_NR_chmod:
7293         if (!(p = lock_user_string(arg1)))
7294             return -TARGET_EFAULT;
7295         ret = get_errno(chmod(p, arg2));
7296         unlock_user(p, arg1, 0);
7297         return ret;
7298 #endif
7299 #ifdef TARGET_NR_lseek
7300     case TARGET_NR_lseek:
7301         return get_errno(lseek(arg1, arg2, arg3));
7302 #endif
7303 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7304     /* Alpha specific */
7305     case TARGET_NR_getxpid:
7306         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7307         return get_errno(getpid());
7308 #endif
7309 #ifdef TARGET_NR_getpid
7310     case TARGET_NR_getpid:
7311         return get_errno(getpid());
7312 #endif
7313     case TARGET_NR_mount:
7314         {
7315             /* need to look at the data field */
7316             void *p2, *p3;
7317 
7318             if (arg1) {
7319                 p = lock_user_string(arg1);
7320                 if (!p) {
7321                     return -TARGET_EFAULT;
7322                 }
7323             } else {
7324                 p = NULL;
7325             }
7326 
7327             p2 = lock_user_string(arg2);
7328             if (!p2) {
7329                 if (arg1) {
7330                     unlock_user(p, arg1, 0);
7331                 }
7332                 return -TARGET_EFAULT;
7333             }
7334 
7335             if (arg3) {
7336                 p3 = lock_user_string(arg3);
7337                 if (!p3) {
7338                     if (arg1) {
7339                         unlock_user(p, arg1, 0);
7340                     }
7341                     unlock_user(p2, arg2, 0);
7342                     return -TARGET_EFAULT;
7343                 }
7344             } else {
7345                 p3 = NULL;
7346             }
7347 
7348             /* FIXME - arg5 should be locked, but it isn't clear how to
7349              * do that since it's not guaranteed to be a NULL-terminated
7350              * string.
7351              */
7352             if (!arg5) {
7353                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7354             } else {
7355                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7356             }
7357             ret = get_errno(ret);
7358 
7359             if (arg1) {
7360                 unlock_user(p, arg1, 0);
7361             }
7362             unlock_user(p2, arg2, 0);
7363             if (arg3) {
7364                 unlock_user(p3, arg3, 0);
7365             }
7366         }
7367         return ret;
7368 #ifdef TARGET_NR_umount
7369     case TARGET_NR_umount:
7370         if (!(p = lock_user_string(arg1)))
7371             return -TARGET_EFAULT;
7372         ret = get_errno(umount(p));
7373         unlock_user(p, arg1, 0);
7374         return ret;
7375 #endif
7376 #ifdef TARGET_NR_stime /* not on alpha */
7377     case TARGET_NR_stime:
7378         {
7379             time_t host_time;
7380             if (get_user_sal(host_time, arg1))
7381                 return -TARGET_EFAULT;
7382             return get_errno(stime(&host_time));
7383         }
7384 #endif
7385 #ifdef TARGET_NR_alarm /* not on alpha */
7386     case TARGET_NR_alarm:
7387         return alarm(arg1);
7388 #endif
7389 #ifdef TARGET_NR_pause /* not on alpha */
7390     case TARGET_NR_pause:
7391         if (!block_signals()) {
7392             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7393         }
7394         return -TARGET_EINTR;
7395 #endif
7396 #ifdef TARGET_NR_utime
7397     case TARGET_NR_utime:
7398         {
7399             struct utimbuf tbuf, *host_tbuf;
7400             struct target_utimbuf *target_tbuf;
7401             if (arg2) {
7402                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7403                     return -TARGET_EFAULT;
7404                 tbuf.actime = tswapal(target_tbuf->actime);
7405                 tbuf.modtime = tswapal(target_tbuf->modtime);
7406                 unlock_user_struct(target_tbuf, arg2, 0);
7407                 host_tbuf = &tbuf;
7408             } else {
7409                 host_tbuf = NULL;
7410             }
7411             if (!(p = lock_user_string(arg1)))
7412                 return -TARGET_EFAULT;
7413             ret = get_errno(utime(p, host_tbuf));
7414             unlock_user(p, arg1, 0);
7415         }
7416         return ret;
7417 #endif
7418 #ifdef TARGET_NR_utimes
7419     case TARGET_NR_utimes:
7420         {
7421             struct timeval *tvp, tv[2];
7422             if (arg2) {
7423                 if (copy_from_user_timeval(&tv[0], arg2)
7424                     || copy_from_user_timeval(&tv[1],
7425                                               arg2 + sizeof(struct target_timeval)))
7426                     return -TARGET_EFAULT;
7427                 tvp = tv;
7428             } else {
7429                 tvp = NULL;
7430             }
7431             if (!(p = lock_user_string(arg1)))
7432                 return -TARGET_EFAULT;
7433             ret = get_errno(utimes(p, tvp));
7434             unlock_user(p, arg1, 0);
7435         }
7436         return ret;
7437 #endif
7438 #if defined(TARGET_NR_futimesat)
7439     case TARGET_NR_futimesat:
7440         {
7441             struct timeval *tvp, tv[2];
7442             if (arg3) {
7443                 if (copy_from_user_timeval(&tv[0], arg3)
7444                     || copy_from_user_timeval(&tv[1],
7445                                               arg3 + sizeof(struct target_timeval)))
7446                     return -TARGET_EFAULT;
7447                 tvp = tv;
7448             } else {
7449                 tvp = NULL;
7450             }
7451             if (!(p = lock_user_string(arg2))) {
7452                 return -TARGET_EFAULT;
7453             }
7454             ret = get_errno(futimesat(arg1, path(p), tvp));
7455             unlock_user(p, arg2, 0);
7456         }
7457         return ret;
7458 #endif
7459 #ifdef TARGET_NR_access
7460     case TARGET_NR_access:
7461         if (!(p = lock_user_string(arg1))) {
7462             return -TARGET_EFAULT;
7463         }
7464         ret = get_errno(access(path(p), arg2));
7465         unlock_user(p, arg1, 0);
7466         return ret;
7467 #endif
7468 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7469     case TARGET_NR_faccessat:
7470         if (!(p = lock_user_string(arg2))) {
7471             return -TARGET_EFAULT;
7472         }
7473         ret = get_errno(faccessat(arg1, p, arg3, 0));
7474         unlock_user(p, arg2, 0);
7475         return ret;
7476 #endif
7477 #ifdef TARGET_NR_nice /* not on alpha */
7478     case TARGET_NR_nice:
7479         return get_errno(nice(arg1));
7480 #endif
7481     case TARGET_NR_sync:
7482         sync();
7483         return 0;
7484 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7485     case TARGET_NR_syncfs:
7486         return get_errno(syncfs(arg1));
7487 #endif
7488     case TARGET_NR_kill:
7489         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7490 #ifdef TARGET_NR_rename
7491     case TARGET_NR_rename:
7492         {
7493             void *p2;
7494             p = lock_user_string(arg1);
7495             p2 = lock_user_string(arg2);
7496             if (!p || !p2)
7497                 ret = -TARGET_EFAULT;
7498             else
7499                 ret = get_errno(rename(p, p2));
7500             unlock_user(p2, arg2, 0);
7501             unlock_user(p, arg1, 0);
7502         }
7503         return ret;
7504 #endif
7505 #if defined(TARGET_NR_renameat)
7506     case TARGET_NR_renameat:
7507         {
7508             void *p2;
7509             p  = lock_user_string(arg2);
7510             p2 = lock_user_string(arg4);
7511             if (!p || !p2)
7512                 ret = -TARGET_EFAULT;
7513             else
7514                 ret = get_errno(renameat(arg1, p, arg3, p2));
7515             unlock_user(p2, arg4, 0);
7516             unlock_user(p, arg2, 0);
7517         }
7518         return ret;
7519 #endif
7520 #if defined(TARGET_NR_renameat2)
7521     case TARGET_NR_renameat2:
7522         {
7523             void *p2;
7524             p  = lock_user_string(arg2);
7525             p2 = lock_user_string(arg4);
7526             if (!p || !p2) {
7527                 ret = -TARGET_EFAULT;
7528             } else {
7529                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7530             }
7531             unlock_user(p2, arg4, 0);
7532             unlock_user(p, arg2, 0);
7533         }
7534         return ret;
7535 #endif
7536 #ifdef TARGET_NR_mkdir
7537     case TARGET_NR_mkdir:
7538         if (!(p = lock_user_string(arg1)))
7539             return -TARGET_EFAULT;
7540         ret = get_errno(mkdir(p, arg2));
7541         unlock_user(p, arg1, 0);
7542         return ret;
7543 #endif
7544 #if defined(TARGET_NR_mkdirat)
7545     case TARGET_NR_mkdirat:
7546         if (!(p = lock_user_string(arg2)))
7547             return -TARGET_EFAULT;
7548         ret = get_errno(mkdirat(arg1, p, arg3));
7549         unlock_user(p, arg2, 0);
7550         return ret;
7551 #endif
7552 #ifdef TARGET_NR_rmdir
7553     case TARGET_NR_rmdir:
7554         if (!(p = lock_user_string(arg1)))
7555             return -TARGET_EFAULT;
7556         ret = get_errno(rmdir(p));
7557         unlock_user(p, arg1, 0);
7558         return ret;
7559 #endif
7560     case TARGET_NR_dup:
7561         ret = get_errno(dup(arg1));
7562         if (ret >= 0) {
7563             fd_trans_dup(arg1, ret);
7564         }
7565         return ret;
7566 #ifdef TARGET_NR_pipe
7567     case TARGET_NR_pipe:
7568         return do_pipe(cpu_env, arg1, 0, 0);
7569 #endif
7570 #ifdef TARGET_NR_pipe2
7571     case TARGET_NR_pipe2:
7572         return do_pipe(cpu_env, arg1,
7573                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7574 #endif
7575     case TARGET_NR_times:
7576         {
7577             struct target_tms *tmsp;
7578             struct tms tms;
7579             ret = get_errno(times(&tms));
7580             if (arg1) {
7581                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7582                 if (!tmsp)
7583                     return -TARGET_EFAULT;
7584                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7585                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7586                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7587                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7588             }
7589             if (!is_error(ret))
7590                 ret = host_to_target_clock_t(ret);
7591         }
7592         return ret;
7593     case TARGET_NR_acct:
7594         if (arg1 == 0) {
7595             ret = get_errno(acct(NULL));
7596         } else {
7597             if (!(p = lock_user_string(arg1))) {
7598                 return -TARGET_EFAULT;
7599             }
7600             ret = get_errno(acct(path(p)));
7601             unlock_user(p, arg1, 0);
7602         }
7603         return ret;
7604 #ifdef TARGET_NR_umount2
7605     case TARGET_NR_umount2:
7606         if (!(p = lock_user_string(arg1)))
7607             return -TARGET_EFAULT;
7608         ret = get_errno(umount2(p, arg2));
7609         unlock_user(p, arg1, 0);
7610         return ret;
7611 #endif
7612     case TARGET_NR_ioctl:
7613         return do_ioctl(arg1, arg2, arg3);
7614 #ifdef TARGET_NR_fcntl
7615     case TARGET_NR_fcntl:
7616         return do_fcntl(arg1, arg2, arg3);
7617 #endif
7618     case TARGET_NR_setpgid:
7619         return get_errno(setpgid(arg1, arg2));
7620     case TARGET_NR_umask:
7621         return get_errno(umask(arg1));
7622     case TARGET_NR_chroot:
7623         if (!(p = lock_user_string(arg1)))
7624             return -TARGET_EFAULT;
7625         ret = get_errno(chroot(p));
7626         unlock_user(p, arg1, 0);
7627         return ret;
7628 #ifdef TARGET_NR_dup2
7629     case TARGET_NR_dup2:
7630         ret = get_errno(dup2(arg1, arg2));
7631         if (ret >= 0) {
7632             fd_trans_dup(arg1, arg2);
7633         }
7634         return ret;
7635 #endif
7636 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7637     case TARGET_NR_dup3:
7638     {
7639         int host_flags;
7640 
7641         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7642             return -EINVAL;
7643         }
7644         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7645         ret = get_errno(dup3(arg1, arg2, host_flags));
7646         if (ret >= 0) {
7647             fd_trans_dup(arg1, arg2);
7648         }
7649         return ret;
7650     }
7651 #endif
7652 #ifdef TARGET_NR_getppid /* not on alpha */
7653     case TARGET_NR_getppid:
7654         return get_errno(getppid());
7655 #endif
7656 #ifdef TARGET_NR_getpgrp
7657     case TARGET_NR_getpgrp:
7658         return get_errno(getpgrp());
7659 #endif
7660     case TARGET_NR_setsid:
7661         return get_errno(setsid());
7662 #ifdef TARGET_NR_sigaction
7663     case TARGET_NR_sigaction:
7664         {
7665 #if defined(TARGET_ALPHA)
7666             struct target_sigaction act, oact, *pact = 0;
7667             struct target_old_sigaction *old_act;
7668             if (arg2) {
7669                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7670                     return -TARGET_EFAULT;
7671                 act._sa_handler = old_act->_sa_handler;
7672                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7673                 act.sa_flags = old_act->sa_flags;
7674                 act.sa_restorer = 0;
7675                 unlock_user_struct(old_act, arg2, 0);
7676                 pact = &act;
7677             }
7678             ret = get_errno(do_sigaction(arg1, pact, &oact));
7679             if (!is_error(ret) && arg3) {
7680                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7681                     return -TARGET_EFAULT;
7682                 old_act->_sa_handler = oact._sa_handler;
7683                 old_act->sa_mask = oact.sa_mask.sig[0];
7684                 old_act->sa_flags = oact.sa_flags;
7685                 unlock_user_struct(old_act, arg3, 1);
7686             }
7687 #elif defined(TARGET_MIPS)
7688 	    struct target_sigaction act, oact, *pact, *old_act;
7689 
7690 	    if (arg2) {
7691                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7692                     return -TARGET_EFAULT;
7693 		act._sa_handler = old_act->_sa_handler;
7694 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7695 		act.sa_flags = old_act->sa_flags;
7696 		unlock_user_struct(old_act, arg2, 0);
7697 		pact = &act;
7698 	    } else {
7699 		pact = NULL;
7700 	    }
7701 
7702 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7703 
7704 	    if (!is_error(ret) && arg3) {
7705                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7706                     return -TARGET_EFAULT;
7707 		old_act->_sa_handler = oact._sa_handler;
7708 		old_act->sa_flags = oact.sa_flags;
7709 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7710 		old_act->sa_mask.sig[1] = 0;
7711 		old_act->sa_mask.sig[2] = 0;
7712 		old_act->sa_mask.sig[3] = 0;
7713 		unlock_user_struct(old_act, arg3, 1);
7714 	    }
7715 #else
7716             struct target_old_sigaction *old_act;
7717             struct target_sigaction act, oact, *pact;
7718             if (arg2) {
7719                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7720                     return -TARGET_EFAULT;
7721                 act._sa_handler = old_act->_sa_handler;
7722                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7723                 act.sa_flags = old_act->sa_flags;
7724                 act.sa_restorer = old_act->sa_restorer;
7725 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7726                 act.ka_restorer = 0;
7727 #endif
7728                 unlock_user_struct(old_act, arg2, 0);
7729                 pact = &act;
7730             } else {
7731                 pact = NULL;
7732             }
7733             ret = get_errno(do_sigaction(arg1, pact, &oact));
7734             if (!is_error(ret) && arg3) {
7735                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7736                     return -TARGET_EFAULT;
7737                 old_act->_sa_handler = oact._sa_handler;
7738                 old_act->sa_mask = oact.sa_mask.sig[0];
7739                 old_act->sa_flags = oact.sa_flags;
7740                 old_act->sa_restorer = oact.sa_restorer;
7741                 unlock_user_struct(old_act, arg3, 1);
7742             }
7743 #endif
7744         }
7745         return ret;
7746 #endif
7747     case TARGET_NR_rt_sigaction:
7748         {
7749 #if defined(TARGET_ALPHA)
7750             /* For Alpha and SPARC this is a 5 argument syscall, with
7751              * a 'restorer' parameter which must be copied into the
7752              * sa_restorer field of the sigaction struct.
7753              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7754              * and arg5 is the sigsetsize.
7755              * Alpha also has a separate rt_sigaction struct that it uses
7756              * here; SPARC uses the usual sigaction struct.
7757              */
7758             struct target_rt_sigaction *rt_act;
7759             struct target_sigaction act, oact, *pact = 0;
7760 
7761             if (arg4 != sizeof(target_sigset_t)) {
7762                 return -TARGET_EINVAL;
7763             }
7764             if (arg2) {
7765                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7766                     return -TARGET_EFAULT;
7767                 act._sa_handler = rt_act->_sa_handler;
7768                 act.sa_mask = rt_act->sa_mask;
7769                 act.sa_flags = rt_act->sa_flags;
7770                 act.sa_restorer = arg5;
7771                 unlock_user_struct(rt_act, arg2, 0);
7772                 pact = &act;
7773             }
7774             ret = get_errno(do_sigaction(arg1, pact, &oact));
7775             if (!is_error(ret) && arg3) {
7776                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7777                     return -TARGET_EFAULT;
7778                 rt_act->_sa_handler = oact._sa_handler;
7779                 rt_act->sa_mask = oact.sa_mask;
7780                 rt_act->sa_flags = oact.sa_flags;
7781                 unlock_user_struct(rt_act, arg3, 1);
7782             }
7783 #else
7784 #ifdef TARGET_SPARC
7785             target_ulong restorer = arg4;
7786             target_ulong sigsetsize = arg5;
7787 #else
7788             target_ulong sigsetsize = arg4;
7789 #endif
7790             struct target_sigaction *act;
7791             struct target_sigaction *oact;
7792 
7793             if (sigsetsize != sizeof(target_sigset_t)) {
7794                 return -TARGET_EINVAL;
7795             }
7796             if (arg2) {
7797                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7798                     return -TARGET_EFAULT;
7799                 }
7800 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7801                 act->ka_restorer = restorer;
7802 #endif
7803             } else {
7804                 act = NULL;
7805             }
7806             if (arg3) {
7807                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7808                     ret = -TARGET_EFAULT;
7809                     goto rt_sigaction_fail;
7810                 }
7811             } else
7812                 oact = NULL;
7813             ret = get_errno(do_sigaction(arg1, act, oact));
7814     rt_sigaction_fail:
7815             if (act)
7816                 unlock_user_struct(act, arg2, 0);
7817             if (oact)
7818                 unlock_user_struct(oact, arg3, 1);
7819 #endif
7820         }
7821         return ret;
7822 #ifdef TARGET_NR_sgetmask /* not on alpha */
7823     case TARGET_NR_sgetmask:
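             /* Legacy call: return the current blocked-signal mask as a
              * single word in the old (non-rt) sigset format. */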
7824         {
7825             sigset_t cur_set;
7826             abi_ulong target_set;
7827             ret = do_sigprocmask(0, NULL, &cur_set);
7828             if (!ret) {
7829                 host_to_target_old_sigset(&target_set, &cur_set);
7830                 ret = target_set;
7831             }
7832         }
7833         return ret;
7834 #endif
7835 #ifdef TARGET_NR_ssetmask /* not on alpha */
7836     case TARGET_NR_ssetmask:
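             /* Legacy call: install arg1 as the new blocked-signal mask and
              * return the previous mask, both in the old single-word format. */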
7837         {
7838             sigset_t set, oset;
7839             abi_ulong target_set = arg1;
7840             target_to_host_old_sigset(&set, &target_set);
7841             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7842             if (!ret) {
7843                 host_to_target_old_sigset(&target_set, &oset);
7844                 ret = target_set;
7845             }
7846         }
7847         return ret;
7848 #endif
7849 #ifdef TARGET_NR_sigprocmask
7850     case TARGET_NR_sigprocmask:
7851         {
7852 #if defined(TARGET_ALPHA)
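                 /* On Alpha the old sigprocmask passes the new mask by value
                  * in arg2 rather than through a pointer, and hands back the
                  * previous mask as the syscall result. */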
7853             sigset_t set, oldset;
7854             abi_ulong mask;
7855             int how;
7856 
7857             switch (arg1) {
7858             case TARGET_SIG_BLOCK:
7859                 how = SIG_BLOCK;
7860                 break;
7861             case TARGET_SIG_UNBLOCK:
7862                 how = SIG_UNBLOCK;
7863                 break;
7864             case TARGET_SIG_SETMASK:
7865                 how = SIG_SETMASK;
7866                 break;
7867             default:
7868                 return -TARGET_EINVAL;
7869             }
7870             mask = arg2;
7871             target_to_host_old_sigset(&set, &mask);
7872 
7873             ret = do_sigprocmask(how, &set, &oldset);
7874             if (!is_error(ret)) {
7875                 host_to_target_old_sigset(&mask, &oldset);
7876                 ret = mask;
7877                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7878             }
7879 #else
7880             sigset_t set, oldset, *set_ptr;
7881             int how;
7882 
7883             if (arg2) {
7884                 switch (arg1) {
7885                 case TARGET_SIG_BLOCK:
7886                     how = SIG_BLOCK;
7887                     break;
7888                 case TARGET_SIG_UNBLOCK:
7889                     how = SIG_UNBLOCK;
7890                     break;
7891                 case TARGET_SIG_SETMASK:
7892                     how = SIG_SETMASK;
7893                     break;
7894                 default:
7895                     return -TARGET_EINVAL;
7896                 }
7897                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7898                     return -TARGET_EFAULT;
7899                 target_to_host_old_sigset(&set, p);
7900                 unlock_user(p, arg2, 0);
7901                 set_ptr = &set;
7902             } else {
7903                 how = 0;
7904                 set_ptr = NULL;
7905             }
7906             ret = do_sigprocmask(how, set_ptr, &oldset);
7907             if (!is_error(ret) && arg3) {
7908                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7909                     return -TARGET_EFAULT;
7910                 host_to_target_old_sigset(p, &oldset);
7911                 unlock_user(p, arg3, sizeof(target_sigset_t));
7912             }
7913 #endif
7914         }
7915         return ret;
7916 #endif
7917     case TARGET_NR_rt_sigprocmask:
7918         {
7919             int how = arg1;
7920             sigset_t set, oldset, *set_ptr;
7921 
7922             if (arg4 != sizeof(target_sigset_t)) {
7923                 return -TARGET_EINVAL;
7924             }
7925 
7926             if (arg2) {
7927                 switch(how) {
7928                 case TARGET_SIG_BLOCK:
7929                     how = SIG_BLOCK;
7930                     break;
7931                 case TARGET_SIG_UNBLOCK:
7932                     how = SIG_UNBLOCK;
7933                     break;
7934                 case TARGET_SIG_SETMASK:
7935                     how = SIG_SETMASK;
7936                     break;
7937                 default:
7938                     return -TARGET_EINVAL;
7939                 }
7940                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7941                     return -TARGET_EFAULT;
7942                 target_to_host_sigset(&set, p);
7943                 unlock_user(p, arg2, 0);
7944                 set_ptr = &set;
7945             } else {
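                     /* No new set supplied: the mask is left unchanged and
                      * the kernel does not look at 'how', so pass 0. */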
7946                 how = 0;
7947                 set_ptr = NULL;
7948             }
7949             ret = do_sigprocmask(how, set_ptr, &oldset);
7950             if (!is_error(ret) && arg3) {
7951                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7952                     return -TARGET_EFAULT;
7953                 host_to_target_sigset(p, &oldset);
7954                 unlock_user(p, arg3, sizeof(target_sigset_t));
7955             }
7956         }
7957         return ret;
7958 #ifdef TARGET_NR_sigpending
7959     case TARGET_NR_sigpending:
7960         {
7961             sigset_t set;
7962             ret = get_errno(sigpending(&set));
7963             if (!is_error(ret)) {
7964                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7965                     return -TARGET_EFAULT;
7966                 host_to_target_old_sigset(p, &set);
7967                 unlock_user(p, arg1, sizeof(target_sigset_t));
7968             }
7969         }
7970         return ret;
7971 #endif
7972     case TARGET_NR_rt_sigpending:
7973         {
7974             sigset_t set;
7975 
7976             /* Yes, this check is >, not != like most. We follow the kernel's
7977              * logic and it does it like this because it implements
7978              * NR_sigpending through the same code path, and in that case
7979              * the old_sigset_t is smaller in size.
7980              */
7981             if (arg2 > sizeof(target_sigset_t)) {
7982                 return -TARGET_EINVAL;
7983             }
7984 
7985             ret = get_errno(sigpending(&set));
7986             if (!is_error(ret)) {
7987                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7988                     return -TARGET_EFAULT;
7989                 host_to_target_sigset(p, &set);
7990                 unlock_user(p, arg1, sizeof(target_sigset_t));
7991             }
7992         }
7993         return ret;
7994 #ifdef TARGET_NR_sigsuspend
7995     case TARGET_NR_sigsuspend:
7996         {
7997             TaskState *ts = cpu->opaque;
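                 /* Stash the requested mask in the TaskState so the signal
                  * delivery code knows a sigsuspend is in progress and can
                  * put the masks back in order once a handler has run. */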
7998 #if defined(TARGET_ALPHA)
7999             abi_ulong mask = arg1;
8000             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8001 #else
8002             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8003                 return -TARGET_EFAULT;
8004             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8005             unlock_user(p, arg1, 0);
8006 #endif
8007             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8008                                                SIGSET_T_SIZE));
8009             if (ret != -TARGET_ERESTARTSYS) {
8010                 ts->in_sigsuspend = 1;
8011             }
8012         }
8013         return ret;
8014 #endif
8015     case TARGET_NR_rt_sigsuspend:
8016         {
8017             TaskState *ts = cpu->opaque;
8018 
8019             if (arg2 != sizeof(target_sigset_t)) {
8020                 return -TARGET_EINVAL;
8021             }
8022             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8023                 return -TARGET_EFAULT;
8024             target_to_host_sigset(&ts->sigsuspend_mask, p);
8025             unlock_user(p, arg1, 0);
8026             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8027                                                SIGSET_T_SIZE));
8028             if (ret != -TARGET_ERESTARTSYS) {
8029                 ts->in_sigsuspend = 1;
8030             }
8031         }
8032         return ret;
8033     case TARGET_NR_rt_sigtimedwait:
8034         {
8035             sigset_t set;
8036             struct timespec uts, *puts;
8037             siginfo_t uinfo;
8038 
8039             if (arg4 != sizeof(target_sigset_t)) {
8040                 return -TARGET_EINVAL;
8041             }
8042 
8043             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8044                 return -TARGET_EFAULT;
8045             target_to_host_sigset(&set, p);
8046             unlock_user(p, arg1, 0);
8047             if (arg3) {
8048                 puts = &uts;
8049                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8050             } else {
8051                 puts = NULL;
8052             }
8053             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8054                                                  SIGSET_T_SIZE));
8055             if (!is_error(ret)) {
8056                 if (arg2) {
8057                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8058                                   0);
8059                     if (!p) {
8060                         return -TARGET_EFAULT;
8061                     }
8062                     host_to_target_siginfo(p, &uinfo);
8063                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8064                 }
8065                 ret = host_to_target_signal(ret);
8066             }
8067         }
8068         return ret;
8069     case TARGET_NR_rt_sigqueueinfo:
8070         {
8071             siginfo_t uinfo;
8072 
8073             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8074             if (!p) {
8075                 return -TARGET_EFAULT;
8076             }
8077             target_to_host_siginfo(&uinfo, p);
8078             unlock_user(p, arg3, 0);
8079             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8080         }
8081         return ret;
8082     case TARGET_NR_rt_tgsigqueueinfo:
8083         {
8084             siginfo_t uinfo;
8085 
8086             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8087             if (!p) {
8088                 return -TARGET_EFAULT;
8089             }
8090             target_to_host_siginfo(&uinfo, p);
8091             unlock_user(p, arg4, 0);
8092             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8093         }
8094         return ret;
8095 #ifdef TARGET_NR_sigreturn
8096     case TARGET_NR_sigreturn:
8097         if (block_signals()) {
8098             return -TARGET_ERESTARTSYS;
8099         }
8100         return do_sigreturn(cpu_env);
8101 #endif
8102     case TARGET_NR_rt_sigreturn:
8103         if (block_signals()) {
8104             return -TARGET_ERESTARTSYS;
8105         }
8106         return do_rt_sigreturn(cpu_env);
8107     case TARGET_NR_sethostname:
8108         if (!(p = lock_user_string(arg1)))
8109             return -TARGET_EFAULT;
8110         ret = get_errno(sethostname(p, arg2));
8111         unlock_user(p, arg1, 0);
8112         return ret;
8113 #ifdef TARGET_NR_setrlimit
8114     case TARGET_NR_setrlimit:
8115         {
8116             int resource = target_to_host_resource(arg1);
8117             struct target_rlimit *target_rlim;
8118             struct rlimit rlim;
8119             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8120                 return -TARGET_EFAULT;
8121             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8122             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8123             unlock_user_struct(target_rlim, arg2, 0);
8124             /*
8125              * If we just passed through resource limit settings for memory then
8126              * they would also apply to QEMU's own allocations, and QEMU will
8127              * crash or hang or die if its allocations fail. Ideally we would
8128              * track the guest allocations in QEMU and apply the limits ourselves.
8129              * For now, just tell the guest the call succeeded but don't actually
8130              * limit anything.
8131              */
8132             if (resource != RLIMIT_AS &&
8133                 resource != RLIMIT_DATA &&
8134                 resource != RLIMIT_STACK) {
8135                 return get_errno(setrlimit(resource, &rlim));
8136             } else {
8137                 return 0;
8138             }
8139         }
8140 #endif
8141 #ifdef TARGET_NR_getrlimit
8142     case TARGET_NR_getrlimit:
8143         {
8144             int resource = target_to_host_resource(arg1);
8145             struct target_rlimit *target_rlim;
8146             struct rlimit rlim;
8147 
8148             ret = get_errno(getrlimit(resource, &rlim));
8149             if (!is_error(ret)) {
8150                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8151                     return -TARGET_EFAULT;
8152                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8153                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8154                 unlock_user_struct(target_rlim, arg2, 1);
8155             }
8156         }
8157         return ret;
8158 #endif
8159     case TARGET_NR_getrusage:
8160         {
8161             struct rusage rusage;
8162             ret = get_errno(getrusage(arg1, &rusage));
8163             if (!is_error(ret)) {
8164                 ret = host_to_target_rusage(arg2, &rusage);
8165             }
8166         }
8167         return ret;
8168     case TARGET_NR_gettimeofday:
8169         {
8170             struct timeval tv;
8171             ret = get_errno(gettimeofday(&tv, NULL));
8172             if (!is_error(ret)) {
8173                 if (copy_to_user_timeval(arg1, &tv))
8174                     return -TARGET_EFAULT;
8175             }
8176         }
8177         return ret;
8178     case TARGET_NR_settimeofday:
8179         {
8180             struct timeval tv, *ptv = NULL;
8181             struct timezone tz, *ptz = NULL;
8182 
8183             if (arg1) {
8184                 if (copy_from_user_timeval(&tv, arg1)) {
8185                     return -TARGET_EFAULT;
8186                 }
8187                 ptv = &tv;
8188             }
8189 
8190             if (arg2) {
8191                 if (copy_from_user_timezone(&tz, arg2)) {
8192                     return -TARGET_EFAULT;
8193                 }
8194                 ptz = &tz;
8195             }
8196 
8197             return get_errno(settimeofday(ptv, ptz));
8198         }
8199 #if defined(TARGET_NR_select)
8200     case TARGET_NR_select:
8201 #if defined(TARGET_WANT_NI_OLD_SELECT)
8202         /* Some architectures used to implement old_select at this
8203          * syscall number but now return ENOSYS for it.
8204          */
8205         ret = -TARGET_ENOSYS;
8206 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8207         ret = do_old_select(arg1);
8208 #else
8209         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8210 #endif
8211         return ret;
8212 #endif
8213 #ifdef TARGET_NR_pselect6
8214     case TARGET_NR_pselect6:
8215         {
8216             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8217             fd_set rfds, wfds, efds;
8218             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8219             struct timespec ts, *ts_ptr;
8220 
8221             /*
8222              * The 6th arg is actually two args smashed together,
8223              * so we cannot use the C library.
8224              */
8225             sigset_t set;
8226             struct {
8227                 sigset_t *set;
8228                 size_t size;
8229             } sig, *sig_ptr;
8230 
8231             abi_ulong arg_sigset, arg_sigsize, *arg7;
8232             target_sigset_t *target_sigset;
8233 
8234             n = arg1;
8235             rfd_addr = arg2;
8236             wfd_addr = arg3;
8237             efd_addr = arg4;
8238             ts_addr = arg5;
8239 
8240             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8241             if (ret) {
8242                 return ret;
8243             }
8244             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8245             if (ret) {
8246                 return ret;
8247             }
8248             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8249             if (ret) {
8250                 return ret;
8251             }
8252 
8253             /*
8254              * This takes a timespec, and not a timeval, so we cannot
8255              * use the do_select() helper ...
8256              */
8257             if (ts_addr) {
8258                 if (target_to_host_timespec(&ts, ts_addr)) {
8259                     return -TARGET_EFAULT;
8260                 }
8261                 ts_ptr = &ts;
8262             } else {
8263                 ts_ptr = NULL;
8264             }
8265 
8266             /* Extract the two packed args for the sigset */
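                 /* The guest layout at arg6 is two abi_ulongs: the sigset
                  * pointer followed by the sigset size. */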
8267             if (arg6) {
8268                 sig_ptr = &sig;
8269                 sig.size = SIGSET_T_SIZE;
8270 
8271                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8272                 if (!arg7) {
8273                     return -TARGET_EFAULT;
8274                 }
8275                 arg_sigset = tswapal(arg7[0]);
8276                 arg_sigsize = tswapal(arg7[1]);
8277                 unlock_user(arg7, arg6, 0);
8278 
8279                 if (arg_sigset) {
8280                     sig.set = &set;
8281                     if (arg_sigsize != sizeof(*target_sigset)) {
8282                         /* Like the kernel, we enforce correct size sigsets */
8283                         return -TARGET_EINVAL;
8284                     }
8285                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8286                                               sizeof(*target_sigset), 1);
8287                     if (!target_sigset) {
8288                         return -TARGET_EFAULT;
8289                     }
8290                     target_to_host_sigset(&set, target_sigset);
8291                     unlock_user(target_sigset, arg_sigset, 0);
8292                 } else {
8293                     sig.set = NULL;
8294                 }
8295             } else {
8296                 sig_ptr = NULL;
8297             }
8298 
8299             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8300                                           ts_ptr, sig_ptr));
8301 
8302             if (!is_error(ret)) {
8303                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8304                     return -TARGET_EFAULT;
8305                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8306                     return -TARGET_EFAULT;
8307                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8308                     return -TARGET_EFAULT;
8309 
8310                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8311                     return -TARGET_EFAULT;
8312             }
8313         }
8314         return ret;
8315 #endif
8316 #ifdef TARGET_NR_symlink
8317     case TARGET_NR_symlink:
8318         {
8319             void *p2;
8320             p = lock_user_string(arg1);
8321             p2 = lock_user_string(arg2);
8322             if (!p || !p2)
8323                 ret = -TARGET_EFAULT;
8324             else
8325                 ret = get_errno(symlink(p, p2));
8326             unlock_user(p2, arg2, 0);
8327             unlock_user(p, arg1, 0);
8328         }
8329         return ret;
8330 #endif
8331 #if defined(TARGET_NR_symlinkat)
8332     case TARGET_NR_symlinkat:
8333         {
8334             void *p2;
8335             p  = lock_user_string(arg1);
8336             p2 = lock_user_string(arg3);
8337             if (!p || !p2)
8338                 ret = -TARGET_EFAULT;
8339             else
8340                 ret = get_errno(symlinkat(p, arg2, p2));
8341             unlock_user(p2, arg3, 0);
8342             unlock_user(p, arg1, 0);
8343         }
8344         return ret;
8345 #endif
8346 #ifdef TARGET_NR_readlink
8347     case TARGET_NR_readlink:
8348         {
8349             void *p2;
8350             p = lock_user_string(arg1);
8351             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8352             if (!p || !p2) {
8353                 ret = -TARGET_EFAULT;
8354             } else if (!arg3) {
8355                 /* Short circuit this for the magic exe check. */
8356                 ret = -TARGET_EINVAL;
8357             } else if (is_proc_myself((const char *)p, "exe")) {
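                     /* The guest is reading the link for its own executable:
                      * report the path of the emulated binary (exec_path),
                      * not the QEMU binary itself. */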
8358                 char real[PATH_MAX], *temp;
8359                 temp = realpath(exec_path, real);
8360                 /* Return value is # of bytes that we wrote to the buffer. */
8361                 if (temp == NULL) {
8362                     ret = get_errno(-1);
8363                 } else {
8364                     /* Don't worry about sign mismatch as earlier mapping
8365                      * logic would have thrown a bad address error. */
8366                     ret = MIN(strlen(real), arg3);
8367                     /* We cannot NUL terminate the string. */
8368                     memcpy(p2, real, ret);
8369                 }
8370             } else {
8371                 ret = get_errno(readlink(path(p), p2, arg3));
8372             }
8373             unlock_user(p2, arg2, ret);
8374             unlock_user(p, arg1, 0);
8375         }
8376         return ret;
8377 #endif
8378 #if defined(TARGET_NR_readlinkat)
8379     case TARGET_NR_readlinkat:
8380         {
8381             void *p2;
8382             p  = lock_user_string(arg2);
8383             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8384             if (!p || !p2) {
8385                 ret = -TARGET_EFAULT;
8386             } else if (is_proc_myself((const char *)p, "exe")) {
8387                 char real[PATH_MAX], *temp;
8388                 temp = realpath(exec_path, real);
8389                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8390                 snprintf((char *)p2, arg4, "%s", real);
8391             } else {
8392                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8393             }
8394             unlock_user(p2, arg3, ret);
8395             unlock_user(p, arg2, 0);
8396         }
8397         return ret;
8398 #endif
8399 #ifdef TARGET_NR_swapon
8400     case TARGET_NR_swapon:
8401         if (!(p = lock_user_string(arg1)))
8402             return -TARGET_EFAULT;
8403         ret = get_errno(swapon(p, arg2));
8404         unlock_user(p, arg1, 0);
8405         return ret;
8406 #endif
8407     case TARGET_NR_reboot:
8408         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8409            /* arg4 (the restart command string) is only valid for
                   LINUX_REBOOT_CMD_RESTART2 and must be ignored otherwise. */
8410            p = lock_user_string(arg4);
8411            if (!p) {
8412                return -TARGET_EFAULT;
8413            }
8414            ret = get_errno(reboot(arg1, arg2, arg3, p));
8415            unlock_user(p, arg4, 0);
8416         } else {
8417            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8418         }
8419         return ret;
8420 #ifdef TARGET_NR_mmap
8421     case TARGET_NR_mmap:
8422 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8423     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8424     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8425     || defined(TARGET_S390X)
8426         {
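                 /* On these targets the legacy mmap takes a single pointer to
                  * a block of six arguments in guest memory rather than
                  * passing them in registers. */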
8427             abi_ulong *v;
8428             abi_ulong v1, v2, v3, v4, v5, v6;
8429             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8430                 return -TARGET_EFAULT;
8431             v1 = tswapal(v[0]);
8432             v2 = tswapal(v[1]);
8433             v3 = tswapal(v[2]);
8434             v4 = tswapal(v[3]);
8435             v5 = tswapal(v[4]);
8436             v6 = tswapal(v[5]);
8437             unlock_user(v, arg1, 0);
8438             ret = get_errno(target_mmap(v1, v2, v3,
8439                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8440                                         v5, v6));
8441         }
8442 #else
8443         ret = get_errno(target_mmap(arg1, arg2, arg3,
8444                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8445                                     arg5,
8446                                     arg6));
8447 #endif
8448         return ret;
8449 #endif
8450 #ifdef TARGET_NR_mmap2
8451     case TARGET_NR_mmap2:
8452 #ifndef MMAP_SHIFT
8453 #define MMAP_SHIFT 12
8454 #endif
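             /* The last argument of mmap2 is a file offset counted in units
              * of (1 << MMAP_SHIFT) bytes, i.e. 4096-byte pages by default. */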
8455         ret = target_mmap(arg1, arg2, arg3,
8456                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8457                           arg5, arg6 << MMAP_SHIFT);
8458         return get_errno(ret);
8459 #endif
8460     case TARGET_NR_munmap:
8461         return get_errno(target_munmap(arg1, arg2));
8462     case TARGET_NR_mprotect:
8463         {
8464             TaskState *ts = cpu->opaque;
8465             /* Special hack to detect libc making the stack executable.  */
8466             if ((arg3 & PROT_GROWSDOWN)
8467                 && arg1 >= ts->info->stack_limit
8468                 && arg1 <= ts->info->start_stack) {
8469                 arg3 &= ~PROT_GROWSDOWN;
8470                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8471                 arg1 = ts->info->stack_limit;
8472             }
8473         }
8474         return get_errno(target_mprotect(arg1, arg2, arg3));
8475 #ifdef TARGET_NR_mremap
8476     case TARGET_NR_mremap:
8477         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8478 #endif
8479         /* ??? msync/mlock/munlock are broken for softmmu.  */
8480 #ifdef TARGET_NR_msync
8481     case TARGET_NR_msync:
8482         return get_errno(msync(g2h(arg1), arg2, arg3));
8483 #endif
8484 #ifdef TARGET_NR_mlock
8485     case TARGET_NR_mlock:
8486         return get_errno(mlock(g2h(arg1), arg2));
8487 #endif
8488 #ifdef TARGET_NR_munlock
8489     case TARGET_NR_munlock:
8490         return get_errno(munlock(g2h(arg1), arg2));
8491 #endif
8492 #ifdef TARGET_NR_mlockall
8493     case TARGET_NR_mlockall:
8494         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8495 #endif
8496 #ifdef TARGET_NR_munlockall
8497     case TARGET_NR_munlockall:
8498         return get_errno(munlockall());
8499 #endif
8500 #ifdef TARGET_NR_truncate
8501     case TARGET_NR_truncate:
8502         if (!(p = lock_user_string(arg1)))
8503             return -TARGET_EFAULT;
8504         ret = get_errno(truncate(p, arg2));
8505         unlock_user(p, arg1, 0);
8506         return ret;
8507 #endif
8508 #ifdef TARGET_NR_ftruncate
8509     case TARGET_NR_ftruncate:
8510         return get_errno(ftruncate(arg1, arg2));
8511 #endif
8512     case TARGET_NR_fchmod:
8513         return get_errno(fchmod(arg1, arg2));
8514 #if defined(TARGET_NR_fchmodat)
8515     case TARGET_NR_fchmodat:
8516         if (!(p = lock_user_string(arg2)))
8517             return -TARGET_EFAULT;
8518         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8519         unlock_user(p, arg2, 0);
8520         return ret;
8521 #endif
8522     case TARGET_NR_getpriority:
8523         /* Note that negative values are valid for getpriority, so we must
8524            differentiate based on errno settings.  */
8525         errno = 0;
8526         ret = getpriority(arg1, arg2);
8527         if (ret == -1 && errno != 0) {
8528             return -host_to_target_errno(errno);
8529         }
8530 #ifdef TARGET_ALPHA
8531         /* Return value is the unbiased priority.  Signal no error.  */
8532         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8533 #else
8534         /* Return value is a biased priority to avoid negative numbers.  */
8535         ret = 20 - ret;
8536 #endif
8537         return ret;
8538     case TARGET_NR_setpriority:
8539         return get_errno(setpriority(arg1, arg2, arg3));
8540 #ifdef TARGET_NR_statfs
8541     case TARGET_NR_statfs:
8542         if (!(p = lock_user_string(arg1))) {
8543             return -TARGET_EFAULT;
8544         }
8545         ret = get_errno(statfs(path(p), &stfs));
8546         unlock_user(p, arg1, 0);
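         /* Conversion code shared with TARGET_NR_fstatfs, which jumps here
          * after its fstatfs() call. */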
8547     convert_statfs:
8548         if (!is_error(ret)) {
8549             struct target_statfs *target_stfs;
8550 
8551             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8552                 return -TARGET_EFAULT;
8553             __put_user(stfs.f_type, &target_stfs->f_type);
8554             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8555             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8556             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8557             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8558             __put_user(stfs.f_files, &target_stfs->f_files);
8559             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8560             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8561             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8562             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8563             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8564 #ifdef _STATFS_F_FLAGS
8565             __put_user(stfs.f_flags, &target_stfs->f_flags);
8566 #else
8567             __put_user(0, &target_stfs->f_flags);
8568 #endif
8569             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8570             unlock_user_struct(target_stfs, arg2, 1);
8571         }
8572         return ret;
8573 #endif
8574 #ifdef TARGET_NR_fstatfs
8575     case TARGET_NR_fstatfs:
8576         ret = get_errno(fstatfs(arg1, &stfs));
8577         goto convert_statfs;
8578 #endif
8579 #ifdef TARGET_NR_statfs64
8580     case TARGET_NR_statfs64:
8581         if (!(p = lock_user_string(arg1))) {
8582             return -TARGET_EFAULT;
8583         }
8584         ret = get_errno(statfs(path(p), &stfs));
8585         unlock_user(p, arg1, 0);
8586     convert_statfs64:
8587         if (!is_error(ret)) {
8588             struct target_statfs64 *target_stfs;
8589 
8590             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8591                 return -TARGET_EFAULT;
8592             __put_user(stfs.f_type, &target_stfs->f_type);
8593             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8594             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8595             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8596             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8597             __put_user(stfs.f_files, &target_stfs->f_files);
8598             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8599             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8600             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8601             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8602             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8603             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8604             unlock_user_struct(target_stfs, arg3, 1);
8605         }
8606         return ret;
8607     case TARGET_NR_fstatfs64:
8608         ret = get_errno(fstatfs(arg1, &stfs));
8609         goto convert_statfs64;
8610 #endif
8611 #ifdef TARGET_NR_socketcall
8612     case TARGET_NR_socketcall:
8613         return do_socketcall(arg1, arg2);
8614 #endif
8615 #ifdef TARGET_NR_accept
8616     case TARGET_NR_accept:
8617         return do_accept4(arg1, arg2, arg3, 0);
8618 #endif
8619 #ifdef TARGET_NR_accept4
8620     case TARGET_NR_accept4:
8621         return do_accept4(arg1, arg2, arg3, arg4);
8622 #endif
8623 #ifdef TARGET_NR_bind
8624     case TARGET_NR_bind:
8625         return do_bind(arg1, arg2, arg3);
8626 #endif
8627 #ifdef TARGET_NR_connect
8628     case TARGET_NR_connect:
8629         return do_connect(arg1, arg2, arg3);
8630 #endif
8631 #ifdef TARGET_NR_getpeername
8632     case TARGET_NR_getpeername:
8633         return do_getpeername(arg1, arg2, arg3);
8634 #endif
8635 #ifdef TARGET_NR_getsockname
8636     case TARGET_NR_getsockname:
8637         return do_getsockname(arg1, arg2, arg3);
8638 #endif
8639 #ifdef TARGET_NR_getsockopt
8640     case TARGET_NR_getsockopt:
8641         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8642 #endif
8643 #ifdef TARGET_NR_listen
8644     case TARGET_NR_listen:
8645         return get_errno(listen(arg1, arg2));
8646 #endif
8647 #ifdef TARGET_NR_recv
8648     case TARGET_NR_recv:
8649         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8650 #endif
8651 #ifdef TARGET_NR_recvfrom
8652     case TARGET_NR_recvfrom:
8653         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8654 #endif
8655 #ifdef TARGET_NR_recvmsg
8656     case TARGET_NR_recvmsg:
8657         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8658 #endif
8659 #ifdef TARGET_NR_send
8660     case TARGET_NR_send:
8661         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8662 #endif
8663 #ifdef TARGET_NR_sendmsg
8664     case TARGET_NR_sendmsg:
8665         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8666 #endif
8667 #ifdef TARGET_NR_sendmmsg
8668     case TARGET_NR_sendmmsg:
8669         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8670     case TARGET_NR_recvmmsg:
8671         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8672 #endif
8673 #ifdef TARGET_NR_sendto
8674     case TARGET_NR_sendto:
8675         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8676 #endif
8677 #ifdef TARGET_NR_shutdown
8678     case TARGET_NR_shutdown:
8679         return get_errno(shutdown(arg1, arg2));
8680 #endif
8681 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8682     case TARGET_NR_getrandom:
8683         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8684         if (!p) {
8685             return -TARGET_EFAULT;
8686         }
8687         ret = get_errno(getrandom(p, arg2, arg3));
8688         unlock_user(p, arg1, ret);
8689         return ret;
8690 #endif
8691 #ifdef TARGET_NR_socket
8692     case TARGET_NR_socket:
8693         return do_socket(arg1, arg2, arg3);
8694 #endif
8695 #ifdef TARGET_NR_socketpair
8696     case TARGET_NR_socketpair:
8697         return do_socketpair(arg1, arg2, arg3, arg4);
8698 #endif
8699 #ifdef TARGET_NR_setsockopt
8700     case TARGET_NR_setsockopt:
8701         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8702 #endif
8703 #if defined(TARGET_NR_syslog)
8704     case TARGET_NR_syslog:
8705         {
8706             int len = arg2;
8707 
8708             switch (arg1) {
8709             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8710             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8711             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8712             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8713             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8714             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8715             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8716             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8717                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8718             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8719             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8720             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8721                 {
8722                     if (len < 0) {
8723                         return -TARGET_EINVAL;
8724                     }
8725                     if (len == 0) {
8726                         return 0;
8727                     }
8728                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8729                     if (!p) {
8730                         return -TARGET_EFAULT;
8731                     }
8732                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8733                     unlock_user(p, arg2, arg3);
8734                 }
8735                 return ret;
8736             default:
8737                 return -TARGET_EINVAL;
8738             }
8739         }
8740         break;
8741 #endif
8742     case TARGET_NR_setitimer:
8743         {
8744             struct itimerval value, ovalue, *pvalue;
8745 
8746             if (arg2) {
8747                 pvalue = &value;
8748                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8749                     || copy_from_user_timeval(&pvalue->it_value,
8750                                               arg2 + sizeof(struct target_timeval)))
8751                     return -TARGET_EFAULT;
8752             } else {
8753                 pvalue = NULL;
8754             }
8755             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8756             if (!is_error(ret) && arg3) {
8757                 if (copy_to_user_timeval(arg3,
8758                                          &ovalue.it_interval)
8759                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8760                                             &ovalue.it_value))
8761                     return -TARGET_EFAULT;
8762             }
8763         }
8764         return ret;
8765     case TARGET_NR_getitimer:
8766         {
8767             struct itimerval value;
8768 
8769             ret = get_errno(getitimer(arg1, &value));
8770             if (!is_error(ret) && arg2) {
8771                 if (copy_to_user_timeval(arg2,
8772                                          &value.it_interval)
8773                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8774                                             &value.it_value))
8775                     return -TARGET_EFAULT;
8776             }
8777         }
8778         return ret;
8779 #ifdef TARGET_NR_stat
8780     case TARGET_NR_stat:
8781         if (!(p = lock_user_string(arg1))) {
8782             return -TARGET_EFAULT;
8783         }
8784         ret = get_errno(stat(path(p), &st));
8785         unlock_user(p, arg1, 0);
8786         goto do_stat;
8787 #endif
8788 #ifdef TARGET_NR_lstat
8789     case TARGET_NR_lstat:
8790         if (!(p = lock_user_string(arg1))) {
8791             return -TARGET_EFAULT;
8792         }
8793         ret = get_errno(lstat(path(p), &st));
8794         unlock_user(p, arg1, 0);
8795         goto do_stat;
8796 #endif
8797 #ifdef TARGET_NR_fstat
8798     case TARGET_NR_fstat:
8799         {
8800             ret = get_errno(fstat(arg1, &st));
8801 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8802         do_stat:
8803 #endif
8804             if (!is_error(ret)) {
8805                 struct target_stat *target_st;
8806 
8807                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8808                     return -TARGET_EFAULT;
8809                 memset(target_st, 0, sizeof(*target_st));
8810                 __put_user(st.st_dev, &target_st->st_dev);
8811                 __put_user(st.st_ino, &target_st->st_ino);
8812                 __put_user(st.st_mode, &target_st->st_mode);
8813                 __put_user(st.st_uid, &target_st->st_uid);
8814                 __put_user(st.st_gid, &target_st->st_gid);
8815                 __put_user(st.st_nlink, &target_st->st_nlink);
8816                 __put_user(st.st_rdev, &target_st->st_rdev);
8817                 __put_user(st.st_size, &target_st->st_size);
8818                 __put_user(st.st_blksize, &target_st->st_blksize);
8819                 __put_user(st.st_blocks, &target_st->st_blocks);
8820                 __put_user(st.st_atime, &target_st->target_st_atime);
8821                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8822                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8823                 unlock_user_struct(target_st, arg2, 1);
8824             }
8825         }
8826         return ret;
8827 #endif
8828     case TARGET_NR_vhangup:
8829         return get_errno(vhangup());
8830 #ifdef TARGET_NR_syscall
8831     case TARGET_NR_syscall:
8832         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8833                           arg6, arg7, arg8, 0);
8834 #endif
8835     case TARGET_NR_wait4:
8836         {
8837             int status;
8838             abi_long status_ptr = arg2;
8839             struct rusage rusage, *rusage_ptr;
8840             abi_ulong target_rusage = arg4;
8841             abi_long rusage_err;
8842             if (target_rusage)
8843                 rusage_ptr = &rusage;
8844             else
8845                 rusage_ptr = NULL;
8846             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8847             if (!is_error(ret)) {
8848                 if (status_ptr && ret) {
8849                     status = host_to_target_waitstatus(status);
8850                     if (put_user_s32(status, status_ptr))
8851                         return -TARGET_EFAULT;
8852                 }
8853                 if (target_rusage) {
8854                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8855                     if (rusage_err) {
8856                         ret = rusage_err;
8857                     }
8858                 }
8859             }
8860         }
8861         return ret;
8862 #ifdef TARGET_NR_swapoff
8863     case TARGET_NR_swapoff:
8864         if (!(p = lock_user_string(arg1)))
8865             return -TARGET_EFAULT;
8866         ret = get_errno(swapoff(p));
8867         unlock_user(p, arg1, 0);
8868         return ret;
8869 #endif
8870     case TARGET_NR_sysinfo:
8871         {
8872             struct target_sysinfo *target_value;
8873             struct sysinfo value;
8874             ret = get_errno(sysinfo(&value));
8875             if (!is_error(ret) && arg1)
8876             {
8877                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8878                     return -TARGET_EFAULT;
8879                 __put_user(value.uptime, &target_value->uptime);
8880                 __put_user(value.loads[0], &target_value->loads[0]);
8881                 __put_user(value.loads[1], &target_value->loads[1]);
8882                 __put_user(value.loads[2], &target_value->loads[2]);
8883                 __put_user(value.totalram, &target_value->totalram);
8884                 __put_user(value.freeram, &target_value->freeram);
8885                 __put_user(value.sharedram, &target_value->sharedram);
8886                 __put_user(value.bufferram, &target_value->bufferram);
8887                 __put_user(value.totalswap, &target_value->totalswap);
8888                 __put_user(value.freeswap, &target_value->freeswap);
8889                 __put_user(value.procs, &target_value->procs);
8890                 __put_user(value.totalhigh, &target_value->totalhigh);
8891                 __put_user(value.freehigh, &target_value->freehigh);
8892                 __put_user(value.mem_unit, &target_value->mem_unit);
8893                 unlock_user_struct(target_value, arg1, 1);
8894             }
8895         }
8896         return ret;
8897 #ifdef TARGET_NR_ipc
8898     case TARGET_NR_ipc:
8899         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8900 #endif
8901 #ifdef TARGET_NR_semget
8902     case TARGET_NR_semget:
8903         return get_errno(semget(arg1, arg2, arg3));
8904 #endif
8905 #ifdef TARGET_NR_semop
8906     case TARGET_NR_semop:
8907         return do_semop(arg1, arg2, arg3);
8908 #endif
8909 #ifdef TARGET_NR_semctl
8910     case TARGET_NR_semctl:
8911         return do_semctl(arg1, arg2, arg3, arg4);
8912 #endif
8913 #ifdef TARGET_NR_msgctl
8914     case TARGET_NR_msgctl:
8915         return do_msgctl(arg1, arg2, arg3);
8916 #endif
8917 #ifdef TARGET_NR_msgget
8918     case TARGET_NR_msgget:
8919         return get_errno(msgget(arg1, arg2));
8920 #endif
8921 #ifdef TARGET_NR_msgrcv
8922     case TARGET_NR_msgrcv:
8923         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8924 #endif
8925 #ifdef TARGET_NR_msgsnd
8926     case TARGET_NR_msgsnd:
8927         return do_msgsnd(arg1, arg2, arg3, arg4);
8928 #endif
8929 #ifdef TARGET_NR_shmget
8930     case TARGET_NR_shmget:
8931         return get_errno(shmget(arg1, arg2, arg3));
8932 #endif
8933 #ifdef TARGET_NR_shmctl
8934     case TARGET_NR_shmctl:
8935         return do_shmctl(arg1, arg2, arg3);
8936 #endif
8937 #ifdef TARGET_NR_shmat
8938     case TARGET_NR_shmat:
8939         return do_shmat(cpu_env, arg1, arg2, arg3);
8940 #endif
8941 #ifdef TARGET_NR_shmdt
8942     case TARGET_NR_shmdt:
8943         return do_shmdt(arg1);
8944 #endif
8945     case TARGET_NR_fsync:
8946         return get_errno(fsync(arg1));
8947     case TARGET_NR_clone:
8948         /* Linux manages to have three different orderings for its
8949          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8950          * match the kernel's CONFIG_CLONE_* settings.
8951          * Microblaze is further special in that it uses a sixth
8952          * implicit argument to clone for the TLS pointer.
8953          */
8954 #if defined(TARGET_MICROBLAZE)
8955         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8956 #elif defined(TARGET_CLONE_BACKWARDS)
8957         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8958 #elif defined(TARGET_CLONE_BACKWARDS2)
8959         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8960 #else
8961         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8962 #endif
8963         return ret;
8964 #ifdef __NR_exit_group
8965         /* new thread calls */
8966     case TARGET_NR_exit_group:
8967         preexit_cleanup(cpu_env, arg1);
8968         return get_errno(exit_group(arg1));
8969 #endif
8970     case TARGET_NR_setdomainname:
8971         if (!(p = lock_user_string(arg1)))
8972             return -TARGET_EFAULT;
8973         ret = get_errno(setdomainname(p, arg2));
8974         unlock_user(p, arg1, 0);
8975         return ret;
8976     case TARGET_NR_uname:
8977         /* no need to transcode because we use the linux syscall */
8978         {
8979             struct new_utsname * buf;
8980 
8981             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8982                 return -TARGET_EFAULT;
8983             ret = get_errno(sys_uname(buf));
8984             if (!is_error(ret)) {
8985                 /* Overwrite the native machine name with whatever is being
8986                    emulated. */
8987                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8988                           sizeof(buf->machine));
8989                 /* Allow the user to override the reported release.  */
8990                 if (qemu_uname_release && *qemu_uname_release) {
8991                     g_strlcpy(buf->release, qemu_uname_release,
8992                               sizeof(buf->release));
8993                 }
8994             }
8995             unlock_user_struct(buf, arg1, 1);
8996         }
8997         return ret;
8998 #ifdef TARGET_I386
8999     case TARGET_NR_modify_ldt:
9000         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9001 #if !defined(TARGET_X86_64)
9002     case TARGET_NR_vm86:
9003         return do_vm86(cpu_env, arg1, arg2);
9004 #endif
9005 #endif
9006     case TARGET_NR_adjtimex:
9007         {
9008             struct timex host_buf;
9009 
9010             if (target_to_host_timex(&host_buf, arg1) != 0) {
9011                 return -TARGET_EFAULT;
9012             }
9013             ret = get_errno(adjtimex(&host_buf));
9014             if (!is_error(ret)) {
9015                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9016                     return -TARGET_EFAULT;
9017                 }
9018             }
9019         }
9020         return ret;
9021 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9022     case TARGET_NR_clock_adjtime:
9023         {
9024             struct timex htx, *phtx = &htx;
9025 
9026             if (target_to_host_timex(phtx, arg2) != 0) {
9027                 return -TARGET_EFAULT;
9028             }
9029             ret = get_errno(clock_adjtime(arg1, phtx));
9030             if (!is_error(ret) && phtx) {
9031                 if (host_to_target_timex(arg2, phtx) != 0) {
9032                     return -TARGET_EFAULT;
9033                 }
9034             }
9035         }
9036         return ret;
9037 #endif
9038     case TARGET_NR_getpgid:
9039         return get_errno(getpgid(arg1));
9040     case TARGET_NR_fchdir:
9041         return get_errno(fchdir(arg1));
9042     case TARGET_NR_personality:
9043         return get_errno(personality(arg1));
9044 #ifdef TARGET_NR__llseek /* Not on alpha */
9045     case TARGET_NR__llseek:
9046         {
9047             int64_t res;
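                 /* Hosts without __NR_llseek (typically 64-bit hosts) can do
                  * the whole 64-bit seek with a single lseek() call. */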
9048 #if !defined(__NR_llseek)
9049             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9050             if (res == -1) {
9051                 ret = get_errno(res);
9052             } else {
9053                 ret = 0;
9054             }
9055 #else
9056             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9057 #endif
9058             if ((ret == 0) && put_user_s64(res, arg4)) {
9059                 return -TARGET_EFAULT;
9060             }
9061         }
9062         return ret;
9063 #endif
9064 #ifdef TARGET_NR_getdents
9065     case TARGET_NR_getdents:
9066 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9067 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9068         {
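                 /* 32-bit target on a 64-bit host: the host linux_dirent
                  * records use 64-bit d_ino/d_off, so each entry is read into
                  * a bounce buffer and repacked into the target_dirent
                  * layout. */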
9069             struct target_dirent *target_dirp;
9070             struct linux_dirent *dirp;
9071             abi_long count = arg3;
9072 
9073             dirp = g_try_malloc(count);
9074             if (!dirp) {
9075                 return -TARGET_ENOMEM;
9076             }
9077 
9078             ret = get_errno(sys_getdents(arg1, dirp, count));
9079             if (!is_error(ret)) {
9080                 struct linux_dirent *de;
9081                 struct target_dirent *tde;
9082                 int len = ret;
9083                 int reclen, treclen;
9084                 int count1, tnamelen;
9085 
9086                 count1 = 0;
9087                 de = dirp;
9088                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
9089                     g_free(dirp);
                         return -TARGET_EFAULT;
                     }
9090                 tde = target_dirp;
9091                 while (len > 0) {
9092                     reclen = de->d_reclen;
9093                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9094                     assert(tnamelen >= 0);
9095                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9096                     assert(count1 + treclen <= count);
9097                     tde->d_reclen = tswap16(treclen);
9098                     tde->d_ino = tswapal(de->d_ino);
9099                     tde->d_off = tswapal(de->d_off);
9100                     memcpy(tde->d_name, de->d_name, tnamelen);
9101                     de = (struct linux_dirent *)((char *)de + reclen);
9102                     len -= reclen;
9103                     tde = (struct target_dirent *)((char *)tde + treclen);
9104                     count1 += treclen;
9105                 }
9106                 ret = count1;
9107                 unlock_user(target_dirp, arg2, ret);
9108             }
9109             g_free(dirp);
9110         }
9111 #else
9112         {
9113             struct linux_dirent *dirp;
9114             abi_long count = arg3;
9115 
9116             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9117                 return -TARGET_EFAULT;
9118             ret = get_errno(sys_getdents(arg1, dirp, count));
9119             if (!is_error(ret)) {
9120                 struct linux_dirent *de;
9121                 int len = ret;
9122                 int reclen;
9123                 de = dirp;
9124                 while (len > 0) {
9125                     reclen = de->d_reclen;
9126                     if (reclen > len)
9127                         break;
9128                     de->d_reclen = tswap16(reclen);
9129                     tswapls(&de->d_ino);
9130                     tswapls(&de->d_off);
9131                     de = (struct linux_dirent *)((char *)de + reclen);
9132                     len -= reclen;
9133                 }
9134             }
9135             unlock_user(dirp, arg2, ret);
9136         }
9137 #endif
9138 #else
9139         /* Implement getdents in terms of getdents64 */
9140         {
9141             struct linux_dirent64 *dirp;
9142             abi_long count = arg3;
9143 
9144             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9145             if (!dirp) {
9146                 return -TARGET_EFAULT;
9147             }
9148             ret = get_errno(sys_getdents64(arg1, dirp, count));
9149             if (!is_error(ret)) {
9150                 /* Convert the dirent64 structs to target dirent.  We do this
9151                  * in-place, since we can guarantee that a target_dirent is no
9152                  * larger than a dirent64; however this means we have to be
9153                  * careful to read everything before writing in the new format.
9154                  */
9155                 struct linux_dirent64 *de;
9156                 struct target_dirent *tde;
9157                 int len = ret;
9158                 int tlen = 0;
9159 
9160                 de = dirp;
9161                 tde = (struct target_dirent *)dirp;
9162                 while (len > 0) {
9163                     int namelen, treclen;
9164                     int reclen = de->d_reclen;
9165                     uint64_t ino = de->d_ino;
9166                     int64_t off = de->d_off;
9167                     uint8_t type = de->d_type;
9168 
9169                     namelen = strlen(de->d_name);
9170                     treclen = offsetof(struct target_dirent, d_name)
9171                         + namelen + 2;
9172                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9173 
9174                     memmove(tde->d_name, de->d_name, namelen + 1);
9175                     tde->d_ino = tswapal(ino);
9176                     tde->d_off = tswapal(off);
9177                     tde->d_reclen = tswap16(treclen);
                    /* The target_dirent d_type field is stored in what was
                     * formerly a padding byte at the end of the structure.
                     */
9181                     *(((char *)tde) + treclen - 1) = type;
9182 
9183                     de = (struct linux_dirent64 *)((char *)de + reclen);
9184                     tde = (struct target_dirent *)((char *)tde + treclen);
9185                     len -= reclen;
9186                     tlen += treclen;
9187                 }
9188                 ret = tlen;
9189             }
9190             unlock_user(dirp, arg2, ret);
9191         }
9192 #endif
9193         return ret;
9194 #endif /* TARGET_NR_getdents */
9195 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9196     case TARGET_NR_getdents64:
9197         {
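            /* struct linux_dirent64 has the same layout on host and target,
             * so the records only need their multi-byte fields swapped in
             * place. */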
9198             struct linux_dirent64 *dirp;
9199             abi_long count = arg3;
9200             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9201                 return -TARGET_EFAULT;
9202             ret = get_errno(sys_getdents64(arg1, dirp, count));
9203             if (!is_error(ret)) {
9204                 struct linux_dirent64 *de;
9205                 int len = ret;
9206                 int reclen;
9207                 de = dirp;
9208                 while (len > 0) {
9209                     reclen = de->d_reclen;
9210                     if (reclen > len)
9211                         break;
9212                     de->d_reclen = tswap16(reclen);
9213                     tswap64s((uint64_t *)&de->d_ino);
9214                     tswap64s((uint64_t *)&de->d_off);
9215                     de = (struct linux_dirent64 *)((char *)de + reclen);
9216                     len -= reclen;
9217                 }
9218             }
9219             unlock_user(dirp, arg2, ret);
9220         }
9221         return ret;
9222 #endif /* TARGET_NR_getdents64 */
9223 #if defined(TARGET_NR__newselect)
9224     case TARGET_NR__newselect:
9225         return do_select(arg1, arg2, arg3, arg4, arg5);
9226 #endif
9227 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9228 # ifdef TARGET_NR_poll
9229     case TARGET_NR_poll:
9230 # endif
9231 # ifdef TARGET_NR_ppoll
9232     case TARGET_NR_ppoll:
9233 # endif
9234         {
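            /* poll and ppoll share the pollfd marshalling: convert the
             * target pollfd array to host format, issue the host syscall,
             * then copy the revents fields back to the guest. */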
9235             struct target_pollfd *target_pfd;
9236             unsigned int nfds = arg2;
9237             struct pollfd *pfd;
9238             unsigned int i;
9239 
9240             pfd = NULL;
9241             target_pfd = NULL;
9242             if (nfds) {
9243                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9244                     return -TARGET_EINVAL;
9245                 }
9246 
9247                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9248                                        sizeof(struct target_pollfd) * nfds, 1);
9249                 if (!target_pfd) {
9250                     return -TARGET_EFAULT;
9251                 }
9252 
9253                 pfd = alloca(sizeof(struct pollfd) * nfds);
9254                 for (i = 0; i < nfds; i++) {
9255                     pfd[i].fd = tswap32(target_pfd[i].fd);
9256                     pfd[i].events = tswap16(target_pfd[i].events);
9257                 }
9258             }
9259 
9260             switch (num) {
9261 # ifdef TARGET_NR_ppoll
9262             case TARGET_NR_ppoll:
9263             {
9264                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9265                 target_sigset_t *target_set;
9266                 sigset_t _set, *set = &_set;
9267 
9268                 if (arg3) {
9269                     if (target_to_host_timespec(timeout_ts, arg3)) {
9270                         unlock_user(target_pfd, arg1, 0);
9271                         return -TARGET_EFAULT;
9272                     }
9273                 } else {
9274                     timeout_ts = NULL;
9275                 }
9276 
9277                 if (arg4) {
9278                     if (arg5 != sizeof(target_sigset_t)) {
9279                         unlock_user(target_pfd, arg1, 0);
9280                         return -TARGET_EINVAL;
9281                     }
9282 
9283                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9284                     if (!target_set) {
9285                         unlock_user(target_pfd, arg1, 0);
9286                         return -TARGET_EFAULT;
9287                     }
9288                     target_to_host_sigset(set, target_set);
9289                 } else {
9290                     set = NULL;
9291                 }
9292 
9293                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9294                                            set, SIGSET_T_SIZE));
9295 
9296                 if (!is_error(ret) && arg3) {
9297                     host_to_target_timespec(arg3, timeout_ts);
9298                 }
9299                 if (arg4) {
9300                     unlock_user(target_set, arg4, 0);
9301                 }
9302                 break;
9303             }
9304 # endif
9305 # ifdef TARGET_NR_poll
9306             case TARGET_NR_poll:
9307             {
9308                 struct timespec ts, *pts;
9309 
9310                 if (arg3 >= 0) {
9311                     /* Convert ms to secs, ns */
9312                     ts.tv_sec = arg3 / 1000;
9313                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9314                     pts = &ts;
9315                 } else {
9316                     /* -ve poll() timeout means "infinite" */
9317                     pts = NULL;
9318                 }
9319                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9320                 break;
9321             }
9322 # endif
9323             default:
9324                 g_assert_not_reached();
9325             }
9326 
9327             if (!is_error(ret)) {
                for (i = 0; i < nfds; i++) {
9329                     target_pfd[i].revents = tswap16(pfd[i].revents);
9330                 }
9331             }
9332             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9333         }
9334         return ret;
9335 #endif
9336     case TARGET_NR_flock:
9337         /* NOTE: the flock constant seems to be the same for every
9338            Linux platform */
9339         return get_errno(safe_flock(arg1, arg2));
9340     case TARGET_NR_readv:
9341         {
9342             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9343             if (vec != NULL) {
9344                 ret = get_errno(safe_readv(arg1, vec, arg3));
9345                 unlock_iovec(vec, arg2, arg3, 1);
9346             } else {
9347                 ret = -host_to_target_errno(errno);
9348             }
9349         }
9350         return ret;
9351     case TARGET_NR_writev:
9352         {
9353             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9354             if (vec != NULL) {
9355                 ret = get_errno(safe_writev(arg1, vec, arg3));
9356                 unlock_iovec(vec, arg2, arg3, 0);
9357             } else {
9358                 ret = -host_to_target_errno(errno);
9359             }
9360         }
9361         return ret;
9362 #if defined(TARGET_NR_preadv)
9363     case TARGET_NR_preadv:
9364         {
9365             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9366             if (vec != NULL) {
9367                 unsigned long low, high;
9368 
9369                 target_to_host_low_high(arg4, arg5, &low, &high);
9370                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9371                 unlock_iovec(vec, arg2, arg3, 1);
9372             } else {
9373                 ret = -host_to_target_errno(errno);
            }
9375         }
9376         return ret;
9377 #endif
9378 #if defined(TARGET_NR_pwritev)
9379     case TARGET_NR_pwritev:
9380         {
9381             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9382             if (vec != NULL) {
9383                 unsigned long low, high;
9384 
9385                 target_to_host_low_high(arg4, arg5, &low, &high);
9386                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9387                 unlock_iovec(vec, arg2, arg3, 0);
9388             } else {
9389                 ret = -host_to_target_errno(errno);
            }
9391         }
9392         return ret;
9393 #endif
9394     case TARGET_NR_getsid:
9395         return get_errno(getsid(arg1));
9396 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9397     case TARGET_NR_fdatasync:
9398         return get_errno(fdatasync(arg1));
9399 #endif
9400 #ifdef TARGET_NR__sysctl
9401     case TARGET_NR__sysctl:
9402         /* We don't implement this, but ENOTDIR is always a safe
9403            return value. */
9404         return -TARGET_ENOTDIR;
9405 #endif
9406     case TARGET_NR_sched_getaffinity:
9407         {
9408             unsigned int mask_size;
9409             unsigned long *mask;
9410 
            /*
             * sched_getaffinity needs multiples of ulong, so we have to take
             * care of mismatches between target ulong and host ulong sizes.
             */
9415             if (arg2 & (sizeof(abi_ulong) - 1)) {
9416                 return -TARGET_EINVAL;
9417             }
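            /*
             * Round the length up to a whole number of host longs; the host
             * syscall rejects anything that is not a multiple of
             * sizeof(unsigned long).
             */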
9418             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9419 
9420             mask = alloca(mask_size);
9421             memset(mask, 0, mask_size);
9422             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9423 
9424             if (!is_error(ret)) {
9425                 if (ret > arg2) {
                    /* The kernel returned more data than the caller's buffer
                     * can hold. This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail with EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
9433                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9434                     if (numcpus > arg2 * 8) {
9435                         return -TARGET_EINVAL;
9436                     }
9437                     ret = arg2;
9438                 }
9439 
9440                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9441                     return -TARGET_EFAULT;
9442                 }
9443             }
9444         }
9445         return ret;
9446     case TARGET_NR_sched_setaffinity:
9447         {
9448             unsigned int mask_size;
9449             unsigned long *mask;
9450 
            /*
             * sched_setaffinity needs multiples of ulong, so we have to take
             * care of mismatches between target ulong and host ulong sizes.
             */
9455             if (arg2 & (sizeof(abi_ulong) - 1)) {
9456                 return -TARGET_EINVAL;
9457             }
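            /* As for sched_getaffinity, the host syscall requires a length
             * that is a whole number of host longs. */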
9458             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9459             mask = alloca(mask_size);
9460 
9461             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9462             if (ret) {
9463                 return ret;
9464             }
9465 
9466             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9467         }
9468     case TARGET_NR_getcpu:
9469         {
9470             unsigned cpu, node;
9471             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9472                                        arg2 ? &node : NULL,
9473                                        NULL));
9474             if (is_error(ret)) {
9475                 return ret;
9476             }
9477             if (arg1 && put_user_u32(cpu, arg1)) {
9478                 return -TARGET_EFAULT;
9479             }
9480             if (arg2 && put_user_u32(node, arg2)) {
9481                 return -TARGET_EFAULT;
9482             }
9483         }
9484         return ret;
9485     case TARGET_NR_sched_setparam:
9486         {
9487             struct sched_param *target_schp;
9488             struct sched_param schp;
9489 
9490             if (arg2 == 0) {
9491                 return -TARGET_EINVAL;
9492             }
9493             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9494                 return -TARGET_EFAULT;
9495             schp.sched_priority = tswap32(target_schp->sched_priority);
9496             unlock_user_struct(target_schp, arg2, 0);
9497             return get_errno(sched_setparam(arg1, &schp));
9498         }
9499     case TARGET_NR_sched_getparam:
9500         {
9501             struct sched_param *target_schp;
9502             struct sched_param schp;
9503 
9504             if (arg2 == 0) {
9505                 return -TARGET_EINVAL;
9506             }
9507             ret = get_errno(sched_getparam(arg1, &schp));
9508             if (!is_error(ret)) {
9509                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9510                     return -TARGET_EFAULT;
9511                 target_schp->sched_priority = tswap32(schp.sched_priority);
9512                 unlock_user_struct(target_schp, arg2, 1);
9513             }
9514         }
9515         return ret;
9516     case TARGET_NR_sched_setscheduler:
9517         {
9518             struct sched_param *target_schp;
9519             struct sched_param schp;
9520             if (arg3 == 0) {
9521                 return -TARGET_EINVAL;
9522             }
9523             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9524                 return -TARGET_EFAULT;
9525             schp.sched_priority = tswap32(target_schp->sched_priority);
9526             unlock_user_struct(target_schp, arg3, 0);
9527             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9528         }
9529     case TARGET_NR_sched_getscheduler:
9530         return get_errno(sched_getscheduler(arg1));
9531     case TARGET_NR_sched_yield:
9532         return get_errno(sched_yield());
9533     case TARGET_NR_sched_get_priority_max:
9534         return get_errno(sched_get_priority_max(arg1));
9535     case TARGET_NR_sched_get_priority_min:
9536         return get_errno(sched_get_priority_min(arg1));
9537     case TARGET_NR_sched_rr_get_interval:
9538         {
9539             struct timespec ts;
9540             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9541             if (!is_error(ret)) {
9542                 ret = host_to_target_timespec(arg2, &ts);
9543             }
9544         }
9545         return ret;
9546     case TARGET_NR_nanosleep:
9547         {
9548             struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
9550             ret = get_errno(safe_nanosleep(&req, &rem));
9551             if (is_error(ret) && arg2) {
9552                 host_to_target_timespec(arg2, &rem);
9553             }
9554         }
9555         return ret;
9556     case TARGET_NR_prctl:
9557         switch (arg1) {
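        /* The options handled explicitly below either access guest memory
         * through their arguments or need per-target emulation; the rest
         * are passed straight through to the host. */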
9558         case PR_GET_PDEATHSIG:
9559         {
9560             int deathsig;
9561             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9562             if (!is_error(ret) && arg2
9563                 && put_user_ual(deathsig, arg2)) {
9564                 return -TARGET_EFAULT;
9565             }
9566             return ret;
9567         }
9568 #ifdef PR_GET_NAME
9569         case PR_GET_NAME:
9570         {
9571             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9572             if (!name) {
9573                 return -TARGET_EFAULT;
9574             }
9575             ret = get_errno(prctl(arg1, (unsigned long)name,
9576                                   arg3, arg4, arg5));
9577             unlock_user(name, arg2, 16);
9578             return ret;
9579         }
9580         case PR_SET_NAME:
9581         {
9582             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9583             if (!name) {
9584                 return -TARGET_EFAULT;
9585             }
9586             ret = get_errno(prctl(arg1, (unsigned long)name,
9587                                   arg3, arg4, arg5));
9588             unlock_user(name, arg2, 0);
9589             return ret;
9590         }
9591 #endif
9592 #ifdef TARGET_MIPS
9593         case TARGET_PR_GET_FP_MODE:
9594         {
9595             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9596             ret = 0;
9597             if (env->CP0_Status & (1 << CP0St_FR)) {
9598                 ret |= TARGET_PR_FP_MODE_FR;
9599             }
9600             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9601                 ret |= TARGET_PR_FP_MODE_FRE;
9602             }
9603             return ret;
9604         }
9605         case TARGET_PR_SET_FP_MODE:
9606         {
9607             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9608             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9609             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9610             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9611             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9612 
9613             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9614                                             TARGET_PR_FP_MODE_FRE;
9615 
            /* If there is nothing to change, return success right away.  */
9617             if (old_fr == new_fr && old_fre == new_fre) {
9618                 return 0;
9619             }
9620             /* Check the value is valid */
9621             if (arg2 & ~known_bits) {
9622                 return -TARGET_EOPNOTSUPP;
9623             }
9624             /* Setting FRE without FR is not supported.  */
9625             if (new_fre && !new_fr) {
9626                 return -TARGET_EOPNOTSUPP;
9627             }
9628             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9629                 /* FR1 is not supported */
9630                 return -TARGET_EOPNOTSUPP;
9631             }
9632             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9633                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9634                 /* cannot set FR=0 */
9635                 return -TARGET_EOPNOTSUPP;
9636             }
9637             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9638                 /* Cannot set FRE=1 */
9639                 return -TARGET_EOPNOTSUPP;
9640             }
9641 
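            /*
             * Changing FR changes how 64-bit FP values map onto the register
             * file: with FR=0 a double spans an even/odd register pair, with
             * FR=1 it occupies a single 64-bit register.  Move the upper
             * halves between the even register and its odd partner so that
             * existing values survive the switch.
             */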
9642             int i;
9643             fpr_t *fpr = env->active_fpu.fpr;
9644             for (i = 0; i < 32 ; i += 2) {
9645                 if (!old_fr && new_fr) {
9646                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9647                 } else if (old_fr && !new_fr) {
9648                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9649                 }
9650             }
9651 
9652             if (new_fr) {
9653                 env->CP0_Status |= (1 << CP0St_FR);
9654                 env->hflags |= MIPS_HFLAG_F64;
9655             } else {
9656                 env->CP0_Status &= ~(1 << CP0St_FR);
9657                 env->hflags &= ~MIPS_HFLAG_F64;
9658             }
9659             if (new_fre) {
9660                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9661                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9662                     env->hflags |= MIPS_HFLAG_FRE;
9663                 }
9664             } else {
9665                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9666                 env->hflags &= ~MIPS_HFLAG_FRE;
9667             }
9668 
9669             return 0;
9670         }
9671 #endif /* MIPS */
9672 #ifdef TARGET_AARCH64
9673         case TARGET_PR_SVE_SET_VL:
9674             /*
9675              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9676              * PR_SVE_VL_INHERIT.  Note the kernel definition
9677              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9678              * even though the current architectural maximum is VQ=16.
9679              */
9680             ret = -TARGET_EINVAL;
9681             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9682                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9683                 CPUARMState *env = cpu_env;
9684                 ARMCPU *cpu = arm_env_get_cpu(env);
9685                 uint32_t vq, old_vq;
9686 
9687                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9688                 vq = MAX(arg2 / 16, 1);
9689                 vq = MIN(vq, cpu->sve_max_vq);
9690 
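                /*
                 * Narrowing the vector length zeroes the now-inaccessible
                 * high bits of the SVE registers.  ZCR_EL1.LEN holds vq - 1,
                 * and the prctl returns the new vector length in bytes.
                 */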
9691                 if (vq < old_vq) {
9692                     aarch64_sve_narrow_vq(env, vq);
9693                 }
9694                 env->vfp.zcr_el[1] = vq - 1;
9695                 ret = vq * 16;
9696             }
9697             return ret;
9698         case TARGET_PR_SVE_GET_VL:
9699             ret = -TARGET_EINVAL;
9700             {
9701                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9702                 if (cpu_isar_feature(aa64_sve, cpu)) {
9703                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9704                 }
9705             }
9706             return ret;
9707         case TARGET_PR_PAC_RESET_KEYS:
9708             {
9709                 CPUARMState *env = cpu_env;
9710                 ARMCPU *cpu = arm_env_get_cpu(env);
9711 
9712                 if (arg3 || arg4 || arg5) {
9713                     return -TARGET_EINVAL;
9714                 }
9715                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9716                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9717                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9718                                TARGET_PR_PAC_APGAKEY);
9719                     if (arg2 == 0) {
9720                         arg2 = all;
9721                     } else if (arg2 & ~all) {
9722                         return -TARGET_EINVAL;
9723                     }
9724                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9725                         arm_init_pauth_key(&env->apia_key);
9726                     }
9727                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9728                         arm_init_pauth_key(&env->apib_key);
9729                     }
9730                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9731                         arm_init_pauth_key(&env->apda_key);
9732                     }
9733                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9734                         arm_init_pauth_key(&env->apdb_key);
9735                     }
9736                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9737                         arm_init_pauth_key(&env->apga_key);
9738                     }
9739                     return 0;
9740                 }
9741             }
9742             return -TARGET_EINVAL;
9743 #endif /* AARCH64 */
9744         case PR_GET_SECCOMP:
9745         case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the guest from disabling syscalls
             * that QEMU itself needs. */
9748             return -TARGET_EINVAL;
9749         default:
9750             /* Most prctl options have no pointer arguments */
9751             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9752         }
9753         break;
9754 #ifdef TARGET_NR_arch_prctl
9755     case TARGET_NR_arch_prctl:
9756 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9757         return do_arch_prctl(cpu_env, arg1, arg2);
9758 #else
9759 #error unreachable
9760 #endif
9761 #endif
9762 #ifdef TARGET_NR_pread64
9763     case TARGET_NR_pread64:
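        /* Some 32-bit ABIs pass 64-bit syscall arguments in aligned register
         * pairs, which inserts a padding argument before the offset; shift
         * the arguments down so that arg4/arg5 hold the offset halves. */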
9764         if (regpairs_aligned(cpu_env, num)) {
9765             arg4 = arg5;
9766             arg5 = arg6;
9767         }
9768         if (arg2 == 0 && arg3 == 0) {
9769             /* Special-case NULL buffer and zero length, which should succeed */
9770             p = 0;
9771         } else {
9772             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9773             if (!p) {
9774                 return -TARGET_EFAULT;
9775             }
9776         }
9777         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9778         unlock_user(p, arg2, ret);
9779         return ret;
9780     case TARGET_NR_pwrite64:
9781         if (regpairs_aligned(cpu_env, num)) {
9782             arg4 = arg5;
9783             arg5 = arg6;
9784         }
9785         if (arg2 == 0 && arg3 == 0) {
9786             /* Special-case NULL buffer and zero length, which should succeed */
9787             p = 0;
9788         } else {
9789             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9790             if (!p) {
9791                 return -TARGET_EFAULT;
9792             }
9793         }
9794         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9795         unlock_user(p, arg2, 0);
9796         return ret;
9797 #endif
9798     case TARGET_NR_getcwd:
9799         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9800             return -TARGET_EFAULT;
9801         ret = get_errno(sys_getcwd1(p, arg2));
9802         unlock_user(p, arg1, ret);
9803         return ret;
9804     case TARGET_NR_capget:
9805     case TARGET_NR_capset:
9806     {
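        /* capget and capset share the header/data marshalling below; only
         * the direction of the data copy differs between the two. */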
9807         struct target_user_cap_header *target_header;
9808         struct target_user_cap_data *target_data = NULL;
9809         struct __user_cap_header_struct header;
9810         struct __user_cap_data_struct data[2];
9811         struct __user_cap_data_struct *dataptr = NULL;
9812         int i, target_datalen;
9813         int data_items = 1;
9814 
9815         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9816             return -TARGET_EFAULT;
9817         }
9818         header.version = tswap32(target_header->version);
9819         header.pid = tswap32(target_header->pid);
9820 
9821         if (header.version != _LINUX_CAPABILITY_VERSION) {
9822             /* Version 2 and up takes pointer to two user_data structs */
9823             data_items = 2;
9824         }
9825 
9826         target_datalen = sizeof(*target_data) * data_items;
9827 
9828         if (arg2) {
9829             if (num == TARGET_NR_capget) {
9830                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9831             } else {
9832                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9833             }
9834             if (!target_data) {
9835                 unlock_user_struct(target_header, arg1, 0);
9836                 return -TARGET_EFAULT;
9837             }
9838 
9839             if (num == TARGET_NR_capset) {
9840                 for (i = 0; i < data_items; i++) {
9841                     data[i].effective = tswap32(target_data[i].effective);
9842                     data[i].permitted = tswap32(target_data[i].permitted);
9843                     data[i].inheritable = tswap32(target_data[i].inheritable);
9844                 }
9845             }
9846 
9847             dataptr = data;
9848         }
9849 
9850         if (num == TARGET_NR_capget) {
9851             ret = get_errno(capget(&header, dataptr));
9852         } else {
9853             ret = get_errno(capset(&header, dataptr));
9854         }
9855 
9856         /* The kernel always updates version for both capget and capset */
9857         target_header->version = tswap32(header.version);
9858         unlock_user_struct(target_header, arg1, 1);
9859 
9860         if (arg2) {
9861             if (num == TARGET_NR_capget) {
9862                 for (i = 0; i < data_items; i++) {
9863                     target_data[i].effective = tswap32(data[i].effective);
9864                     target_data[i].permitted = tswap32(data[i].permitted);
9865                     target_data[i].inheritable = tswap32(data[i].inheritable);
9866                 }
9867                 unlock_user(target_data, arg2, target_datalen);
9868             } else {
9869                 unlock_user(target_data, arg2, 0);
9870             }
9871         }
9872         return ret;
9873     }
9874     case TARGET_NR_sigaltstack:
9875         return do_sigaltstack(arg1, arg2,
9876                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9877 
9878 #ifdef CONFIG_SENDFILE
9879 #ifdef TARGET_NR_sendfile
9880     case TARGET_NR_sendfile:
9881     {
9882         off_t *offp = NULL;
9883         off_t off;
9884         if (arg3) {
9885             ret = get_user_sal(off, arg3);
9886             if (is_error(ret)) {
9887                 return ret;
9888             }
9889             offp = &off;
9890         }
9891         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9892         if (!is_error(ret) && arg3) {
9893             abi_long ret2 = put_user_sal(off, arg3);
9894             if (is_error(ret2)) {
9895                 ret = ret2;
9896             }
9897         }
9898         return ret;
9899     }
9900 #endif
9901 #ifdef TARGET_NR_sendfile64
9902     case TARGET_NR_sendfile64:
9903     {
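        /* As TARGET_NR_sendfile, except the offset argument is always a
         * 64-bit value. */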
9904         off_t *offp = NULL;
9905         off_t off;
9906         if (arg3) {
9907             ret = get_user_s64(off, arg3);
9908             if (is_error(ret)) {
9909                 return ret;
9910             }
9911             offp = &off;
9912         }
9913         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9914         if (!is_error(ret) && arg3) {
9915             abi_long ret2 = put_user_s64(off, arg3);
9916             if (is_error(ret2)) {
9917                 ret = ret2;
9918             }
9919         }
9920         return ret;
9921     }
9922 #endif
9923 #endif
9924 #ifdef TARGET_NR_vfork
9925     case TARGET_NR_vfork:
9926         return get_errno(do_fork(cpu_env,
9927                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9928                          0, 0, 0, 0));
9929 #endif
9930 #ifdef TARGET_NR_ugetrlimit
9931     case TARGET_NR_ugetrlimit:
9932     {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
9944         return ret;
9945     }
9946 #endif
9947 #ifdef TARGET_NR_truncate64
9948     case TARGET_NR_truncate64:
9949         if (!(p = lock_user_string(arg1)))
9950             return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9952         unlock_user(p, arg1, 0);
9953         return ret;
9954 #endif
9955 #ifdef TARGET_NR_ftruncate64
9956     case TARGET_NR_ftruncate64:
9957         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9958 #endif
9959 #ifdef TARGET_NR_stat64
9960     case TARGET_NR_stat64:
9961         if (!(p = lock_user_string(arg1))) {
9962             return -TARGET_EFAULT;
9963         }
9964         ret = get_errno(stat(path(p), &st));
9965         unlock_user(p, arg1, 0);
9966         if (!is_error(ret))
9967             ret = host_to_target_stat64(cpu_env, arg2, &st);
9968         return ret;
9969 #endif
9970 #ifdef TARGET_NR_lstat64
9971     case TARGET_NR_lstat64:
9972         if (!(p = lock_user_string(arg1))) {
9973             return -TARGET_EFAULT;
9974         }
9975         ret = get_errno(lstat(path(p), &st));
9976         unlock_user(p, arg1, 0);
9977         if (!is_error(ret))
9978             ret = host_to_target_stat64(cpu_env, arg2, &st);
9979         return ret;
9980 #endif
9981 #ifdef TARGET_NR_fstat64
9982     case TARGET_NR_fstat64:
9983         ret = get_errno(fstat(arg1, &st));
9984         if (!is_error(ret))
9985             ret = host_to_target_stat64(cpu_env, arg2, &st);
9986         return ret;
9987 #endif
9988 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9989 #ifdef TARGET_NR_fstatat64
9990     case TARGET_NR_fstatat64:
9991 #endif
9992 #ifdef TARGET_NR_newfstatat
9993     case TARGET_NR_newfstatat:
9994 #endif
9995         if (!(p = lock_user_string(arg2))) {
9996             return -TARGET_EFAULT;
9997         }
9998         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9999         unlock_user(p, arg2, 0);
10000         if (!is_error(ret))
10001             ret = host_to_target_stat64(cpu_env, arg3, &st);
10002         return ret;
10003 #endif
10004 #ifdef TARGET_NR_lchown
10005     case TARGET_NR_lchown:
10006         if (!(p = lock_user_string(arg1)))
10007             return -TARGET_EFAULT;
10008         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10009         unlock_user(p, arg1, 0);
10010         return ret;
10011 #endif
10012 #ifdef TARGET_NR_getuid
10013     case TARGET_NR_getuid:
10014         return get_errno(high2lowuid(getuid()));
10015 #endif
10016 #ifdef TARGET_NR_getgid
10017     case TARGET_NR_getgid:
10018         return get_errno(high2lowgid(getgid()));
10019 #endif
10020 #ifdef TARGET_NR_geteuid
10021     case TARGET_NR_geteuid:
10022         return get_errno(high2lowuid(geteuid()));
10023 #endif
10024 #ifdef TARGET_NR_getegid
10025     case TARGET_NR_getegid:
10026         return get_errno(high2lowgid(getegid()));
10027 #endif
10028     case TARGET_NR_setreuid:
10029         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10030     case TARGET_NR_setregid:
10031         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10032     case TARGET_NR_getgroups:
10033         {
10034             int gidsetsize = arg1;
10035             target_id *target_grouplist;
10036             gid_t *grouplist;
10037             int i;
10038 
10039             grouplist = alloca(gidsetsize * sizeof(gid_t));
10040             ret = get_errno(getgroups(gidsetsize, grouplist));
10041             if (gidsetsize == 0)
10042                 return ret;
10043             if (!is_error(ret)) {
10044                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10045                 if (!target_grouplist)
10046                     return -TARGET_EFAULT;
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
10049                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10050             }
10051         }
10052         return ret;
10053     case TARGET_NR_setgroups:
10054         {
10055             int gidsetsize = arg1;
10056             target_id *target_grouplist;
10057             gid_t *grouplist = NULL;
10058             int i;
10059             if (gidsetsize) {
10060                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10061                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10062                 if (!target_grouplist) {
10063                     return -TARGET_EFAULT;
10064                 }
10065                 for (i = 0; i < gidsetsize; i++) {
10066                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10067                 }
10068                 unlock_user(target_grouplist, arg2, 0);
10069             }
10070             return get_errno(setgroups(gidsetsize, grouplist));
10071         }
10072     case TARGET_NR_fchown:
10073         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10074 #if defined(TARGET_NR_fchownat)
10075     case TARGET_NR_fchownat:
10076         if (!(p = lock_user_string(arg2)))
10077             return -TARGET_EFAULT;
10078         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10079                                  low2highgid(arg4), arg5));
10080         unlock_user(p, arg2, 0);
10081         return ret;
10082 #endif
10083 #ifdef TARGET_NR_setresuid
10084     case TARGET_NR_setresuid:
10085         return get_errno(sys_setresuid(low2highuid(arg1),
10086                                        low2highuid(arg2),
10087                                        low2highuid(arg3)));
10088 #endif
10089 #ifdef TARGET_NR_getresuid
10090     case TARGET_NR_getresuid:
10091         {
10092             uid_t ruid, euid, suid;
10093             ret = get_errno(getresuid(&ruid, &euid, &suid));
10094             if (!is_error(ret)) {
10095                 if (put_user_id(high2lowuid(ruid), arg1)
10096                     || put_user_id(high2lowuid(euid), arg2)
10097                     || put_user_id(high2lowuid(suid), arg3))
10098                     return -TARGET_EFAULT;
10099             }
10100         }
10101         return ret;
10102 #endif
10103 #ifdef TARGET_NR_getresgid
10104     case TARGET_NR_setresgid:
10105         return get_errno(sys_setresgid(low2highgid(arg1),
10106                                        low2highgid(arg2),
10107                                        low2highgid(arg3)));
10108 #endif
10109 #ifdef TARGET_NR_getresgid
10110     case TARGET_NR_getresgid:
10111         {
10112             gid_t rgid, egid, sgid;
10113             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10114             if (!is_error(ret)) {
10115                 if (put_user_id(high2lowgid(rgid), arg1)
10116                     || put_user_id(high2lowgid(egid), arg2)
10117                     || put_user_id(high2lowgid(sgid), arg3))
10118                     return -TARGET_EFAULT;
10119             }
10120         }
10121         return ret;
10122 #endif
10123 #ifdef TARGET_NR_chown
10124     case TARGET_NR_chown:
10125         if (!(p = lock_user_string(arg1)))
10126             return -TARGET_EFAULT;
10127         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10128         unlock_user(p, arg1, 0);
10129         return ret;
10130 #endif
10131     case TARGET_NR_setuid:
10132         return get_errno(sys_setuid(low2highuid(arg1)));
10133     case TARGET_NR_setgid:
10134         return get_errno(sys_setgid(low2highgid(arg1)));
10135     case TARGET_NR_setfsuid:
10136         return get_errno(setfsuid(arg1));
10137     case TARGET_NR_setfsgid:
10138         return get_errno(setfsgid(arg1));
10139 
10140 #ifdef TARGET_NR_lchown32
10141     case TARGET_NR_lchown32:
10142         if (!(p = lock_user_string(arg1)))
10143             return -TARGET_EFAULT;
10144         ret = get_errno(lchown(p, arg2, arg3));
10145         unlock_user(p, arg1, 0);
10146         return ret;
10147 #endif
10148 #ifdef TARGET_NR_getuid32
10149     case TARGET_NR_getuid32:
10150         return get_errno(getuid());
10151 #endif
10152 
10153 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
10161         return get_errno(getuid());
10162 #endif
10163 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
10171         return get_errno(getgid());
10172 #endif
10173 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10174     /* Alpha specific */
10175     case TARGET_NR_osf_getsysinfo:
10176         ret = -TARGET_EOPNOTSUPP;
10177         switch (arg1) {
10178           case TARGET_GSI_IEEE_FP_CONTROL:
10179             {
10180                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10181 
10182                 /* Copied from linux ieee_fpcr_to_swcr.  */
10183                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10184                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10185                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10186                                         | SWCR_TRAP_ENABLE_DZE
10187                                         | SWCR_TRAP_ENABLE_OVF);
10188                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10189                                         | SWCR_TRAP_ENABLE_INE);
10190                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10191                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10192 
                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
10195                 ret = 0;
10196             }
10197             break;
10198 
10199           /* case GSI_IEEE_STATE_AT_SIGNAL:
10200              -- Not implemented in linux kernel.
10201              case GSI_UACPROC:
10202              -- Retrieves current unaligned access state; not much used.
10203              case GSI_PROC_TYPE:
10204              -- Retrieves implver information; surely not used.
10205              case GSI_GET_HWRPB:
10206              -- Grabs a copy of the HWRPB; surely not used.
10207           */
10208         }
10209         return ret;
10210 #endif
10211 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10212     /* Alpha specific */
10213     case TARGET_NR_osf_setsysinfo:
10214         ret = -TARGET_EOPNOTSUPP;
10215         switch (arg1) {
10216           case TARGET_SSI_IEEE_FP_CONTROL:
10217             {
10218                 uint64_t swcr, fpcr, orig_fpcr;
10219 
10220                 if (get_user_u64 (swcr, arg2)) {
10221                     return -TARGET_EFAULT;
10222                 }
10223                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10224                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10225 
10226                 /* Copied from linux ieee_swcr_to_fpcr.  */
10227                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10228                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10229                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10230                                   | SWCR_TRAP_ENABLE_DZE
10231                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10232                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10233                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10234                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10235                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10236 
10237                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10238                 ret = 0;
10239             }
10240             break;
10241 
10242           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10243             {
10244                 uint64_t exc, fpcr, orig_fpcr;
10245                 int si_code;
10246 
10247                 if (get_user_u64(exc, arg2)) {
10248                     return -TARGET_EFAULT;
10249                 }
10250 
10251                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10252 
10253                 /* We only add to the exception status here.  */
10254                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10255 
10256                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10257                 ret = 0;
10258 
10259                 /* Old exceptions are not signaled.  */
10260                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10261 
10262                 /* If any exceptions set by this call,
10263                    and are unmasked, send a signal.  */
10264                 si_code = 0;
10265                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10266                     si_code = TARGET_FPE_FLTRES;
10267                 }
10268                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10269                     si_code = TARGET_FPE_FLTUND;
10270                 }
10271                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10272                     si_code = TARGET_FPE_FLTOVF;
10273                 }
10274                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10275                     si_code = TARGET_FPE_FLTDIV;
10276                 }
10277                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10278                     si_code = TARGET_FPE_FLTINV;
10279                 }
10280                 if (si_code != 0) {
10281                     target_siginfo_t info;
10282                     info.si_signo = SIGFPE;
10283                     info.si_errno = 0;
10284                     info.si_code = si_code;
10285                     info._sifields._sigfault._addr
10286                         = ((CPUArchState *)cpu_env)->pc;
10287                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10288                                  QEMU_SI_FAULT, &info);
10289                 }
10290             }
10291             break;
10292 
10293           /* case SSI_NVPAIRS:
10294              -- Used with SSIN_UACPROC to enable unaligned accesses.
10295              case SSI_IEEE_STATE_AT_SIGNAL:
10296              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10297              -- Not implemented in linux kernel
10298           */
10299         }
10300         return ret;
10301 #endif
10302 #ifdef TARGET_NR_osf_sigprocmask
10303     /* Alpha specific.  */
10304     case TARGET_NR_osf_sigprocmask:
10305         {
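            /* Unlike rt_sigprocmask, this OSF call takes the new mask by
             * value and returns the old mask as the syscall result. */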
10306             abi_ulong mask;
10307             int how;
10308             sigset_t set, oldset;
10309 
10310             switch(arg1) {
10311             case TARGET_SIG_BLOCK:
10312                 how = SIG_BLOCK;
10313                 break;
10314             case TARGET_SIG_UNBLOCK:
10315                 how = SIG_UNBLOCK;
10316                 break;
10317             case TARGET_SIG_SETMASK:
10318                 how = SIG_SETMASK;
10319                 break;
10320             default:
10321                 return -TARGET_EINVAL;
10322             }
10323             mask = arg2;
10324             target_to_host_old_sigset(&set, &mask);
10325             ret = do_sigprocmask(how, &set, &oldset);
10326             if (!ret) {
10327                 host_to_target_old_sigset(&mask, &oldset);
10328                 ret = mask;
10329             }
10330         }
10331         return ret;
10332 #endif
10333 
10334 #ifdef TARGET_NR_getgid32
10335     case TARGET_NR_getgid32:
10336         return get_errno(getgid());
10337 #endif
10338 #ifdef TARGET_NR_geteuid32
10339     case TARGET_NR_geteuid32:
10340         return get_errno(geteuid());
10341 #endif
10342 #ifdef TARGET_NR_getegid32
10343     case TARGET_NR_getegid32:
10344         return get_errno(getegid());
10345 #endif
10346 #ifdef TARGET_NR_setreuid32
10347     case TARGET_NR_setreuid32:
10348         return get_errno(setreuid(arg1, arg2));
10349 #endif
10350 #ifdef TARGET_NR_setregid32
10351     case TARGET_NR_setregid32:
10352         return get_errno(setregid(arg1, arg2));
10353 #endif
10354 #ifdef TARGET_NR_getgroups32
10355     case TARGET_NR_getgroups32:
10356         {
10357             int gidsetsize = arg1;
10358             uint32_t *target_grouplist;
10359             gid_t *grouplist;
10360             int i;
10361 
10362             grouplist = alloca(gidsetsize * sizeof(gid_t));
10363             ret = get_errno(getgroups(gidsetsize, grouplist));
10364             if (gidsetsize == 0)
10365                 return ret;
10366             if (!is_error(ret)) {
10367                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10368                 if (!target_grouplist) {
10369                     return -TARGET_EFAULT;
10370                 }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
10373                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10374             }
10375         }
10376         return ret;
10377 #endif
10378 #ifdef TARGET_NR_setgroups32
10379     case TARGET_NR_setgroups32:
10380         {
10381             int gidsetsize = arg1;
10382             uint32_t *target_grouplist;
10383             gid_t *grouplist;
10384             int i;
10385 
10386             grouplist = alloca(gidsetsize * sizeof(gid_t));
10387             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10388             if (!target_grouplist) {
10389                 return -TARGET_EFAULT;
10390             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
10393             unlock_user(target_grouplist, arg2, 0);
10394             return get_errno(setgroups(gidsetsize, grouplist));
10395         }
10396 #endif
10397 #ifdef TARGET_NR_fchown32
10398     case TARGET_NR_fchown32:
10399         return get_errno(fchown(arg1, arg2, arg3));
10400 #endif
10401 #ifdef TARGET_NR_setresuid32
10402     case TARGET_NR_setresuid32:
10403         return get_errno(sys_setresuid(arg1, arg2, arg3));
10404 #endif
10405 #ifdef TARGET_NR_getresuid32
10406     case TARGET_NR_getresuid32:
10407         {
10408             uid_t ruid, euid, suid;
10409             ret = get_errno(getresuid(&ruid, &euid, &suid));
10410             if (!is_error(ret)) {
10411                 if (put_user_u32(ruid, arg1)
10412                     || put_user_u32(euid, arg2)
10413                     || put_user_u32(suid, arg3))
10414                     return -TARGET_EFAULT;
10415             }
10416         }
10417         return ret;
10418 #endif
10419 #ifdef TARGET_NR_setresgid32
10420     case TARGET_NR_setresgid32:
10421         return get_errno(sys_setresgid(arg1, arg2, arg3));
10422 #endif
10423 #ifdef TARGET_NR_getresgid32
10424     case TARGET_NR_getresgid32:
10425         {
10426             gid_t rgid, egid, sgid;
10427             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10428             if (!is_error(ret)) {
10429                 if (put_user_u32(rgid, arg1)
10430                     || put_user_u32(egid, arg2)
10431                     || put_user_u32(sgid, arg3))
10432                     return -TARGET_EFAULT;
10433             }
10434         }
10435         return ret;
10436 #endif
10437 #ifdef TARGET_NR_chown32
10438     case TARGET_NR_chown32:
10439         if (!(p = lock_user_string(arg1)))
10440             return -TARGET_EFAULT;
10441         ret = get_errno(chown(p, arg2, arg3));
10442         unlock_user(p, arg1, 0);
10443         return ret;
10444 #endif
10445 #ifdef TARGET_NR_setuid32
10446     case TARGET_NR_setuid32:
10447         return get_errno(sys_setuid(arg1));
10448 #endif
10449 #ifdef TARGET_NR_setgid32
10450     case TARGET_NR_setgid32:
10451         return get_errno(sys_setgid(arg1));
10452 #endif
10453 #ifdef TARGET_NR_setfsuid32
10454     case TARGET_NR_setfsuid32:
10455         return get_errno(setfsuid(arg1));
10456 #endif
10457 #ifdef TARGET_NR_setfsgid32
10458     case TARGET_NR_setfsgid32:
10459         return get_errno(setfsgid(arg1));
10460 #endif
10461 #ifdef TARGET_NR_mincore
10462     case TARGET_NR_mincore:
10463         {
10464             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10465             if (!a) {
10466                 return -TARGET_ENOMEM;
10467             }
10468             p = lock_user_string(arg3);
10469             if (!p) {
10470                 ret = -TARGET_EFAULT;
10471             } else {
10472                 ret = get_errno(mincore(a, arg2, p));
10473                 unlock_user(p, arg3, ret);
10474             }
10475             unlock_user(a, arg1, 0);
10476         }
10477         return ret;
10478 #endif
10479 #ifdef TARGET_NR_arm_fadvise64_64
10480     case TARGET_NR_arm_fadvise64_64:
10481         /* arm_fadvise64_64 looks like fadvise64_64 but
10482          * with different argument order: fd, advice, offset, len
10483          * rather than the usual fd, offset, len, advice.
10484          * Note that offset and len are both 64-bit so appear as
10485          * pairs of 32-bit registers.
10486          */
10487         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10488                             target_offset64(arg5, arg6), arg2);
10489         return -host_to_target_errno(ret);
10490 #endif
10491 
10492 #if TARGET_ABI_BITS == 32
10493 
10494 #ifdef TARGET_NR_fadvise64_64
10495     case TARGET_NR_fadvise64_64:
10496 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10497         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10498         ret = arg2;
10499         arg2 = arg3;
10500         arg3 = arg4;
10501         arg4 = arg5;
10502         arg5 = arg6;
10503         arg6 = ret;
10504 #else
10505         /* 6 args: fd, offset (high, low), len (high, low), advice */
10506         if (regpairs_aligned(cpu_env, num)) {
10507             /* offset is in (3,4), len in (5,6) and advice in 7 */
10508             arg2 = arg3;
10509             arg3 = arg4;
10510             arg4 = arg5;
10511             arg5 = arg6;
10512             arg6 = arg7;
10513         }
10514 #endif
10515         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10516                             target_offset64(arg4, arg5), arg6);
10517         return -host_to_target_errno(ret);
10518 #endif
10519 
10520 #ifdef TARGET_NR_fadvise64
10521     case TARGET_NR_fadvise64:
10522         /* 5 args: fd, offset (high, low), len, advice */
10523         if (regpairs_aligned(cpu_env, num)) {
10524             /* offset is in (3,4), len in 5 and advice in 6 */
10525             arg2 = arg3;
10526             arg3 = arg4;
10527             arg4 = arg5;
10528             arg5 = arg6;
10529         }
10530         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10531         return -host_to_target_errno(ret);
10532 #endif
10533 
10534 #else /* not a 32-bit ABI */
10535 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10536 #ifdef TARGET_NR_fadvise64_64
10537     case TARGET_NR_fadvise64_64:
10538 #endif
10539 #ifdef TARGET_NR_fadvise64
10540     case TARGET_NR_fadvise64:
10541 #endif
10542 #ifdef TARGET_S390X
10543         switch (arg4) {
10544         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10545         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10546         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10547         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10548         default: break;
10549         }
10550 #endif
10551         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10552 #endif
10553 #endif /* end of 64-bit ABI fadvise handling */
10554 
10555 #ifdef TARGET_NR_madvise
10556     case TARGET_NR_madvise:
10557         /* A straight passthrough may not be safe because QEMU sometimes
10558            turns private file-backed mappings into anonymous mappings,
10559            which would break MADV_DONTNEED.
10560            madvise is only a hint, so ignoring it and returning success is OK. */
10561         return 0;
10562 #endif
10563 #if TARGET_ABI_BITS == 32
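          /* fcntl64 exists only on 32-bit ABIs: the guest struct flock64 has
           * to be converted to the host layout, and ARM OABI guests use a
           * differently padded flock64, hence the runtime-selected helpers. */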
10564     case TARGET_NR_fcntl64:
10565     {
10566         int cmd;
10567         struct flock64 fl;
10568         from_flock64_fn *copyfrom = copy_from_user_flock64;
10569         to_flock64_fn *copyto = copy_to_user_flock64;
10570 
10571 #ifdef TARGET_ARM
10572         if (!((CPUARMState *)cpu_env)->eabi) {
10573             copyfrom = copy_from_user_oabi_flock64;
10574             copyto = copy_to_user_oabi_flock64;
10575         }
10576 #endif
10577 
10578         cmd = target_to_host_fcntl_cmd(arg2);
10579         if (cmd == -TARGET_EINVAL) {
10580             return cmd;
10581         }
10582 
10583         switch (arg2) {
10584         case TARGET_F_GETLK64:
10585             ret = copyfrom(&fl, arg3);
10586             if (ret) {
10587                 break;
10588             }
10589             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10590             if (ret == 0) {
10591                 ret = copyto(arg3, &fl);
10592             }
10593             break;
10594 
10595         case TARGET_F_SETLK64:
10596         case TARGET_F_SETLKW64:
10597             ret = copyfrom(&fl, arg3);
10598             if (ret) {
10599                 break;
10600             }
10601             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10602             break;
10603         default:
10604             ret = do_fcntl(arg1, arg2, arg3);
10605             break;
10606         }
10607         return ret;
10608     }
10609 #endif
10610 #ifdef TARGET_NR_cacheflush
10611     case TARGET_NR_cacheflush:
10612         /* self-modifying code is handled automatically, so nothing needed */
10613         return 0;
10614 #endif
10615 #ifdef TARGET_NR_getpagesize
10616     case TARGET_NR_getpagesize:
10617         return TARGET_PAGE_SIZE;
10618 #endif
10619     case TARGET_NR_gettid:
10620         return get_errno(gettid());
10621 #ifdef TARGET_NR_readahead
10622     case TARGET_NR_readahead:
10623 #if TARGET_ABI_BITS == 32
10624         if (regpairs_aligned(cpu_env, num)) {
10625             arg2 = arg3;
10626             arg3 = arg4;
10627             arg4 = arg5;
10628         }
10629         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10630 #else
10631         ret = get_errno(readahead(arg1, arg2, arg3));
10632 #endif
10633         return ret;
10634 #endif
10635 #ifdef CONFIG_ATTR
10636 #ifdef TARGET_NR_setxattr
10637     case TARGET_NR_listxattr:
10638     case TARGET_NR_llistxattr:
10639     {
10640         void *p, *b = 0;
10641         if (arg2) {
10642             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10643             if (!b) {
10644                 return -TARGET_EFAULT;
10645             }
10646         }
10647         p = lock_user_string(arg1);
10648         if (p) {
10649             if (num == TARGET_NR_listxattr) {
10650                 ret = get_errno(listxattr(p, b, arg3));
10651             } else {
10652                 ret = get_errno(llistxattr(p, b, arg3));
10653             }
10654         } else {
10655             ret = -TARGET_EFAULT;
10656         }
10657         unlock_user(p, arg1, 0);
10658         unlock_user(b, arg2, arg3);
10659         return ret;
10660     }
10661     case TARGET_NR_flistxattr:
10662     {
10663         void *b = 0;
10664         if (arg2) {
10665             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10666             if (!b) {
10667                 return -TARGET_EFAULT;
10668             }
10669         }
10670         ret = get_errno(flistxattr(arg1, b, arg3));
10671         unlock_user(b, arg2, arg3);
10672         return ret;
10673     }
10674     case TARGET_NR_setxattr:
10675     case TARGET_NR_lsetxattr:
10676         {
10677             void *p, *n, *v = 0;
10678             if (arg3) {
10679                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10680                 if (!v) {
10681                     return -TARGET_EFAULT;
10682                 }
10683             }
10684             p = lock_user_string(arg1);
10685             n = lock_user_string(arg2);
10686             if (p && n) {
10687                 if (num == TARGET_NR_setxattr) {
10688                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10689                 } else {
10690                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10691                 }
10692             } else {
10693                 ret = -TARGET_EFAULT;
10694             }
10695             unlock_user(p, arg1, 0);
10696             unlock_user(n, arg2, 0);
10697             unlock_user(v, arg3, 0);
10698         }
10699         return ret;
10700     case TARGET_NR_fsetxattr:
10701         {
10702             void *n, *v = 0;
10703             if (arg3) {
10704                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10705                 if (!v) {
10706                     return -TARGET_EFAULT;
10707                 }
10708             }
10709             n = lock_user_string(arg2);
10710             if (n) {
10711                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10712             } else {
10713                 ret = -TARGET_EFAULT;
10714             }
10715             unlock_user(n, arg2, 0);
10716             unlock_user(v, arg3, 0);
10717         }
10718         return ret;
10719     case TARGET_NR_getxattr:
10720     case TARGET_NR_lgetxattr:
10721         {
10722             void *p, *n, *v = 0;
10723             if (arg3) {
10724                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10725                 if (!v) {
10726                     return -TARGET_EFAULT;
10727                 }
10728             }
10729             p = lock_user_string(arg1);
10730             n = lock_user_string(arg2);
10731             if (p && n) {
10732                 if (num == TARGET_NR_getxattr) {
10733                     ret = get_errno(getxattr(p, n, v, arg4));
10734                 } else {
10735                     ret = get_errno(lgetxattr(p, n, v, arg4));
10736                 }
10737             } else {
10738                 ret = -TARGET_EFAULT;
10739             }
10740             unlock_user(p, arg1, 0);
10741             unlock_user(n, arg2, 0);
10742             unlock_user(v, arg3, arg4);
10743         }
10744         return ret;
10745     case TARGET_NR_fgetxattr:
10746         {
10747             void *n, *v = 0;
10748             if (arg3) {
10749                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10750                 if (!v) {
10751                     return -TARGET_EFAULT;
10752                 }
10753             }
10754             n = lock_user_string(arg2);
10755             if (n) {
10756                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10757             } else {
10758                 ret = -TARGET_EFAULT;
10759             }
10760             unlock_user(n, arg2, 0);
10761             unlock_user(v, arg3, arg4);
10762         }
10763         return ret;
10764     case TARGET_NR_removexattr:
10765     case TARGET_NR_lremovexattr:
10766         {
10767             void *p, *n;
10768             p = lock_user_string(arg1);
10769             n = lock_user_string(arg2);
10770             if (p && n) {
10771                 if (num == TARGET_NR_removexattr) {
10772                     ret = get_errno(removexattr(p, n));
10773                 } else {
10774                     ret = get_errno(lremovexattr(p, n));
10775                 }
10776             } else {
10777                 ret = -TARGET_EFAULT;
10778             }
10779             unlock_user(p, arg1, 0);
10780             unlock_user(n, arg2, 0);
10781         }
10782         return ret;
10783     case TARGET_NR_fremovexattr:
10784         {
10785             void *n;
10786             n = lock_user_string(arg2);
10787             if (n) {
10788                 ret = get_errno(fremovexattr(arg1, n));
10789             } else {
10790                 ret = -TARGET_EFAULT;
10791             }
10792             unlock_user(n, arg2, 0);
10793         }
10794         return ret;
10795 #endif
10796 #endif /* CONFIG_ATTR */
10797 #ifdef TARGET_NR_set_thread_area
10798     case TARGET_NR_set_thread_area:
10799 #if defined(TARGET_MIPS)
10800       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10801       return 0;
10802 #elif defined(TARGET_CRIS)
10803       if (arg1 & 0xff) {
10804           ret = -TARGET_EINVAL;
10805       } else {
10806           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10807           ret = 0;
10808       }
10809       return ret;
10810 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10811       return do_set_thread_area(cpu_env, arg1);
10812 #elif defined(TARGET_M68K)
10813       {
10814           TaskState *ts = cpu->opaque;
10815           ts->tp_value = arg1;
10816           return 0;
10817       }
10818 #else
10819       return -TARGET_ENOSYS;
10820 #endif
10821 #endif
10822 #ifdef TARGET_NR_get_thread_area
10823     case TARGET_NR_get_thread_area:
10824 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10825         return do_get_thread_area(cpu_env, arg1);
10826 #elif defined(TARGET_M68K)
10827         {
10828             TaskState *ts = cpu->opaque;
10829             return ts->tp_value;
10830         }
10831 #else
10832         return -TARGET_ENOSYS;
10833 #endif
10834 #endif
10835 #ifdef TARGET_NR_getdomainname
10836     case TARGET_NR_getdomainname:
10837         return -TARGET_ENOSYS;
10838 #endif
10839 
10840 #ifdef TARGET_NR_clock_settime
10841     case TARGET_NR_clock_settime:
10842     {
10843         struct timespec ts;
10844 
10845         ret = target_to_host_timespec(&ts, arg2);
10846         if (!is_error(ret)) {
10847             ret = get_errno(clock_settime(arg1, &ts));
10848         }
10849         return ret;
10850     }
10851 #endif
10852 #ifdef TARGET_NR_clock_gettime
10853     case TARGET_NR_clock_gettime:
10854     {
10855         struct timespec ts;
10856         ret = get_errno(clock_gettime(arg1, &ts));
10857         if (!is_error(ret)) {
10858             ret = host_to_target_timespec(arg2, &ts);
10859         }
10860         return ret;
10861     }
10862 #endif
10863 #ifdef TARGET_NR_clock_getres
10864     case TARGET_NR_clock_getres:
10865     {
10866         struct timespec ts;
10867         ret = get_errno(clock_getres(arg1, &ts));
10868         if (!is_error(ret)) {
10869             host_to_target_timespec(arg2, &ts);
10870         }
10871         return ret;
10872     }
10873 #endif
10874 #ifdef TARGET_NR_clock_nanosleep
10875     case TARGET_NR_clock_nanosleep:
10876     {
10877         struct timespec ts;
10878         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
10879         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10880                                              &ts, arg4 ? &ts : NULL));
10881         if (arg4) {
10882             host_to_target_timespec(arg4, &ts);
              }
10883 
10884 #if defined(TARGET_PPC)
10885         /* clock_nanosleep is odd in that it returns positive errno values.
10886          * On PPC, CR0 bit 3 should be set in such a situation. */
10887         if (ret && ret != -TARGET_ERESTARTSYS) {
10888             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10889         }
10890 #endif
10891         return ret;
10892     }
10893 #endif
10894 
10895 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10896     case TARGET_NR_set_tid_address:
10897         return get_errno(set_tid_address((int *)g2h(arg1)));
10898 #endif
10899 
10900     case TARGET_NR_tkill:
10901         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10902 
10903     case TARGET_NR_tgkill:
10904         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10905                          target_to_host_signal(arg3)));
10906 
10907 #ifdef TARGET_NR_set_robust_list
10908     case TARGET_NR_set_robust_list:
10909     case TARGET_NR_get_robust_list:
10910         /* The ABI for supporting robust futexes has userspace pass
10911          * the kernel a pointer to a linked list which is updated by
10912          * userspace after the syscall; the list is walked by the kernel
10913          * when the thread exits. Since the linked list in QEMU guest
10914          * memory isn't a valid linked list for the host and we have
10915          * no way to reliably intercept the thread-death event, we can't
10916          * support these. Silently return ENOSYS so that guest userspace
10917          * falls back to a non-robust futex implementation (which should
10918          * be OK except in the corner case of the guest crashing while
10919          * holding a mutex that is shared with another process via
10920          * shared memory).
10921          */
10922         return -TARGET_ENOSYS;
10923 #endif
10924 
10925 #if defined(TARGET_NR_utimensat)
10926     case TARGET_NR_utimensat:
10927         {
10928             struct timespec *tsp, ts[2];
10929             if (!arg3) {
10930                 tsp = NULL;
10931             } else {
10932                 if (target_to_host_timespec(ts, arg3)) {
                          return -TARGET_EFAULT;
                      }
10933                 if (target_to_host_timespec(ts + 1,
                              arg3 + sizeof(struct target_timespec))) {
                          return -TARGET_EFAULT;
                      }
10934                 tsp = ts;
10935             }
10936             if (!arg2) {
10937                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10938             } else {
10939                 if (!(p = lock_user_string(arg2))) {
10940                     return -TARGET_EFAULT;
10941                 }
10942                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10943                 unlock_user(p, arg2, 0);
10944             }
10945         }
10946         return ret;
10947 #endif
10948     case TARGET_NR_futex:
10949         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10950 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10951     case TARGET_NR_inotify_init:
10952         ret = get_errno(sys_inotify_init());
10953         if (ret >= 0) {
10954             fd_trans_register(ret, &target_inotify_trans);
10955         }
10956         return ret;
10957 #endif
10958 #ifdef CONFIG_INOTIFY1
10959 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10960     case TARGET_NR_inotify_init1:
10961         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10962                                           fcntl_flags_tbl)));
10963         if (ret >= 0) {
10964             fd_trans_register(ret, &target_inotify_trans);
10965         }
10966         return ret;
10967 #endif
10968 #endif
10969 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10970     case TARGET_NR_inotify_add_watch:
10971         p = lock_user_string(arg2);
10972         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10973         unlock_user(p, arg2, 0);
10974         return ret;
10975 #endif
10976 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10977     case TARGET_NR_inotify_rm_watch:
10978         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10979 #endif
10980 
10981 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10982     case TARGET_NR_mq_open:
10983         {
10984             struct mq_attr posix_mq_attr;
10985             struct mq_attr *pposix_mq_attr;
10986             int host_flags;
10987 
10988             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10989             pposix_mq_attr = NULL;
10990             if (arg4) {
10991                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10992                     return -TARGET_EFAULT;
10993                 }
10994                 pposix_mq_attr = &posix_mq_attr;
10995             }
10996             p = lock_user_string(arg1 - 1);
10997             if (!p) {
10998                 return -TARGET_EFAULT;
10999             }
11000             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11001             unlock_user(p, arg1, 0);
11002         }
11003         return ret;
11004 
11005     case TARGET_NR_mq_unlink:
11006         p = lock_user_string(arg1 - 1);
11007         if (!p) {
11008             return -TARGET_EFAULT;
11009         }
11010         ret = get_errno(mq_unlink(p));
11011         unlock_user(p, arg1, 0);
11012         return ret;
11013 
11014     case TARGET_NR_mq_timedsend:
11015         {
11016             struct timespec ts;
11017 
11018             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11019             if (arg5 != 0) {
11020                 target_to_host_timespec(&ts, arg5);
11021                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11022                 host_to_target_timespec(arg5, &ts);
11023             } else {
11024                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11025             }
11026             unlock_user(p, arg2, arg3);
11027         }
11028         return ret;
11029 
11030     case TARGET_NR_mq_timedreceive:
11031         {
11032             struct timespec ts;
11033             unsigned int prio;
11034 
11035             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11036             if (arg5 != 0) {
11037                 target_to_host_timespec(&ts, arg5);
11038                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11039                                                      &prio, &ts));
11040                 host_to_target_timespec(arg5, &ts);
11041             } else {
11042                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11043                                                      &prio, NULL));
11044             }
11045             unlock_user(p, arg2, arg3);
11046             if (arg4 != 0) {
11047                 put_user_u32(prio, arg4);
                  }
11048         }
11049         return ret;
11050 
11051     /* Not implemented for now... */
11052 /*     case TARGET_NR_mq_notify: */
11053 /*         break; */
11054 
11055     case TARGET_NR_mq_getsetattr:
11056         {
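                  /* arg2 (new attributes) and arg3 (old attributes) are both
                   * optional; mq_setattr already reports the previous
                   * attributes, so mq_getattr is only needed when no new
                   * attributes were supplied. */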
11057             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11058             ret = 0;
11059             if (arg2 != 0) {
11060                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11061                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11062                                            &posix_mq_attr_out));
11063             } else if (arg3 != 0) {
11064                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11065             }
11066             if (ret == 0 && arg3 != 0) {
11067                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11068             }
11069         }
11070         return ret;
11071 #endif
11072 
11073 #ifdef CONFIG_SPLICE
11074 #ifdef TARGET_NR_tee
11075     case TARGET_NR_tee:
11076         {
11077             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11078         }
11079         return ret;
11080 #endif
11081 #ifdef TARGET_NR_splice
11082     case TARGET_NR_splice:
11083         {
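                  /* splice() updates the in/out offsets in place, so copy any
                   * guest offsets in before the call and back out after. */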
11084             loff_t loff_in, loff_out;
11085             loff_t *ploff_in = NULL, *ploff_out = NULL;
11086             if (arg2) {
11087                 if (get_user_u64(loff_in, arg2)) {
11088                     return -TARGET_EFAULT;
11089                 }
11090                 ploff_in = &loff_in;
11091             }
11092             if (arg4) {
11093                 if (get_user_u64(loff_out, arg4)) {
11094                     return -TARGET_EFAULT;
11095                 }
11096                 ploff_out = &loff_out;
11097             }
11098             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11099             if (arg2) {
11100                 if (put_user_u64(loff_in, arg2)) {
11101                     return -TARGET_EFAULT;
11102                 }
11103             }
11104             if (arg4) {
11105                 if (put_user_u64(loff_out, arg4)) {
11106                     return -TARGET_EFAULT;
11107                 }
11108             }
11109         }
11110         return ret;
11111 #endif
11112 #ifdef TARGET_NR_vmsplice
11113     case TARGET_NR_vmsplice:
11114         {
11115             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11116             if (vec != NULL) {
11117                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11118                 unlock_iovec(vec, arg2, arg3, 0);
11119             } else {
11120                 ret = -host_to_target_errno(errno);
11121             }
11122         }
11123         return ret;
11124 #endif
11125 #endif /* CONFIG_SPLICE */
11126 #ifdef CONFIG_EVENTFD
11127 #if defined(TARGET_NR_eventfd)
11128     case TARGET_NR_eventfd:
11129         ret = get_errno(eventfd(arg1, 0));
11130         if (ret >= 0) {
11131             fd_trans_register(ret, &target_eventfd_trans);
11132         }
11133         return ret;
11134 #endif
11135 #if defined(TARGET_NR_eventfd2)
11136     case TARGET_NR_eventfd2:
11137     {
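              /* Only O_NONBLOCK and O_CLOEXEC need translating here, since
               * their guest values may differ from the host's (EFD_NONBLOCK
               * and EFD_CLOEXEC share the O_* values); all other bits are
               * passed through unchanged. */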
11138         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11139         if (arg2 & TARGET_O_NONBLOCK) {
11140             host_flags |= O_NONBLOCK;
11141         }
11142         if (arg2 & TARGET_O_CLOEXEC) {
11143             host_flags |= O_CLOEXEC;
11144         }
11145         ret = get_errno(eventfd(arg1, host_flags));
11146         if (ret >= 0) {
11147             fd_trans_register(ret, &target_eventfd_trans);
11148         }
11149         return ret;
11150     }
11151 #endif
11152 #endif /* CONFIG_EVENTFD  */
11153 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11154     case TARGET_NR_fallocate:
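              /* On 32-bit ABIs the 64-bit offset and length each arrive as a
               * pair of 32-bit registers and must be reassembled. */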
11155 #if TARGET_ABI_BITS == 32
11156         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11157                                   target_offset64(arg5, arg6)));
11158 #else
11159         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11160 #endif
11161         return ret;
11162 #endif
11163 #if defined(CONFIG_SYNC_FILE_RANGE)
11164 #if defined(TARGET_NR_sync_file_range)
11165     case TARGET_NR_sync_file_range:
11166 #if TARGET_ABI_BITS == 32
11167 #if defined(TARGET_MIPS)
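              /* o32 MIPS aligns 64-bit arguments to even register pairs, so
               * arg2 is padding and offset/len start at arg3. */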
11168         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11169                                         target_offset64(arg5, arg6), arg7));
11170 #else
11171         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11172                                         target_offset64(arg4, arg5), arg6));
11173 #endif /* !TARGET_MIPS */
11174 #else
11175         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11176 #endif
11177         return ret;
11178 #endif
11179 #if defined(TARGET_NR_sync_file_range2)
11180     case TARGET_NR_sync_file_range2:
11181         /* This is like sync_file_range but the arguments are reordered */
11182 #if TARGET_ABI_BITS == 32
11183         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11184                                         target_offset64(arg5, arg6), arg2));
11185 #else
11186         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11187 #endif
11188         return ret;
11189 #endif
11190 #endif
11191 #if defined(TARGET_NR_signalfd4)
11192     case TARGET_NR_signalfd4:
11193         return do_signalfd4(arg1, arg2, arg4);
11194 #endif
11195 #if defined(TARGET_NR_signalfd)
11196     case TARGET_NR_signalfd:
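              /* The legacy signalfd call has no flags argument; reuse the
               * signalfd4 path with flags forced to zero. */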
11197         return do_signalfd4(arg1, arg2, 0);
11198 #endif
11199 #if defined(CONFIG_EPOLL)
11200 #if defined(TARGET_NR_epoll_create)
11201     case TARGET_NR_epoll_create:
11202         return get_errno(epoll_create(arg1));
11203 #endif
11204 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11205     case TARGET_NR_epoll_create1:
11206         return get_errno(epoll_create1(arg1));
11207 #endif
11208 #if defined(TARGET_NR_epoll_ctl)
11209     case TARGET_NR_epoll_ctl:
11210     {
11211         struct epoll_event ep;
11212         struct epoll_event *epp = 0;
11213         if (arg4) {
11214             struct target_epoll_event *target_ep;
11215             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11216                 return -TARGET_EFAULT;
11217             }
11218             ep.events = tswap32(target_ep->events);
11219             /* The epoll_data_t union is just opaque data to the kernel,
11220              * so we transfer all 64 bits across and need not worry what
11221              * actual data type it is.
11222              */
11223             ep.data.u64 = tswap64(target_ep->data.u64);
11224             unlock_user_struct(target_ep, arg4, 0);
11225             epp = &ep;
11226         }
11227         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11228     }
11229 #endif
11230 
11231 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11232 #if defined(TARGET_NR_epoll_wait)
11233     case TARGET_NR_epoll_wait:
11234 #endif
11235 #if defined(TARGET_NR_epoll_pwait)
11236     case TARGET_NR_epoll_pwait:
11237 #endif
11238     {
11239         struct target_epoll_event *target_ep;
11240         struct epoll_event *ep;
11241         int epfd = arg1;
11242         int maxevents = arg3;
11243         int timeout = arg4;
11244 
11245         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11246             return -TARGET_EINVAL;
11247         }
11248 
11249         target_ep = lock_user(VERIFY_WRITE, arg2,
11250                               maxevents * sizeof(struct target_epoll_event), 1);
11251         if (!target_ep) {
11252             return -TARGET_EFAULT;
11253         }
11254 
11255         ep = g_try_new(struct epoll_event, maxevents);
11256         if (!ep) {
11257             unlock_user(target_ep, arg2, 0);
11258             return -TARGET_ENOMEM;
11259         }
11260 
11261         switch (num) {
11262 #if defined(TARGET_NR_epoll_pwait)
11263         case TARGET_NR_epoll_pwait:
11264         {
11265             target_sigset_t *target_set;
11266             sigset_t _set, *set = &_set;
11267 
11268             if (arg5) {
11269                 if (arg6 != sizeof(target_sigset_t)) {
11270                     ret = -TARGET_EINVAL;
11271                     break;
11272                 }
11273 
11274                 target_set = lock_user(VERIFY_READ, arg5,
11275                                        sizeof(target_sigset_t), 1);
11276                 if (!target_set) {
11277                     ret = -TARGET_EFAULT;
11278                     break;
11279                 }
11280                 target_to_host_sigset(set, target_set);
11281                 unlock_user(target_set, arg5, 0);
11282             } else {
11283                 set = NULL;
11284             }
11285 
11286             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11287                                              set, SIGSET_T_SIZE));
11288             break;
11289         }
11290 #endif
11291 #if defined(TARGET_NR_epoll_wait)
11292         case TARGET_NR_epoll_wait:
11293             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11294                                              NULL, 0));
11295             break;
11296 #endif
11297         default:
11298             ret = -TARGET_ENOSYS;
11299         }
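              /* On success, ret is the number of ready events; convert just
               * those entries back to the guest epoll_event layout. */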
11300         if (!is_error(ret)) {
11301             int i;
11302             for (i = 0; i < ret; i++) {
11303                 target_ep[i].events = tswap32(ep[i].events);
11304                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11305             }
11306             unlock_user(target_ep, arg2,
11307                         ret * sizeof(struct target_epoll_event));
11308         } else {
11309             unlock_user(target_ep, arg2, 0);
11310         }
11311         g_free(ep);
11312         return ret;
11313     }
11314 #endif
11315 #endif
11316 #ifdef TARGET_NR_prlimit64
11317     case TARGET_NR_prlimit64:
11318     {
11319         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11320         struct target_rlimit64 *target_rnew, *target_rold;
11321         struct host_rlimit64 rnew, rold, *rnewp = 0;
11322         int resource = target_to_host_resource(arg2);
11323         if (arg3) {
11324             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11325                 return -TARGET_EFAULT;
11326             }
11327             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11328             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11329             unlock_user_struct(target_rnew, arg3, 0);
11330             rnewp = &rnew;
11331         }
11332 
11333         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11334         if (!is_error(ret) && arg4) {
11335             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11336                 return -TARGET_EFAULT;
11337             }
11338             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11339             target_rold->rlim_max = tswap64(rold.rlim_max);
11340             unlock_user_struct(target_rold, arg4, 1);
11341         }
11342         return ret;
11343     }
11344 #endif
11345 #ifdef TARGET_NR_gethostname
11346     case TARGET_NR_gethostname:
11347     {
11348         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11349         if (name) {
11350             ret = get_errno(gethostname(name, arg2));
11351             unlock_user(name, arg1, arg2);
11352         } else {
11353             ret = -TARGET_EFAULT;
11354         }
11355         return ret;
11356     }
11357 #endif
11358 #ifdef TARGET_NR_atomic_cmpxchg_32
11359     case TARGET_NR_atomic_cmpxchg_32:
11360     {
11361         /* should use start_exclusive from main.c */
11362         abi_ulong mem_value;
11363         if (get_user_u32(mem_value, arg6)) {
11364             target_siginfo_t info;
11365             info.si_signo = SIGSEGV;
11366             info.si_errno = 0;
11367             info.si_code = TARGET_SEGV_MAPERR;
11368             info._sifields._sigfault._addr = arg6;
11369             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11370                          QEMU_SI_FAULT, &info);
11371             ret = 0xdeadbeef;
11372 
11373         }
11374         if (mem_value == arg2) {
11375             put_user_u32(arg1, arg6);
              }
11376         return mem_value;
11377     }
11378 #endif
11379 #ifdef TARGET_NR_atomic_barrier
11380     case TARGET_NR_atomic_barrier:
11381         /* Like the kernel implementation and the QEMU ARM barrier,
11382            this can safely be a no-op. */
11383         return 0;
11384 #endif
11385 
11386 #ifdef TARGET_NR_timer_create
11387     case TARGET_NR_timer_create:
11388     {
11389         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11390 
11391         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11392 
11393         int clkid = arg1;
11394         int timer_index = next_free_host_timer();
11395 
11396         if (timer_index < 0) {
11397             ret = -TARGET_EAGAIN;
11398         } else {
11399             timer_t *phtimer = g_posix_timers + timer_index;
11400 
11401             if (arg2) {
11402                 phost_sevp = &host_sevp;
11403                 ret = target_to_host_sigevent(phost_sevp, arg2);
11404                 if (ret != 0) {
11405                     return ret;
11406                 }
11407             }
11408 
11409             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11410             if (ret) {
11411                 phtimer = NULL;
11412             } else {
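                      /* The guest-visible timer id is TIMER_MAGIC | index
                       * into g_posix_timers rather than the raw host timer_t;
                       * get_timer_id() checks the magic when the id comes
                       * back in later timer_* calls. */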
11413                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11414                     return -TARGET_EFAULT;
11415                 }
11416             }
11417         }
11418         return ret;
11419     }
11420 #endif
11421 
11422 #ifdef TARGET_NR_timer_settime
11423     case TARGET_NR_timer_settime:
11424     {
11425         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11426          * struct itimerspec * old_value */
11427         target_timer_t timerid = get_timer_id(arg1);
11428 
11429         if (timerid < 0) {
11430             ret = timerid;
11431         } else if (arg3 == 0) {
11432             ret = -TARGET_EINVAL;
11433         } else {
11434             timer_t htimer = g_posix_timers[timerid];
11435             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11436 
11437             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11438                 return -TARGET_EFAULT;
11439             }
11440             ret = get_errno(
11441                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11442             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11443                 return -TARGET_EFAULT;
11444             }
11445         }
11446         return ret;
11447     }
11448 #endif
11449 
11450 #ifdef TARGET_NR_timer_gettime
11451     case TARGET_NR_timer_gettime:
11452     {
11453         /* args: timer_t timerid, struct itimerspec *curr_value */
11454         target_timer_t timerid = get_timer_id(arg1);
11455 
11456         if (timerid < 0) {
11457             ret = timerid;
11458         } else if (!arg2) {
11459             ret = -TARGET_EFAULT;
11460         } else {
11461             timer_t htimer = g_posix_timers[timerid];
11462             struct itimerspec hspec;
11463             ret = get_errno(timer_gettime(htimer, &hspec));
11464 
11465             if (host_to_target_itimerspec(arg2, &hspec)) {
11466                 ret = -TARGET_EFAULT;
11467             }
11468         }
11469         return ret;
11470     }
11471 #endif
11472 
11473 #ifdef TARGET_NR_timer_getoverrun
11474     case TARGET_NR_timer_getoverrun:
11475     {
11476         /* args: timer_t timerid */
11477         target_timer_t timerid = get_timer_id(arg1);
11478 
11479         if (timerid < 0) {
11480             ret = timerid;
11481         } else {
11482             timer_t htimer = g_posix_timers[timerid];
11483             ret = get_errno(timer_getoverrun(htimer));
11484         }
11485         fd_trans_unregister(ret);
11486         return ret;
11487     }
11488 #endif
11489 
11490 #ifdef TARGET_NR_timer_delete
11491     case TARGET_NR_timer_delete:
11492     {
11493         /* args: timer_t timerid */
11494         target_timer_t timerid = get_timer_id(arg1);
11495 
11496         if (timerid < 0) {
11497             ret = timerid;
11498         } else {
11499             timer_t htimer = g_posix_timers[timerid];
11500             ret = get_errno(timer_delete(htimer));
11501             g_posix_timers[timerid] = 0;
11502         }
11503         return ret;
11504     }
11505 #endif
11506 
11507 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11508     case TARGET_NR_timerfd_create:
11509         return get_errno(timerfd_create(arg1,
11510                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11511 #endif
11512 
11513 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11514     case TARGET_NR_timerfd_gettime:
11515         {
11516             struct itimerspec its_curr;
11517 
11518             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11519 
11520             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11521                 return -TARGET_EFAULT;
11522             }
11523         }
11524         return ret;
11525 #endif
11526 
11527 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11528     case TARGET_NR_timerfd_settime:
11529         {
11530             struct itimerspec its_new, its_old, *p_new;
11531 
11532             if (arg3) {
11533                 if (target_to_host_itimerspec(&its_new, arg3)) {
11534                     return -TARGET_EFAULT;
11535                 }
11536                 p_new = &its_new;
11537             } else {
11538                 p_new = NULL;
11539             }
11540 
11541             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11542 
11543             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11544                 return -TARGET_EFAULT;
11545             }
11546         }
11547         return ret;
11548 #endif
11549 
11550 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11551     case TARGET_NR_ioprio_get:
11552         return get_errno(ioprio_get(arg1, arg2));
11553 #endif
11554 
11555 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11556     case TARGET_NR_ioprio_set:
11557         return get_errno(ioprio_set(arg1, arg2, arg3));
11558 #endif
11559 
11560 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11561     case TARGET_NR_setns:
11562         return get_errno(setns(arg1, arg2));
11563 #endif
11564 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11565     case TARGET_NR_unshare:
11566         return get_errno(unshare(arg1));
11567 #endif
11568 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11569     case TARGET_NR_kcmp:
11570         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11571 #endif
11572 #ifdef TARGET_NR_swapcontext
11573     case TARGET_NR_swapcontext:
11574         /* PowerPC specific.  */
11575         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11576 #endif
11577 
11578     default:
11579         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11580         return -TARGET_ENOSYS;
11581     }
11582     return ret;
11583 }
11584 
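      /* Top-level syscall dispatcher, called from the per-architecture cpu
       * main loops: emit trace events, optionally strace-log the call, and
       * hand off to do_syscall1() above, which does the real work. */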
11585 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11586                     abi_long arg2, abi_long arg3, abi_long arg4,
11587                     abi_long arg5, abi_long arg6, abi_long arg7,
11588                     abi_long arg8)
11589 {
11590     CPUState *cpu = ENV_GET_CPU(cpu_env);
11591     abi_long ret;
11592 
11593 #ifdef DEBUG_ERESTARTSYS
11594     /* Debug-only code for exercising the syscall-restart code paths
11595      * in the per-architecture cpu main loops: restart every syscall
11596      * the guest makes once before letting it through.
11597      */
11598     {
11599         static bool flag;
11600         flag = !flag;
11601         if (flag) {
11602             return -TARGET_ERESTARTSYS;
11603         }
11604     }
11605 #endif
11606 
11607     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11608                              arg5, arg6, arg7, arg8);
11609 
11610     if (unlikely(do_strace)) {
11611         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11612         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11613                           arg5, arg6, arg7, arg8);
11614         print_syscall_ret(num, ret);
11615     } else {
11616         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11617                           arg5, arg6, arg7, arg8);
11618     }
11619 
11620     trace_guest_user_syscall_ret(cpu, num, ret);
11621     return ret;
11622 }
11623