xref: /openbmc/qemu/linux-user/syscall.c (revision 09a274d8)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we emulate getdents with getdents if the host has it.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
362 
363 static int sys_getcwd1(char *buf, size_t size)
364 {
365   if (getcwd(buf, size) == NULL) {
366       /* getcwd() sets errno */
367       return (-1);
368   }
369   return strlen(buf)+1;
370 }
371 
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376           const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379                          const struct timespec times[2], int flags)
380 {
381     errno = ENOSYS;
382     return -1;
383 }
384 #endif
385 #endif /* TARGET_NR_utimensat */
386 
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391           const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394                          int newfd, const char *new, int flags)
395 {
396     if (flags == 0) {
397         return renameat(oldfd, old, newfd, new);
398     }
399     errno = ENOSYS;
400     return -1;
401 }
402 #endif
403 #endif /* TARGET_NR_renameat2 */
404 
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
407 
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
410 {
411   return (inotify_init());
412 }
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
416 {
417   return (inotify_add_watch(fd, pathname, mask));
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
422 {
423   return (inotify_rm_watch(fd, wd));
424 }
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
429 {
430   return (inotify_init1(flags));
431 }
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY  */
441 
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
449     uint64_t rlim_cur;
450     uint64_t rlim_max;
451 };
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453           const struct host_rlimit64 *, new_limit,
454           struct host_rlimit64 *, old_limit)
455 #endif
456 
457 
458 #if defined(TARGET_NR_timer_create)
459 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
461 
462 static inline int next_free_host_timer(void)
463 {
464     int k ;
465     /* FIXME: Does finding the next free slot require a lock? */
466     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467         if (g_posix_timers[k] == 0) {
468             g_posix_timers[k] = (timer_t) 1;
469             return k;
470         }
471     }
472     return -1;
473 }
474 #endif
475 
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
479 {
480     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
481 }
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
486  * of registers which translates to the same as ARM/MIPS, because we start with
487  * r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
492 {
493     switch (num) {
494     case TARGET_NR_pread64:
495     case TARGET_NR_pwrite64:
496         return 1;
497 
498     default:
499         return 0;
500     }
501 }
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
507 
508 #define ERRNO_TABLE_SIZE 1200
509 
510 /* target_to_host_errno_table[] is initialized from
511  * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
513 };
514 
515 /*
516  * This list is the union of errno values overridden in asm-<arch>/errno.h
517  * minus the errnos that are not actually generic to all archs.
518  */
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520     [EAGAIN]		= TARGET_EAGAIN,
521     [EIDRM]		= TARGET_EIDRM,
522     [ECHRNG]		= TARGET_ECHRNG,
523     [EL2NSYNC]		= TARGET_EL2NSYNC,
524     [EL3HLT]		= TARGET_EL3HLT,
525     [EL3RST]		= TARGET_EL3RST,
526     [ELNRNG]		= TARGET_ELNRNG,
527     [EUNATCH]		= TARGET_EUNATCH,
528     [ENOCSI]		= TARGET_ENOCSI,
529     [EL2HLT]		= TARGET_EL2HLT,
530     [EDEADLK]		= TARGET_EDEADLK,
531     [ENOLCK]		= TARGET_ENOLCK,
532     [EBADE]		= TARGET_EBADE,
533     [EBADR]		= TARGET_EBADR,
534     [EXFULL]		= TARGET_EXFULL,
535     [ENOANO]		= TARGET_ENOANO,
536     [EBADRQC]		= TARGET_EBADRQC,
537     [EBADSLT]		= TARGET_EBADSLT,
538     [EBFONT]		= TARGET_EBFONT,
539     [ENOSTR]		= TARGET_ENOSTR,
540     [ENODATA]		= TARGET_ENODATA,
541     [ETIME]		= TARGET_ETIME,
542     [ENOSR]		= TARGET_ENOSR,
543     [ENONET]		= TARGET_ENONET,
544     [ENOPKG]		= TARGET_ENOPKG,
545     [EREMOTE]		= TARGET_EREMOTE,
546     [ENOLINK]		= TARGET_ENOLINK,
547     [EADV]		= TARGET_EADV,
548     [ESRMNT]		= TARGET_ESRMNT,
549     [ECOMM]		= TARGET_ECOMM,
550     [EPROTO]		= TARGET_EPROTO,
551     [EDOTDOT]		= TARGET_EDOTDOT,
552     [EMULTIHOP]		= TARGET_EMULTIHOP,
553     [EBADMSG]		= TARGET_EBADMSG,
554     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
555     [EOVERFLOW]		= TARGET_EOVERFLOW,
556     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
557     [EBADFD]		= TARGET_EBADFD,
558     [EREMCHG]		= TARGET_EREMCHG,
559     [ELIBACC]		= TARGET_ELIBACC,
560     [ELIBBAD]		= TARGET_ELIBBAD,
561     [ELIBSCN]		= TARGET_ELIBSCN,
562     [ELIBMAX]		= TARGET_ELIBMAX,
563     [ELIBEXEC]		= TARGET_ELIBEXEC,
564     [EILSEQ]		= TARGET_EILSEQ,
565     [ENOSYS]		= TARGET_ENOSYS,
566     [ELOOP]		= TARGET_ELOOP,
567     [ERESTART]		= TARGET_ERESTART,
568     [ESTRPIPE]		= TARGET_ESTRPIPE,
569     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
570     [EUSERS]		= TARGET_EUSERS,
571     [ENOTSOCK]		= TARGET_ENOTSOCK,
572     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
573     [EMSGSIZE]		= TARGET_EMSGSIZE,
574     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
575     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
576     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
577     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
578     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
579     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
580     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
581     [EADDRINUSE]	= TARGET_EADDRINUSE,
582     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
583     [ENETDOWN]		= TARGET_ENETDOWN,
584     [ENETUNREACH]	= TARGET_ENETUNREACH,
585     [ENETRESET]		= TARGET_ENETRESET,
586     [ECONNABORTED]	= TARGET_ECONNABORTED,
587     [ECONNRESET]	= TARGET_ECONNRESET,
588     [ENOBUFS]		= TARGET_ENOBUFS,
589     [EISCONN]		= TARGET_EISCONN,
590     [ENOTCONN]		= TARGET_ENOTCONN,
591     [EUCLEAN]		= TARGET_EUCLEAN,
592     [ENOTNAM]		= TARGET_ENOTNAM,
593     [ENAVAIL]		= TARGET_ENAVAIL,
594     [EISNAM]		= TARGET_EISNAM,
595     [EREMOTEIO]		= TARGET_EREMOTEIO,
596     [EDQUOT]            = TARGET_EDQUOT,
597     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
598     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
599     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
600     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
601     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
602     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
603     [EALREADY]		= TARGET_EALREADY,
604     [EINPROGRESS]	= TARGET_EINPROGRESS,
605     [ESTALE]		= TARGET_ESTALE,
606     [ECANCELED]		= TARGET_ECANCELED,
607     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
608     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610     [ENOKEY]		= TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628     [ENOMSG]            = TARGET_ENOMSG,
629 #endif
630 #ifdef ERKFILL
631     [ERFKILL]           = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634     [EHWPOISON]         = TARGET_EHWPOISON,
635 #endif
636 };
637 
638 static inline int host_to_target_errno(int err)
639 {
640     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641         host_to_target_errno_table[err]) {
642         return host_to_target_errno_table[err];
643     }
644     return err;
645 }
646 
647 static inline int target_to_host_errno(int err)
648 {
649     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650         target_to_host_errno_table[err]) {
651         return target_to_host_errno_table[err];
652     }
653     return err;
654 }
655 
656 static inline abi_long get_errno(abi_long ret)
657 {
658     if (ret == -1)
659         return -host_to_target_errno(errno);
660     else
661         return ret;
662 }
663 
664 const char *target_strerror(int err)
665 {
666     if (err == TARGET_ERESTARTSYS) {
667         return "To be restarted";
668     }
669     if (err == TARGET_QEMU_ESIGRETURN) {
670         return "Successful exit from sigreturn";
671     }
672 
673     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674         return NULL;
675     }
676     return strerror(target_to_host_errno(err));
677 }
678 
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
681 { \
682     return safe_syscall(__NR_##name); \
683 }
684 
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
687 { \
688     return safe_syscall(__NR_##name, arg1); \
689 }
690 
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2); \
695 }
696 
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
701 }
702 
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
708 }
709 
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
716 }
717 
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719     type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721     type5 arg5, type6 arg6) \
722 { \
723     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
724 }
725 
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729               int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731               struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733               int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744               const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752               unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754               unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756               socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765               const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767               int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769               struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772               const struct timespec *, req, struct timespec *, rem)
773 #endif
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776               int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778               long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780               unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783  * wrappers for the sub-operations to hide this implementation detail.
784  * Annoyingly we can't include linux/ipc.h to get the constant definitions
785  * for the call parameter because some structs in there conflict with the
786  * sys/ipc.h ones. So we just define them here, and rely on them being
787  * the same for all host architectures.
788  */
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
793 
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795               void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
797 {
798     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
799 }
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
801 {
802     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
803 }
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805                            const struct timespec *timeout)
806 {
807     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808                     (long)timeout);
809 }
810 #endif
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813               size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818  * "third argument might be integer or pointer or not present" behaviour of
819  * the libc function.
820  */
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824  *  use the flock64 struct rather than unsuffixed flock
825  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
826  */
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
832 
833 static inline int host_to_target_sock_type(int host_type)
834 {
835     int target_type;
836 
837     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838     case SOCK_DGRAM:
839         target_type = TARGET_SOCK_DGRAM;
840         break;
841     case SOCK_STREAM:
842         target_type = TARGET_SOCK_STREAM;
843         break;
844     default:
845         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846         break;
847     }
848 
849 #if defined(SOCK_CLOEXEC)
850     if (host_type & SOCK_CLOEXEC) {
851         target_type |= TARGET_SOCK_CLOEXEC;
852     }
853 #endif
854 
855 #if defined(SOCK_NONBLOCK)
856     if (host_type & SOCK_NONBLOCK) {
857         target_type |= TARGET_SOCK_NONBLOCK;
858     }
859 #endif
860 
861     return target_type;
862 }
863 
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
867 
868 void target_set_brk(abi_ulong new_brk)
869 {
870     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871     brk_page = HOST_PAGE_ALIGN(target_brk);
872 }
873 
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
876 
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
879 {
880     abi_long mapped_addr;
881     abi_ulong new_alloc_size;
882 
883     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
884 
885     if (!new_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887         return target_brk;
888     }
889     if (new_brk < target_original_brk) {
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891                    target_brk);
892         return target_brk;
893     }
894 
895     /* If the new brk is less than the highest page reserved to the
896      * target heap allocation, set it and we're almost done...  */
897     if (new_brk <= brk_page) {
898         /* Heap contents are initialized to zero, as for anonymous
899          * mapped pages.  */
900         if (new_brk > target_brk) {
901             memset(g2h(target_brk), 0, new_brk - target_brk);
902         }
903 	target_brk = new_brk;
904         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905 	return target_brk;
906     }
907 
908     /* We need to allocate more memory after the brk... Note that
909      * we don't use MAP_FIXED because that will map over the top of
910      * any existing mapping (like the one with the host libc or qemu
911      * itself); instead we treat "mapped but at wrong address" as
912      * a failure and unmap again.
913      */
914     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916                                         PROT_READ|PROT_WRITE,
917                                         MAP_ANON|MAP_PRIVATE, 0, 0));
918 
919     if (mapped_addr == brk_page) {
920         /* Heap contents are initialized to zero, as for anonymous
921          * mapped pages.  Technically the new pages are already
922          * initialized to zero since they *are* anonymous mapped
923          * pages, however we have to take care with the contents that
924          * come from the remaining part of the previous page: it may
925          * contains garbage data due to a previous heap usage (grown
926          * then shrunken).  */
927         memset(g2h(target_brk), 0, brk_page - target_brk);
928 
929         target_brk = new_brk;
930         brk_page = HOST_PAGE_ALIGN(target_brk);
931         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932             target_brk);
933         return target_brk;
934     } else if (mapped_addr != -1) {
935         /* Mapped but at wrong address, meaning there wasn't actually
936          * enough space for this brk.
937          */
938         target_munmap(mapped_addr, new_alloc_size);
939         mapped_addr = -1;
940         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
941     }
942     else {
943         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
944     }
945 
946 #if defined(TARGET_ALPHA)
947     /* We (partially) emulate OSF/1 on Alpha, which requires we
948        return a proper errno, not an unchanged brk value.  */
949     return -TARGET_ENOMEM;
950 #endif
951     /* For everything else, return the previous break. */
952     return target_brk;
953 }
954 
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956                                             abi_ulong target_fds_addr,
957                                             int n)
958 {
959     int i, nw, j, k;
960     abi_ulong b, *target_fds;
961 
962     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963     if (!(target_fds = lock_user(VERIFY_READ,
964                                  target_fds_addr,
965                                  sizeof(abi_ulong) * nw,
966                                  1)))
967         return -TARGET_EFAULT;
968 
969     FD_ZERO(fds);
970     k = 0;
971     for (i = 0; i < nw; i++) {
972         /* grab the abi_ulong */
973         __get_user(b, &target_fds[i]);
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             /* check the bit inside the abi_ulong */
976             if ((b >> j) & 1)
977                 FD_SET(k, fds);
978             k++;
979         }
980     }
981 
982     unlock_user(target_fds, target_fds_addr, 0);
983 
984     return 0;
985 }
986 
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988                                                  abi_ulong target_fds_addr,
989                                                  int n)
990 {
991     if (target_fds_addr) {
992         if (copy_from_user_fdset(fds, target_fds_addr, n))
993             return -TARGET_EFAULT;
994         *fds_ptr = fds;
995     } else {
996         *fds_ptr = NULL;
997     }
998     return 0;
999 }
1000 
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002                                           const fd_set *fds,
1003                                           int n)
1004 {
1005     int i, nw, j, k;
1006     abi_long v;
1007     abi_ulong *target_fds;
1008 
1009     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010     if (!(target_fds = lock_user(VERIFY_WRITE,
1011                                  target_fds_addr,
1012                                  sizeof(abi_ulong) * nw,
1013                                  0)))
1014         return -TARGET_EFAULT;
1015 
1016     k = 0;
1017     for (i = 0; i < nw; i++) {
1018         v = 0;
1019         for (j = 0; j < TARGET_ABI_BITS; j++) {
1020             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021             k++;
1022         }
1023         __put_user(v, &target_fds[i]);
1024     }
1025 
1026     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1027 
1028     return 0;
1029 }
1030 
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1036 
1037 static inline abi_long host_to_target_clock_t(long ticks)
1038 {
1039 #if HOST_HZ == TARGET_HZ
1040     return ticks;
1041 #else
1042     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1044 }
1045 
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047                                              const struct rusage *rusage)
1048 {
1049     struct target_rusage *target_rusage;
1050 
1051     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052         return -TARGET_EFAULT;
1053     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071     unlock_user_struct(target_rusage, target_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1093 {
1094     abi_ulong target_rlim_swap;
1095     abi_ulong result;
1096 
1097     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098         target_rlim_swap = TARGET_RLIM_INFINITY;
1099     else
1100         target_rlim_swap = rlim;
1101     result = tswapal(target_rlim_swap);
1102 
1103     return result;
1104 }
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150         return -TARGET_EFAULT;
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161                                             const struct timeval *tv)
1162 {
1163     struct target_timeval *target_tv;
1164 
1165     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166         return -TARGET_EFAULT;
1167 
1168     __put_user(tv->tv_sec, &target_tv->tv_sec);
1169     __put_user(tv->tv_usec, &target_tv->tv_usec);
1170 
1171     unlock_user_struct(target_tv, target_tv_addr, 1);
1172 
1173     return 0;
1174 }
1175 
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177                                                abi_ulong target_tz_addr)
1178 {
1179     struct target_timezone *target_tz;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184 
1185     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1187 
1188     unlock_user_struct(target_tz, target_tz_addr, 0);
1189 
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1195 
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197                                               abi_ulong target_mq_attr_addr)
1198 {
1199     struct target_mq_attr *target_mq_attr;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202                           target_mq_attr_addr, 1))
1203         return -TARGET_EFAULT;
1204 
1205     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1209 
1210     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1211 
1212     return 0;
1213 }
1214 
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216                                             const struct mq_attr *attr)
1217 {
1218     struct target_mq_attr *target_mq_attr;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221                           target_mq_attr_addr, 0))
1222         return -TARGET_EFAULT;
1223 
1224     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1228 
1229     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1239                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1240 {
1241     fd_set rfds, wfds, efds;
1242     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243     struct timeval tv;
1244     struct timespec ts, *ts_ptr;
1245     abi_long ret;
1246 
1247     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252     if (ret) {
1253         return ret;
1254     }
1255     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256     if (ret) {
1257         return ret;
1258     }
1259 
1260     if (target_tv_addr) {
1261         if (copy_from_user_timeval(&tv, target_tv_addr))
1262             return -TARGET_EFAULT;
1263         ts.tv_sec = tv.tv_sec;
1264         ts.tv_nsec = tv.tv_usec * 1000;
1265         ts_ptr = &ts;
1266     } else {
1267         ts_ptr = NULL;
1268     }
1269 
1270     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271                                   ts_ptr, NULL));
1272 
1273     if (!is_error(ret)) {
1274         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275             return -TARGET_EFAULT;
1276         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277             return -TARGET_EFAULT;
1278         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279             return -TARGET_EFAULT;
1280 
1281         if (target_tv_addr) {
1282             tv.tv_sec = ts.tv_sec;
1283             tv.tv_usec = ts.tv_nsec / 1000;
1284             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285                 return -TARGET_EFAULT;
1286             }
1287         }
1288     }
1289 
1290     return ret;
1291 }
1292 
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1295 {
1296     struct target_sel_arg_struct *sel;
1297     abi_ulong inp, outp, exp, tvp;
1298     long nsel;
1299 
1300     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301         return -TARGET_EFAULT;
1302     }
1303 
1304     nsel = tswapal(sel->n);
1305     inp = tswapal(sel->inp);
1306     outp = tswapal(sel->outp);
1307     exp = tswapal(sel->exp);
1308     tvp = tswapal(sel->tvp);
1309 
1310     unlock_user_struct(sel, arg1, 0);
1311 
1312     return do_select(nsel, inp, outp, exp, tvp);
1313 }
1314 #endif
1315 #endif
1316 
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1318 {
1319 #ifdef CONFIG_PIPE2
1320     return pipe2(host_pipe, flags);
1321 #else
1322     return -ENOSYS;
1323 #endif
1324 }
1325 
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327                         int flags, int is_pipe2)
1328 {
1329     int host_pipe[2];
1330     abi_long ret;
1331     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1332 
1333     if (is_error(ret))
1334         return get_errno(ret);
1335 
1336     /* Several targets have special calling conventions for the original
1337        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1338     if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341         return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344         return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347         return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350         return host_pipe[0];
1351 #endif
1352     }
1353 
1354     if (put_user_s32(host_pipe[0], pipedes)
1355         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356         return -TARGET_EFAULT;
1357     return get_errno(ret);
1358 }
1359 
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361                                               abi_ulong target_addr,
1362                                               socklen_t len)
1363 {
1364     struct target_ip_mreqn *target_smreqn;
1365 
1366     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367     if (!target_smreqn)
1368         return -TARGET_EFAULT;
1369     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371     if (len == sizeof(struct target_ip_mreqn))
1372         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373     unlock_user(target_smreqn, target_addr, 0);
1374 
1375     return 0;
1376 }
1377 
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379                                                abi_ulong target_addr,
1380                                                socklen_t len)
1381 {
1382     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383     sa_family_t sa_family;
1384     struct target_sockaddr *target_saddr;
1385 
1386     if (fd_trans_target_to_host_addr(fd)) {
1387         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1388     }
1389 
1390     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391     if (!target_saddr)
1392         return -TARGET_EFAULT;
1393 
1394     sa_family = tswap16(target_saddr->sa_family);
1395 
1396     /* Oops. The caller might send a incomplete sun_path; sun_path
1397      * must be terminated by \0 (see the manual page), but
1398      * unfortunately it is quite common to specify sockaddr_un
1399      * length as "strlen(x->sun_path)" while it should be
1400      * "strlen(...) + 1". We'll fix that here if needed.
1401      * Linux kernel has a similar feature.
1402      */
1403 
1404     if (sa_family == AF_UNIX) {
1405         if (len < unix_maxlen && len > 0) {
1406             char *cp = (char*)target_saddr;
1407 
1408             if ( cp[len-1] && !cp[len] )
1409                 len++;
1410         }
1411         if (len > unix_maxlen)
1412             len = unix_maxlen;
1413     }
1414 
1415     memcpy(addr, target_saddr, len);
1416     addr->sa_family = sa_family;
1417     if (sa_family == AF_NETLINK) {
1418         struct sockaddr_nl *nladdr;
1419 
1420         nladdr = (struct sockaddr_nl *)addr;
1421         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423     } else if (sa_family == AF_PACKET) {
1424 	struct target_sockaddr_ll *lladdr;
1425 
1426 	lladdr = (struct target_sockaddr_ll *)addr;
1427 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1429     }
1430     unlock_user(target_saddr, target_addr, 0);
1431 
1432     return 0;
1433 }
1434 
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436                                                struct sockaddr *addr,
1437                                                socklen_t len)
1438 {
1439     struct target_sockaddr *target_saddr;
1440 
1441     if (len == 0) {
1442         return 0;
1443     }
1444     assert(addr);
1445 
1446     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447     if (!target_saddr)
1448         return -TARGET_EFAULT;
1449     memcpy(target_saddr, addr, len);
1450     if (len >= offsetof(struct target_sockaddr, sa_family) +
1451         sizeof(target_saddr->sa_family)) {
1452         target_saddr->sa_family = tswap16(addr->sa_family);
1453     }
1454     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458     } else if (addr->sa_family == AF_PACKET) {
1459         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462     } else if (addr->sa_family == AF_INET6 &&
1463                len >= sizeof(struct target_sockaddr_in6)) {
1464         struct target_sockaddr_in6 *target_in6 =
1465                (struct target_sockaddr_in6 *)target_saddr;
1466         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1467     }
1468     unlock_user(target_saddr, target_addr, len);
1469 
1470     return 0;
1471 }
1472 
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474                                            struct target_msghdr *target_msgh)
1475 {
1476     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477     abi_long msg_controllen;
1478     abi_ulong target_cmsg_addr;
1479     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480     socklen_t space = 0;
1481 
1482     msg_controllen = tswapal(target_msgh->msg_controllen);
1483     if (msg_controllen < sizeof (struct target_cmsghdr))
1484         goto the_end;
1485     target_cmsg_addr = tswapal(target_msgh->msg_control);
1486     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487     target_cmsg_start = target_cmsg;
1488     if (!target_cmsg)
1489         return -TARGET_EFAULT;
1490 
1491     while (cmsg && target_cmsg) {
1492         void *data = CMSG_DATA(cmsg);
1493         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1494 
1495         int len = tswapal(target_cmsg->cmsg_len)
1496             - sizeof(struct target_cmsghdr);
1497 
1498         space += CMSG_SPACE(len);
1499         if (space > msgh->msg_controllen) {
1500             space -= CMSG_SPACE(len);
1501             /* This is a QEMU bug, since we allocated the payload
1502              * area ourselves (unlike overflow in host-to-target
1503              * conversion, which is just the guest giving us a buffer
1504              * that's too small). It can't happen for the payload types
1505              * we currently support; if it becomes an issue in future
1506              * we would need to improve our allocation strategy to
1507              * something more intelligent than "twice the size of the
1508              * target buffer we're reading from".
1509              */
1510             gemu_log("Host cmsg overflow\n");
1511             break;
1512         }
1513 
1514         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515             cmsg->cmsg_level = SOL_SOCKET;
1516         } else {
1517             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1518         }
1519         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520         cmsg->cmsg_len = CMSG_LEN(len);
1521 
1522         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523             int *fd = (int *)data;
1524             int *target_fd = (int *)target_data;
1525             int i, numfds = len / sizeof(int);
1526 
1527             for (i = 0; i < numfds; i++) {
1528                 __get_user(fd[i], target_fd + i);
1529             }
1530         } else if (cmsg->cmsg_level == SOL_SOCKET
1531                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1532             struct ucred *cred = (struct ucred *)data;
1533             struct target_ucred *target_cred =
1534                 (struct target_ucred *)target_data;
1535 
1536             __get_user(cred->pid, &target_cred->pid);
1537             __get_user(cred->uid, &target_cred->uid);
1538             __get_user(cred->gid, &target_cred->gid);
1539         } else {
1540             gemu_log("Unsupported ancillary data: %d/%d\n",
1541                                         cmsg->cmsg_level, cmsg->cmsg_type);
1542             memcpy(data, target_data, len);
1543         }
1544 
1545         cmsg = CMSG_NXTHDR(msgh, cmsg);
1546         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547                                          target_cmsg_start);
1548     }
1549     unlock_user(target_cmsg, target_cmsg_addr, 0);
1550  the_end:
1551     msgh->msg_controllen = space;
1552     return 0;
1553 }
1554 
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556                                            struct msghdr *msgh)
1557 {
1558     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559     abi_long msg_controllen;
1560     abi_ulong target_cmsg_addr;
1561     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562     socklen_t space = 0;
1563 
1564     msg_controllen = tswapal(target_msgh->msg_controllen);
1565     if (msg_controllen < sizeof (struct target_cmsghdr))
1566         goto the_end;
1567     target_cmsg_addr = tswapal(target_msgh->msg_control);
1568     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569     target_cmsg_start = target_cmsg;
1570     if (!target_cmsg)
1571         return -TARGET_EFAULT;
1572 
1573     while (cmsg && target_cmsg) {
1574         void *data = CMSG_DATA(cmsg);
1575         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1576 
1577         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578         int tgt_len, tgt_space;
1579 
1580         /* We never copy a half-header but may copy half-data;
1581          * this is Linux's behaviour in put_cmsg(). Note that
1582          * truncation here is a guest problem (which we report
1583          * to the guest via the CTRUNC bit), unlike truncation
1584          * in target_to_host_cmsg, which is a QEMU bug.
1585          */
1586         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588             break;
1589         }
1590 
1591         if (cmsg->cmsg_level == SOL_SOCKET) {
1592             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593         } else {
1594             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1595         }
1596         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1597 
1598         /* Payload types which need a different size of payload on
1599          * the target must adjust tgt_len here.
1600          */
1601         tgt_len = len;
1602         switch (cmsg->cmsg_level) {
1603         case SOL_SOCKET:
1604             switch (cmsg->cmsg_type) {
1605             case SO_TIMESTAMP:
1606                 tgt_len = sizeof(struct target_timeval);
1607                 break;
1608             default:
1609                 break;
1610             }
1611             break;
1612         default:
1613             break;
1614         }
1615 
1616         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1619         }
1620 
1621         /* We must now copy-and-convert len bytes of payload
1622          * into tgt_len bytes of destination space. Bear in mind
1623          * that in both source and destination we may be dealing
1624          * with a truncated value!
1625          */
1626         switch (cmsg->cmsg_level) {
1627         case SOL_SOCKET:
1628             switch (cmsg->cmsg_type) {
1629             case SCM_RIGHTS:
1630             {
1631                 int *fd = (int *)data;
1632                 int *target_fd = (int *)target_data;
1633                 int i, numfds = tgt_len / sizeof(int);
1634 
1635                 for (i = 0; i < numfds; i++) {
1636                     __put_user(fd[i], target_fd + i);
1637                 }
1638                 break;
1639             }
1640             case SO_TIMESTAMP:
1641             {
1642                 struct timeval *tv = (struct timeval *)data;
1643                 struct target_timeval *target_tv =
1644                     (struct target_timeval *)target_data;
1645 
1646                 if (len != sizeof(struct timeval) ||
1647                     tgt_len != sizeof(struct target_timeval)) {
1648                     goto unimplemented;
1649                 }
1650 
1651                 /* copy struct timeval to target */
1652                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654                 break;
1655             }
1656             case SCM_CREDENTIALS:
1657             {
1658                 struct ucred *cred = (struct ucred *)data;
1659                 struct target_ucred *target_cred =
1660                     (struct target_ucred *)target_data;
1661 
1662                 __put_user(cred->pid, &target_cred->pid);
1663                 __put_user(cred->uid, &target_cred->uid);
1664                 __put_user(cred->gid, &target_cred->gid);
1665                 break;
1666             }
1667             default:
1668                 goto unimplemented;
1669             }
1670             break;
1671 
1672         case SOL_IP:
1673             switch (cmsg->cmsg_type) {
1674             case IP_TTL:
1675             {
1676                 uint32_t *v = (uint32_t *)data;
1677                 uint32_t *t_int = (uint32_t *)target_data;
1678 
1679                 if (len != sizeof(uint32_t) ||
1680                     tgt_len != sizeof(uint32_t)) {
1681                     goto unimplemented;
1682                 }
1683                 __put_user(*v, t_int);
1684                 break;
1685             }
1686             case IP_RECVERR:
1687             {
1688                 struct errhdr_t {
1689                    struct sock_extended_err ee;
1690                    struct sockaddr_in offender;
1691                 };
1692                 struct errhdr_t *errh = (struct errhdr_t *)data;
1693                 struct errhdr_t *target_errh =
1694                     (struct errhdr_t *)target_data;
1695 
1696                 if (len != sizeof(struct errhdr_t) ||
1697                     tgt_len != sizeof(struct errhdr_t)) {
1698                     goto unimplemented;
1699                 }
1700                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1703                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708                     (void *) &errh->offender, sizeof(errh->offender));
1709                 break;
1710             }
1711             default:
1712                 goto unimplemented;
1713             }
1714             break;
1715 
1716         case SOL_IPV6:
1717             switch (cmsg->cmsg_type) {
1718             case IPV6_HOPLIMIT:
1719             {
1720                 uint32_t *v = (uint32_t *)data;
1721                 uint32_t *t_int = (uint32_t *)target_data;
1722 
1723                 if (len != sizeof(uint32_t) ||
1724                     tgt_len != sizeof(uint32_t)) {
1725                     goto unimplemented;
1726                 }
1727                 __put_user(*v, t_int);
1728                 break;
1729             }
1730             case IPV6_RECVERR:
1731             {
1732                 struct errhdr6_t {
1733                    struct sock_extended_err ee;
1734                    struct sockaddr_in6 offender;
1735                 };
1736                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737                 struct errhdr6_t *target_errh =
1738                     (struct errhdr6_t *)target_data;
1739 
1740                 if (len != sizeof(struct errhdr6_t) ||
1741                     tgt_len != sizeof(struct errhdr6_t)) {
1742                     goto unimplemented;
1743                 }
1744                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1747                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752                     (void *) &errh->offender, sizeof(errh->offender));
1753                 break;
1754             }
1755             default:
1756                 goto unimplemented;
1757             }
1758             break;
1759 
1760         default:
1761         unimplemented:
1762             gemu_log("Unsupported ancillary data: %d/%d\n",
1763                                         cmsg->cmsg_level, cmsg->cmsg_type);
1764             memcpy(target_data, data, MIN(len, tgt_len));
1765             if (tgt_len > len) {
1766                 memset(target_data + len, 0, tgt_len - len);
1767             }
1768         }
1769 
1770         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772         if (msg_controllen < tgt_space) {
1773             tgt_space = msg_controllen;
1774         }
1775         msg_controllen -= tgt_space;
1776         space += tgt_space;
1777         cmsg = CMSG_NXTHDR(msgh, cmsg);
1778         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779                                          target_cmsg_start);
1780     }
1781     unlock_user(target_cmsg, target_cmsg_addr, space);
1782  the_end:
1783     target_msgh->msg_controllen = tswapal(space);
1784     return 0;
1785 }
1786 
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789                               abi_ulong optval_addr, socklen_t optlen)
1790 {
1791     abi_long ret;
1792     int val;
1793     struct ip_mreqn *ip_mreq;
1794     struct ip_mreq_source *ip_mreq_source;
1795 
1796     switch(level) {
1797     case SOL_TCP:
1798         /* TCP options all take an 'int' value.  */
1799         if (optlen < sizeof(uint32_t))
1800             return -TARGET_EINVAL;
1801 
1802         if (get_user_u32(val, optval_addr))
1803             return -TARGET_EFAULT;
1804         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805         break;
1806     case SOL_IP:
1807         switch(optname) {
1808         case IP_TOS:
1809         case IP_TTL:
1810         case IP_HDRINCL:
1811         case IP_ROUTER_ALERT:
1812         case IP_RECVOPTS:
1813         case IP_RETOPTS:
1814         case IP_PKTINFO:
1815         case IP_MTU_DISCOVER:
1816         case IP_RECVERR:
1817         case IP_RECVTTL:
1818         case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820         case IP_FREEBIND:
1821 #endif
1822         case IP_MULTICAST_TTL:
1823         case IP_MULTICAST_LOOP:
1824             val = 0;
1825             if (optlen >= sizeof(uint32_t)) {
1826                 if (get_user_u32(val, optval_addr))
1827                     return -TARGET_EFAULT;
1828             } else if (optlen >= 1) {
1829                 if (get_user_u8(val, optval_addr))
1830                     return -TARGET_EFAULT;
1831             }
1832             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833             break;
1834         case IP_ADD_MEMBERSHIP:
1835         case IP_DROP_MEMBERSHIP:
1836             if (optlen < sizeof (struct target_ip_mreq) ||
1837                 optlen > sizeof (struct target_ip_mreqn))
1838                 return -TARGET_EINVAL;
1839 
1840             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843             break;
1844 
1845         case IP_BLOCK_SOURCE:
1846         case IP_UNBLOCK_SOURCE:
1847         case IP_ADD_SOURCE_MEMBERSHIP:
1848         case IP_DROP_SOURCE_MEMBERSHIP:
1849             if (optlen != sizeof (struct target_ip_mreq_source))
1850                 return -TARGET_EINVAL;
1851 
1852             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854             unlock_user (ip_mreq_source, optval_addr, 0);
1855             break;
1856 
1857         default:
1858             goto unimplemented;
1859         }
1860         break;
1861     case SOL_IPV6:
1862         switch (optname) {
1863         case IPV6_MTU_DISCOVER:
1864         case IPV6_MTU:
1865         case IPV6_V6ONLY:
1866         case IPV6_RECVPKTINFO:
1867         case IPV6_UNICAST_HOPS:
1868         case IPV6_MULTICAST_HOPS:
1869         case IPV6_MULTICAST_LOOP:
1870         case IPV6_RECVERR:
1871         case IPV6_RECVHOPLIMIT:
1872         case IPV6_2292HOPLIMIT:
1873         case IPV6_CHECKSUM:
1874             val = 0;
1875             if (optlen < sizeof(uint32_t)) {
1876                 return -TARGET_EINVAL;
1877             }
1878             if (get_user_u32(val, optval_addr)) {
1879                 return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname,
1882                                        &val, sizeof(val)));
1883             break;
1884         case IPV6_PKTINFO:
1885         {
1886             struct in6_pktinfo pki;
1887 
1888             if (optlen < sizeof(pki)) {
1889                 return -TARGET_EINVAL;
1890             }
1891 
1892             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893                 return -TARGET_EFAULT;
1894             }
1895 
1896             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1897 
1898             ret = get_errno(setsockopt(sockfd, level, optname,
1899                                        &pki, sizeof(pki)));
1900             break;
1901         }
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_ICMPV6:
1907         switch (optname) {
1908         case ICMPV6_FILTER:
1909         {
1910             struct icmp6_filter icmp6f;
1911 
1912             if (optlen > sizeof(icmp6f)) {
1913                 optlen = sizeof(icmp6f);
1914             }
1915 
1916             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917                 return -TARGET_EFAULT;
1918             }
1919 
1920             for (val = 0; val < 8; val++) {
1921                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1922             }
1923 
1924             ret = get_errno(setsockopt(sockfd, level, optname,
1925                                        &icmp6f, optlen));
1926             break;
1927         }
1928         default:
1929             goto unimplemented;
1930         }
1931         break;
1932     case SOL_RAW:
1933         switch (optname) {
1934         case ICMP_FILTER:
1935         case IPV6_CHECKSUM:
1936             /* those take an u32 value */
1937             if (optlen < sizeof(uint32_t)) {
1938                 return -TARGET_EINVAL;
1939             }
1940 
1941             if (get_user_u32(val, optval_addr)) {
1942                 return -TARGET_EFAULT;
1943             }
1944             ret = get_errno(setsockopt(sockfd, level, optname,
1945                                        &val, sizeof(val)));
1946             break;
1947 
1948         default:
1949             goto unimplemented;
1950         }
1951         break;
1952     case TARGET_SOL_SOCKET:
1953         switch (optname) {
1954         case TARGET_SO_RCVTIMEO:
1955         {
1956                 struct timeval tv;
1957 
1958                 optname = SO_RCVTIMEO;
1959 
1960 set_timeout:
1961                 if (optlen != sizeof(struct target_timeval)) {
1962                     return -TARGET_EINVAL;
1963                 }
1964 
1965                 if (copy_from_user_timeval(&tv, optval_addr)) {
1966                     return -TARGET_EFAULT;
1967                 }
1968 
1969                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970                                 &tv, sizeof(tv)));
1971                 return ret;
1972         }
1973         case TARGET_SO_SNDTIMEO:
1974                 optname = SO_SNDTIMEO;
1975                 goto set_timeout;
1976         case TARGET_SO_ATTACH_FILTER:
1977         {
1978                 struct target_sock_fprog *tfprog;
1979                 struct target_sock_filter *tfilter;
1980                 struct sock_fprog fprog;
1981                 struct sock_filter *filter;
1982                 int i;
1983 
1984                 if (optlen != sizeof(*tfprog)) {
1985                     return -TARGET_EINVAL;
1986                 }
1987                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988                     return -TARGET_EFAULT;
1989                 }
1990                 if (!lock_user_struct(VERIFY_READ, tfilter,
1991                                       tswapal(tfprog->filter), 0)) {
1992                     unlock_user_struct(tfprog, optval_addr, 1);
1993                     return -TARGET_EFAULT;
1994                 }
1995 
1996                 fprog.len = tswap16(tfprog->len);
1997                 filter = g_try_new(struct sock_filter, fprog.len);
1998                 if (filter == NULL) {
1999                     unlock_user_struct(tfilter, tfprog->filter, 1);
2000                     unlock_user_struct(tfprog, optval_addr, 1);
2001                     return -TARGET_ENOMEM;
2002                 }
2003                 for (i = 0; i < fprog.len; i++) {
2004                     filter[i].code = tswap16(tfilter[i].code);
2005                     filter[i].jt = tfilter[i].jt;
2006                     filter[i].jf = tfilter[i].jf;
2007                     filter[i].k = tswap32(tfilter[i].k);
2008                 }
2009                 fprog.filter = filter;
2010 
2011                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013                 g_free(filter);
2014 
2015                 unlock_user_struct(tfilter, tfprog->filter, 1);
2016                 unlock_user_struct(tfprog, optval_addr, 1);
2017                 return ret;
2018         }
2019 	case TARGET_SO_BINDTODEVICE:
2020 	{
2021 		char *dev_ifname, *addr_ifname;
2022 
2023 		if (optlen > IFNAMSIZ - 1) {
2024 		    optlen = IFNAMSIZ - 1;
2025 		}
2026 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027 		if (!dev_ifname) {
2028 		    return -TARGET_EFAULT;
2029 		}
2030 		optname = SO_BINDTODEVICE;
2031 		addr_ifname = alloca(IFNAMSIZ);
2032 		memcpy(addr_ifname, dev_ifname, optlen);
2033 		addr_ifname[optlen] = 0;
2034 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035                                            addr_ifname, optlen));
2036 		unlock_user (dev_ifname, optval_addr, 0);
2037 		return ret;
2038 	}
2039         case TARGET_SO_LINGER:
2040         {
2041                 struct linger lg;
2042                 struct target_linger *tlg;
2043 
2044                 if (optlen != sizeof(struct target_linger)) {
2045                     return -TARGET_EINVAL;
2046                 }
2047                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048                     return -TARGET_EFAULT;
2049                 }
2050                 __get_user(lg.l_onoff, &tlg->l_onoff);
2051                 __get_user(lg.l_linger, &tlg->l_linger);
2052                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053                                 &lg, sizeof(lg)));
2054                 unlock_user_struct(tlg, optval_addr, 0);
2055                 return ret;
2056         }
2057             /* Options with 'int' argument.  */
2058         case TARGET_SO_DEBUG:
2059 		optname = SO_DEBUG;
2060 		break;
2061         case TARGET_SO_REUSEADDR:
2062 		optname = SO_REUSEADDR;
2063 		break;
2064 #ifdef SO_REUSEPORT
2065         case TARGET_SO_REUSEPORT:
2066                 optname = SO_REUSEPORT;
2067                 break;
2068 #endif
2069         case TARGET_SO_TYPE:
2070 		optname = SO_TYPE;
2071 		break;
2072         case TARGET_SO_ERROR:
2073 		optname = SO_ERROR;
2074 		break;
2075         case TARGET_SO_DONTROUTE:
2076 		optname = SO_DONTROUTE;
2077 		break;
2078         case TARGET_SO_BROADCAST:
2079 		optname = SO_BROADCAST;
2080 		break;
2081         case TARGET_SO_SNDBUF:
2082 		optname = SO_SNDBUF;
2083 		break;
2084         case TARGET_SO_SNDBUFFORCE:
2085                 optname = SO_SNDBUFFORCE;
2086                 break;
2087         case TARGET_SO_RCVBUF:
2088 		optname = SO_RCVBUF;
2089 		break;
2090         case TARGET_SO_RCVBUFFORCE:
2091                 optname = SO_RCVBUFFORCE;
2092                 break;
2093         case TARGET_SO_KEEPALIVE:
2094 		optname = SO_KEEPALIVE;
2095 		break;
2096         case TARGET_SO_OOBINLINE:
2097 		optname = SO_OOBINLINE;
2098 		break;
2099         case TARGET_SO_NO_CHECK:
2100 		optname = SO_NO_CHECK;
2101 		break;
2102         case TARGET_SO_PRIORITY:
2103 		optname = SO_PRIORITY;
2104 		break;
2105 #ifdef SO_BSDCOMPAT
2106         case TARGET_SO_BSDCOMPAT:
2107 		optname = SO_BSDCOMPAT;
2108 		break;
2109 #endif
2110         case TARGET_SO_PASSCRED:
2111 		optname = SO_PASSCRED;
2112 		break;
2113         case TARGET_SO_PASSSEC:
2114                 optname = SO_PASSSEC;
2115                 break;
2116         case TARGET_SO_TIMESTAMP:
2117 		optname = SO_TIMESTAMP;
2118 		break;
2119         case TARGET_SO_RCVLOWAT:
2120 		optname = SO_RCVLOWAT;
2121 		break;
2122         default:
2123             goto unimplemented;
2124         }
2125 	if (optlen < sizeof(uint32_t))
2126             return -TARGET_EINVAL;
2127 
2128 	if (get_user_u32(val, optval_addr))
2129             return -TARGET_EFAULT;
2130 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131         break;
2132     default:
2133     unimplemented:
2134         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135         ret = -TARGET_ENOPROTOOPT;
2136     }
2137     return ret;
2138 }
2139 
2140 /* do_getsockopt() Must return target values and target errnos. */
2141 static abi_long do_getsockopt(int sockfd, int level, int optname,
2142                               abi_ulong optval_addr, abi_ulong optlen)
2143 {
2144     abi_long ret;
2145     int len, val;
2146     socklen_t lv;
2147 
2148     switch(level) {
2149     case TARGET_SOL_SOCKET:
2150         level = SOL_SOCKET;
2151         switch (optname) {
2152         /* These don't just return a single integer */
2153         case TARGET_SO_RCVTIMEO:
2154         case TARGET_SO_SNDTIMEO:
2155         case TARGET_SO_PEERNAME:
2156             goto unimplemented;
2157         case TARGET_SO_PEERCRED: {
2158             struct ucred cr;
2159             socklen_t crlen;
2160             struct target_ucred *tcr;
2161 
2162             if (get_user_u32(len, optlen)) {
2163                 return -TARGET_EFAULT;
2164             }
2165             if (len < 0) {
2166                 return -TARGET_EINVAL;
2167             }
2168 
2169             crlen = sizeof(cr);
2170             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2171                                        &cr, &crlen));
2172             if (ret < 0) {
2173                 return ret;
2174             }
2175             if (len > crlen) {
2176                 len = crlen;
2177             }
2178             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             __put_user(cr.pid, &tcr->pid);
2182             __put_user(cr.uid, &tcr->uid);
2183             __put_user(cr.gid, &tcr->gid);
2184             unlock_user_struct(tcr, optval_addr, 1);
2185             if (put_user_u32(len, optlen)) {
2186                 return -TARGET_EFAULT;
2187             }
2188             break;
2189         }
2190         case TARGET_SO_LINGER:
2191         {
2192             struct linger lg;
2193             socklen_t lglen;
2194             struct target_linger *tlg;
2195 
2196             if (get_user_u32(len, optlen)) {
2197                 return -TARGET_EFAULT;
2198             }
2199             if (len < 0) {
2200                 return -TARGET_EINVAL;
2201             }
2202 
2203             lglen = sizeof(lg);
2204             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2205                                        &lg, &lglen));
2206             if (ret < 0) {
2207                 return ret;
2208             }
2209             if (len > lglen) {
2210                 len = lglen;
2211             }
2212             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             __put_user(lg.l_onoff, &tlg->l_onoff);
2216             __put_user(lg.l_linger, &tlg->l_linger);
2217             unlock_user_struct(tlg, optval_addr, 1);
2218             if (put_user_u32(len, optlen)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             break;
2222         }
2223         /* Options with 'int' argument.  */
2224         case TARGET_SO_DEBUG:
2225             optname = SO_DEBUG;
2226             goto int_case;
2227         case TARGET_SO_REUSEADDR:
2228             optname = SO_REUSEADDR;
2229             goto int_case;
2230 #ifdef SO_REUSEPORT
2231         case TARGET_SO_REUSEPORT:
2232             optname = SO_REUSEPORT;
2233             goto int_case;
2234 #endif
2235         case TARGET_SO_TYPE:
2236             optname = SO_TYPE;
2237             goto int_case;
2238         case TARGET_SO_ERROR:
2239             optname = SO_ERROR;
2240             goto int_case;
2241         case TARGET_SO_DONTROUTE:
2242             optname = SO_DONTROUTE;
2243             goto int_case;
2244         case TARGET_SO_BROADCAST:
2245             optname = SO_BROADCAST;
2246             goto int_case;
2247         case TARGET_SO_SNDBUF:
2248             optname = SO_SNDBUF;
2249             goto int_case;
2250         case TARGET_SO_RCVBUF:
2251             optname = SO_RCVBUF;
2252             goto int_case;
2253         case TARGET_SO_KEEPALIVE:
2254             optname = SO_KEEPALIVE;
2255             goto int_case;
2256         case TARGET_SO_OOBINLINE:
2257             optname = SO_OOBINLINE;
2258             goto int_case;
2259         case TARGET_SO_NO_CHECK:
2260             optname = SO_NO_CHECK;
2261             goto int_case;
2262         case TARGET_SO_PRIORITY:
2263             optname = SO_PRIORITY;
2264             goto int_case;
2265 #ifdef SO_BSDCOMPAT
2266         case TARGET_SO_BSDCOMPAT:
2267             optname = SO_BSDCOMPAT;
2268             goto int_case;
2269 #endif
2270         case TARGET_SO_PASSCRED:
2271             optname = SO_PASSCRED;
2272             goto int_case;
2273         case TARGET_SO_TIMESTAMP:
2274             optname = SO_TIMESTAMP;
2275             goto int_case;
2276         case TARGET_SO_RCVLOWAT:
2277             optname = SO_RCVLOWAT;
2278             goto int_case;
2279         case TARGET_SO_ACCEPTCONN:
2280             optname = SO_ACCEPTCONN;
2281             goto int_case;
2282         default:
2283             goto int_case;
2284         }
2285         break;
2286     case SOL_TCP:
2287         /* TCP options all take an 'int' value.  */
2288     int_case:
2289         if (get_user_u32(len, optlen))
2290             return -TARGET_EFAULT;
2291         if (len < 0)
2292             return -TARGET_EINVAL;
2293         lv = sizeof(lv);
2294         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2295         if (ret < 0)
2296             return ret;
2297         if (optname == SO_TYPE) {
2298             val = host_to_target_sock_type(val);
2299         }
2300         if (len > lv)
2301             len = lv;
2302         if (len == 4) {
2303             if (put_user_u32(val, optval_addr))
2304                 return -TARGET_EFAULT;
2305         } else {
2306             if (put_user_u8(val, optval_addr))
2307                 return -TARGET_EFAULT;
2308         }
2309         if (put_user_u32(len, optlen))
2310             return -TARGET_EFAULT;
2311         break;
2312     case SOL_IP:
2313         switch(optname) {
2314         case IP_TOS:
2315         case IP_TTL:
2316         case IP_HDRINCL:
2317         case IP_ROUTER_ALERT:
2318         case IP_RECVOPTS:
2319         case IP_RETOPTS:
2320         case IP_PKTINFO:
2321         case IP_MTU_DISCOVER:
2322         case IP_RECVERR:
2323         case IP_RECVTOS:
2324 #ifdef IP_FREEBIND
2325         case IP_FREEBIND:
2326 #endif
2327         case IP_MULTICAST_TTL:
2328         case IP_MULTICAST_LOOP:
2329             if (get_user_u32(len, optlen))
2330                 return -TARGET_EFAULT;
2331             if (len < 0)
2332                 return -TARGET_EINVAL;
2333             lv = sizeof(lv);
2334             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2335             if (ret < 0)
2336                 return ret;
2337             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2338                 len = 1;
2339                 if (put_user_u32(len, optlen)
2340                     || put_user_u8(val, optval_addr))
2341                     return -TARGET_EFAULT;
2342             } else {
2343                 if (len > sizeof(int))
2344                     len = sizeof(int);
2345                 if (put_user_u32(len, optlen)
2346                     || put_user_u32(val, optval_addr))
2347                     return -TARGET_EFAULT;
2348             }
2349             break;
2350         default:
2351             ret = -TARGET_ENOPROTOOPT;
2352             break;
2353         }
2354         break;
2355     case SOL_IPV6:
2356         switch (optname) {
2357         case IPV6_MTU_DISCOVER:
2358         case IPV6_MTU:
2359         case IPV6_V6ONLY:
2360         case IPV6_RECVPKTINFO:
2361         case IPV6_UNICAST_HOPS:
2362         case IPV6_MULTICAST_HOPS:
2363         case IPV6_MULTICAST_LOOP:
2364         case IPV6_RECVERR:
2365         case IPV6_RECVHOPLIMIT:
2366         case IPV6_2292HOPLIMIT:
2367         case IPV6_CHECKSUM:
2368             if (get_user_u32(len, optlen))
2369                 return -TARGET_EFAULT;
2370             if (len < 0)
2371                 return -TARGET_EINVAL;
2372             lv = sizeof(lv);
2373             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2374             if (ret < 0)
2375                 return ret;
2376             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2377                 len = 1;
2378                 if (put_user_u32(len, optlen)
2379                     || put_user_u8(val, optval_addr))
2380                     return -TARGET_EFAULT;
2381             } else {
2382                 if (len > sizeof(int))
2383                     len = sizeof(int);
2384                 if (put_user_u32(len, optlen)
2385                     || put_user_u32(val, optval_addr))
2386                     return -TARGET_EFAULT;
2387             }
2388             break;
2389         default:
2390             ret = -TARGET_ENOPROTOOPT;
2391             break;
2392         }
2393         break;
2394     default:
2395     unimplemented:
2396         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2397                  level, optname);
2398         ret = -TARGET_EOPNOTSUPP;
2399         break;
2400     }
2401     return ret;
2402 }
2403 
2404 /* Convert target low/high pair representing file offset into the host
2405  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2406  * as the kernel doesn't handle them either.
2407  */
2408 static void target_to_host_low_high(abi_ulong tlow,
2409                                     abi_ulong thigh,
2410                                     unsigned long *hlow,
2411                                     unsigned long *hhigh)
2412 {
2413     uint64_t off = tlow |
2414         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2415         TARGET_LONG_BITS / 2;
2416 
2417     *hlow = off;
2418     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2419 }
2420 
2421 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2422                                 abi_ulong count, int copy)
2423 {
2424     struct target_iovec *target_vec;
2425     struct iovec *vec;
2426     abi_ulong total_len, max_len;
2427     int i;
2428     int err = 0;
2429     bool bad_address = false;
2430 
2431     if (count == 0) {
2432         errno = 0;
2433         return NULL;
2434     }
2435     if (count > IOV_MAX) {
2436         errno = EINVAL;
2437         return NULL;
2438     }
2439 
2440     vec = g_try_new0(struct iovec, count);
2441     if (vec == NULL) {
2442         errno = ENOMEM;
2443         return NULL;
2444     }
2445 
2446     target_vec = lock_user(VERIFY_READ, target_addr,
2447                            count * sizeof(struct target_iovec), 1);
2448     if (target_vec == NULL) {
2449         err = EFAULT;
2450         goto fail2;
2451     }
2452 
2453     /* ??? If host page size > target page size, this will result in a
2454        value larger than what we can actually support.  */
2455     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2456     total_len = 0;
2457 
2458     for (i = 0; i < count; i++) {
2459         abi_ulong base = tswapal(target_vec[i].iov_base);
2460         abi_long len = tswapal(target_vec[i].iov_len);
2461 
2462         if (len < 0) {
2463             err = EINVAL;
2464             goto fail;
2465         } else if (len == 0) {
2466             /* Zero length pointer is ignored.  */
2467             vec[i].iov_base = 0;
2468         } else {
2469             vec[i].iov_base = lock_user(type, base, len, copy);
2470             /* If the first buffer pointer is bad, this is a fault.  But
2471              * subsequent bad buffers will result in a partial write; this
2472              * is realized by filling the vector with null pointers and
2473              * zero lengths. */
2474             if (!vec[i].iov_base) {
2475                 if (i == 0) {
2476                     err = EFAULT;
2477                     goto fail;
2478                 } else {
2479                     bad_address = true;
2480                 }
2481             }
2482             if (bad_address) {
2483                 len = 0;
2484             }
2485             if (len > max_len - total_len) {
2486                 len = max_len - total_len;
2487             }
2488         }
2489         vec[i].iov_len = len;
2490         total_len += len;
2491     }
2492 
2493     unlock_user(target_vec, target_addr, 0);
2494     return vec;
2495 
2496  fail:
2497     while (--i >= 0) {
2498         if (tswapal(target_vec[i].iov_len) > 0) {
2499             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2500         }
2501     }
2502     unlock_user(target_vec, target_addr, 0);
2503  fail2:
2504     g_free(vec);
2505     errno = err;
2506     return NULL;
2507 }
2508 
2509 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2510                          abi_ulong count, int copy)
2511 {
2512     struct target_iovec *target_vec;
2513     int i;
2514 
2515     target_vec = lock_user(VERIFY_READ, target_addr,
2516                            count * sizeof(struct target_iovec), 1);
2517     if (target_vec) {
2518         for (i = 0; i < count; i++) {
2519             abi_ulong base = tswapal(target_vec[i].iov_base);
2520             abi_long len = tswapal(target_vec[i].iov_len);
2521             if (len < 0) {
2522                 break;
2523             }
2524             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2525         }
2526         unlock_user(target_vec, target_addr, 0);
2527     }
2528 
2529     g_free(vec);
2530 }
2531 
2532 static inline int target_to_host_sock_type(int *type)
2533 {
2534     int host_type = 0;
2535     int target_type = *type;
2536 
2537     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2538     case TARGET_SOCK_DGRAM:
2539         host_type = SOCK_DGRAM;
2540         break;
2541     case TARGET_SOCK_STREAM:
2542         host_type = SOCK_STREAM;
2543         break;
2544     default:
2545         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2546         break;
2547     }
2548     if (target_type & TARGET_SOCK_CLOEXEC) {
2549 #if defined(SOCK_CLOEXEC)
2550         host_type |= SOCK_CLOEXEC;
2551 #else
2552         return -TARGET_EINVAL;
2553 #endif
2554     }
2555     if (target_type & TARGET_SOCK_NONBLOCK) {
2556 #if defined(SOCK_NONBLOCK)
2557         host_type |= SOCK_NONBLOCK;
2558 #elif !defined(O_NONBLOCK)
2559         return -TARGET_EINVAL;
2560 #endif
2561     }
2562     *type = host_type;
2563     return 0;
2564 }
2565 
2566 /* Try to emulate socket type flags after socket creation.  */
2567 static int sock_flags_fixup(int fd, int target_type)
2568 {
2569 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2570     if (target_type & TARGET_SOCK_NONBLOCK) {
2571         int flags = fcntl(fd, F_GETFL);
2572         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2573             close(fd);
2574             return -TARGET_EINVAL;
2575         }
2576     }
2577 #endif
2578     return fd;
2579 }
2580 
2581 /* do_socket() Must return target values and target errnos. */
2582 static abi_long do_socket(int domain, int type, int protocol)
2583 {
2584     int target_type = type;
2585     int ret;
2586 
2587     ret = target_to_host_sock_type(&type);
2588     if (ret) {
2589         return ret;
2590     }
2591 
2592     if (domain == PF_NETLINK && !(
2593 #ifdef CONFIG_RTNETLINK
2594          protocol == NETLINK_ROUTE ||
2595 #endif
2596          protocol == NETLINK_KOBJECT_UEVENT ||
2597          protocol == NETLINK_AUDIT)) {
2598         return -EPFNOSUPPORT;
2599     }
2600 
2601     if (domain == AF_PACKET ||
2602         (domain == AF_INET && type == SOCK_PACKET)) {
2603         protocol = tswap16(protocol);
2604     }
2605 
2606     ret = get_errno(socket(domain, type, protocol));
2607     if (ret >= 0) {
2608         ret = sock_flags_fixup(ret, target_type);
2609         if (type == SOCK_PACKET) {
2610             /* Manage an obsolete case :
2611              * if socket type is SOCK_PACKET, bind by name
2612              */
2613             fd_trans_register(ret, &target_packet_trans);
2614         } else if (domain == PF_NETLINK) {
2615             switch (protocol) {
2616 #ifdef CONFIG_RTNETLINK
2617             case NETLINK_ROUTE:
2618                 fd_trans_register(ret, &target_netlink_route_trans);
2619                 break;
2620 #endif
2621             case NETLINK_KOBJECT_UEVENT:
2622                 /* nothing to do: messages are strings */
2623                 break;
2624             case NETLINK_AUDIT:
2625                 fd_trans_register(ret, &target_netlink_audit_trans);
2626                 break;
2627             default:
2628                 g_assert_not_reached();
2629             }
2630         }
2631     }
2632     return ret;
2633 }
2634 
2635 /* do_bind() Must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2637                         socklen_t addrlen)
2638 {
2639     void *addr;
2640     abi_long ret;
2641 
2642     if ((int)addrlen < 0) {
2643         return -TARGET_EINVAL;
2644     }
2645 
2646     addr = alloca(addrlen+1);
2647 
2648     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2649     if (ret)
2650         return ret;
2651 
2652     return get_errno(bind(sockfd, addr, addrlen));
2653 }
2654 
2655 /* do_connect() Must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2657                            socklen_t addrlen)
2658 {
2659     void *addr;
2660     abi_long ret;
2661 
2662     if ((int)addrlen < 0) {
2663         return -TARGET_EINVAL;
2664     }
2665 
2666     addr = alloca(addrlen+1);
2667 
2668     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2669     if (ret)
2670         return ret;
2671 
2672     return get_errno(safe_connect(sockfd, addr, addrlen));
2673 }
2674 
2675 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2676 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2677                                       int flags, int send)
2678 {
2679     abi_long ret, len;
2680     struct msghdr msg;
2681     abi_ulong count;
2682     struct iovec *vec;
2683     abi_ulong target_vec;
2684 
2685     if (msgp->msg_name) {
2686         msg.msg_namelen = tswap32(msgp->msg_namelen);
2687         msg.msg_name = alloca(msg.msg_namelen+1);
2688         ret = target_to_host_sockaddr(fd, msg.msg_name,
2689                                       tswapal(msgp->msg_name),
2690                                       msg.msg_namelen);
2691         if (ret == -TARGET_EFAULT) {
2692             /* For connected sockets msg_name and msg_namelen must
2693              * be ignored, so returning EFAULT immediately is wrong.
2694              * Instead, pass a bad msg_name to the host kernel, and
2695              * let it decide whether to return EFAULT or not.
2696              */
2697             msg.msg_name = (void *)-1;
2698         } else if (ret) {
2699             goto out2;
2700         }
2701     } else {
2702         msg.msg_name = NULL;
2703         msg.msg_namelen = 0;
2704     }
2705     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2706     msg.msg_control = alloca(msg.msg_controllen);
2707     memset(msg.msg_control, 0, msg.msg_controllen);
2708 
2709     msg.msg_flags = tswap32(msgp->msg_flags);
2710 
2711     count = tswapal(msgp->msg_iovlen);
2712     target_vec = tswapal(msgp->msg_iov);
2713 
2714     if (count > IOV_MAX) {
2715         /* sendrcvmsg returns a different errno for this condition than
2716          * readv/writev, so we must catch it here before lock_iovec() does.
2717          */
2718         ret = -TARGET_EMSGSIZE;
2719         goto out2;
2720     }
2721 
2722     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2723                      target_vec, count, send);
2724     if (vec == NULL) {
2725         ret = -host_to_target_errno(errno);
2726         goto out2;
2727     }
2728     msg.msg_iovlen = count;
2729     msg.msg_iov = vec;
2730 
2731     if (send) {
2732         if (fd_trans_target_to_host_data(fd)) {
2733             void *host_msg;
2734 
2735             host_msg = g_malloc(msg.msg_iov->iov_len);
2736             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2737             ret = fd_trans_target_to_host_data(fd)(host_msg,
2738                                                    msg.msg_iov->iov_len);
2739             if (ret >= 0) {
2740                 msg.msg_iov->iov_base = host_msg;
2741                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2742             }
2743             g_free(host_msg);
2744         } else {
2745             ret = target_to_host_cmsg(&msg, msgp);
2746             if (ret == 0) {
2747                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2748             }
2749         }
2750     } else {
2751         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2752         if (!is_error(ret)) {
2753             len = ret;
2754             if (fd_trans_host_to_target_data(fd)) {
2755                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2756                                                MIN(msg.msg_iov->iov_len, len));
2757             } else {
2758                 ret = host_to_target_cmsg(msgp, &msg);
2759             }
2760             if (!is_error(ret)) {
2761                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2762                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2763                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2764                                     msg.msg_name, msg.msg_namelen);
2765                     if (ret) {
2766                         goto out;
2767                     }
2768                 }
2769 
2770                 ret = len;
2771             }
2772         }
2773     }
2774 
2775 out:
2776     unlock_iovec(vec, target_vec, count, !send);
2777 out2:
2778     return ret;
2779 }
2780 
2781 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2782                                int flags, int send)
2783 {
2784     abi_long ret;
2785     struct target_msghdr *msgp;
2786 
2787     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2788                           msgp,
2789                           target_msg,
2790                           send ? 1 : 0)) {
2791         return -TARGET_EFAULT;
2792     }
2793     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2794     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2795     return ret;
2796 }
2797 
2798 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2799  * so it might not have this *mmsg-specific flag either.
2800  */
2801 #ifndef MSG_WAITFORONE
2802 #define MSG_WAITFORONE 0x10000
2803 #endif
2804 
2805 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2806                                 unsigned int vlen, unsigned int flags,
2807                                 int send)
2808 {
2809     struct target_mmsghdr *mmsgp;
2810     abi_long ret = 0;
2811     int i;
2812 
2813     if (vlen > UIO_MAXIOV) {
2814         vlen = UIO_MAXIOV;
2815     }
2816 
2817     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2818     if (!mmsgp) {
2819         return -TARGET_EFAULT;
2820     }
2821 
2822     for (i = 0; i < vlen; i++) {
2823         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2824         if (is_error(ret)) {
2825             break;
2826         }
2827         mmsgp[i].msg_len = tswap32(ret);
2828         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2829         if (flags & MSG_WAITFORONE) {
2830             flags |= MSG_DONTWAIT;
2831         }
2832     }
2833 
2834     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2835 
2836     /* Return number of datagrams sent if we sent any at all;
2837      * otherwise return the error.
2838      */
2839     if (i) {
2840         return i;
2841     }
2842     return ret;
2843 }
2844 
2845 /* do_accept4() Must return target values and target errnos. */
2846 static abi_long do_accept4(int fd, abi_ulong target_addr,
2847                            abi_ulong target_addrlen_addr, int flags)
2848 {
2849     socklen_t addrlen;
2850     void *addr;
2851     abi_long ret;
2852     int host_flags;
2853 
2854     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2855 
2856     if (target_addr == 0) {
2857         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2858     }
2859 
2860     /* linux returns EINVAL if addrlen pointer is invalid */
2861     if (get_user_u32(addrlen, target_addrlen_addr))
2862         return -TARGET_EINVAL;
2863 
2864     if ((int)addrlen < 0) {
2865         return -TARGET_EINVAL;
2866     }
2867 
2868     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2869         return -TARGET_EINVAL;
2870 
2871     addr = alloca(addrlen);
2872 
2873     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
2874     if (!is_error(ret)) {
2875         host_to_target_sockaddr(target_addr, addr, addrlen);
2876         if (put_user_u32(addrlen, target_addrlen_addr))
2877             ret = -TARGET_EFAULT;
2878     }
2879     return ret;
2880 }
2881 
2882 /* do_getpeername() Must return target values and target errnos. */
2883 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2884                                abi_ulong target_addrlen_addr)
2885 {
2886     socklen_t addrlen;
2887     void *addr;
2888     abi_long ret;
2889 
2890     if (get_user_u32(addrlen, target_addrlen_addr))
2891         return -TARGET_EFAULT;
2892 
2893     if ((int)addrlen < 0) {
2894         return -TARGET_EINVAL;
2895     }
2896 
2897     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2898         return -TARGET_EFAULT;
2899 
2900     addr = alloca(addrlen);
2901 
2902     ret = get_errno(getpeername(fd, addr, &addrlen));
2903     if (!is_error(ret)) {
2904         host_to_target_sockaddr(target_addr, addr, addrlen);
2905         if (put_user_u32(addrlen, target_addrlen_addr))
2906             ret = -TARGET_EFAULT;
2907     }
2908     return ret;
2909 }
2910 
2911 /* do_getsockname() Must return target values and target errnos. */
2912 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2913                                abi_ulong target_addrlen_addr)
2914 {
2915     socklen_t addrlen;
2916     void *addr;
2917     abi_long ret;
2918 
2919     if (get_user_u32(addrlen, target_addrlen_addr))
2920         return -TARGET_EFAULT;
2921 
2922     if ((int)addrlen < 0) {
2923         return -TARGET_EINVAL;
2924     }
2925 
2926     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2927         return -TARGET_EFAULT;
2928 
2929     addr = alloca(addrlen);
2930 
2931     ret = get_errno(getsockname(fd, addr, &addrlen));
2932     if (!is_error(ret)) {
2933         host_to_target_sockaddr(target_addr, addr, addrlen);
2934         if (put_user_u32(addrlen, target_addrlen_addr))
2935             ret = -TARGET_EFAULT;
2936     }
2937     return ret;
2938 }
2939 
2940 /* do_socketpair() Must return target values and target errnos. */
2941 static abi_long do_socketpair(int domain, int type, int protocol,
2942                               abi_ulong target_tab_addr)
2943 {
2944     int tab[2];
2945     abi_long ret;
2946 
2947     target_to_host_sock_type(&type);
2948 
2949     ret = get_errno(socketpair(domain, type, protocol, tab));
2950     if (!is_error(ret)) {
2951         if (put_user_s32(tab[0], target_tab_addr)
2952             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2953             ret = -TARGET_EFAULT;
2954     }
2955     return ret;
2956 }
2957 
2958 /* do_sendto() Must return target values and target errnos. */
2959 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2960                           abi_ulong target_addr, socklen_t addrlen)
2961 {
2962     void *addr;
2963     void *host_msg;
2964     void *copy_msg = NULL;
2965     abi_long ret;
2966 
2967     if ((int)addrlen < 0) {
2968         return -TARGET_EINVAL;
2969     }
2970 
2971     host_msg = lock_user(VERIFY_READ, msg, len, 1);
2972     if (!host_msg)
2973         return -TARGET_EFAULT;
2974     if (fd_trans_target_to_host_data(fd)) {
2975         copy_msg = host_msg;
2976         host_msg = g_malloc(len);
2977         memcpy(host_msg, copy_msg, len);
2978         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2979         if (ret < 0) {
2980             goto fail;
2981         }
2982     }
2983     if (target_addr) {
2984         addr = alloca(addrlen+1);
2985         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2986         if (ret) {
2987             goto fail;
2988         }
2989         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2990     } else {
2991         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2992     }
2993 fail:
2994     if (copy_msg) {
2995         g_free(host_msg);
2996         host_msg = copy_msg;
2997     }
2998     unlock_user(host_msg, msg, 0);
2999     return ret;
3000 }
3001 
3002 /* do_recvfrom() Must return target values and target errnos. */
3003 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3004                             abi_ulong target_addr,
3005                             abi_ulong target_addrlen)
3006 {
3007     socklen_t addrlen;
3008     void *addr;
3009     void *host_msg;
3010     abi_long ret;
3011 
3012     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3013     if (!host_msg)
3014         return -TARGET_EFAULT;
3015     if (target_addr) {
3016         if (get_user_u32(addrlen, target_addrlen)) {
3017             ret = -TARGET_EFAULT;
3018             goto fail;
3019         }
3020         if ((int)addrlen < 0) {
3021             ret = -TARGET_EINVAL;
3022             goto fail;
3023         }
3024         addr = alloca(addrlen);
3025         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3026                                       addr, &addrlen));
3027     } else {
3028         addr = NULL; /* To keep compiler quiet.  */
3029         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3030     }
3031     if (!is_error(ret)) {
3032         if (fd_trans_host_to_target_data(fd)) {
3033             abi_long trans;
3034             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3035             if (is_error(trans)) {
3036                 ret = trans;
3037                 goto fail;
3038             }
3039         }
3040         if (target_addr) {
3041             host_to_target_sockaddr(target_addr, addr, addrlen);
3042             if (put_user_u32(addrlen, target_addrlen)) {
3043                 ret = -TARGET_EFAULT;
3044                 goto fail;
3045             }
3046         }
3047         unlock_user(host_msg, msg, len);
3048     } else {
3049 fail:
3050         unlock_user(host_msg, msg, 0);
3051     }
3052     return ret;
3053 }
3054 
3055 #ifdef TARGET_NR_socketcall
3056 /* do_socketcall() must return target values and target errnos. */
3057 static abi_long do_socketcall(int num, abi_ulong vptr)
3058 {
3059     static const unsigned nargs[] = { /* number of arguments per operation */
3060         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3061         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3062         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3063         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3064         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3065         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3066         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3067         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3068         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3069         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3070         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3071         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3072         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3073         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3074         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3075         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3076         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3077         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3078         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3079         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3080     };
3081     abi_long a[6]; /* max 6 args */
3082     unsigned i;
3083 
3084     /* check the range of the first argument num */
3085     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3086     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3087         return -TARGET_EINVAL;
3088     }
3089     /* ensure we have space for args */
3090     if (nargs[num] > ARRAY_SIZE(a)) {
3091         return -TARGET_EINVAL;
3092     }
3093     /* collect the arguments in a[] according to nargs[] */
3094     for (i = 0; i < nargs[num]; ++i) {
3095         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3096             return -TARGET_EFAULT;
3097         }
3098     }
3099     /* now when we have the args, invoke the appropriate underlying function */
3100     switch (num) {
3101     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3102         return do_socket(a[0], a[1], a[2]);
3103     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3104         return do_bind(a[0], a[1], a[2]);
3105     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3106         return do_connect(a[0], a[1], a[2]);
3107     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3108         return get_errno(listen(a[0], a[1]));
3109     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3110         return do_accept4(a[0], a[1], a[2], 0);
3111     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3112         return do_getsockname(a[0], a[1], a[2]);
3113     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3114         return do_getpeername(a[0], a[1], a[2]);
3115     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3116         return do_socketpair(a[0], a[1], a[2], a[3]);
3117     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3118         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3119     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3120         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3121     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3122         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3123     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3124         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3125     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3126         return get_errno(shutdown(a[0], a[1]));
3127     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3128         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3129     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3130         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3131     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3132         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3133     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3134         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3135     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3136         return do_accept4(a[0], a[1], a[2], a[3]);
3137     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3138         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3139     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3140         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3141     default:
3142         gemu_log("Unsupported socketcall: %d\n", num);
3143         return -TARGET_EINVAL;
3144     }
3145 }
3146 #endif
3147 
3148 #define N_SHM_REGIONS	32
3149 
3150 static struct shm_region {
3151     abi_ulong start;
3152     abi_ulong size;
3153     bool in_use;
3154 } shm_regions[N_SHM_REGIONS];
3155 
3156 #ifndef TARGET_SEMID64_DS
3157 /* asm-generic version of this struct */
3158 struct target_semid64_ds
3159 {
3160   struct target_ipc_perm sem_perm;
3161   abi_ulong sem_otime;
3162 #if TARGET_ABI_BITS == 32
3163   abi_ulong __unused1;
3164 #endif
3165   abi_ulong sem_ctime;
3166 #if TARGET_ABI_BITS == 32
3167   abi_ulong __unused2;
3168 #endif
3169   abi_ulong sem_nsems;
3170   abi_ulong __unused3;
3171   abi_ulong __unused4;
3172 };
3173 #endif
3174 
3175 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3176                                                abi_ulong target_addr)
3177 {
3178     struct target_ipc_perm *target_ip;
3179     struct target_semid64_ds *target_sd;
3180 
3181     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3182         return -TARGET_EFAULT;
3183     target_ip = &(target_sd->sem_perm);
3184     host_ip->__key = tswap32(target_ip->__key);
3185     host_ip->uid = tswap32(target_ip->uid);
3186     host_ip->gid = tswap32(target_ip->gid);
3187     host_ip->cuid = tswap32(target_ip->cuid);
3188     host_ip->cgid = tswap32(target_ip->cgid);
3189 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3190     host_ip->mode = tswap32(target_ip->mode);
3191 #else
3192     host_ip->mode = tswap16(target_ip->mode);
3193 #endif
3194 #if defined(TARGET_PPC)
3195     host_ip->__seq = tswap32(target_ip->__seq);
3196 #else
3197     host_ip->__seq = tswap16(target_ip->__seq);
3198 #endif
3199     unlock_user_struct(target_sd, target_addr, 0);
3200     return 0;
3201 }
3202 
3203 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3204                                                struct ipc_perm *host_ip)
3205 {
3206     struct target_ipc_perm *target_ip;
3207     struct target_semid64_ds *target_sd;
3208 
3209     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3210         return -TARGET_EFAULT;
3211     target_ip = &(target_sd->sem_perm);
3212     target_ip->__key = tswap32(host_ip->__key);
3213     target_ip->uid = tswap32(host_ip->uid);
3214     target_ip->gid = tswap32(host_ip->gid);
3215     target_ip->cuid = tswap32(host_ip->cuid);
3216     target_ip->cgid = tswap32(host_ip->cgid);
3217 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3218     target_ip->mode = tswap32(host_ip->mode);
3219 #else
3220     target_ip->mode = tswap16(host_ip->mode);
3221 #endif
3222 #if defined(TARGET_PPC)
3223     target_ip->__seq = tswap32(host_ip->__seq);
3224 #else
3225     target_ip->__seq = tswap16(host_ip->__seq);
3226 #endif
3227     unlock_user_struct(target_sd, target_addr, 1);
3228     return 0;
3229 }
3230 
3231 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3232                                                abi_ulong target_addr)
3233 {
3234     struct target_semid64_ds *target_sd;
3235 
3236     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3237         return -TARGET_EFAULT;
3238     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3239         return -TARGET_EFAULT;
3240     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3241     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3242     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3243     unlock_user_struct(target_sd, target_addr, 0);
3244     return 0;
3245 }
3246 
3247 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3248                                                struct semid_ds *host_sd)
3249 {
3250     struct target_semid64_ds *target_sd;
3251 
3252     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3253         return -TARGET_EFAULT;
3254     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3255         return -TARGET_EFAULT;
3256     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3257     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3258     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3259     unlock_user_struct(target_sd, target_addr, 1);
3260     return 0;
3261 }
3262 
3263 struct target_seminfo {
3264     int semmap;
3265     int semmni;
3266     int semmns;
3267     int semmnu;
3268     int semmsl;
3269     int semopm;
3270     int semume;
3271     int semusz;
3272     int semvmx;
3273     int semaem;
3274 };
3275 
3276 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3277                                               struct seminfo *host_seminfo)
3278 {
3279     struct target_seminfo *target_seminfo;
3280     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3281         return -TARGET_EFAULT;
3282     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3283     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3284     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3285     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3286     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3287     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3288     __put_user(host_seminfo->semume, &target_seminfo->semume);
3289     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3290     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3291     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3292     unlock_user_struct(target_seminfo, target_addr, 1);
3293     return 0;
3294 }
3295 
3296 union semun {
3297 	int val;
3298 	struct semid_ds *buf;
3299 	unsigned short *array;
3300 	struct seminfo *__buf;
3301 };
3302 
3303 union target_semun {
3304 	int val;
3305 	abi_ulong buf;
3306 	abi_ulong array;
3307 	abi_ulong __buf;
3308 };
3309 
3310 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3311                                                abi_ulong target_addr)
3312 {
3313     int nsems;
3314     unsigned short *array;
3315     union semun semun;
3316     struct semid_ds semid_ds;
3317     int i, ret;
3318 
3319     semun.buf = &semid_ds;
3320 
3321     ret = semctl(semid, 0, IPC_STAT, semun);
3322     if (ret == -1)
3323         return get_errno(ret);
3324 
3325     nsems = semid_ds.sem_nsems;
3326 
3327     *host_array = g_try_new(unsigned short, nsems);
3328     if (!*host_array) {
3329         return -TARGET_ENOMEM;
3330     }
3331     array = lock_user(VERIFY_READ, target_addr,
3332                       nsems*sizeof(unsigned short), 1);
3333     if (!array) {
3334         g_free(*host_array);
3335         return -TARGET_EFAULT;
3336     }
3337 
3338     for(i=0; i<nsems; i++) {
3339         __get_user((*host_array)[i], &array[i]);
3340     }
3341     unlock_user(array, target_addr, 0);
3342 
3343     return 0;
3344 }
3345 
3346 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3347                                                unsigned short **host_array)
3348 {
3349     int nsems;
3350     unsigned short *array;
3351     union semun semun;
3352     struct semid_ds semid_ds;
3353     int i, ret;
3354 
3355     semun.buf = &semid_ds;
3356 
3357     ret = semctl(semid, 0, IPC_STAT, semun);
3358     if (ret == -1)
3359         return get_errno(ret);
3360 
3361     nsems = semid_ds.sem_nsems;
3362 
3363     array = lock_user(VERIFY_WRITE, target_addr,
3364                       nsems*sizeof(unsigned short), 0);
3365     if (!array)
3366         return -TARGET_EFAULT;
3367 
3368     for(i=0; i<nsems; i++) {
3369         __put_user((*host_array)[i], &array[i]);
3370     }
3371     g_free(*host_array);
3372     unlock_user(array, target_addr, 1);
3373 
3374     return 0;
3375 }
3376 
3377 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3378                                  abi_ulong target_arg)
3379 {
3380     union target_semun target_su = { .buf = target_arg };
3381     union semun arg;
3382     struct semid_ds dsarg;
3383     unsigned short *array = NULL;
3384     struct seminfo seminfo;
3385     abi_long ret = -TARGET_EINVAL;
3386     abi_long err;
3387     cmd &= 0xff;
3388 
3389     switch( cmd ) {
3390 	case GETVAL:
3391 	case SETVAL:
3392             /* In 64 bit cross-endian situations, we will erroneously pick up
3393              * the wrong half of the union for the "val" element.  To rectify
3394              * this, the entire 8-byte structure is byteswapped, followed by
3395 	     * a swap of the 4 byte val field. In other cases, the data is
3396 	     * already in proper host byte order. */
3397 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3398 		target_su.buf = tswapal(target_su.buf);
3399 		arg.val = tswap32(target_su.val);
3400 	    } else {
3401 		arg.val = target_su.val;
3402 	    }
3403             ret = get_errno(semctl(semid, semnum, cmd, arg));
3404             break;
3405 	case GETALL:
3406 	case SETALL:
3407             err = target_to_host_semarray(semid, &array, target_su.array);
3408             if (err)
3409                 return err;
3410             arg.array = array;
3411             ret = get_errno(semctl(semid, semnum, cmd, arg));
3412             err = host_to_target_semarray(semid, target_su.array, &array);
3413             if (err)
3414                 return err;
3415             break;
3416 	case IPC_STAT:
3417 	case IPC_SET:
3418 	case SEM_STAT:
3419             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3420             if (err)
3421                 return err;
3422             arg.buf = &dsarg;
3423             ret = get_errno(semctl(semid, semnum, cmd, arg));
3424             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3425             if (err)
3426                 return err;
3427             break;
3428 	case IPC_INFO:
3429 	case SEM_INFO:
3430             arg.__buf = &seminfo;
3431             ret = get_errno(semctl(semid, semnum, cmd, arg));
3432             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3433             if (err)
3434                 return err;
3435             break;
3436 	case IPC_RMID:
3437 	case GETPID:
3438 	case GETNCNT:
3439 	case GETZCNT:
3440             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3441             break;
3442     }
3443 
3444     return ret;
3445 }
3446 
3447 struct target_sembuf {
3448     unsigned short sem_num;
3449     short sem_op;
3450     short sem_flg;
3451 };
3452 
3453 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3454                                              abi_ulong target_addr,
3455                                              unsigned nsops)
3456 {
3457     struct target_sembuf *target_sembuf;
3458     int i;
3459 
3460     target_sembuf = lock_user(VERIFY_READ, target_addr,
3461                               nsops*sizeof(struct target_sembuf), 1);
3462     if (!target_sembuf)
3463         return -TARGET_EFAULT;
3464 
3465     for(i=0; i<nsops; i++) {
3466         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3467         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3468         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3469     }
3470 
3471     unlock_user(target_sembuf, target_addr, 0);
3472 
3473     return 0;
3474 }
3475 
3476 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3477 {
3478     struct sembuf sops[nsops];
3479 
3480     if (target_to_host_sembuf(sops, ptr, nsops))
3481         return -TARGET_EFAULT;
3482 
3483     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3484 }
3485 
3486 struct target_msqid_ds
3487 {
3488     struct target_ipc_perm msg_perm;
3489     abi_ulong msg_stime;
3490 #if TARGET_ABI_BITS == 32
3491     abi_ulong __unused1;
3492 #endif
3493     abi_ulong msg_rtime;
3494 #if TARGET_ABI_BITS == 32
3495     abi_ulong __unused2;
3496 #endif
3497     abi_ulong msg_ctime;
3498 #if TARGET_ABI_BITS == 32
3499     abi_ulong __unused3;
3500 #endif
3501     abi_ulong __msg_cbytes;
3502     abi_ulong msg_qnum;
3503     abi_ulong msg_qbytes;
3504     abi_ulong msg_lspid;
3505     abi_ulong msg_lrpid;
3506     abi_ulong __unused4;
3507     abi_ulong __unused5;
3508 };
3509 
3510 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3511                                                abi_ulong target_addr)
3512 {
3513     struct target_msqid_ds *target_md;
3514 
3515     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3516         return -TARGET_EFAULT;
3517     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3518         return -TARGET_EFAULT;
3519     host_md->msg_stime = tswapal(target_md->msg_stime);
3520     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3521     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3522     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3523     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3524     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3525     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3526     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3527     unlock_user_struct(target_md, target_addr, 0);
3528     return 0;
3529 }
3530 
3531 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3532                                                struct msqid_ds *host_md)
3533 {
3534     struct target_msqid_ds *target_md;
3535 
3536     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3537         return -TARGET_EFAULT;
3538     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3539         return -TARGET_EFAULT;
3540     target_md->msg_stime = tswapal(host_md->msg_stime);
3541     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3542     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3543     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3544     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3545     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3546     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3547     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3548     unlock_user_struct(target_md, target_addr, 1);
3549     return 0;
3550 }
3551 
3552 struct target_msginfo {
3553     int msgpool;
3554     int msgmap;
3555     int msgmax;
3556     int msgmnb;
3557     int msgmni;
3558     int msgssz;
3559     int msgtql;
3560     unsigned short int msgseg;
3561 };
3562 
3563 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3564                                               struct msginfo *host_msginfo)
3565 {
3566     struct target_msginfo *target_msginfo;
3567     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3568         return -TARGET_EFAULT;
3569     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3570     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3571     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3572     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3573     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3574     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3575     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3576     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3577     unlock_user_struct(target_msginfo, target_addr, 1);
3578     return 0;
3579 }
3580 
3581 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3582 {
3583     struct msqid_ds dsarg;
3584     struct msginfo msginfo;
3585     abi_long ret = -TARGET_EINVAL;
3586 
3587     cmd &= 0xff;
3588 
3589     switch (cmd) {
3590     case IPC_STAT:
3591     case IPC_SET:
3592     case MSG_STAT:
3593         if (target_to_host_msqid_ds(&dsarg,ptr))
3594             return -TARGET_EFAULT;
3595         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3596         if (host_to_target_msqid_ds(ptr,&dsarg))
3597             return -TARGET_EFAULT;
3598         break;
3599     case IPC_RMID:
3600         ret = get_errno(msgctl(msgid, cmd, NULL));
3601         break;
3602     case IPC_INFO:
3603     case MSG_INFO:
3604         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3605         if (host_to_target_msginfo(ptr, &msginfo))
3606             return -TARGET_EFAULT;
3607         break;
3608     }
3609 
3610     return ret;
3611 }
3612 
3613 struct target_msgbuf {
3614     abi_long mtype;
3615     char	mtext[1];
3616 };
3617 
3618 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3619                                  ssize_t msgsz, int msgflg)
3620 {
3621     struct target_msgbuf *target_mb;
3622     struct msgbuf *host_mb;
3623     abi_long ret = 0;
3624 
3625     if (msgsz < 0) {
3626         return -TARGET_EINVAL;
3627     }
3628 
3629     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3630         return -TARGET_EFAULT;
3631     host_mb = g_try_malloc(msgsz + sizeof(long));
3632     if (!host_mb) {
3633         unlock_user_struct(target_mb, msgp, 0);
3634         return -TARGET_ENOMEM;
3635     }
3636     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3637     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3638     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3639     g_free(host_mb);
3640     unlock_user_struct(target_mb, msgp, 0);
3641 
3642     return ret;
3643 }
3644 
3645 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3646                                  ssize_t msgsz, abi_long msgtyp,
3647                                  int msgflg)
3648 {
3649     struct target_msgbuf *target_mb;
3650     char *target_mtext;
3651     struct msgbuf *host_mb;
3652     abi_long ret = 0;
3653 
3654     if (msgsz < 0) {
3655         return -TARGET_EINVAL;
3656     }
3657 
3658     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3659         return -TARGET_EFAULT;
3660 
3661     host_mb = g_try_malloc(msgsz + sizeof(long));
3662     if (!host_mb) {
3663         ret = -TARGET_ENOMEM;
3664         goto end;
3665     }
3666     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3667 
3668     if (ret > 0) {
3669         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3670         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3671         if (!target_mtext) {
3672             ret = -TARGET_EFAULT;
3673             goto end;
3674         }
3675         memcpy(target_mb->mtext, host_mb->mtext, ret);
3676         unlock_user(target_mtext, target_mtext_addr, ret);
3677     }
3678 
3679     target_mb->mtype = tswapal(host_mb->mtype);
3680 
3681 end:
3682     if (target_mb)
3683         unlock_user_struct(target_mb, msgp, 1);
3684     g_free(host_mb);
3685     return ret;
3686 }
3687 
3688 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3689                                                abi_ulong target_addr)
3690 {
3691     struct target_shmid_ds *target_sd;
3692 
3693     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3694         return -TARGET_EFAULT;
3695     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3696         return -TARGET_EFAULT;
3697     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3698     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3699     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3700     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3701     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3702     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3703     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3704     unlock_user_struct(target_sd, target_addr, 0);
3705     return 0;
3706 }
3707 
3708 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3709                                                struct shmid_ds *host_sd)
3710 {
3711     struct target_shmid_ds *target_sd;
3712 
3713     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3714         return -TARGET_EFAULT;
3715     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3716         return -TARGET_EFAULT;
3717     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3718     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3719     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3720     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3721     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3722     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3723     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3724     unlock_user_struct(target_sd, target_addr, 1);
3725     return 0;
3726 }
3727 
3728 struct  target_shminfo {
3729     abi_ulong shmmax;
3730     abi_ulong shmmin;
3731     abi_ulong shmmni;
3732     abi_ulong shmseg;
3733     abi_ulong shmall;
3734 };
3735 
3736 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3737                                               struct shminfo *host_shminfo)
3738 {
3739     struct target_shminfo *target_shminfo;
3740     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3741         return -TARGET_EFAULT;
3742     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3743     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3744     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3745     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3746     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3747     unlock_user_struct(target_shminfo, target_addr, 1);
3748     return 0;
3749 }
3750 
3751 struct target_shm_info {
3752     int used_ids;
3753     abi_ulong shm_tot;
3754     abi_ulong shm_rss;
3755     abi_ulong shm_swp;
3756     abi_ulong swap_attempts;
3757     abi_ulong swap_successes;
3758 };
3759 
3760 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3761                                                struct shm_info *host_shm_info)
3762 {
3763     struct target_shm_info *target_shm_info;
3764     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3765         return -TARGET_EFAULT;
3766     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3767     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3768     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3769     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3770     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3771     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3772     unlock_user_struct(target_shm_info, target_addr, 1);
3773     return 0;
3774 }
3775 
3776 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3777 {
3778     struct shmid_ds dsarg;
3779     struct shminfo shminfo;
3780     struct shm_info shm_info;
3781     abi_long ret = -TARGET_EINVAL;
3782 
3783     cmd &= 0xff;
3784 
3785     switch(cmd) {
3786     case IPC_STAT:
3787     case IPC_SET:
3788     case SHM_STAT:
3789         if (target_to_host_shmid_ds(&dsarg, buf))
3790             return -TARGET_EFAULT;
3791         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3792         if (host_to_target_shmid_ds(buf, &dsarg))
3793             return -TARGET_EFAULT;
3794         break;
3795     case IPC_INFO:
3796         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3797         if (host_to_target_shminfo(buf, &shminfo))
3798             return -TARGET_EFAULT;
3799         break;
3800     case SHM_INFO:
3801         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3802         if (host_to_target_shm_info(buf, &shm_info))
3803             return -TARGET_EFAULT;
3804         break;
3805     case IPC_RMID:
3806     case SHM_LOCK:
3807     case SHM_UNLOCK:
3808         ret = get_errno(shmctl(shmid, cmd, NULL));
3809         break;
3810     }
3811 
3812     return ret;
3813 }
3814 
3815 #ifndef TARGET_FORCE_SHMLBA
3816 /* For most architectures, SHMLBA is the same as the page size;
3817  * some architectures have larger values, in which case they should
3818  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3819  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3820  * and defining its own value for SHMLBA.
3821  *
3822  * The kernel also permits SHMLBA to be set by the architecture to a
3823  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3824  * this means that addresses are rounded to the large size if
3825  * SHM_RND is set but addresses not aligned to that size are not rejected
3826  * as long as they are at least page-aligned. Since the only architecture
3827  * which uses this is ia64 this code doesn't provide for that oddity.
3828  */
3829 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3830 {
3831     return TARGET_PAGE_SIZE;
3832 }
3833 #endif
3834 
3835 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3836                                  int shmid, abi_ulong shmaddr, int shmflg)
3837 {
3838     abi_long raddr;
3839     void *host_raddr;
3840     struct shmid_ds shm_info;
3841     int i,ret;
3842     abi_ulong shmlba;
3843 
3844     /* find out the length of the shared memory segment */
3845     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3846     if (is_error(ret)) {
3847         /* can't get length, bail out */
3848         return ret;
3849     }
3850 
3851     shmlba = target_shmlba(cpu_env);
3852 
3853     if (shmaddr & (shmlba - 1)) {
3854         if (shmflg & SHM_RND) {
3855             shmaddr &= ~(shmlba - 1);
3856         } else {
3857             return -TARGET_EINVAL;
3858         }
3859     }
3860     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3861         return -TARGET_EINVAL;
3862     }
3863 
3864     mmap_lock();
3865 
3866     if (shmaddr)
3867         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3868     else {
3869         abi_ulong mmap_start;
3870 
3871         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3872 
3873         if (mmap_start == -1) {
3874             errno = ENOMEM;
3875             host_raddr = (void *)-1;
3876         } else
3877             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3878     }
3879 
3880     if (host_raddr == (void *)-1) {
3881         mmap_unlock();
3882         return get_errno((long)host_raddr);
3883     }
3884     raddr=h2g((unsigned long)host_raddr);
3885 
3886     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3887                    PAGE_VALID | PAGE_READ |
3888                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3889 
3890     for (i = 0; i < N_SHM_REGIONS; i++) {
3891         if (!shm_regions[i].in_use) {
3892             shm_regions[i].in_use = true;
3893             shm_regions[i].start = raddr;
3894             shm_regions[i].size = shm_info.shm_segsz;
3895             break;
3896         }
3897     }
3898 
3899     mmap_unlock();
3900     return raddr;
3901 
3902 }
3903 
3904 static inline abi_long do_shmdt(abi_ulong shmaddr)
3905 {
3906     int i;
3907     abi_long rv;
3908 
3909     mmap_lock();
3910 
3911     for (i = 0; i < N_SHM_REGIONS; ++i) {
3912         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3913             shm_regions[i].in_use = false;
3914             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3915             break;
3916         }
3917     }
3918     rv = get_errno(shmdt(g2h(shmaddr)));
3919 
3920     mmap_unlock();
3921 
3922     return rv;
3923 }
3924 
3925 #ifdef TARGET_NR_ipc
3926 /* ??? This only works with linear mappings.  */
3927 /* do_ipc() must return target values and target errnos. */
3928 static abi_long do_ipc(CPUArchState *cpu_env,
3929                        unsigned int call, abi_long first,
3930                        abi_long second, abi_long third,
3931                        abi_long ptr, abi_long fifth)
3932 {
3933     int version;
3934     abi_long ret = 0;
3935 
3936     version = call >> 16;
3937     call &= 0xffff;
3938 
3939     switch (call) {
3940     case IPCOP_semop:
3941         ret = do_semop(first, ptr, second);
3942         break;
3943 
3944     case IPCOP_semget:
3945         ret = get_errno(semget(first, second, third));
3946         break;
3947 
3948     case IPCOP_semctl: {
3949         /* The semun argument to semctl is passed by value, so dereference the
3950          * ptr argument. */
3951         abi_ulong atptr;
3952         get_user_ual(atptr, ptr);
3953         ret = do_semctl(first, second, third, atptr);
3954         break;
3955     }
3956 
3957     case IPCOP_msgget:
3958         ret = get_errno(msgget(first, second));
3959         break;
3960 
3961     case IPCOP_msgsnd:
3962         ret = do_msgsnd(first, ptr, second, third);
3963         break;
3964 
3965     case IPCOP_msgctl:
3966         ret = do_msgctl(first, second, ptr);
3967         break;
3968 
3969     case IPCOP_msgrcv:
3970         switch (version) {
3971         case 0:
3972             {
3973                 struct target_ipc_kludge {
3974                     abi_long msgp;
3975                     abi_long msgtyp;
3976                 } *tmp;
3977 
3978                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3979                     ret = -TARGET_EFAULT;
3980                     break;
3981                 }
3982 
3983                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3984 
3985                 unlock_user_struct(tmp, ptr, 0);
3986                 break;
3987             }
3988         default:
3989             ret = do_msgrcv(first, ptr, second, fifth, third);
3990         }
3991         break;
3992 
3993     case IPCOP_shmat:
3994         switch (version) {
3995         default:
3996         {
3997             abi_ulong raddr;
3998             raddr = do_shmat(cpu_env, first, ptr, second);
3999             if (is_error(raddr))
4000                 return get_errno(raddr);
4001             if (put_user_ual(raddr, third))
4002                 return -TARGET_EFAULT;
4003             break;
4004         }
4005         case 1:
4006             ret = -TARGET_EINVAL;
4007             break;
4008         }
4009 	break;
4010     case IPCOP_shmdt:
4011         ret = do_shmdt(ptr);
4012 	break;
4013 
4014     case IPCOP_shmget:
4015 	/* IPC_* flag values are the same on all linux platforms */
4016 	ret = get_errno(shmget(first, second, third));
4017 	break;
4018 
4019 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4020     case IPCOP_shmctl:
4021         ret = do_shmctl(first, second, ptr);
4022         break;
4023     default:
4024 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4025 	ret = -TARGET_ENOSYS;
4026 	break;
4027     }
4028     return ret;
4029 }
4030 #endif
4031 
4032 /* kernel structure types definitions */
4033 
4034 #define STRUCT(name, ...) STRUCT_ ## name,
4035 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4036 enum {
4037 #include "syscall_types.h"
4038 STRUCT_MAX
4039 };
4040 #undef STRUCT
4041 #undef STRUCT_SPECIAL
4042 
4043 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4044 #define STRUCT_SPECIAL(name)
4045 #include "syscall_types.h"
4046 #undef STRUCT
4047 #undef STRUCT_SPECIAL
4048 
4049 typedef struct IOCTLEntry IOCTLEntry;
4050 
4051 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4052                              int fd, int cmd, abi_long arg);
4053 
4054 struct IOCTLEntry {
4055     int target_cmd;
4056     unsigned int host_cmd;
4057     const char *name;
4058     int access;
4059     do_ioctl_fn *do_ioctl;
4060     const argtype arg_type[5];
4061 };
4062 
4063 #define IOC_R 0x0001
4064 #define IOC_W 0x0002
4065 #define IOC_RW (IOC_R | IOC_W)
4066 
4067 #define MAX_STRUCT_SIZE 4096
4068 
4069 #ifdef CONFIG_FIEMAP
4070 /* So fiemap access checks don't overflow on 32 bit systems.
4071  * This is very slightly smaller than the limit imposed by
4072  * the underlying kernel.
4073  */
4074 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4075                             / sizeof(struct fiemap_extent))
4076 
4077 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4078                                        int fd, int cmd, abi_long arg)
4079 {
4080     /* The parameter for this ioctl is a struct fiemap followed
4081      * by an array of struct fiemap_extent whose size is set
4082      * in fiemap->fm_extent_count. The array is filled in by the
4083      * ioctl.
4084      */
4085     int target_size_in, target_size_out;
4086     struct fiemap *fm;
4087     const argtype *arg_type = ie->arg_type;
4088     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4089     void *argptr, *p;
4090     abi_long ret;
4091     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4092     uint32_t outbufsz;
4093     int free_fm = 0;
4094 
4095     assert(arg_type[0] == TYPE_PTR);
4096     assert(ie->access == IOC_RW);
4097     arg_type++;
4098     target_size_in = thunk_type_size(arg_type, 0);
4099     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4100     if (!argptr) {
4101         return -TARGET_EFAULT;
4102     }
4103     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4104     unlock_user(argptr, arg, 0);
4105     fm = (struct fiemap *)buf_temp;
4106     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     outbufsz = sizeof (*fm) +
4111         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4112 
4113     if (outbufsz > MAX_STRUCT_SIZE) {
4114         /* We can't fit all the extents into the fixed size buffer.
4115          * Allocate one that is large enough and use it instead.
4116          */
4117         fm = g_try_malloc(outbufsz);
4118         if (!fm) {
4119             return -TARGET_ENOMEM;
4120         }
4121         memcpy(fm, buf_temp, sizeof(struct fiemap));
4122         free_fm = 1;
4123     }
4124     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4125     if (!is_error(ret)) {
4126         target_size_out = target_size_in;
4127         /* An extent_count of 0 means we were only counting the extents
4128          * so there are no structs to copy
4129          */
4130         if (fm->fm_extent_count != 0) {
4131             target_size_out += fm->fm_mapped_extents * extent_size;
4132         }
4133         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4134         if (!argptr) {
4135             ret = -TARGET_EFAULT;
4136         } else {
4137             /* Convert the struct fiemap */
4138             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4139             if (fm->fm_extent_count != 0) {
4140                 p = argptr + target_size_in;
4141                 /* ...and then all the struct fiemap_extents */
4142                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4143                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4144                                   THUNK_TARGET);
4145                     p += extent_size;
4146                 }
4147             }
4148             unlock_user(argptr, arg, target_size_out);
4149         }
4150     }
4151     if (free_fm) {
4152         g_free(fm);
4153     }
4154     return ret;
4155 }
4156 #endif
4157 
4158 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4159                                 int fd, int cmd, abi_long arg)
4160 {
4161     const argtype *arg_type = ie->arg_type;
4162     int target_size;
4163     void *argptr;
4164     int ret;
4165     struct ifconf *host_ifconf;
4166     uint32_t outbufsz;
4167     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4168     int target_ifreq_size;
4169     int nb_ifreq;
4170     int free_buf = 0;
4171     int i;
4172     int target_ifc_len;
4173     abi_long target_ifc_buf;
4174     int host_ifc_len;
4175     char *host_ifc_buf;
4176 
4177     assert(arg_type[0] == TYPE_PTR);
4178     assert(ie->access == IOC_RW);
4179 
4180     arg_type++;
4181     target_size = thunk_type_size(arg_type, 0);
4182 
4183     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4184     if (!argptr)
4185         return -TARGET_EFAULT;
4186     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4187     unlock_user(argptr, arg, 0);
4188 
4189     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4190     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4191     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4192 
4193     if (target_ifc_buf != 0) {
4194         target_ifc_len = host_ifconf->ifc_len;
4195         nb_ifreq = target_ifc_len / target_ifreq_size;
4196         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4197 
4198         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4199         if (outbufsz > MAX_STRUCT_SIZE) {
4200             /*
4201              * We can't fit all the extents into the fixed size buffer.
4202              * Allocate one that is large enough and use it instead.
4203              */
4204             host_ifconf = malloc(outbufsz);
4205             if (!host_ifconf) {
4206                 return -TARGET_ENOMEM;
4207             }
4208             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4209             free_buf = 1;
4210         }
4211         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4212 
4213         host_ifconf->ifc_len = host_ifc_len;
4214     } else {
4215       host_ifc_buf = NULL;
4216     }
4217     host_ifconf->ifc_buf = host_ifc_buf;
4218 
4219     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4220     if (!is_error(ret)) {
4221 	/* convert host ifc_len to target ifc_len */
4222 
4223         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4224         target_ifc_len = nb_ifreq * target_ifreq_size;
4225         host_ifconf->ifc_len = target_ifc_len;
4226 
4227 	/* restore target ifc_buf */
4228 
4229         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4230 
4231 	/* copy struct ifconf to target user */
4232 
4233         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4234         if (!argptr)
4235             return -TARGET_EFAULT;
4236         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4237         unlock_user(argptr, arg, target_size);
4238 
4239         if (target_ifc_buf != 0) {
4240             /* copy ifreq[] to target user */
4241             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4242             for (i = 0; i < nb_ifreq ; i++) {
4243                 thunk_convert(argptr + i * target_ifreq_size,
4244                               host_ifc_buf + i * sizeof(struct ifreq),
4245                               ifreq_arg_type, THUNK_TARGET);
4246             }
4247             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4248         }
4249     }
4250 
4251     if (free_buf) {
4252         free(host_ifconf);
4253     }
4254 
4255     return ret;
4256 }
4257 
4258 #if defined(CONFIG_USBFS)
4259 #if HOST_LONG_BITS > 64
4260 #error USBDEVFS thunks do not support >64 bit hosts yet.
4261 #endif
4262 struct live_urb {
4263     uint64_t target_urb_adr;
4264     uint64_t target_buf_adr;
4265     char *target_buf_ptr;
4266     struct usbdevfs_urb host_urb;
4267 };
4268 
4269 static GHashTable *usbdevfs_urb_hashtable(void)
4270 {
4271     static GHashTable *urb_hashtable;
4272 
4273     if (!urb_hashtable) {
4274         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4275     }
4276     return urb_hashtable;
4277 }
4278 
4279 static void urb_hashtable_insert(struct live_urb *urb)
4280 {
4281     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4282     g_hash_table_insert(urb_hashtable, urb, urb);
4283 }
4284 
4285 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4286 {
4287     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4288     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4289 }
4290 
4291 static void urb_hashtable_remove(struct live_urb *urb)
4292 {
4293     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4294     g_hash_table_remove(urb_hashtable, urb);
4295 }
4296 
4297 static abi_long
4298 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4299                           int fd, int cmd, abi_long arg)
4300 {
4301     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4302     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4303     struct live_urb *lurb;
4304     void *argptr;
4305     uint64_t hurb;
4306     int target_size;
4307     uintptr_t target_urb_adr;
4308     abi_long ret;
4309 
4310     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4311 
4312     memset(buf_temp, 0, sizeof(uint64_t));
4313     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4314     if (is_error(ret)) {
4315         return ret;
4316     }
4317 
4318     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4319     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4320     if (!lurb->target_urb_adr) {
4321         return -TARGET_EFAULT;
4322     }
4323     urb_hashtable_remove(lurb);
4324     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4325         lurb->host_urb.buffer_length);
4326     lurb->target_buf_ptr = NULL;
4327 
4328     /* restore the guest buffer pointer */
4329     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4330 
4331     /* update the guest urb struct */
4332     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4333     if (!argptr) {
4334         g_free(lurb);
4335         return -TARGET_EFAULT;
4336     }
4337     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4338     unlock_user(argptr, lurb->target_urb_adr, target_size);
4339 
4340     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4341     /* write back the urb handle */
4342     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4343     if (!argptr) {
4344         g_free(lurb);
4345         return -TARGET_EFAULT;
4346     }
4347 
4348     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4349     target_urb_adr = lurb->target_urb_adr;
4350     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4351     unlock_user(argptr, arg, target_size);
4352 
4353     g_free(lurb);
4354     return ret;
4355 }
4356 
4357 static abi_long
4358 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4359                              uint8_t *buf_temp __attribute__((unused)),
4360                              int fd, int cmd, abi_long arg)
4361 {
4362     struct live_urb *lurb;
4363 
4364     /* map target address back to host URB with metadata. */
4365     lurb = urb_hashtable_lookup(arg);
4366     if (!lurb) {
4367         return -TARGET_EFAULT;
4368     }
4369     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4370 }
4371 
4372 static abi_long
4373 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4374                             int fd, int cmd, abi_long arg)
4375 {
4376     const argtype *arg_type = ie->arg_type;
4377     int target_size;
4378     abi_long ret;
4379     void *argptr;
4380     int rw_dir;
4381     struct live_urb *lurb;
4382 
4383     /*
4384      * each submitted URB needs to map to a unique ID for the
4385      * kernel, and that unique ID needs to be a pointer to
4386      * host memory.  hence, we need to malloc for each URB.
4387      * isochronous transfers have a variable length struct.
4388      */
4389     arg_type++;
4390     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4391 
4392     /* construct host copy of urb and metadata */
4393     lurb = g_try_malloc0(sizeof(struct live_urb));
4394     if (!lurb) {
4395         return -TARGET_ENOMEM;
4396     }
4397 
4398     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4399     if (!argptr) {
4400         g_free(lurb);
4401         return -TARGET_EFAULT;
4402     }
4403     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4404     unlock_user(argptr, arg, 0);
4405 
4406     lurb->target_urb_adr = arg;
4407     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4408 
4409     /* buffer space used depends on endpoint type so lock the entire buffer */
4410     /* control type urbs should check the buffer contents for true direction */
4411     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4412     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4413         lurb->host_urb.buffer_length, 1);
4414     if (lurb->target_buf_ptr == NULL) {
4415         g_free(lurb);
4416         return -TARGET_EFAULT;
4417     }
4418 
4419     /* update buffer pointer in host copy */
4420     lurb->host_urb.buffer = lurb->target_buf_ptr;
4421 
4422     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4423     if (is_error(ret)) {
4424         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4425         g_free(lurb);
4426     } else {
4427         urb_hashtable_insert(lurb);
4428     }
4429 
4430     return ret;
4431 }
4432 #endif /* CONFIG_USBFS */
4433 
4434 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4435                             int cmd, abi_long arg)
4436 {
4437     void *argptr;
4438     struct dm_ioctl *host_dm;
4439     abi_long guest_data;
4440     uint32_t guest_data_size;
4441     int target_size;
4442     const argtype *arg_type = ie->arg_type;
4443     abi_long ret;
4444     void *big_buf = NULL;
4445     char *host_data;
4446 
4447     arg_type++;
4448     target_size = thunk_type_size(arg_type, 0);
4449     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4450     if (!argptr) {
4451         ret = -TARGET_EFAULT;
4452         goto out;
4453     }
4454     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4455     unlock_user(argptr, arg, 0);
4456 
4457     /* buf_temp is too small, so fetch things into a bigger buffer */
4458     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4459     memcpy(big_buf, buf_temp, target_size);
4460     buf_temp = big_buf;
4461     host_dm = big_buf;
4462 
4463     guest_data = arg + host_dm->data_start;
4464     if ((guest_data - arg) < 0) {
4465         ret = -TARGET_EINVAL;
4466         goto out;
4467     }
4468     guest_data_size = host_dm->data_size - host_dm->data_start;
4469     host_data = (char*)host_dm + host_dm->data_start;
4470 
4471     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4472     if (!argptr) {
4473         ret = -TARGET_EFAULT;
4474         goto out;
4475     }
4476 
4477     switch (ie->host_cmd) {
4478     case DM_REMOVE_ALL:
4479     case DM_LIST_DEVICES:
4480     case DM_DEV_CREATE:
4481     case DM_DEV_REMOVE:
4482     case DM_DEV_SUSPEND:
4483     case DM_DEV_STATUS:
4484     case DM_DEV_WAIT:
4485     case DM_TABLE_STATUS:
4486     case DM_TABLE_CLEAR:
4487     case DM_TABLE_DEPS:
4488     case DM_LIST_VERSIONS:
4489         /* no input data */
4490         break;
4491     case DM_DEV_RENAME:
4492     case DM_DEV_SET_GEOMETRY:
4493         /* data contains only strings */
4494         memcpy(host_data, argptr, guest_data_size);
4495         break;
4496     case DM_TARGET_MSG:
4497         memcpy(host_data, argptr, guest_data_size);
4498         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4499         break;
4500     case DM_TABLE_LOAD:
4501     {
4502         void *gspec = argptr;
4503         void *cur_data = host_data;
4504         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4505         int spec_size = thunk_type_size(arg_type, 0);
4506         int i;
4507 
4508         for (i = 0; i < host_dm->target_count; i++) {
4509             struct dm_target_spec *spec = cur_data;
4510             uint32_t next;
4511             int slen;
4512 
4513             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4514             slen = strlen((char*)gspec + spec_size) + 1;
4515             next = spec->next;
4516             spec->next = sizeof(*spec) + slen;
4517             strcpy((char*)&spec[1], gspec + spec_size);
4518             gspec += next;
4519             cur_data += spec->next;
4520         }
4521         break;
4522     }
4523     default:
4524         ret = -TARGET_EINVAL;
4525         unlock_user(argptr, guest_data, 0);
4526         goto out;
4527     }
4528     unlock_user(argptr, guest_data, 0);
4529 
4530     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4531     if (!is_error(ret)) {
4532         guest_data = arg + host_dm->data_start;
4533         guest_data_size = host_dm->data_size - host_dm->data_start;
4534         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4535         switch (ie->host_cmd) {
4536         case DM_REMOVE_ALL:
4537         case DM_DEV_CREATE:
4538         case DM_DEV_REMOVE:
4539         case DM_DEV_RENAME:
4540         case DM_DEV_SUSPEND:
4541         case DM_DEV_STATUS:
4542         case DM_TABLE_LOAD:
4543         case DM_TABLE_CLEAR:
4544         case DM_TARGET_MSG:
4545         case DM_DEV_SET_GEOMETRY:
4546             /* no return data */
4547             break;
4548         case DM_LIST_DEVICES:
4549         {
4550             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4551             uint32_t remaining_data = guest_data_size;
4552             void *cur_data = argptr;
4553             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4554             int nl_size = 12; /* can't use thunk_size due to alignment */
4555 
4556             while (1) {
4557                 uint32_t next = nl->next;
4558                 if (next) {
4559                     nl->next = nl_size + (strlen(nl->name) + 1);
4560                 }
4561                 if (remaining_data < nl->next) {
4562                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4563                     break;
4564                 }
4565                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4566                 strcpy(cur_data + nl_size, nl->name);
4567                 cur_data += nl->next;
4568                 remaining_data -= nl->next;
4569                 if (!next) {
4570                     break;
4571                 }
4572                 nl = (void*)nl + next;
4573             }
4574             break;
4575         }
4576         case DM_DEV_WAIT:
4577         case DM_TABLE_STATUS:
4578         {
4579             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4580             void *cur_data = argptr;
4581             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4582             int spec_size = thunk_type_size(arg_type, 0);
4583             int i;
4584 
4585             for (i = 0; i < host_dm->target_count; i++) {
4586                 uint32_t next = spec->next;
4587                 int slen = strlen((char*)&spec[1]) + 1;
4588                 spec->next = (cur_data - argptr) + spec_size + slen;
4589                 if (guest_data_size < spec->next) {
4590                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4591                     break;
4592                 }
4593                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4594                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4595                 cur_data = argptr + spec->next;
4596                 spec = (void*)host_dm + host_dm->data_start + next;
4597             }
4598             break;
4599         }
4600         case DM_TABLE_DEPS:
4601         {
4602             void *hdata = (void*)host_dm + host_dm->data_start;
4603             int count = *(uint32_t*)hdata;
4604             uint64_t *hdev = hdata + 8;
4605             uint64_t *gdev = argptr + 8;
4606             int i;
4607 
4608             *(uint32_t*)argptr = tswap32(count);
4609             for (i = 0; i < count; i++) {
4610                 *gdev = tswap64(*hdev);
4611                 gdev++;
4612                 hdev++;
4613             }
4614             break;
4615         }
4616         case DM_LIST_VERSIONS:
4617         {
4618             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4619             uint32_t remaining_data = guest_data_size;
4620             void *cur_data = argptr;
4621             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4622             int vers_size = thunk_type_size(arg_type, 0);
4623 
4624             while (1) {
4625                 uint32_t next = vers->next;
4626                 if (next) {
4627                     vers->next = vers_size + (strlen(vers->name) + 1);
4628                 }
4629                 if (remaining_data < vers->next) {
4630                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4631                     break;
4632                 }
4633                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4634                 strcpy(cur_data + vers_size, vers->name);
4635                 cur_data += vers->next;
4636                 remaining_data -= vers->next;
4637                 if (!next) {
4638                     break;
4639                 }
4640                 vers = (void*)vers + next;
4641             }
4642             break;
4643         }
4644         default:
4645             unlock_user(argptr, guest_data, 0);
4646             ret = -TARGET_EINVAL;
4647             goto out;
4648         }
4649         unlock_user(argptr, guest_data, guest_data_size);
4650 
4651         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4652         if (!argptr) {
4653             ret = -TARGET_EFAULT;
4654             goto out;
4655         }
4656         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4657         unlock_user(argptr, arg, target_size);
4658     }
4659 out:
4660     g_free(big_buf);
4661     return ret;
4662 }
4663 
4664 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4665                                int cmd, abi_long arg)
4666 {
4667     void *argptr;
4668     int target_size;
4669     const argtype *arg_type = ie->arg_type;
4670     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4671     abi_long ret;
4672 
4673     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4674     struct blkpg_partition host_part;
4675 
4676     /* Read and convert blkpg */
4677     arg_type++;
4678     target_size = thunk_type_size(arg_type, 0);
4679     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4680     if (!argptr) {
4681         ret = -TARGET_EFAULT;
4682         goto out;
4683     }
4684     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4685     unlock_user(argptr, arg, 0);
4686 
4687     switch (host_blkpg->op) {
4688     case BLKPG_ADD_PARTITION:
4689     case BLKPG_DEL_PARTITION:
4690         /* payload is struct blkpg_partition */
4691         break;
4692     default:
4693         /* Unknown opcode */
4694         ret = -TARGET_EINVAL;
4695         goto out;
4696     }
4697 
4698     /* Read and convert blkpg->data */
4699     arg = (abi_long)(uintptr_t)host_blkpg->data;
4700     target_size = thunk_type_size(part_arg_type, 0);
4701     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4702     if (!argptr) {
4703         ret = -TARGET_EFAULT;
4704         goto out;
4705     }
4706     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4707     unlock_user(argptr, arg, 0);
4708 
4709     /* Swizzle the data pointer to our local copy and call! */
4710     host_blkpg->data = &host_part;
4711     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4712 
4713 out:
4714     return ret;
4715 }
4716 
4717 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4718                                 int fd, int cmd, abi_long arg)
4719 {
4720     const argtype *arg_type = ie->arg_type;
4721     const StructEntry *se;
4722     const argtype *field_types;
4723     const int *dst_offsets, *src_offsets;
4724     int target_size;
4725     void *argptr;
4726     abi_ulong *target_rt_dev_ptr;
4727     unsigned long *host_rt_dev_ptr;
4728     abi_long ret;
4729     int i;
4730 
4731     assert(ie->access == IOC_W);
4732     assert(*arg_type == TYPE_PTR);
4733     arg_type++;
4734     assert(*arg_type == TYPE_STRUCT);
4735     target_size = thunk_type_size(arg_type, 0);
4736     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4737     if (!argptr) {
4738         return -TARGET_EFAULT;
4739     }
4740     arg_type++;
4741     assert(*arg_type == (int)STRUCT_rtentry);
4742     se = struct_entries + *arg_type++;
4743     assert(se->convert[0] == NULL);
4744     /* convert struct here to be able to catch rt_dev string */
4745     field_types = se->field_types;
4746     dst_offsets = se->field_offsets[THUNK_HOST];
4747     src_offsets = se->field_offsets[THUNK_TARGET];
4748     for (i = 0; i < se->nb_fields; i++) {
4749         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4750             assert(*field_types == TYPE_PTRVOID);
4751             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4752             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4753             if (*target_rt_dev_ptr != 0) {
4754                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4755                                                   tswapal(*target_rt_dev_ptr));
4756                 if (!*host_rt_dev_ptr) {
4757                     unlock_user(argptr, arg, 0);
4758                     return -TARGET_EFAULT;
4759                 }
4760             } else {
4761                 *host_rt_dev_ptr = 0;
4762             }
4763             field_types++;
4764             continue;
4765         }
4766         field_types = thunk_convert(buf_temp + dst_offsets[i],
4767                                     argptr + src_offsets[i],
4768                                     field_types, THUNK_HOST);
4769     }
4770     unlock_user(argptr, arg, 0);
4771 
4772     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4773     if (*host_rt_dev_ptr != 0) {
4774         unlock_user((void *)*host_rt_dev_ptr,
4775                     *target_rt_dev_ptr, 0);
4776     }
4777     return ret;
4778 }
4779 
4780 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4781                                      int fd, int cmd, abi_long arg)
4782 {
4783     int sig = target_to_host_signal(arg);
4784     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4785 }
4786 
4787 #ifdef TIOCGPTPEER
4788 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4789                                      int fd, int cmd, abi_long arg)
4790 {
4791     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4792     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4793 }
4794 #endif
4795 
4796 static IOCTLEntry ioctl_entries[] = {
4797 #define IOCTL(cmd, access, ...) \
4798     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4799 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4800     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4801 #define IOCTL_IGNORE(cmd) \
4802     { TARGET_ ## cmd, 0, #cmd },
4803 #include "ioctls.h"
4804     { 0, 0, },
4805 };
4806 
4807 /* ??? Implement proper locking for ioctls.  */
4808 /* do_ioctl() Must return target values and target errnos. */
4809 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4810 {
4811     const IOCTLEntry *ie;
4812     const argtype *arg_type;
4813     abi_long ret;
4814     uint8_t buf_temp[MAX_STRUCT_SIZE];
4815     int target_size;
4816     void *argptr;
4817 
4818     ie = ioctl_entries;
4819     for(;;) {
4820         if (ie->target_cmd == 0) {
4821             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4822             return -TARGET_ENOSYS;
4823         }
4824         if (ie->target_cmd == cmd)
4825             break;
4826         ie++;
4827     }
4828     arg_type = ie->arg_type;
4829     if (ie->do_ioctl) {
4830         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4831     } else if (!ie->host_cmd) {
4832         /* Some architectures define BSD ioctls in their headers
4833            that are not implemented in Linux.  */
4834         return -TARGET_ENOSYS;
4835     }
4836 
4837     switch(arg_type[0]) {
4838     case TYPE_NULL:
4839         /* no argument */
4840         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4841         break;
4842     case TYPE_PTRVOID:
4843     case TYPE_INT:
4844         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4845         break;
4846     case TYPE_PTR:
4847         arg_type++;
4848         target_size = thunk_type_size(arg_type, 0);
4849         switch(ie->access) {
4850         case IOC_R:
4851             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4852             if (!is_error(ret)) {
4853                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4854                 if (!argptr)
4855                     return -TARGET_EFAULT;
4856                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4857                 unlock_user(argptr, arg, target_size);
4858             }
4859             break;
4860         case IOC_W:
4861             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4862             if (!argptr)
4863                 return -TARGET_EFAULT;
4864             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4865             unlock_user(argptr, arg, 0);
4866             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4867             break;
4868         default:
4869         case IOC_RW:
4870             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4871             if (!argptr)
4872                 return -TARGET_EFAULT;
4873             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4874             unlock_user(argptr, arg, 0);
4875             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4876             if (!is_error(ret)) {
4877                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4878                 if (!argptr)
4879                     return -TARGET_EFAULT;
4880                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4881                 unlock_user(argptr, arg, target_size);
4882             }
4883             break;
4884         }
4885         break;
4886     default:
4887         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4888                  (long)cmd, arg_type[0]);
4889         ret = -TARGET_ENOSYS;
4890         break;
4891     }
4892     return ret;
4893 }
4894 
4895 static const bitmask_transtbl iflag_tbl[] = {
4896         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4897         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4898         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4899         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4900         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4901         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4902         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4903         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4904         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4905         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4906         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4907         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4908         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4909         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4910         { 0, 0, 0, 0 }
4911 };
4912 
4913 static const bitmask_transtbl oflag_tbl[] = {
4914 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4915 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4916 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4917 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4918 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4919 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4920 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4921 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4922 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4923 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4924 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4925 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4926 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4927 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4928 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4929 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4930 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4931 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4932 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4933 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4934 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4935 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4936 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4937 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4938 	{ 0, 0, 0, 0 }
4939 };
4940 
4941 static const bitmask_transtbl cflag_tbl[] = {
4942 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4943 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4944 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4945 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4946 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4947 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4948 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4949 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4950 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4951 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4952 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4953 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4954 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4955 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4956 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4957 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4958 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4959 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4960 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4961 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4962 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4963 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4964 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4965 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4966 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4967 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4968 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4969 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4970 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4971 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4972 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4973 	{ 0, 0, 0, 0 }
4974 };
4975 
4976 static const bitmask_transtbl lflag_tbl[] = {
4977 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4978 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4979 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4980 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4981 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4982 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4983 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4984 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4985 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4986 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4987 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4988 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4989 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4990 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4991 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4992 	{ 0, 0, 0, 0 }
4993 };
4994 
4995 static void target_to_host_termios (void *dst, const void *src)
4996 {
4997     struct host_termios *host = dst;
4998     const struct target_termios *target = src;
4999 
5000     host->c_iflag =
5001         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5002     host->c_oflag =
5003         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5004     host->c_cflag =
5005         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5006     host->c_lflag =
5007         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5008     host->c_line = target->c_line;
5009 
5010     memset(host->c_cc, 0, sizeof(host->c_cc));
5011     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5012     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5013     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5014     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5015     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5016     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5017     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5018     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5019     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5020     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5021     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5022     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5023     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5024     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5025     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5026     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5027     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5028 }
5029 
5030 static void host_to_target_termios (void *dst, const void *src)
5031 {
5032     struct target_termios *target = dst;
5033     const struct host_termios *host = src;
5034 
5035     target->c_iflag =
5036         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5037     target->c_oflag =
5038         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5039     target->c_cflag =
5040         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5041     target->c_lflag =
5042         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5043     target->c_line = host->c_line;
5044 
5045     memset(target->c_cc, 0, sizeof(target->c_cc));
5046     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5047     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5048     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5049     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5050     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5051     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5052     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5053     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5054     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5055     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5056     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5057     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5058     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5059     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5060     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5061     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5062     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5063 }
5064 
5065 static const StructEntry struct_termios_def = {
5066     .convert = { host_to_target_termios, target_to_host_termios },
5067     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5068     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5069 };
5070 
5071 static bitmask_transtbl mmap_flags_tbl[] = {
5072     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5073     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5074     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5075     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5076       MAP_ANONYMOUS, MAP_ANONYMOUS },
5077     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5078       MAP_GROWSDOWN, MAP_GROWSDOWN },
5079     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5080       MAP_DENYWRITE, MAP_DENYWRITE },
5081     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5082       MAP_EXECUTABLE, MAP_EXECUTABLE },
5083     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5084     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5085       MAP_NORESERVE, MAP_NORESERVE },
5086     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5087     /* MAP_STACK had been ignored by the kernel for quite some time.
5088        Recognize it for the target insofar as we do not want to pass
5089        it through to the host.  */
5090     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5091     { 0, 0, 0, 0 }
5092 };
5093 
5094 #if defined(TARGET_I386)
5095 
5096 /* NOTE: there is really one LDT for all the threads */
5097 static uint8_t *ldt_table;
5098 
5099 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5100 {
5101     int size;
5102     void *p;
5103 
5104     if (!ldt_table)
5105         return 0;
5106     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5107     if (size > bytecount)
5108         size = bytecount;
5109     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5110     if (!p)
5111         return -TARGET_EFAULT;
5112     /* ??? Should this by byteswapped?  */
5113     memcpy(p, ldt_table, size);
5114     unlock_user(p, ptr, size);
5115     return size;
5116 }
5117 
5118 /* XXX: add locking support */
5119 static abi_long write_ldt(CPUX86State *env,
5120                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5121 {
5122     struct target_modify_ldt_ldt_s ldt_info;
5123     struct target_modify_ldt_ldt_s *target_ldt_info;
5124     int seg_32bit, contents, read_exec_only, limit_in_pages;
5125     int seg_not_present, useable, lm;
5126     uint32_t *lp, entry_1, entry_2;
5127 
5128     if (bytecount != sizeof(ldt_info))
5129         return -TARGET_EINVAL;
5130     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5131         return -TARGET_EFAULT;
5132     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5133     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5134     ldt_info.limit = tswap32(target_ldt_info->limit);
5135     ldt_info.flags = tswap32(target_ldt_info->flags);
5136     unlock_user_struct(target_ldt_info, ptr, 0);
5137 
5138     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5139         return -TARGET_EINVAL;
5140     seg_32bit = ldt_info.flags & 1;
5141     contents = (ldt_info.flags >> 1) & 3;
5142     read_exec_only = (ldt_info.flags >> 3) & 1;
5143     limit_in_pages = (ldt_info.flags >> 4) & 1;
5144     seg_not_present = (ldt_info.flags >> 5) & 1;
5145     useable = (ldt_info.flags >> 6) & 1;
5146 #ifdef TARGET_ABI32
5147     lm = 0;
5148 #else
5149     lm = (ldt_info.flags >> 7) & 1;
5150 #endif
5151     if (contents == 3) {
5152         if (oldmode)
5153             return -TARGET_EINVAL;
5154         if (seg_not_present == 0)
5155             return -TARGET_EINVAL;
5156     }
5157     /* allocate the LDT */
5158     if (!ldt_table) {
5159         env->ldt.base = target_mmap(0,
5160                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5161                                     PROT_READ|PROT_WRITE,
5162                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5163         if (env->ldt.base == -1)
5164             return -TARGET_ENOMEM;
5165         memset(g2h(env->ldt.base), 0,
5166                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5167         env->ldt.limit = 0xffff;
5168         ldt_table = g2h(env->ldt.base);
5169     }
5170 
5171     /* NOTE: same code as Linux kernel */
5172     /* Allow LDTs to be cleared by the user. */
5173     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5174         if (oldmode ||
5175             (contents == 0		&&
5176              read_exec_only == 1	&&
5177              seg_32bit == 0		&&
5178              limit_in_pages == 0	&&
5179              seg_not_present == 1	&&
5180              useable == 0 )) {
5181             entry_1 = 0;
5182             entry_2 = 0;
5183             goto install;
5184         }
5185     }
5186 
5187     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5188         (ldt_info.limit & 0x0ffff);
5189     entry_2 = (ldt_info.base_addr & 0xff000000) |
5190         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5191         (ldt_info.limit & 0xf0000) |
5192         ((read_exec_only ^ 1) << 9) |
5193         (contents << 10) |
5194         ((seg_not_present ^ 1) << 15) |
5195         (seg_32bit << 22) |
5196         (limit_in_pages << 23) |
5197         (lm << 21) |
5198         0x7000;
5199     if (!oldmode)
5200         entry_2 |= (useable << 20);
5201 
5202     /* Install the new entry ...  */
5203 install:
5204     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5205     lp[0] = tswap32(entry_1);
5206     lp[1] = tswap32(entry_2);
5207     return 0;
5208 }
5209 
5210 /* specific and weird i386 syscalls */
5211 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5212                               unsigned long bytecount)
5213 {
5214     abi_long ret;
5215 
5216     switch (func) {
5217     case 0:
5218         ret = read_ldt(ptr, bytecount);
5219         break;
5220     case 1:
5221         ret = write_ldt(env, ptr, bytecount, 1);
5222         break;
5223     case 0x11:
5224         ret = write_ldt(env, ptr, bytecount, 0);
5225         break;
5226     default:
5227         ret = -TARGET_ENOSYS;
5228         break;
5229     }
5230     return ret;
5231 }
5232 
5233 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5234 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5235 {
5236     uint64_t *gdt_table = g2h(env->gdt.base);
5237     struct target_modify_ldt_ldt_s ldt_info;
5238     struct target_modify_ldt_ldt_s *target_ldt_info;
5239     int seg_32bit, contents, read_exec_only, limit_in_pages;
5240     int seg_not_present, useable, lm;
5241     uint32_t *lp, entry_1, entry_2;
5242     int i;
5243 
5244     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5245     if (!target_ldt_info)
5246         return -TARGET_EFAULT;
5247     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5248     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5249     ldt_info.limit = tswap32(target_ldt_info->limit);
5250     ldt_info.flags = tswap32(target_ldt_info->flags);
5251     if (ldt_info.entry_number == -1) {
5252         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5253             if (gdt_table[i] == 0) {
5254                 ldt_info.entry_number = i;
5255                 target_ldt_info->entry_number = tswap32(i);
5256                 break;
5257             }
5258         }
5259     }
5260     unlock_user_struct(target_ldt_info, ptr, 1);
5261 
5262     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5263         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5264            return -TARGET_EINVAL;
5265     seg_32bit = ldt_info.flags & 1;
5266     contents = (ldt_info.flags >> 1) & 3;
5267     read_exec_only = (ldt_info.flags >> 3) & 1;
5268     limit_in_pages = (ldt_info.flags >> 4) & 1;
5269     seg_not_present = (ldt_info.flags >> 5) & 1;
5270     useable = (ldt_info.flags >> 6) & 1;
5271 #ifdef TARGET_ABI32
5272     lm = 0;
5273 #else
5274     lm = (ldt_info.flags >> 7) & 1;
5275 #endif
5276 
5277     if (contents == 3) {
5278         if (seg_not_present == 0)
5279             return -TARGET_EINVAL;
5280     }
5281 
5282     /* NOTE: same code as Linux kernel */
5283     /* Allow LDTs to be cleared by the user. */
5284     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5285         if ((contents == 0             &&
5286              read_exec_only == 1       &&
5287              seg_32bit == 0            &&
5288              limit_in_pages == 0       &&
5289              seg_not_present == 1      &&
5290              useable == 0 )) {
5291             entry_1 = 0;
5292             entry_2 = 0;
5293             goto install;
5294         }
5295     }
5296 
5297     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5298         (ldt_info.limit & 0x0ffff);
5299     entry_2 = (ldt_info.base_addr & 0xff000000) |
5300         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5301         (ldt_info.limit & 0xf0000) |
5302         ((read_exec_only ^ 1) << 9) |
5303         (contents << 10) |
5304         ((seg_not_present ^ 1) << 15) |
5305         (seg_32bit << 22) |
5306         (limit_in_pages << 23) |
5307         (useable << 20) |
5308         (lm << 21) |
5309         0x7000;
5310 
5311     /* Install the new entry ...  */
5312 install:
5313     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5314     lp[0] = tswap32(entry_1);
5315     lp[1] = tswap32(entry_2);
5316     return 0;
5317 }
5318 
5319 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5320 {
5321     struct target_modify_ldt_ldt_s *target_ldt_info;
5322     uint64_t *gdt_table = g2h(env->gdt.base);
5323     uint32_t base_addr, limit, flags;
5324     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5325     int seg_not_present, useable, lm;
5326     uint32_t *lp, entry_1, entry_2;
5327 
5328     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5329     if (!target_ldt_info)
5330         return -TARGET_EFAULT;
5331     idx = tswap32(target_ldt_info->entry_number);
5332     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5333         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5334         unlock_user_struct(target_ldt_info, ptr, 1);
5335         return -TARGET_EINVAL;
5336     }
5337     lp = (uint32_t *)(gdt_table + idx);
5338     entry_1 = tswap32(lp[0]);
5339     entry_2 = tswap32(lp[1]);
5340 
5341     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5342     contents = (entry_2 >> 10) & 3;
5343     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5344     seg_32bit = (entry_2 >> 22) & 1;
5345     limit_in_pages = (entry_2 >> 23) & 1;
5346     useable = (entry_2 >> 20) & 1;
5347 #ifdef TARGET_ABI32
5348     lm = 0;
5349 #else
5350     lm = (entry_2 >> 21) & 1;
5351 #endif
5352     flags = (seg_32bit << 0) | (contents << 1) |
5353         (read_exec_only << 3) | (limit_in_pages << 4) |
5354         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5355     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5356     base_addr = (entry_1 >> 16) |
5357         (entry_2 & 0xff000000) |
5358         ((entry_2 & 0xff) << 16);
5359     target_ldt_info->base_addr = tswapal(base_addr);
5360     target_ldt_info->limit = tswap32(limit);
5361     target_ldt_info->flags = tswap32(flags);
5362     unlock_user_struct(target_ldt_info, ptr, 1);
5363     return 0;
5364 }
5365 #endif /* TARGET_I386 && TARGET_ABI32 */
5366 
5367 #ifndef TARGET_ABI32
5368 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5369 {
5370     abi_long ret = 0;
5371     abi_ulong val;
5372     int idx;
5373 
5374     switch(code) {
5375     case TARGET_ARCH_SET_GS:
5376     case TARGET_ARCH_SET_FS:
5377         if (code == TARGET_ARCH_SET_GS)
5378             idx = R_GS;
5379         else
5380             idx = R_FS;
5381         cpu_x86_load_seg(env, idx, 0);
5382         env->segs[idx].base = addr;
5383         break;
5384     case TARGET_ARCH_GET_GS:
5385     case TARGET_ARCH_GET_FS:
5386         if (code == TARGET_ARCH_GET_GS)
5387             idx = R_GS;
5388         else
5389             idx = R_FS;
5390         val = env->segs[idx].base;
5391         if (put_user(val, addr, abi_ulong))
5392             ret = -TARGET_EFAULT;
5393         break;
5394     default:
5395         ret = -TARGET_EINVAL;
5396         break;
5397     }
5398     return ret;
5399 }
5400 #endif
5401 
5402 #endif /* defined(TARGET_I386) */
5403 
5404 #define NEW_STACK_SIZE 0x40000
5405 
5406 
5407 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5408 typedef struct {
5409     CPUArchState *env;
5410     pthread_mutex_t mutex;
5411     pthread_cond_t cond;
5412     pthread_t thread;
5413     uint32_t tid;
5414     abi_ulong child_tidptr;
5415     abi_ulong parent_tidptr;
5416     sigset_t sigmask;
5417 } new_thread_info;
5418 
5419 static void *clone_func(void *arg)
5420 {
5421     new_thread_info *info = arg;
5422     CPUArchState *env;
5423     CPUState *cpu;
5424     TaskState *ts;
5425 
5426     rcu_register_thread();
5427     tcg_register_thread();
5428     env = info->env;
5429     cpu = ENV_GET_CPU(env);
5430     thread_cpu = cpu;
5431     ts = (TaskState *)cpu->opaque;
5432     info->tid = gettid();
5433     task_settid(ts);
5434     if (info->child_tidptr)
5435         put_user_u32(info->tid, info->child_tidptr);
5436     if (info->parent_tidptr)
5437         put_user_u32(info->tid, info->parent_tidptr);
5438     /* Enable signals.  */
5439     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5440     /* Signal to the parent that we're ready.  */
5441     pthread_mutex_lock(&info->mutex);
5442     pthread_cond_broadcast(&info->cond);
5443     pthread_mutex_unlock(&info->mutex);
5444     /* Wait until the parent has finished initializing the tls state.  */
5445     pthread_mutex_lock(&clone_lock);
5446     pthread_mutex_unlock(&clone_lock);
5447     cpu_loop(env);
5448     /* never exits */
5449     return NULL;
5450 }
5451 
5452 /* do_fork() Must return host values and target errnos (unlike most
5453    do_*() functions). */
5454 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5455                    abi_ulong parent_tidptr, target_ulong newtls,
5456                    abi_ulong child_tidptr)
5457 {
5458     CPUState *cpu = ENV_GET_CPU(env);
5459     int ret;
5460     TaskState *ts;
5461     CPUState *new_cpu;
5462     CPUArchState *new_env;
5463     sigset_t sigmask;
5464 
5465     flags &= ~CLONE_IGNORED_FLAGS;
5466 
5467     /* Emulate vfork() with fork() */
5468     if (flags & CLONE_VFORK)
5469         flags &= ~(CLONE_VFORK | CLONE_VM);
5470 
5471     if (flags & CLONE_VM) {
5472         TaskState *parent_ts = (TaskState *)cpu->opaque;
5473         new_thread_info info;
5474         pthread_attr_t attr;
5475 
5476         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5477             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5478             return -TARGET_EINVAL;
5479         }
5480 
5481         ts = g_new0(TaskState, 1);
5482         init_task_state(ts);
5483 
5484         /* Grab a mutex so that thread setup appears atomic.  */
5485         pthread_mutex_lock(&clone_lock);
5486 
5487         /* we create a new CPU instance. */
5488         new_env = cpu_copy(env);
5489         /* Init regs that differ from the parent.  */
5490         cpu_clone_regs(new_env, newsp);
5491         new_cpu = ENV_GET_CPU(new_env);
5492         new_cpu->opaque = ts;
5493         ts->bprm = parent_ts->bprm;
5494         ts->info = parent_ts->info;
5495         ts->signal_mask = parent_ts->signal_mask;
5496 
5497         if (flags & CLONE_CHILD_CLEARTID) {
5498             ts->child_tidptr = child_tidptr;
5499         }
5500 
5501         if (flags & CLONE_SETTLS) {
5502             cpu_set_tls (new_env, newtls);
5503         }
5504 
5505         memset(&info, 0, sizeof(info));
5506         pthread_mutex_init(&info.mutex, NULL);
5507         pthread_mutex_lock(&info.mutex);
5508         pthread_cond_init(&info.cond, NULL);
5509         info.env = new_env;
5510         if (flags & CLONE_CHILD_SETTID) {
5511             info.child_tidptr = child_tidptr;
5512         }
5513         if (flags & CLONE_PARENT_SETTID) {
5514             info.parent_tidptr = parent_tidptr;
5515         }
5516 
5517         ret = pthread_attr_init(&attr);
5518         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5519         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5520         /* It is not safe to deliver signals until the child has finished
5521            initializing, so temporarily block all signals.  */
5522         sigfillset(&sigmask);
5523         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5524 
5525         /* If this is our first additional thread, we need to ensure we
5526          * generate code for parallel execution and flush old translations.
5527          */
5528         if (!parallel_cpus) {
5529             parallel_cpus = true;
5530             tb_flush(cpu);
5531         }
5532 
5533         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5534         /* TODO: Free new CPU state if thread creation failed.  */
5535 
5536         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5537         pthread_attr_destroy(&attr);
5538         if (ret == 0) {
5539             /* Wait for the child to initialize.  */
5540             pthread_cond_wait(&info.cond, &info.mutex);
5541             ret = info.tid;
5542         } else {
5543             ret = -1;
5544         }
5545         pthread_mutex_unlock(&info.mutex);
5546         pthread_cond_destroy(&info.cond);
5547         pthread_mutex_destroy(&info.mutex);
5548         pthread_mutex_unlock(&clone_lock);
5549     } else {
5550         /* if no CLONE_VM, we consider it is a fork */
5551         if (flags & CLONE_INVALID_FORK_FLAGS) {
5552             return -TARGET_EINVAL;
5553         }
5554 
5555         /* We can't support custom termination signals */
5556         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5557             return -TARGET_EINVAL;
5558         }
5559 
5560         if (block_signals()) {
5561             return -TARGET_ERESTARTSYS;
5562         }
5563 
5564         fork_start();
5565         ret = fork();
5566         if (ret == 0) {
5567             /* Child Process.  */
5568             cpu_clone_regs(env, newsp);
5569             fork_end(1);
5570             /* There is a race condition here.  The parent process could
5571                theoretically read the TID in the child process before the child
5572                tid is set.  This would require using either ptrace
5573                (not implemented) or having *_tidptr to point at a shared memory
5574                mapping.  We can't repeat the spinlock hack used above because
5575                the child process gets its own copy of the lock.  */
5576             if (flags & CLONE_CHILD_SETTID)
5577                 put_user_u32(gettid(), child_tidptr);
5578             if (flags & CLONE_PARENT_SETTID)
5579                 put_user_u32(gettid(), parent_tidptr);
5580             ts = (TaskState *)cpu->opaque;
5581             if (flags & CLONE_SETTLS)
5582                 cpu_set_tls (env, newtls);
5583             if (flags & CLONE_CHILD_CLEARTID)
5584                 ts->child_tidptr = child_tidptr;
5585         } else {
5586             fork_end(0);
5587         }
5588     }
5589     return ret;
5590 }
5591 
5592 /* warning : doesn't handle linux specific flags... */
5593 static int target_to_host_fcntl_cmd(int cmd)
5594 {
5595     int ret;
5596 
5597     switch(cmd) {
5598     case TARGET_F_DUPFD:
5599     case TARGET_F_GETFD:
5600     case TARGET_F_SETFD:
5601     case TARGET_F_GETFL:
5602     case TARGET_F_SETFL:
5603         ret = cmd;
5604         break;
5605     case TARGET_F_GETLK:
5606         ret = F_GETLK64;
5607         break;
5608     case TARGET_F_SETLK:
5609         ret = F_SETLK64;
5610         break;
5611     case TARGET_F_SETLKW:
5612         ret = F_SETLKW64;
5613         break;
5614     case TARGET_F_GETOWN:
5615         ret = F_GETOWN;
5616         break;
5617     case TARGET_F_SETOWN:
5618         ret = F_SETOWN;
5619         break;
5620     case TARGET_F_GETSIG:
5621         ret = F_GETSIG;
5622         break;
5623     case TARGET_F_SETSIG:
5624         ret = F_SETSIG;
5625         break;
5626 #if TARGET_ABI_BITS == 32
5627     case TARGET_F_GETLK64:
5628         ret = F_GETLK64;
5629         break;
5630     case TARGET_F_SETLK64:
5631         ret = F_SETLK64;
5632         break;
5633     case TARGET_F_SETLKW64:
5634         ret = F_SETLKW64;
5635         break;
5636 #endif
5637     case TARGET_F_SETLEASE:
5638         ret = F_SETLEASE;
5639         break;
5640     case TARGET_F_GETLEASE:
5641         ret = F_GETLEASE;
5642         break;
5643 #ifdef F_DUPFD_CLOEXEC
5644     case TARGET_F_DUPFD_CLOEXEC:
5645         ret = F_DUPFD_CLOEXEC;
5646         break;
5647 #endif
5648     case TARGET_F_NOTIFY:
5649         ret = F_NOTIFY;
5650         break;
5651 #ifdef F_GETOWN_EX
5652     case TARGET_F_GETOWN_EX:
5653         ret = F_GETOWN_EX;
5654         break;
5655 #endif
5656 #ifdef F_SETOWN_EX
5657     case TARGET_F_SETOWN_EX:
5658         ret = F_SETOWN_EX;
5659         break;
5660 #endif
5661 #ifdef F_SETPIPE_SZ
5662     case TARGET_F_SETPIPE_SZ:
5663         ret = F_SETPIPE_SZ;
5664         break;
5665     case TARGET_F_GETPIPE_SZ:
5666         ret = F_GETPIPE_SZ;
5667         break;
5668 #endif
5669     default:
5670         ret = -TARGET_EINVAL;
5671         break;
5672     }
5673 
5674 #if defined(__powerpc64__)
5675     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5676      * is not supported by kernel. The glibc fcntl call actually adjusts
5677      * them to 5, 6 and 7 before making the syscall(). Since we make the
5678      * syscall directly, adjust to what is supported by the kernel.
5679      */
5680     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5681         ret -= F_GETLK64 - 5;
5682     }
5683 #endif
5684 
5685     return ret;
5686 }
5687 
5688 #define FLOCK_TRANSTBL \
5689     switch (type) { \
5690     TRANSTBL_CONVERT(F_RDLCK); \
5691     TRANSTBL_CONVERT(F_WRLCK); \
5692     TRANSTBL_CONVERT(F_UNLCK); \
5693     TRANSTBL_CONVERT(F_EXLCK); \
5694     TRANSTBL_CONVERT(F_SHLCK); \
5695     }
5696 
5697 static int target_to_host_flock(int type)
5698 {
5699 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5700     FLOCK_TRANSTBL
5701 #undef  TRANSTBL_CONVERT
5702     return -TARGET_EINVAL;
5703 }
5704 
5705 static int host_to_target_flock(int type)
5706 {
5707 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5708     FLOCK_TRANSTBL
5709 #undef  TRANSTBL_CONVERT
5710     /* if we don't know how to convert the value coming
5711      * from the host we copy to the target field as-is
5712      */
5713     return type;
5714 }
5715 
5716 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5717                                             abi_ulong target_flock_addr)
5718 {
5719     struct target_flock *target_fl;
5720     int l_type;
5721 
5722     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5723         return -TARGET_EFAULT;
5724     }
5725 
5726     __get_user(l_type, &target_fl->l_type);
5727     l_type = target_to_host_flock(l_type);
5728     if (l_type < 0) {
5729         return l_type;
5730     }
5731     fl->l_type = l_type;
5732     __get_user(fl->l_whence, &target_fl->l_whence);
5733     __get_user(fl->l_start, &target_fl->l_start);
5734     __get_user(fl->l_len, &target_fl->l_len);
5735     __get_user(fl->l_pid, &target_fl->l_pid);
5736     unlock_user_struct(target_fl, target_flock_addr, 0);
5737     return 0;
5738 }
5739 
5740 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5741                                           const struct flock64 *fl)
5742 {
5743     struct target_flock *target_fl;
5744     short l_type;
5745 
5746     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5747         return -TARGET_EFAULT;
5748     }
5749 
5750     l_type = host_to_target_flock(fl->l_type);
5751     __put_user(l_type, &target_fl->l_type);
5752     __put_user(fl->l_whence, &target_fl->l_whence);
5753     __put_user(fl->l_start, &target_fl->l_start);
5754     __put_user(fl->l_len, &target_fl->l_len);
5755     __put_user(fl->l_pid, &target_fl->l_pid);
5756     unlock_user_struct(target_fl, target_flock_addr, 1);
5757     return 0;
5758 }
5759 
5760 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5761 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5762 
5763 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5764 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5765                                                    abi_ulong target_flock_addr)
5766 {
5767     struct target_oabi_flock64 *target_fl;
5768     int l_type;
5769 
5770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5771         return -TARGET_EFAULT;
5772     }
5773 
5774     __get_user(l_type, &target_fl->l_type);
5775     l_type = target_to_host_flock(l_type);
5776     if (l_type < 0) {
5777         return l_type;
5778     }
5779     fl->l_type = l_type;
5780     __get_user(fl->l_whence, &target_fl->l_whence);
5781     __get_user(fl->l_start, &target_fl->l_start);
5782     __get_user(fl->l_len, &target_fl->l_len);
5783     __get_user(fl->l_pid, &target_fl->l_pid);
5784     unlock_user_struct(target_fl, target_flock_addr, 0);
5785     return 0;
5786 }
5787 
5788 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5789                                                  const struct flock64 *fl)
5790 {
5791     struct target_oabi_flock64 *target_fl;
5792     short l_type;
5793 
5794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5795         return -TARGET_EFAULT;
5796     }
5797 
5798     l_type = host_to_target_flock(fl->l_type);
5799     __put_user(l_type, &target_fl->l_type);
5800     __put_user(fl->l_whence, &target_fl->l_whence);
5801     __put_user(fl->l_start, &target_fl->l_start);
5802     __put_user(fl->l_len, &target_fl->l_len);
5803     __put_user(fl->l_pid, &target_fl->l_pid);
5804     unlock_user_struct(target_fl, target_flock_addr, 1);
5805     return 0;
5806 }
5807 #endif
5808 
5809 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5810                                               abi_ulong target_flock_addr)
5811 {
5812     struct target_flock64 *target_fl;
5813     int l_type;
5814 
5815     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5816         return -TARGET_EFAULT;
5817     }
5818 
5819     __get_user(l_type, &target_fl->l_type);
5820     l_type = target_to_host_flock(l_type);
5821     if (l_type < 0) {
5822         return l_type;
5823     }
5824     fl->l_type = l_type;
5825     __get_user(fl->l_whence, &target_fl->l_whence);
5826     __get_user(fl->l_start, &target_fl->l_start);
5827     __get_user(fl->l_len, &target_fl->l_len);
5828     __get_user(fl->l_pid, &target_fl->l_pid);
5829     unlock_user_struct(target_fl, target_flock_addr, 0);
5830     return 0;
5831 }
5832 
5833 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5834                                             const struct flock64 *fl)
5835 {
5836     struct target_flock64 *target_fl;
5837     short l_type;
5838 
5839     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5840         return -TARGET_EFAULT;
5841     }
5842 
5843     l_type = host_to_target_flock(fl->l_type);
5844     __put_user(l_type, &target_fl->l_type);
5845     __put_user(fl->l_whence, &target_fl->l_whence);
5846     __put_user(fl->l_start, &target_fl->l_start);
5847     __put_user(fl->l_len, &target_fl->l_len);
5848     __put_user(fl->l_pid, &target_fl->l_pid);
5849     unlock_user_struct(target_fl, target_flock_addr, 1);
5850     return 0;
5851 }
5852 
5853 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5854 {
5855     struct flock64 fl64;
5856 #ifdef F_GETOWN_EX
5857     struct f_owner_ex fox;
5858     struct target_f_owner_ex *target_fox;
5859 #endif
5860     abi_long ret;
5861     int host_cmd = target_to_host_fcntl_cmd(cmd);
5862 
5863     if (host_cmd == -TARGET_EINVAL)
5864 	    return host_cmd;
5865 
5866     switch(cmd) {
5867     case TARGET_F_GETLK:
5868         ret = copy_from_user_flock(&fl64, arg);
5869         if (ret) {
5870             return ret;
5871         }
5872         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5873         if (ret == 0) {
5874             ret = copy_to_user_flock(arg, &fl64);
5875         }
5876         break;
5877 
5878     case TARGET_F_SETLK:
5879     case TARGET_F_SETLKW:
5880         ret = copy_from_user_flock(&fl64, arg);
5881         if (ret) {
5882             return ret;
5883         }
5884         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5885         break;
5886 
5887     case TARGET_F_GETLK64:
5888         ret = copy_from_user_flock64(&fl64, arg);
5889         if (ret) {
5890             return ret;
5891         }
5892         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5893         if (ret == 0) {
5894             ret = copy_to_user_flock64(arg, &fl64);
5895         }
5896         break;
5897     case TARGET_F_SETLK64:
5898     case TARGET_F_SETLKW64:
5899         ret = copy_from_user_flock64(&fl64, arg);
5900         if (ret) {
5901             return ret;
5902         }
5903         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5904         break;
5905 
5906     case TARGET_F_GETFL:
5907         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5908         if (ret >= 0) {
5909             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5910         }
5911         break;
5912 
5913     case TARGET_F_SETFL:
5914         ret = get_errno(safe_fcntl(fd, host_cmd,
5915                                    target_to_host_bitmask(arg,
5916                                                           fcntl_flags_tbl)));
5917         break;
5918 
5919 #ifdef F_GETOWN_EX
5920     case TARGET_F_GETOWN_EX:
5921         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5922         if (ret >= 0) {
5923             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5924                 return -TARGET_EFAULT;
5925             target_fox->type = tswap32(fox.type);
5926             target_fox->pid = tswap32(fox.pid);
5927             unlock_user_struct(target_fox, arg, 1);
5928         }
5929         break;
5930 #endif
5931 
5932 #ifdef F_SETOWN_EX
5933     case TARGET_F_SETOWN_EX:
5934         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5935             return -TARGET_EFAULT;
5936         fox.type = tswap32(target_fox->type);
5937         fox.pid = tswap32(target_fox->pid);
5938         unlock_user_struct(target_fox, arg, 0);
5939         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5940         break;
5941 #endif
5942 
5943     case TARGET_F_SETOWN:
5944     case TARGET_F_GETOWN:
5945     case TARGET_F_SETSIG:
5946     case TARGET_F_GETSIG:
5947     case TARGET_F_SETLEASE:
5948     case TARGET_F_GETLEASE:
5949     case TARGET_F_SETPIPE_SZ:
5950     case TARGET_F_GETPIPE_SZ:
5951         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5952         break;
5953 
5954     default:
5955         ret = get_errno(safe_fcntl(fd, cmd, arg));
5956         break;
5957     }
5958     return ret;
5959 }
5960 
5961 #ifdef USE_UID16
5962 
5963 static inline int high2lowuid(int uid)
5964 {
5965     if (uid > 65535)
5966         return 65534;
5967     else
5968         return uid;
5969 }
5970 
5971 static inline int high2lowgid(int gid)
5972 {
5973     if (gid > 65535)
5974         return 65534;
5975     else
5976         return gid;
5977 }
5978 
5979 static inline int low2highuid(int uid)
5980 {
5981     if ((int16_t)uid == -1)
5982         return -1;
5983     else
5984         return uid;
5985 }
5986 
5987 static inline int low2highgid(int gid)
5988 {
5989     if ((int16_t)gid == -1)
5990         return -1;
5991     else
5992         return gid;
5993 }
5994 static inline int tswapid(int id)
5995 {
5996     return tswap16(id);
5997 }
5998 
5999 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6000 
6001 #else /* !USE_UID16 */
6002 static inline int high2lowuid(int uid)
6003 {
6004     return uid;
6005 }
6006 static inline int high2lowgid(int gid)
6007 {
6008     return gid;
6009 }
6010 static inline int low2highuid(int uid)
6011 {
6012     return uid;
6013 }
6014 static inline int low2highgid(int gid)
6015 {
6016     return gid;
6017 }
6018 static inline int tswapid(int id)
6019 {
6020     return tswap32(id);
6021 }
6022 
6023 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6024 
6025 #endif /* USE_UID16 */
6026 
6027 /* We must do direct syscalls for setting UID/GID, because we want to
6028  * implement the Linux system call semantics of "change only for this thread",
6029  * not the libc/POSIX semantics of "change for all threads in process".
6030  * (See http://ewontfix.com/17/ for more details.)
6031  * We use the 32-bit version of the syscalls if present; if it is not
6032  * then either the host architecture supports 32-bit UIDs natively with
6033  * the standard syscall, or the 16-bit UID is the best we can do.
6034  */
6035 #ifdef __NR_setuid32
6036 #define __NR_sys_setuid __NR_setuid32
6037 #else
6038 #define __NR_sys_setuid __NR_setuid
6039 #endif
6040 #ifdef __NR_setgid32
6041 #define __NR_sys_setgid __NR_setgid32
6042 #else
6043 #define __NR_sys_setgid __NR_setgid
6044 #endif
6045 #ifdef __NR_setresuid32
6046 #define __NR_sys_setresuid __NR_setresuid32
6047 #else
6048 #define __NR_sys_setresuid __NR_setresuid
6049 #endif
6050 #ifdef __NR_setresgid32
6051 #define __NR_sys_setresgid __NR_setresgid32
6052 #else
6053 #define __NR_sys_setresgid __NR_setresgid
6054 #endif
6055 
6056 _syscall1(int, sys_setuid, uid_t, uid)
6057 _syscall1(int, sys_setgid, gid_t, gid)
6058 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6059 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6060 
6061 void syscall_init(void)
6062 {
6063     IOCTLEntry *ie;
6064     const argtype *arg_type;
6065     int size;
6066     int i;
6067 
6068     thunk_init(STRUCT_MAX);
6069 
6070 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6071 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6072 #include "syscall_types.h"
6073 #undef STRUCT
6074 #undef STRUCT_SPECIAL
6075 
6076     /* Build target_to_host_errno_table[] table from
6077      * host_to_target_errno_table[]. */
6078     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6079         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6080     }
6081 
6082     /* we patch the ioctl size if necessary. We rely on the fact that
6083        no ioctl has all the bits at '1' in the size field */
6084     ie = ioctl_entries;
6085     while (ie->target_cmd != 0) {
6086         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6087             TARGET_IOC_SIZEMASK) {
6088             arg_type = ie->arg_type;
6089             if (arg_type[0] != TYPE_PTR) {
6090                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6091                         ie->target_cmd);
6092                 exit(1);
6093             }
6094             arg_type++;
6095             size = thunk_type_size(arg_type, 0);
6096             ie->target_cmd = (ie->target_cmd &
6097                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6098                 (size << TARGET_IOC_SIZESHIFT);
6099         }
6100 
6101         /* automatic consistency check if same arch */
6102 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6103     (defined(__x86_64__) && defined(TARGET_X86_64))
6104         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6105             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6106                     ie->name, ie->target_cmd, ie->host_cmd);
6107         }
6108 #endif
6109         ie++;
6110     }
6111 }
6112 
6113 #if TARGET_ABI_BITS == 32
6114 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6115 {
6116 #ifdef TARGET_WORDS_BIGENDIAN
6117     return ((uint64_t)word0 << 32) | word1;
6118 #else
6119     return ((uint64_t)word1 << 32) | word0;
6120 #endif
6121 }
6122 #else /* TARGET_ABI_BITS == 32 */
6123 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6124 {
6125     return word0;
6126 }
6127 #endif /* TARGET_ABI_BITS != 32 */
6128 
6129 #ifdef TARGET_NR_truncate64
6130 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6131                                          abi_long arg2,
6132                                          abi_long arg3,
6133                                          abi_long arg4)
6134 {
6135     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6136         arg2 = arg3;
6137         arg3 = arg4;
6138     }
6139     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6140 }
6141 #endif
6142 
6143 #ifdef TARGET_NR_ftruncate64
6144 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6145                                           abi_long arg2,
6146                                           abi_long arg3,
6147                                           abi_long arg4)
6148 {
6149     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6150         arg2 = arg3;
6151         arg3 = arg4;
6152     }
6153     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6154 }
6155 #endif
6156 
6157 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6158                                                abi_ulong target_addr)
6159 {
6160     struct target_timespec *target_ts;
6161 
6162     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6163         return -TARGET_EFAULT;
6164     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6165     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6166     unlock_user_struct(target_ts, target_addr, 0);
6167     return 0;
6168 }
6169 
6170 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6171                                                struct timespec *host_ts)
6172 {
6173     struct target_timespec *target_ts;
6174 
6175     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6176         return -TARGET_EFAULT;
6177     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6178     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6179     unlock_user_struct(target_ts, target_addr, 1);
6180     return 0;
6181 }
6182 
6183 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6184                                                  abi_ulong target_addr)
6185 {
6186     struct target_itimerspec *target_itspec;
6187 
6188     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6189         return -TARGET_EFAULT;
6190     }
6191 
6192     host_itspec->it_interval.tv_sec =
6193                             tswapal(target_itspec->it_interval.tv_sec);
6194     host_itspec->it_interval.tv_nsec =
6195                             tswapal(target_itspec->it_interval.tv_nsec);
6196     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6197     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6198 
6199     unlock_user_struct(target_itspec, target_addr, 1);
6200     return 0;
6201 }
6202 
6203 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6204                                                struct itimerspec *host_its)
6205 {
6206     struct target_itimerspec *target_itspec;
6207 
6208     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6209         return -TARGET_EFAULT;
6210     }
6211 
6212     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6213     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6214 
6215     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6216     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6217 
6218     unlock_user_struct(target_itspec, target_addr, 0);
6219     return 0;
6220 }
6221 
6222 static inline abi_long target_to_host_timex(struct timex *host_tx,
6223                                             abi_long target_addr)
6224 {
6225     struct target_timex *target_tx;
6226 
6227     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6228         return -TARGET_EFAULT;
6229     }
6230 
6231     __get_user(host_tx->modes, &target_tx->modes);
6232     __get_user(host_tx->offset, &target_tx->offset);
6233     __get_user(host_tx->freq, &target_tx->freq);
6234     __get_user(host_tx->maxerror, &target_tx->maxerror);
6235     __get_user(host_tx->esterror, &target_tx->esterror);
6236     __get_user(host_tx->status, &target_tx->status);
6237     __get_user(host_tx->constant, &target_tx->constant);
6238     __get_user(host_tx->precision, &target_tx->precision);
6239     __get_user(host_tx->tolerance, &target_tx->tolerance);
6240     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6241     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6242     __get_user(host_tx->tick, &target_tx->tick);
6243     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6244     __get_user(host_tx->jitter, &target_tx->jitter);
6245     __get_user(host_tx->shift, &target_tx->shift);
6246     __get_user(host_tx->stabil, &target_tx->stabil);
6247     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6248     __get_user(host_tx->calcnt, &target_tx->calcnt);
6249     __get_user(host_tx->errcnt, &target_tx->errcnt);
6250     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6251     __get_user(host_tx->tai, &target_tx->tai);
6252 
6253     unlock_user_struct(target_tx, target_addr, 0);
6254     return 0;
6255 }
6256 
6257 static inline abi_long host_to_target_timex(abi_long target_addr,
6258                                             struct timex *host_tx)
6259 {
6260     struct target_timex *target_tx;
6261 
6262     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6263         return -TARGET_EFAULT;
6264     }
6265 
6266     __put_user(host_tx->modes, &target_tx->modes);
6267     __put_user(host_tx->offset, &target_tx->offset);
6268     __put_user(host_tx->freq, &target_tx->freq);
6269     __put_user(host_tx->maxerror, &target_tx->maxerror);
6270     __put_user(host_tx->esterror, &target_tx->esterror);
6271     __put_user(host_tx->status, &target_tx->status);
6272     __put_user(host_tx->constant, &target_tx->constant);
6273     __put_user(host_tx->precision, &target_tx->precision);
6274     __put_user(host_tx->tolerance, &target_tx->tolerance);
6275     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6276     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6277     __put_user(host_tx->tick, &target_tx->tick);
6278     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6279     __put_user(host_tx->jitter, &target_tx->jitter);
6280     __put_user(host_tx->shift, &target_tx->shift);
6281     __put_user(host_tx->stabil, &target_tx->stabil);
6282     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6283     __put_user(host_tx->calcnt, &target_tx->calcnt);
6284     __put_user(host_tx->errcnt, &target_tx->errcnt);
6285     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6286     __put_user(host_tx->tai, &target_tx->tai);
6287 
6288     unlock_user_struct(target_tx, target_addr, 1);
6289     return 0;
6290 }
6291 
6292 
6293 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6294                                                abi_ulong target_addr)
6295 {
6296     struct target_sigevent *target_sevp;
6297 
6298     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6299         return -TARGET_EFAULT;
6300     }
6301 
6302     /* This union is awkward on 64 bit systems because it has a 32 bit
6303      * integer and a pointer in it; we follow the conversion approach
6304      * used for handling sigval types in signal.c so the guest should get
6305      * the correct value back even if we did a 64 bit byteswap and it's
6306      * using the 32 bit integer.
6307      */
6308     host_sevp->sigev_value.sival_ptr =
6309         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6310     host_sevp->sigev_signo =
6311         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6312     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6313     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6314 
6315     unlock_user_struct(target_sevp, target_addr, 1);
6316     return 0;
6317 }
6318 
6319 #if defined(TARGET_NR_mlockall)
6320 static inline int target_to_host_mlockall_arg(int arg)
6321 {
6322     int result = 0;
6323 
6324     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6325         result |= MCL_CURRENT;
6326     }
6327     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6328         result |= MCL_FUTURE;
6329     }
6330     return result;
6331 }
6332 #endif
6333 
6334 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6335      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6336      defined(TARGET_NR_newfstatat))
6337 static inline abi_long host_to_target_stat64(void *cpu_env,
6338                                              abi_ulong target_addr,
6339                                              struct stat *host_st)
6340 {
6341 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6342     if (((CPUARMState *)cpu_env)->eabi) {
6343         struct target_eabi_stat64 *target_st;
6344 
6345         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6346             return -TARGET_EFAULT;
6347         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6348         __put_user(host_st->st_dev, &target_st->st_dev);
6349         __put_user(host_st->st_ino, &target_st->st_ino);
6350 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6351         __put_user(host_st->st_ino, &target_st->__st_ino);
6352 #endif
6353         __put_user(host_st->st_mode, &target_st->st_mode);
6354         __put_user(host_st->st_nlink, &target_st->st_nlink);
6355         __put_user(host_st->st_uid, &target_st->st_uid);
6356         __put_user(host_st->st_gid, &target_st->st_gid);
6357         __put_user(host_st->st_rdev, &target_st->st_rdev);
6358         __put_user(host_st->st_size, &target_st->st_size);
6359         __put_user(host_st->st_blksize, &target_st->st_blksize);
6360         __put_user(host_st->st_blocks, &target_st->st_blocks);
6361         __put_user(host_st->st_atime, &target_st->target_st_atime);
6362         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6363         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6364         unlock_user_struct(target_st, target_addr, 1);
6365     } else
6366 #endif
6367     {
6368 #if defined(TARGET_HAS_STRUCT_STAT64)
6369         struct target_stat64 *target_st;
6370 #else
6371         struct target_stat *target_st;
6372 #endif
6373 
6374         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6375             return -TARGET_EFAULT;
6376         memset(target_st, 0, sizeof(*target_st));
6377         __put_user(host_st->st_dev, &target_st->st_dev);
6378         __put_user(host_st->st_ino, &target_st->st_ino);
6379 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6380         __put_user(host_st->st_ino, &target_st->__st_ino);
6381 #endif
6382         __put_user(host_st->st_mode, &target_st->st_mode);
6383         __put_user(host_st->st_nlink, &target_st->st_nlink);
6384         __put_user(host_st->st_uid, &target_st->st_uid);
6385         __put_user(host_st->st_gid, &target_st->st_gid);
6386         __put_user(host_st->st_rdev, &target_st->st_rdev);
6387         /* XXX: better use of kernel struct */
6388         __put_user(host_st->st_size, &target_st->st_size);
6389         __put_user(host_st->st_blksize, &target_st->st_blksize);
6390         __put_user(host_st->st_blocks, &target_st->st_blocks);
6391         __put_user(host_st->st_atime, &target_st->target_st_atime);
6392         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6393         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6394         unlock_user_struct(target_st, target_addr, 1);
6395     }
6396 
6397     return 0;
6398 }
6399 #endif
6400 
6401 /* ??? Using host futex calls even when target atomic operations
6402    are not really atomic probably breaks things.  However implementing
6403    futexes locally would make futexes shared between multiple processes
6404    tricky.  However they're probably useless because guest atomic
6405    operations won't work either.  */
6406 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6407                     target_ulong uaddr2, int val3)
6408 {
6409     struct timespec ts, *pts;
6410     int base_op;
6411 
6412     /* ??? We assume FUTEX_* constants are the same on both host
6413        and target.  */
6414 #ifdef FUTEX_CMD_MASK
6415     base_op = op & FUTEX_CMD_MASK;
6416 #else
6417     base_op = op;
6418 #endif
6419     switch (base_op) {
6420     case FUTEX_WAIT:
6421     case FUTEX_WAIT_BITSET:
6422         if (timeout) {
6423             pts = &ts;
6424             target_to_host_timespec(pts, timeout);
6425         } else {
6426             pts = NULL;
6427         }
6428         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6429                          pts, NULL, val3));
6430     case FUTEX_WAKE:
6431         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6432     case FUTEX_FD:
6433         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6434     case FUTEX_REQUEUE:
6435     case FUTEX_CMP_REQUEUE:
6436     case FUTEX_WAKE_OP:
6437         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6438            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6439            But the prototype takes a `struct timespec *'; insert casts
6440            to satisfy the compiler.  We do not need to tswap TIMEOUT
6441            since it's not compared to guest memory.  */
6442         pts = (struct timespec *)(uintptr_t) timeout;
6443         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6444                                     g2h(uaddr2),
6445                                     (base_op == FUTEX_CMP_REQUEUE
6446                                      ? tswap32(val3)
6447                                      : val3)));
6448     default:
6449         return -TARGET_ENOSYS;
6450     }
6451 }
6452 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6453 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6454                                      abi_long handle, abi_long mount_id,
6455                                      abi_long flags)
6456 {
6457     struct file_handle *target_fh;
6458     struct file_handle *fh;
6459     int mid = 0;
6460     abi_long ret;
6461     char *name;
6462     unsigned int size, total_size;
6463 
6464     if (get_user_s32(size, handle)) {
6465         return -TARGET_EFAULT;
6466     }
6467 
6468     name = lock_user_string(pathname);
6469     if (!name) {
6470         return -TARGET_EFAULT;
6471     }
6472 
6473     total_size = sizeof(struct file_handle) + size;
6474     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6475     if (!target_fh) {
6476         unlock_user(name, pathname, 0);
6477         return -TARGET_EFAULT;
6478     }
6479 
6480     fh = g_malloc0(total_size);
6481     fh->handle_bytes = size;
6482 
6483     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6484     unlock_user(name, pathname, 0);
6485 
6486     /* man name_to_handle_at(2):
6487      * Other than the use of the handle_bytes field, the caller should treat
6488      * the file_handle structure as an opaque data type
6489      */
6490 
6491     memcpy(target_fh, fh, total_size);
6492     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6493     target_fh->handle_type = tswap32(fh->handle_type);
6494     g_free(fh);
6495     unlock_user(target_fh, handle, total_size);
6496 
6497     if (put_user_s32(mid, mount_id)) {
6498         return -TARGET_EFAULT;
6499     }
6500 
6501     return ret;
6502 
6503 }
6504 #endif
6505 
6506 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6507 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6508                                      abi_long flags)
6509 {
6510     struct file_handle *target_fh;
6511     struct file_handle *fh;
6512     unsigned int size, total_size;
6513     abi_long ret;
6514 
6515     if (get_user_s32(size, handle)) {
6516         return -TARGET_EFAULT;
6517     }
6518 
6519     total_size = sizeof(struct file_handle) + size;
6520     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6521     if (!target_fh) {
6522         return -TARGET_EFAULT;
6523     }
6524 
6525     fh = g_memdup(target_fh, total_size);
6526     fh->handle_bytes = size;
6527     fh->handle_type = tswap32(target_fh->handle_type);
6528 
6529     ret = get_errno(open_by_handle_at(mount_fd, fh,
6530                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6531 
6532     g_free(fh);
6533 
6534     unlock_user(target_fh, handle, total_size);
6535 
6536     return ret;
6537 }
6538 #endif
6539 
6540 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6541 
6542 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6543 {
6544     int host_flags;
6545     target_sigset_t *target_mask;
6546     sigset_t host_mask;
6547     abi_long ret;
6548 
6549     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6550         return -TARGET_EINVAL;
6551     }
6552     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6553         return -TARGET_EFAULT;
6554     }
6555 
6556     target_to_host_sigset(&host_mask, target_mask);
6557 
6558     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6559 
6560     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6561     if (ret >= 0) {
6562         fd_trans_register(ret, &target_signalfd_trans);
6563     }
6564 
6565     unlock_user_struct(target_mask, mask, 0);
6566 
6567     return ret;
6568 }
6569 #endif
6570 
6571 /* Map host to target signal numbers for the wait family of syscalls.
6572    Assume all other status bits are the same.  */
6573 int host_to_target_waitstatus(int status)
6574 {
6575     if (WIFSIGNALED(status)) {
6576         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6577     }
6578     if (WIFSTOPPED(status)) {
6579         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6580                | (status & 0xff);
6581     }
6582     return status;
6583 }
6584 
6585 static int open_self_cmdline(void *cpu_env, int fd)
6586 {
6587     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6588     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6589     int i;
6590 
6591     for (i = 0; i < bprm->argc; i++) {
6592         size_t len = strlen(bprm->argv[i]) + 1;
6593 
6594         if (write(fd, bprm->argv[i], len) != len) {
6595             return -1;
6596         }
6597     }
6598 
6599     return 0;
6600 }
6601 
6602 static int open_self_maps(void *cpu_env, int fd)
6603 {
6604     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6605     TaskState *ts = cpu->opaque;
6606     FILE *fp;
6607     char *line = NULL;
6608     size_t len = 0;
6609     ssize_t read;
6610 
6611     fp = fopen("/proc/self/maps", "r");
6612     if (fp == NULL) {
6613         return -1;
6614     }
6615 
6616     while ((read = getline(&line, &len, fp)) != -1) {
6617         int fields, dev_maj, dev_min, inode;
6618         uint64_t min, max, offset;
6619         char flag_r, flag_w, flag_x, flag_p;
6620         char path[512] = "";
6621         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6622                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6623                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6624 
6625         if ((fields < 10) || (fields > 11)) {
6626             continue;
6627         }
6628         if (h2g_valid(min)) {
6629             int flags = page_get_flags(h2g(min));
6630             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6631             if (page_check_range(h2g(min), max - min, flags) == -1) {
6632                 continue;
6633             }
6634             if (h2g(min) == ts->info->stack_limit) {
6635                 pstrcpy(path, sizeof(path), "      [stack]");
6636             }
6637             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6638                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6639                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6640                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6641                     path[0] ? "         " : "", path);
6642         }
6643     }
6644 
6645     free(line);
6646     fclose(fp);
6647 
6648     return 0;
6649 }
6650 
6651 static int open_self_stat(void *cpu_env, int fd)
6652 {
6653     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6654     TaskState *ts = cpu->opaque;
6655     abi_ulong start_stack = ts->info->start_stack;
6656     int i;
6657 
6658     for (i = 0; i < 44; i++) {
6659       char buf[128];
6660       int len;
6661       uint64_t val = 0;
6662 
6663       if (i == 0) {
6664         /* pid */
6665         val = getpid();
6666         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6667       } else if (i == 1) {
6668         /* app name */
6669         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6670       } else if (i == 27) {
6671         /* stack bottom */
6672         val = start_stack;
6673         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6674       } else {
6675         /* for the rest, there is MasterCard */
6676         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6677       }
6678 
6679       len = strlen(buf);
6680       if (write(fd, buf, len) != len) {
6681           return -1;
6682       }
6683     }
6684 
6685     return 0;
6686 }
6687 
6688 static int open_self_auxv(void *cpu_env, int fd)
6689 {
6690     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6691     TaskState *ts = cpu->opaque;
6692     abi_ulong auxv = ts->info->saved_auxv;
6693     abi_ulong len = ts->info->auxv_len;
6694     char *ptr;
6695 
6696     /*
6697      * Auxiliary vector is stored in target process stack.
6698      * read in whole auxv vector and copy it to file
6699      */
6700     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6701     if (ptr != NULL) {
6702         while (len > 0) {
6703             ssize_t r;
6704             r = write(fd, ptr, len);
6705             if (r <= 0) {
6706                 break;
6707             }
6708             len -= r;
6709             ptr += r;
6710         }
6711         lseek(fd, 0, SEEK_SET);
6712         unlock_user(ptr, auxv, len);
6713     }
6714 
6715     return 0;
6716 }
6717 
6718 static int is_proc_myself(const char *filename, const char *entry)
6719 {
6720     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6721         filename += strlen("/proc/");
6722         if (!strncmp(filename, "self/", strlen("self/"))) {
6723             filename += strlen("self/");
6724         } else if (*filename >= '1' && *filename <= '9') {
6725             char myself[80];
6726             snprintf(myself, sizeof(myself), "%d/", getpid());
6727             if (!strncmp(filename, myself, strlen(myself))) {
6728                 filename += strlen(myself);
6729             } else {
6730                 return 0;
6731             }
6732         } else {
6733             return 0;
6734         }
6735         if (!strcmp(filename, entry)) {
6736             return 1;
6737         }
6738     }
6739     return 0;
6740 }
6741 
6742 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6743 static int is_proc(const char *filename, const char *entry)
6744 {
6745     return strcmp(filename, entry) == 0;
6746 }
6747 
6748 static int open_net_route(void *cpu_env, int fd)
6749 {
6750     FILE *fp;
6751     char *line = NULL;
6752     size_t len = 0;
6753     ssize_t read;
6754 
6755     fp = fopen("/proc/net/route", "r");
6756     if (fp == NULL) {
6757         return -1;
6758     }
6759 
6760     /* read header */
6761 
6762     read = getline(&line, &len, fp);
6763     dprintf(fd, "%s", line);
6764 
6765     /* read routes */
6766 
6767     while ((read = getline(&line, &len, fp)) != -1) {
6768         char iface[16];
6769         uint32_t dest, gw, mask;
6770         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6771         int fields;
6772 
6773         fields = sscanf(line,
6774                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6775                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6776                         &mask, &mtu, &window, &irtt);
6777         if (fields != 11) {
6778             continue;
6779         }
6780         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6781                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6782                 metric, tswap32(mask), mtu, window, irtt);
6783     }
6784 
6785     free(line);
6786     fclose(fp);
6787 
6788     return 0;
6789 }
6790 #endif
6791 
6792 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6793 {
6794     struct fake_open {
6795         const char *filename;
6796         int (*fill)(void *cpu_env, int fd);
6797         int (*cmp)(const char *s1, const char *s2);
6798     };
6799     const struct fake_open *fake_open;
6800     static const struct fake_open fakes[] = {
6801         { "maps", open_self_maps, is_proc_myself },
6802         { "stat", open_self_stat, is_proc_myself },
6803         { "auxv", open_self_auxv, is_proc_myself },
6804         { "cmdline", open_self_cmdline, is_proc_myself },
6805 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6806         { "/proc/net/route", open_net_route, is_proc },
6807 #endif
6808         { NULL, NULL, NULL }
6809     };
6810 
6811     if (is_proc_myself(pathname, "exe")) {
6812         int execfd = qemu_getauxval(AT_EXECFD);
6813         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6814     }
6815 
6816     for (fake_open = fakes; fake_open->filename; fake_open++) {
6817         if (fake_open->cmp(pathname, fake_open->filename)) {
6818             break;
6819         }
6820     }
6821 
6822     if (fake_open->filename) {
6823         const char *tmpdir;
6824         char filename[PATH_MAX];
6825         int fd, r;
6826 
6827         /* create temporary file to map stat to */
6828         tmpdir = getenv("TMPDIR");
6829         if (!tmpdir)
6830             tmpdir = "/tmp";
6831         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6832         fd = mkstemp(filename);
6833         if (fd < 0) {
6834             return fd;
6835         }
6836         unlink(filename);
6837 
6838         if ((r = fake_open->fill(cpu_env, fd))) {
6839             int e = errno;
6840             close(fd);
6841             errno = e;
6842             return r;
6843         }
6844         lseek(fd, 0, SEEK_SET);
6845 
6846         return fd;
6847     }
6848 
6849     return safe_openat(dirfd, path(pathname), flags, mode);
6850 }
6851 
6852 #define TIMER_MAGIC 0x0caf0000
6853 #define TIMER_MAGIC_MASK 0xffff0000
6854 
6855 /* Convert QEMU provided timer ID back to internal 16bit index format */
6856 static target_timer_t get_timer_id(abi_long arg)
6857 {
6858     target_timer_t timerid = arg;
6859 
6860     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6861         return -TARGET_EINVAL;
6862     }
6863 
6864     timerid &= 0xffff;
6865 
6866     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6867         return -TARGET_EINVAL;
6868     }
6869 
6870     return timerid;
6871 }
6872 
6873 static int target_to_host_cpu_mask(unsigned long *host_mask,
6874                                    size_t host_size,
6875                                    abi_ulong target_addr,
6876                                    size_t target_size)
6877 {
6878     unsigned target_bits = sizeof(abi_ulong) * 8;
6879     unsigned host_bits = sizeof(*host_mask) * 8;
6880     abi_ulong *target_mask;
6881     unsigned i, j;
6882 
6883     assert(host_size >= target_size);
6884 
6885     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6886     if (!target_mask) {
6887         return -TARGET_EFAULT;
6888     }
6889     memset(host_mask, 0, host_size);
6890 
6891     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6892         unsigned bit = i * target_bits;
6893         abi_ulong val;
6894 
6895         __get_user(val, &target_mask[i]);
6896         for (j = 0; j < target_bits; j++, bit++) {
6897             if (val & (1UL << j)) {
6898                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6899             }
6900         }
6901     }
6902 
6903     unlock_user(target_mask, target_addr, 0);
6904     return 0;
6905 }
6906 
6907 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6908                                    size_t host_size,
6909                                    abi_ulong target_addr,
6910                                    size_t target_size)
6911 {
6912     unsigned target_bits = sizeof(abi_ulong) * 8;
6913     unsigned host_bits = sizeof(*host_mask) * 8;
6914     abi_ulong *target_mask;
6915     unsigned i, j;
6916 
6917     assert(host_size >= target_size);
6918 
6919     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6920     if (!target_mask) {
6921         return -TARGET_EFAULT;
6922     }
6923 
6924     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6925         unsigned bit = i * target_bits;
6926         abi_ulong val = 0;
6927 
6928         for (j = 0; j < target_bits; j++, bit++) {
6929             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6930                 val |= 1UL << j;
6931             }
6932         }
6933         __put_user(val, &target_mask[i]);
6934     }
6935 
6936     unlock_user(target_mask, target_addr, target_size);
6937     return 0;
6938 }
6939 
6940 /* This is an internal helper for do_syscall so that it is easier
6941  * to have a single return point, so that actions, such as logging
6942  * of syscall results, can be performed.
6943  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6944  */
6945 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6946                             abi_long arg2, abi_long arg3, abi_long arg4,
6947                             abi_long arg5, abi_long arg6, abi_long arg7,
6948                             abi_long arg8)
6949 {
6950     CPUState *cpu = ENV_GET_CPU(cpu_env);
6951     abi_long ret;
6952 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6953     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6954     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6955     struct stat st;
6956 #endif
6957 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6958     || defined(TARGET_NR_fstatfs)
6959     struct statfs stfs;
6960 #endif
6961     void *p;
6962 
6963     switch(num) {
6964     case TARGET_NR_exit:
6965         /* In old applications this may be used to implement _exit(2).
6966            However in threaded applictions it is used for thread termination,
6967            and _exit_group is used for application termination.
6968            Do thread termination if we have more then one thread.  */
6969 
6970         if (block_signals()) {
6971             return -TARGET_ERESTARTSYS;
6972         }
6973 
6974         cpu_list_lock();
6975 
6976         if (CPU_NEXT(first_cpu)) {
6977             TaskState *ts;
6978 
6979             /* Remove the CPU from the list.  */
6980             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6981 
6982             cpu_list_unlock();
6983 
6984             ts = cpu->opaque;
6985             if (ts->child_tidptr) {
6986                 put_user_u32(0, ts->child_tidptr);
6987                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6988                           NULL, NULL, 0);
6989             }
6990             thread_cpu = NULL;
6991             object_unref(OBJECT(cpu));
6992             g_free(ts);
6993             rcu_unregister_thread();
6994             pthread_exit(NULL);
6995         }
6996 
6997         cpu_list_unlock();
6998         preexit_cleanup(cpu_env, arg1);
6999         _exit(arg1);
7000         return 0; /* avoid warning */
7001     case TARGET_NR_read:
7002         if (arg3 == 0) {
7003             return 0;
7004         } else {
7005             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7006                 return -TARGET_EFAULT;
7007             ret = get_errno(safe_read(arg1, p, arg3));
7008             if (ret >= 0 &&
7009                 fd_trans_host_to_target_data(arg1)) {
7010                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7011             }
7012             unlock_user(p, arg2, ret);
7013         }
7014         return ret;
7015     case TARGET_NR_write:
7016         if (arg2 == 0 && arg3 == 0) {
7017             return get_errno(safe_write(arg1, 0, 0));
7018         }
7019         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7020             return -TARGET_EFAULT;
7021         if (fd_trans_target_to_host_data(arg1)) {
7022             void *copy = g_malloc(arg3);
7023             memcpy(copy, p, arg3);
7024             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7025             if (ret >= 0) {
7026                 ret = get_errno(safe_write(arg1, copy, ret));
7027             }
7028             g_free(copy);
7029         } else {
7030             ret = get_errno(safe_write(arg1, p, arg3));
7031         }
7032         unlock_user(p, arg2, 0);
7033         return ret;
7034 
7035 #ifdef TARGET_NR_open
7036     case TARGET_NR_open:
7037         if (!(p = lock_user_string(arg1)))
7038             return -TARGET_EFAULT;
7039         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7040                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7041                                   arg3));
7042         fd_trans_unregister(ret);
7043         unlock_user(p, arg1, 0);
7044         return ret;
7045 #endif
7046     case TARGET_NR_openat:
7047         if (!(p = lock_user_string(arg2)))
7048             return -TARGET_EFAULT;
7049         ret = get_errno(do_openat(cpu_env, arg1, p,
7050                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7051                                   arg4));
7052         fd_trans_unregister(ret);
7053         unlock_user(p, arg2, 0);
7054         return ret;
7055 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7056     case TARGET_NR_name_to_handle_at:
7057         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7058         return ret;
7059 #endif
7060 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7061     case TARGET_NR_open_by_handle_at:
7062         ret = do_open_by_handle_at(arg1, arg2, arg3);
7063         fd_trans_unregister(ret);
7064         return ret;
7065 #endif
7066     case TARGET_NR_close:
7067         fd_trans_unregister(arg1);
7068         return get_errno(close(arg1));
7069 
7070     case TARGET_NR_brk:
7071         return do_brk(arg1);
7072 #ifdef TARGET_NR_fork
7073     case TARGET_NR_fork:
7074         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7075 #endif
7076 #ifdef TARGET_NR_waitpid
7077     case TARGET_NR_waitpid:
7078         {
7079             int status;
7080             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7081             if (!is_error(ret) && arg2 && ret
7082                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7083                 return -TARGET_EFAULT;
7084         }
7085         return ret;
7086 #endif
7087 #ifdef TARGET_NR_waitid
7088     case TARGET_NR_waitid:
7089         {
7090             siginfo_t info;
7091             info.si_pid = 0;
7092             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7093             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7094                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7095                     return -TARGET_EFAULT;
7096                 host_to_target_siginfo(p, &info);
7097                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7098             }
7099         }
7100         return ret;
7101 #endif
7102 #ifdef TARGET_NR_creat /* not on alpha */
7103     case TARGET_NR_creat:
7104         if (!(p = lock_user_string(arg1)))
7105             return -TARGET_EFAULT;
7106         ret = get_errno(creat(p, arg2));
7107         fd_trans_unregister(ret);
7108         unlock_user(p, arg1, 0);
7109         return ret;
7110 #endif
7111 #ifdef TARGET_NR_link
7112     case TARGET_NR_link:
7113         {
7114             void * p2;
7115             p = lock_user_string(arg1);
7116             p2 = lock_user_string(arg2);
7117             if (!p || !p2)
7118                 ret = -TARGET_EFAULT;
7119             else
7120                 ret = get_errno(link(p, p2));
7121             unlock_user(p2, arg2, 0);
7122             unlock_user(p, arg1, 0);
7123         }
7124         return ret;
7125 #endif
7126 #if defined(TARGET_NR_linkat)
7127     case TARGET_NR_linkat:
7128         {
7129             void * p2 = NULL;
7130             if (!arg2 || !arg4)
7131                 return -TARGET_EFAULT;
7132             p  = lock_user_string(arg2);
7133             p2 = lock_user_string(arg4);
7134             if (!p || !p2)
7135                 ret = -TARGET_EFAULT;
7136             else
7137                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7138             unlock_user(p, arg2, 0);
7139             unlock_user(p2, arg4, 0);
7140         }
7141         return ret;
7142 #endif
7143 #ifdef TARGET_NR_unlink
7144     case TARGET_NR_unlink:
7145         if (!(p = lock_user_string(arg1)))
7146             return -TARGET_EFAULT;
7147         ret = get_errno(unlink(p));
7148         unlock_user(p, arg1, 0);
7149         return ret;
7150 #endif
7151 #if defined(TARGET_NR_unlinkat)
7152     case TARGET_NR_unlinkat:
7153         if (!(p = lock_user_string(arg2)))
7154             return -TARGET_EFAULT;
7155         ret = get_errno(unlinkat(arg1, p, arg3));
7156         unlock_user(p, arg2, 0);
7157         return ret;
7158 #endif
7159     case TARGET_NR_execve:
7160         {
7161             char **argp, **envp;
7162             int argc, envc;
7163             abi_ulong gp;
7164             abi_ulong guest_argp;
7165             abi_ulong guest_envp;
7166             abi_ulong addr;
7167             char **q;
7168             int total_size = 0;
7169 
7170             argc = 0;
7171             guest_argp = arg2;
7172             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7173                 if (get_user_ual(addr, gp))
7174                     return -TARGET_EFAULT;
7175                 if (!addr)
7176                     break;
7177                 argc++;
7178             }
7179             envc = 0;
7180             guest_envp = arg3;
7181             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7182                 if (get_user_ual(addr, gp))
7183                     return -TARGET_EFAULT;
7184                 if (!addr)
7185                     break;
7186                 envc++;
7187             }
7188 
7189             argp = g_new0(char *, argc + 1);
7190             envp = g_new0(char *, envc + 1);
7191 
7192             for (gp = guest_argp, q = argp; gp;
7193                   gp += sizeof(abi_ulong), q++) {
7194                 if (get_user_ual(addr, gp))
7195                     goto execve_efault;
7196                 if (!addr)
7197                     break;
7198                 if (!(*q = lock_user_string(addr)))
7199                     goto execve_efault;
7200                 total_size += strlen(*q) + 1;
7201             }
7202             *q = NULL;
7203 
7204             for (gp = guest_envp, q = envp; gp;
7205                   gp += sizeof(abi_ulong), q++) {
7206                 if (get_user_ual(addr, gp))
7207                     goto execve_efault;
7208                 if (!addr)
7209                     break;
7210                 if (!(*q = lock_user_string(addr)))
7211                     goto execve_efault;
7212                 total_size += strlen(*q) + 1;
7213             }
7214             *q = NULL;
7215 
7216             if (!(p = lock_user_string(arg1)))
7217                 goto execve_efault;
7218             /* Although execve() is not an interruptible syscall it is
7219              * a special case where we must use the safe_syscall wrapper:
7220              * if we allow a signal to happen before we make the host
7221              * syscall then we will 'lose' it, because at the point of
7222              * execve the process leaves QEMU's control. So we use the
7223              * safe syscall wrapper to ensure that we either take the
7224              * signal as a guest signal, or else it does not happen
7225              * before the execve completes and makes it the other
7226              * program's problem.
7227              */
7228             ret = get_errno(safe_execve(p, argp, envp));
7229             unlock_user(p, arg1, 0);
7230 
7231             goto execve_end;
7232 
7233         execve_efault:
7234             ret = -TARGET_EFAULT;
7235 
7236         execve_end:
7237             for (gp = guest_argp, q = argp; *q;
7238                   gp += sizeof(abi_ulong), q++) {
7239                 if (get_user_ual(addr, gp)
7240                     || !addr)
7241                     break;
7242                 unlock_user(*q, addr, 0);
7243             }
7244             for (gp = guest_envp, q = envp; *q;
7245                   gp += sizeof(abi_ulong), q++) {
7246                 if (get_user_ual(addr, gp)
7247                     || !addr)
7248                     break;
7249                 unlock_user(*q, addr, 0);
7250             }
7251 
7252             g_free(argp);
7253             g_free(envp);
7254         }
7255         return ret;
7256     case TARGET_NR_chdir:
7257         if (!(p = lock_user_string(arg1)))
7258             return -TARGET_EFAULT;
7259         ret = get_errno(chdir(p));
7260         unlock_user(p, arg1, 0);
7261         return ret;
7262 #ifdef TARGET_NR_time
7263     case TARGET_NR_time:
7264         {
7265             time_t host_time;
7266             ret = get_errno(time(&host_time));
7267             if (!is_error(ret)
7268                 && arg1
7269                 && put_user_sal(host_time, arg1))
7270                 return -TARGET_EFAULT;
7271         }
7272         return ret;
7273 #endif
7274 #ifdef TARGET_NR_mknod
7275     case TARGET_NR_mknod:
7276         if (!(p = lock_user_string(arg1)))
7277             return -TARGET_EFAULT;
7278         ret = get_errno(mknod(p, arg2, arg3));
7279         unlock_user(p, arg1, 0);
7280         return ret;
7281 #endif
7282 #if defined(TARGET_NR_mknodat)
7283     case TARGET_NR_mknodat:
7284         if (!(p = lock_user_string(arg2)))
7285             return -TARGET_EFAULT;
7286         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7287         unlock_user(p, arg2, 0);
7288         return ret;
7289 #endif
7290 #ifdef TARGET_NR_chmod
7291     case TARGET_NR_chmod:
7292         if (!(p = lock_user_string(arg1)))
7293             return -TARGET_EFAULT;
7294         ret = get_errno(chmod(p, arg2));
7295         unlock_user(p, arg1, 0);
7296         return ret;
7297 #endif
7298 #ifdef TARGET_NR_lseek
7299     case TARGET_NR_lseek:
7300         return get_errno(lseek(arg1, arg2, arg3));
7301 #endif
7302 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7303     /* Alpha specific */
7304     case TARGET_NR_getxpid:
7305         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7306         return get_errno(getpid());
7307 #endif
7308 #ifdef TARGET_NR_getpid
7309     case TARGET_NR_getpid:
7310         return get_errno(getpid());
7311 #endif
7312     case TARGET_NR_mount:
7313         {
7314             /* need to look at the data field */
7315             void *p2, *p3;
7316 
7317             if (arg1) {
7318                 p = lock_user_string(arg1);
7319                 if (!p) {
7320                     return -TARGET_EFAULT;
7321                 }
7322             } else {
7323                 p = NULL;
7324             }
7325 
7326             p2 = lock_user_string(arg2);
7327             if (!p2) {
7328                 if (arg1) {
7329                     unlock_user(p, arg1, 0);
7330                 }
7331                 return -TARGET_EFAULT;
7332             }
7333 
7334             if (arg3) {
7335                 p3 = lock_user_string(arg3);
7336                 if (!p3) {
7337                     if (arg1) {
7338                         unlock_user(p, arg1, 0);
7339                     }
7340                     unlock_user(p2, arg2, 0);
7341                     return -TARGET_EFAULT;
7342                 }
7343             } else {
7344                 p3 = NULL;
7345             }
7346 
7347             /* FIXME - arg5 should be locked, but it isn't clear how to
7348              * do that since it's not guaranteed to be a NULL-terminated
7349              * string.
7350              */
7351             if (!arg5) {
7352                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7353             } else {
7354                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7355             }
7356             ret = get_errno(ret);
7357 
7358             if (arg1) {
7359                 unlock_user(p, arg1, 0);
7360             }
7361             unlock_user(p2, arg2, 0);
7362             if (arg3) {
7363                 unlock_user(p3, arg3, 0);
7364             }
7365         }
7366         return ret;
7367 #ifdef TARGET_NR_umount
7368     case TARGET_NR_umount:
7369         if (!(p = lock_user_string(arg1)))
7370             return -TARGET_EFAULT;
7371         ret = get_errno(umount(p));
7372         unlock_user(p, arg1, 0);
7373         return ret;
7374 #endif
7375 #ifdef TARGET_NR_stime /* not on alpha */
7376     case TARGET_NR_stime:
7377         {
7378             time_t host_time;
7379             if (get_user_sal(host_time, arg1))
7380                 return -TARGET_EFAULT;
7381             return get_errno(stime(&host_time));
7382         }
7383 #endif
7384 #ifdef TARGET_NR_alarm /* not on alpha */
7385     case TARGET_NR_alarm:
7386         return alarm(arg1);
7387 #endif
7388 #ifdef TARGET_NR_pause /* not on alpha */
7389     case TARGET_NR_pause:
7390         if (!block_signals()) {
7391             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7392         }
7393         return -TARGET_EINTR;
7394 #endif
7395 #ifdef TARGET_NR_utime
7396     case TARGET_NR_utime:
7397         {
7398             struct utimbuf tbuf, *host_tbuf;
7399             struct target_utimbuf *target_tbuf;
7400             if (arg2) {
7401                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7402                     return -TARGET_EFAULT;
7403                 tbuf.actime = tswapal(target_tbuf->actime);
7404                 tbuf.modtime = tswapal(target_tbuf->modtime);
7405                 unlock_user_struct(target_tbuf, arg2, 0);
7406                 host_tbuf = &tbuf;
7407             } else {
7408                 host_tbuf = NULL;
7409             }
7410             if (!(p = lock_user_string(arg1)))
7411                 return -TARGET_EFAULT;
7412             ret = get_errno(utime(p, host_tbuf));
7413             unlock_user(p, arg1, 0);
7414         }
7415         return ret;
7416 #endif
7417 #ifdef TARGET_NR_utimes
7418     case TARGET_NR_utimes:
7419         {
7420             struct timeval *tvp, tv[2];
7421             if (arg2) {
7422                 if (copy_from_user_timeval(&tv[0], arg2)
7423                     || copy_from_user_timeval(&tv[1],
7424                                               arg2 + sizeof(struct target_timeval)))
7425                     return -TARGET_EFAULT;
7426                 tvp = tv;
7427             } else {
7428                 tvp = NULL;
7429             }
7430             if (!(p = lock_user_string(arg1)))
7431                 return -TARGET_EFAULT;
7432             ret = get_errno(utimes(p, tvp));
7433             unlock_user(p, arg1, 0);
7434         }
7435         return ret;
7436 #endif
7437 #if defined(TARGET_NR_futimesat)
7438     case TARGET_NR_futimesat:
7439         {
7440             struct timeval *tvp, tv[2];
7441             if (arg3) {
7442                 if (copy_from_user_timeval(&tv[0], arg3)
7443                     || copy_from_user_timeval(&tv[1],
7444                                               arg3 + sizeof(struct target_timeval)))
7445                     return -TARGET_EFAULT;
7446                 tvp = tv;
7447             } else {
7448                 tvp = NULL;
7449             }
7450             if (!(p = lock_user_string(arg2))) {
7451                 return -TARGET_EFAULT;
7452             }
7453             ret = get_errno(futimesat(arg1, path(p), tvp));
7454             unlock_user(p, arg2, 0);
7455         }
7456         return ret;
7457 #endif
7458 #ifdef TARGET_NR_access
7459     case TARGET_NR_access:
7460         if (!(p = lock_user_string(arg1))) {
7461             return -TARGET_EFAULT;
7462         }
7463         ret = get_errno(access(path(p), arg2));
7464         unlock_user(p, arg1, 0);
7465         return ret;
7466 #endif
7467 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7468     case TARGET_NR_faccessat:
7469         if (!(p = lock_user_string(arg2))) {
7470             return -TARGET_EFAULT;
7471         }
7472         ret = get_errno(faccessat(arg1, p, arg3, 0));
7473         unlock_user(p, arg2, 0);
7474         return ret;
7475 #endif
7476 #ifdef TARGET_NR_nice /* not on alpha */
7477     case TARGET_NR_nice:
7478         return get_errno(nice(arg1));
7479 #endif
7480     case TARGET_NR_sync:
7481         sync();
7482         return 0;
7483 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7484     case TARGET_NR_syncfs:
7485         return get_errno(syncfs(arg1));
7486 #endif
7487     case TARGET_NR_kill:
7488         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7489 #ifdef TARGET_NR_rename
7490     case TARGET_NR_rename:
7491         {
7492             void *p2;
7493             p = lock_user_string(arg1);
7494             p2 = lock_user_string(arg2);
7495             if (!p || !p2)
7496                 ret = -TARGET_EFAULT;
7497             else
7498                 ret = get_errno(rename(p, p2));
7499             unlock_user(p2, arg2, 0);
7500             unlock_user(p, arg1, 0);
7501         }
7502         return ret;
7503 #endif
7504 #if defined(TARGET_NR_renameat)
7505     case TARGET_NR_renameat:
7506         {
7507             void *p2;
7508             p  = lock_user_string(arg2);
7509             p2 = lock_user_string(arg4);
7510             if (!p || !p2)
7511                 ret = -TARGET_EFAULT;
7512             else
7513                 ret = get_errno(renameat(arg1, p, arg3, p2));
7514             unlock_user(p2, arg4, 0);
7515             unlock_user(p, arg2, 0);
7516         }
7517         return ret;
7518 #endif
7519 #if defined(TARGET_NR_renameat2)
7520     case TARGET_NR_renameat2:
7521         {
7522             void *p2;
7523             p  = lock_user_string(arg2);
7524             p2 = lock_user_string(arg4);
7525             if (!p || !p2) {
7526                 ret = -TARGET_EFAULT;
7527             } else {
7528                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7529             }
7530             unlock_user(p2, arg4, 0);
7531             unlock_user(p, arg2, 0);
7532         }
7533         return ret;
7534 #endif
7535 #ifdef TARGET_NR_mkdir
7536     case TARGET_NR_mkdir:
7537         if (!(p = lock_user_string(arg1)))
7538             return -TARGET_EFAULT;
7539         ret = get_errno(mkdir(p, arg2));
7540         unlock_user(p, arg1, 0);
7541         return ret;
7542 #endif
7543 #if defined(TARGET_NR_mkdirat)
7544     case TARGET_NR_mkdirat:
7545         if (!(p = lock_user_string(arg2)))
7546             return -TARGET_EFAULT;
7547         ret = get_errno(mkdirat(arg1, p, arg3));
7548         unlock_user(p, arg2, 0);
7549         return ret;
7550 #endif
7551 #ifdef TARGET_NR_rmdir
7552     case TARGET_NR_rmdir:
7553         if (!(p = lock_user_string(arg1)))
7554             return -TARGET_EFAULT;
7555         ret = get_errno(rmdir(p));
7556         unlock_user(p, arg1, 0);
7557         return ret;
7558 #endif
7559     case TARGET_NR_dup:
7560         ret = get_errno(dup(arg1));
7561         if (ret >= 0) {
7562             fd_trans_dup(arg1, ret);
7563         }
7564         return ret;
7565 #ifdef TARGET_NR_pipe
7566     case TARGET_NR_pipe:
7567         return do_pipe(cpu_env, arg1, 0, 0);
7568 #endif
7569 #ifdef TARGET_NR_pipe2
7570     case TARGET_NR_pipe2:
7571         return do_pipe(cpu_env, arg1,
7572                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7573 #endif
7574     case TARGET_NR_times:
7575         {
7576             struct target_tms *tmsp;
7577             struct tms tms;
7578             ret = get_errno(times(&tms));
7579             if (arg1) {
7580                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7581                 if (!tmsp)
7582                     return -TARGET_EFAULT;
7583                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7584                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7585                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7586                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7587             }
7588             if (!is_error(ret))
7589                 ret = host_to_target_clock_t(ret);
7590         }
7591         return ret;
7592     case TARGET_NR_acct:
7593         if (arg1 == 0) {
7594             ret = get_errno(acct(NULL));
7595         } else {
7596             if (!(p = lock_user_string(arg1))) {
7597                 return -TARGET_EFAULT;
7598             }
7599             ret = get_errno(acct(path(p)));
7600             unlock_user(p, arg1, 0);
7601         }
7602         return ret;
7603 #ifdef TARGET_NR_umount2
7604     case TARGET_NR_umount2:
7605         if (!(p = lock_user_string(arg1)))
7606             return -TARGET_EFAULT;
7607         ret = get_errno(umount2(p, arg2));
7608         unlock_user(p, arg1, 0);
7609         return ret;
7610 #endif
7611     case TARGET_NR_ioctl:
7612         return do_ioctl(arg1, arg2, arg3);
7613 #ifdef TARGET_NR_fcntl
7614     case TARGET_NR_fcntl:
7615         return do_fcntl(arg1, arg2, arg3);
7616 #endif
7617     case TARGET_NR_setpgid:
7618         return get_errno(setpgid(arg1, arg2));
7619     case TARGET_NR_umask:
7620         return get_errno(umask(arg1));
7621     case TARGET_NR_chroot:
7622         if (!(p = lock_user_string(arg1)))
7623             return -TARGET_EFAULT;
7624         ret = get_errno(chroot(p));
7625         unlock_user(p, arg1, 0);
7626         return ret;
7627 #ifdef TARGET_NR_dup2
7628     case TARGET_NR_dup2:
7629         ret = get_errno(dup2(arg1, arg2));
7630         if (ret >= 0) {
7631             fd_trans_dup(arg1, arg2);
7632         }
7633         return ret;
7634 #endif
7635 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7636     case TARGET_NR_dup3:
7637     {
7638         int host_flags;
7639 
7640         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7641             return -EINVAL;
7642         }
7643         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7644         ret = get_errno(dup3(arg1, arg2, host_flags));
7645         if (ret >= 0) {
7646             fd_trans_dup(arg1, arg2);
7647         }
7648         return ret;
7649     }
7650 #endif
7651 #ifdef TARGET_NR_getppid /* not on alpha */
7652     case TARGET_NR_getppid:
7653         return get_errno(getppid());
7654 #endif
7655 #ifdef TARGET_NR_getpgrp
7656     case TARGET_NR_getpgrp:
7657         return get_errno(getpgrp());
7658 #endif
7659     case TARGET_NR_setsid:
7660         return get_errno(setsid());
7661 #ifdef TARGET_NR_sigaction
7662     case TARGET_NR_sigaction:
7663         {
7664 #if defined(TARGET_ALPHA)
7665             struct target_sigaction act, oact, *pact = 0;
7666             struct target_old_sigaction *old_act;
7667             if (arg2) {
7668                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7669                     return -TARGET_EFAULT;
7670                 act._sa_handler = old_act->_sa_handler;
7671                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7672                 act.sa_flags = old_act->sa_flags;
7673                 act.sa_restorer = 0;
7674                 unlock_user_struct(old_act, arg2, 0);
7675                 pact = &act;
7676             }
7677             ret = get_errno(do_sigaction(arg1, pact, &oact));
7678             if (!is_error(ret) && arg3) {
7679                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7680                     return -TARGET_EFAULT;
7681                 old_act->_sa_handler = oact._sa_handler;
7682                 old_act->sa_mask = oact.sa_mask.sig[0];
7683                 old_act->sa_flags = oact.sa_flags;
7684                 unlock_user_struct(old_act, arg3, 1);
7685             }
7686 #elif defined(TARGET_MIPS)
7687 	    struct target_sigaction act, oact, *pact, *old_act;
7688 
7689 	    if (arg2) {
7690                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7691                     return -TARGET_EFAULT;
7692 		act._sa_handler = old_act->_sa_handler;
7693 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7694 		act.sa_flags = old_act->sa_flags;
7695 		unlock_user_struct(old_act, arg2, 0);
7696 		pact = &act;
7697 	    } else {
7698 		pact = NULL;
7699 	    }
7700 
7701 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7702 
7703 	    if (!is_error(ret) && arg3) {
7704                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7705                     return -TARGET_EFAULT;
7706 		old_act->_sa_handler = oact._sa_handler;
7707 		old_act->sa_flags = oact.sa_flags;
7708 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7709 		old_act->sa_mask.sig[1] = 0;
7710 		old_act->sa_mask.sig[2] = 0;
7711 		old_act->sa_mask.sig[3] = 0;
7712 		unlock_user_struct(old_act, arg3, 1);
7713 	    }
7714 #else
7715             struct target_old_sigaction *old_act;
7716             struct target_sigaction act, oact, *pact;
7717             if (arg2) {
7718                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7719                     return -TARGET_EFAULT;
7720                 act._sa_handler = old_act->_sa_handler;
7721                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7722                 act.sa_flags = old_act->sa_flags;
7723                 act.sa_restorer = old_act->sa_restorer;
7724 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7725                 act.ka_restorer = 0;
7726 #endif
7727                 unlock_user_struct(old_act, arg2, 0);
7728                 pact = &act;
7729             } else {
7730                 pact = NULL;
7731             }
7732             ret = get_errno(do_sigaction(arg1, pact, &oact));
7733             if (!is_error(ret) && arg3) {
7734                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7735                     return -TARGET_EFAULT;
7736                 old_act->_sa_handler = oact._sa_handler;
7737                 old_act->sa_mask = oact.sa_mask.sig[0];
7738                 old_act->sa_flags = oact.sa_flags;
7739                 old_act->sa_restorer = oact.sa_restorer;
7740                 unlock_user_struct(old_act, arg3, 1);
7741             }
7742 #endif
7743         }
7744         return ret;
7745 #endif
7746     case TARGET_NR_rt_sigaction:
7747         {
7748 #if defined(TARGET_ALPHA)
7749             /* For Alpha and SPARC this is a 5 argument syscall, with
7750              * a 'restorer' parameter which must be copied into the
7751              * sa_restorer field of the sigaction struct.
7752              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7753              * and arg5 is the sigsetsize.
7754              * Alpha also has a separate rt_sigaction struct that it uses
7755              * here; SPARC uses the usual sigaction struct.
7756              */
7757             struct target_rt_sigaction *rt_act;
7758             struct target_sigaction act, oact, *pact = 0;
7759 
7760             if (arg4 != sizeof(target_sigset_t)) {
7761                 return -TARGET_EINVAL;
7762             }
7763             if (arg2) {
7764                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7765                     return -TARGET_EFAULT;
7766                 act._sa_handler = rt_act->_sa_handler;
7767                 act.sa_mask = rt_act->sa_mask;
7768                 act.sa_flags = rt_act->sa_flags;
7769                 act.sa_restorer = arg5;
7770                 unlock_user_struct(rt_act, arg2, 0);
7771                 pact = &act;
7772             }
7773             ret = get_errno(do_sigaction(arg1, pact, &oact));
7774             if (!is_error(ret) && arg3) {
7775                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7776                     return -TARGET_EFAULT;
7777                 rt_act->_sa_handler = oact._sa_handler;
7778                 rt_act->sa_mask = oact.sa_mask;
7779                 rt_act->sa_flags = oact.sa_flags;
7780                 unlock_user_struct(rt_act, arg3, 1);
7781             }
7782 #else
7783 #ifdef TARGET_SPARC
7784             target_ulong restorer = arg4;
7785             target_ulong sigsetsize = arg5;
7786 #else
7787             target_ulong sigsetsize = arg4;
7788 #endif
7789             struct target_sigaction *act;
7790             struct target_sigaction *oact;
7791 
7792             if (sigsetsize != sizeof(target_sigset_t)) {
7793                 return -TARGET_EINVAL;
7794             }
7795             if (arg2) {
7796                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7797                     return -TARGET_EFAULT;
7798                 }
7799 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7800                 act->ka_restorer = restorer;
7801 #endif
7802             } else {
7803                 act = NULL;
7804             }
7805             if (arg3) {
7806                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7807                     ret = -TARGET_EFAULT;
7808                     goto rt_sigaction_fail;
7809                 }
7810             } else
7811                 oact = NULL;
7812             ret = get_errno(do_sigaction(arg1, act, oact));
7813 	rt_sigaction_fail:
7814             if (act)
7815                 unlock_user_struct(act, arg2, 0);
7816             if (oact)
7817                 unlock_user_struct(oact, arg3, 1);
7818 #endif
7819         }
7820         return ret;
7821 #ifdef TARGET_NR_sgetmask /* not on alpha */
7822     case TARGET_NR_sgetmask:
7823         {
7824             sigset_t cur_set;
7825             abi_ulong target_set;
7826             ret = do_sigprocmask(0, NULL, &cur_set);
7827             if (!ret) {
7828                 host_to_target_old_sigset(&target_set, &cur_set);
7829                 ret = target_set;
7830             }
7831         }
7832         return ret;
7833 #endif
7834 #ifdef TARGET_NR_ssetmask /* not on alpha */
7835     case TARGET_NR_ssetmask:
7836         {
7837             sigset_t set, oset;
7838             abi_ulong target_set = arg1;
7839             target_to_host_old_sigset(&set, &target_set);
7840             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7841             if (!ret) {
7842                 host_to_target_old_sigset(&target_set, &oset);
7843                 ret = target_set;
7844             }
7845         }
7846         return ret;
7847 #endif
7848 #ifdef TARGET_NR_sigprocmask
7849     case TARGET_NR_sigprocmask:
7850         {
7851 #if defined(TARGET_ALPHA)
7852             sigset_t set, oldset;
7853             abi_ulong mask;
7854             int how;
7855 
7856             switch (arg1) {
7857             case TARGET_SIG_BLOCK:
7858                 how = SIG_BLOCK;
7859                 break;
7860             case TARGET_SIG_UNBLOCK:
7861                 how = SIG_UNBLOCK;
7862                 break;
7863             case TARGET_SIG_SETMASK:
7864                 how = SIG_SETMASK;
7865                 break;
7866             default:
7867                 return -TARGET_EINVAL;
7868             }
7869             mask = arg2;
7870             target_to_host_old_sigset(&set, &mask);
7871 
7872             ret = do_sigprocmask(how, &set, &oldset);
7873             if (!is_error(ret)) {
7874                 host_to_target_old_sigset(&mask, &oldset);
7875                 ret = mask;
7876                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7877             }
7878 #else
7879             sigset_t set, oldset, *set_ptr;
7880             int how;
7881 
7882             if (arg2) {
7883                 switch (arg1) {
7884                 case TARGET_SIG_BLOCK:
7885                     how = SIG_BLOCK;
7886                     break;
7887                 case TARGET_SIG_UNBLOCK:
7888                     how = SIG_UNBLOCK;
7889                     break;
7890                 case TARGET_SIG_SETMASK:
7891                     how = SIG_SETMASK;
7892                     break;
7893                 default:
7894                     return -TARGET_EINVAL;
7895                 }
7896                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7897                     return -TARGET_EFAULT;
7898                 target_to_host_old_sigset(&set, p);
7899                 unlock_user(p, arg2, 0);
7900                 set_ptr = &set;
7901             } else {
7902                 how = 0;
7903                 set_ptr = NULL;
7904             }
7905             ret = do_sigprocmask(how, set_ptr, &oldset);
7906             if (!is_error(ret) && arg3) {
7907                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7908                     return -TARGET_EFAULT;
7909                 host_to_target_old_sigset(p, &oldset);
7910                 unlock_user(p, arg3, sizeof(target_sigset_t));
7911             }
7912 #endif
7913         }
7914         return ret;
7915 #endif
7916     case TARGET_NR_rt_sigprocmask:
7917         {
7918             int how = arg1;
7919             sigset_t set, oldset, *set_ptr;
7920 
7921             if (arg4 != sizeof(target_sigset_t)) {
7922                 return -TARGET_EINVAL;
7923             }
7924 
7925             if (arg2) {
7926                 switch(how) {
7927                 case TARGET_SIG_BLOCK:
7928                     how = SIG_BLOCK;
7929                     break;
7930                 case TARGET_SIG_UNBLOCK:
7931                     how = SIG_UNBLOCK;
7932                     break;
7933                 case TARGET_SIG_SETMASK:
7934                     how = SIG_SETMASK;
7935                     break;
7936                 default:
7937                     return -TARGET_EINVAL;
7938                 }
7939                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7940                     return -TARGET_EFAULT;
7941                 target_to_host_sigset(&set, p);
7942                 unlock_user(p, arg2, 0);
7943                 set_ptr = &set;
7944             } else {
7945                 how = 0;
7946                 set_ptr = NULL;
7947             }
7948             ret = do_sigprocmask(how, set_ptr, &oldset);
7949             if (!is_error(ret) && arg3) {
7950                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7951                     return -TARGET_EFAULT;
7952                 host_to_target_sigset(p, &oldset);
7953                 unlock_user(p, arg3, sizeof(target_sigset_t));
7954             }
7955         }
7956         return ret;
7957 #ifdef TARGET_NR_sigpending
7958     case TARGET_NR_sigpending:
7959         {
7960             sigset_t set;
7961             ret = get_errno(sigpending(&set));
7962             if (!is_error(ret)) {
7963                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7964                     return -TARGET_EFAULT;
7965                 host_to_target_old_sigset(p, &set);
7966                 unlock_user(p, arg1, sizeof(target_sigset_t));
7967             }
7968         }
7969         return ret;
7970 #endif
7971     case TARGET_NR_rt_sigpending:
7972         {
7973             sigset_t set;
7974 
7975             /* Yes, this check is >, not != like most. We follow the kernel's
7976              * logic and it does it like this because it implements
7977              * NR_sigpending through the same code path, and in that case
7978              * the old_sigset_t is smaller in size.
7979              */
7980             if (arg2 > sizeof(target_sigset_t)) {
7981                 return -TARGET_EINVAL;
7982             }
7983 
7984             ret = get_errno(sigpending(&set));
7985             if (!is_error(ret)) {
7986                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7987                     return -TARGET_EFAULT;
7988                 host_to_target_sigset(p, &set);
7989                 unlock_user(p, arg1, sizeof(target_sigset_t));
7990             }
7991         }
7992         return ret;
7993 #ifdef TARGET_NR_sigsuspend
7994     case TARGET_NR_sigsuspend:
7995         {
7996             TaskState *ts = cpu->opaque;
7997 #if defined(TARGET_ALPHA)
7998             abi_ulong mask = arg1;
7999             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8000 #else
8001             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8002                 return -TARGET_EFAULT;
8003             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8004             unlock_user(p, arg1, 0);
8005 #endif
8006             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8007                                                SIGSET_T_SIZE));
8008             if (ret != -TARGET_ERESTARTSYS) {
8009                 ts->in_sigsuspend = 1;
8010             }
8011         }
8012         return ret;
8013 #endif
8014     case TARGET_NR_rt_sigsuspend:
8015         {
8016             TaskState *ts = cpu->opaque;
8017 
8018             if (arg2 != sizeof(target_sigset_t)) {
8019                 return -TARGET_EINVAL;
8020             }
8021             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8022                 return -TARGET_EFAULT;
8023             target_to_host_sigset(&ts->sigsuspend_mask, p);
8024             unlock_user(p, arg1, 0);
8025             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8026                                                SIGSET_T_SIZE));
8027             if (ret != -TARGET_ERESTARTSYS) {
8028                 ts->in_sigsuspend = 1;
8029             }
8030         }
8031         return ret;
8032     case TARGET_NR_rt_sigtimedwait:
8033         {
8034             sigset_t set;
8035             struct timespec uts, *puts;
8036             siginfo_t uinfo;
8037 
8038             if (arg4 != sizeof(target_sigset_t)) {
8039                 return -TARGET_EINVAL;
8040             }
8041 
8042             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8043                 return -TARGET_EFAULT;
8044             target_to_host_sigset(&set, p);
8045             unlock_user(p, arg1, 0);
8046             if (arg3) {
8047                 puts = &uts;
8048                 target_to_host_timespec(puts, arg3);
8049             } else {
8050                 puts = NULL;
8051             }
8052             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8053                                                  SIGSET_T_SIZE));
8054             if (!is_error(ret)) {
8055                 if (arg2) {
8056                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8057                                   0);
8058                     if (!p) {
8059                         return -TARGET_EFAULT;
8060                     }
8061                     host_to_target_siginfo(p, &uinfo);
8062                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8063                 }
8064                 ret = host_to_target_signal(ret);
8065             }
8066         }
8067         return ret;
8068     case TARGET_NR_rt_sigqueueinfo:
8069         {
8070             siginfo_t uinfo;
8071 
8072             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8073             if (!p) {
8074                 return -TARGET_EFAULT;
8075             }
8076             target_to_host_siginfo(&uinfo, p);
8077             unlock_user(p, arg3, 0);
8078             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8079         }
8080         return ret;
8081     case TARGET_NR_rt_tgsigqueueinfo:
8082         {
8083             siginfo_t uinfo;
8084 
8085             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8086             if (!p) {
8087                 return -TARGET_EFAULT;
8088             }
8089             target_to_host_siginfo(&uinfo, p);
8090             unlock_user(p, arg4, 0);
8091             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8092         }
8093         return ret;
8094 #ifdef TARGET_NR_sigreturn
8095     case TARGET_NR_sigreturn:
8096         if (block_signals()) {
8097             return -TARGET_ERESTARTSYS;
8098         }
8099         return do_sigreturn(cpu_env);
8100 #endif
8101     case TARGET_NR_rt_sigreturn:
8102         if (block_signals()) {
8103             return -TARGET_ERESTARTSYS;
8104         }
8105         return do_rt_sigreturn(cpu_env);
8106     case TARGET_NR_sethostname:
8107         if (!(p = lock_user_string(arg1)))
8108             return -TARGET_EFAULT;
8109         ret = get_errno(sethostname(p, arg2));
8110         unlock_user(p, arg1, 0);
8111         return ret;
8112 #ifdef TARGET_NR_setrlimit
8113     case TARGET_NR_setrlimit:
8114         {
8115             int resource = target_to_host_resource(arg1);
8116             struct target_rlimit *target_rlim;
8117             struct rlimit rlim;
8118             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8119                 return -TARGET_EFAULT;
8120             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8121             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8122             unlock_user_struct(target_rlim, arg2, 0);
8123             /*
8124              * If we just passed through resource limit settings for memory then
8125              * they would also apply to QEMU's own allocations, and QEMU will
8126              * crash or hang or die if its allocations fail. Ideally we would
8127              * track the guest allocations in QEMU and apply the limits ourselves.
8128              * For now, just tell the guest the call succeeded but don't actually
8129              * limit anything.
8130              */
8131             if (resource != RLIMIT_AS &&
8132                 resource != RLIMIT_DATA &&
8133                 resource != RLIMIT_STACK) {
8134                 return get_errno(setrlimit(resource, &rlim));
8135             } else {
8136                 return 0;
8137             }
8138         }
8139 #endif
8140 #ifdef TARGET_NR_getrlimit
8141     case TARGET_NR_getrlimit:
8142         {
8143             int resource = target_to_host_resource(arg1);
8144             struct target_rlimit *target_rlim;
8145             struct rlimit rlim;
8146 
8147             ret = get_errno(getrlimit(resource, &rlim));
8148             if (!is_error(ret)) {
8149                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8150                     return -TARGET_EFAULT;
8151                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8152                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8153                 unlock_user_struct(target_rlim, arg2, 1);
8154             }
8155         }
8156         return ret;
8157 #endif
8158     case TARGET_NR_getrusage:
8159         {
8160             struct rusage rusage;
8161             ret = get_errno(getrusage(arg1, &rusage));
8162             if (!is_error(ret)) {
8163                 ret = host_to_target_rusage(arg2, &rusage);
8164             }
8165         }
8166         return ret;
8167     case TARGET_NR_gettimeofday:
8168         {
8169             struct timeval tv;
8170             ret = get_errno(gettimeofday(&tv, NULL));
8171             if (!is_error(ret)) {
8172                 if (copy_to_user_timeval(arg1, &tv))
8173                     return -TARGET_EFAULT;
8174             }
8175         }
8176         return ret;
8177     case TARGET_NR_settimeofday:
8178         {
8179             struct timeval tv, *ptv = NULL;
8180             struct timezone tz, *ptz = NULL;
8181 
8182             if (arg1) {
8183                 if (copy_from_user_timeval(&tv, arg1)) {
8184                     return -TARGET_EFAULT;
8185                 }
8186                 ptv = &tv;
8187             }
8188 
8189             if (arg2) {
8190                 if (copy_from_user_timezone(&tz, arg2)) {
8191                     return -TARGET_EFAULT;
8192                 }
8193                 ptz = &tz;
8194             }
8195 
8196             return get_errno(settimeofday(ptv, ptz));
8197         }
8198 #if defined(TARGET_NR_select)
8199     case TARGET_NR_select:
8200 #if defined(TARGET_WANT_NI_OLD_SELECT)
8201         /* some architectures used to have old_select here
8202          * but now ENOSYS it.
8203          */
8204         ret = -TARGET_ENOSYS;
8205 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8206         ret = do_old_select(arg1);
8207 #else
8208         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8209 #endif
8210         return ret;
8211 #endif
8212 #ifdef TARGET_NR_pselect6
8213     case TARGET_NR_pselect6:
8214         {
8215             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8216             fd_set rfds, wfds, efds;
8217             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8218             struct timespec ts, *ts_ptr;
8219 
8220             /*
8221              * The 6th arg is actually two args smashed together,
8222              * so we cannot use the C library.
8223              */
8224             sigset_t set;
8225             struct {
8226                 sigset_t *set;
8227                 size_t size;
8228             } sig, *sig_ptr;
8229 
8230             abi_ulong arg_sigset, arg_sigsize, *arg7;
8231             target_sigset_t *target_sigset;
8232 
8233             n = arg1;
8234             rfd_addr = arg2;
8235             wfd_addr = arg3;
8236             efd_addr = arg4;
8237             ts_addr = arg5;
8238 
8239             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8240             if (ret) {
8241                 return ret;
8242             }
8243             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8244             if (ret) {
8245                 return ret;
8246             }
8247             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8248             if (ret) {
8249                 return ret;
8250             }
8251 
8252             /*
8253              * This takes a timespec, and not a timeval, so we cannot
8254              * use the do_select() helper ...
8255              */
8256             if (ts_addr) {
8257                 if (target_to_host_timespec(&ts, ts_addr)) {
8258                     return -TARGET_EFAULT;
8259                 }
8260                 ts_ptr = &ts;
8261             } else {
8262                 ts_ptr = NULL;
8263             }
8264 
8265             /* Extract the two packed args for the sigset */
8266             if (arg6) {
8267                 sig_ptr = &sig;
8268                 sig.size = SIGSET_T_SIZE;
8269 
8270                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8271                 if (!arg7) {
8272                     return -TARGET_EFAULT;
8273                 }
8274                 arg_sigset = tswapal(arg7[0]);
8275                 arg_sigsize = tswapal(arg7[1]);
8276                 unlock_user(arg7, arg6, 0);
8277 
8278                 if (arg_sigset) {
8279                     sig.set = &set;
8280                     if (arg_sigsize != sizeof(*target_sigset)) {
8281                         /* Like the kernel, we enforce correct size sigsets */
8282                         return -TARGET_EINVAL;
8283                     }
8284                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8285                                               sizeof(*target_sigset), 1);
8286                     if (!target_sigset) {
8287                         return -TARGET_EFAULT;
8288                     }
8289                     target_to_host_sigset(&set, target_sigset);
8290                     unlock_user(target_sigset, arg_sigset, 0);
8291                 } else {
8292                     sig.set = NULL;
8293                 }
8294             } else {
8295                 sig_ptr = NULL;
8296             }
8297 
8298             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8299                                           ts_ptr, sig_ptr));
8300 
8301             if (!is_error(ret)) {
8302                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8303                     return -TARGET_EFAULT;
8304                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8305                     return -TARGET_EFAULT;
8306                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8307                     return -TARGET_EFAULT;
8308 
8309                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8310                     return -TARGET_EFAULT;
8311             }
8312         }
8313         return ret;
8314 #endif
8315 #ifdef TARGET_NR_symlink
8316     case TARGET_NR_symlink:
8317         {
8318             void *p2;
8319             p = lock_user_string(arg1);
8320             p2 = lock_user_string(arg2);
8321             if (!p || !p2)
8322                 ret = -TARGET_EFAULT;
8323             else
8324                 ret = get_errno(symlink(p, p2));
8325             unlock_user(p2, arg2, 0);
8326             unlock_user(p, arg1, 0);
8327         }
8328         return ret;
8329 #endif
8330 #if defined(TARGET_NR_symlinkat)
8331     case TARGET_NR_symlinkat:
8332         {
8333             void *p2;
8334             p  = lock_user_string(arg1);
8335             p2 = lock_user_string(arg3);
8336             if (!p || !p2)
8337                 ret = -TARGET_EFAULT;
8338             else
8339                 ret = get_errno(symlinkat(p, arg2, p2));
8340             unlock_user(p2, arg3, 0);
8341             unlock_user(p, arg1, 0);
8342         }
8343         return ret;
8344 #endif
8345 #ifdef TARGET_NR_readlink
8346     case TARGET_NR_readlink:
8347         {
8348             void *p2;
8349             p = lock_user_string(arg1);
8350             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8351             if (!p || !p2) {
8352                 ret = -TARGET_EFAULT;
8353             } else if (!arg3) {
8354                 /* Short circuit this for the magic exe check. */
8355                 ret = -TARGET_EINVAL;
8356             } else if (is_proc_myself((const char *)p, "exe")) {
8357                 char real[PATH_MAX], *temp;
8358                 temp = realpath(exec_path, real);
8359                 /* Return value is # of bytes that we wrote to the buffer. */
8360                 if (temp == NULL) {
8361                     ret = get_errno(-1);
8362                 } else {
8363                     /* Don't worry about sign mismatch as earlier mapping
8364                      * logic would have thrown a bad address error. */
8365                     ret = MIN(strlen(real), arg3);
8366                     /* We cannot NUL terminate the string. */
8367                     memcpy(p2, real, ret);
8368                 }
8369             } else {
8370                 ret = get_errno(readlink(path(p), p2, arg3));
8371             }
8372             unlock_user(p2, arg2, ret);
8373             unlock_user(p, arg1, 0);
8374         }
8375         return ret;
8376 #endif
8377 #if defined(TARGET_NR_readlinkat)
8378     case TARGET_NR_readlinkat:
8379         {
8380             void *p2;
8381             p  = lock_user_string(arg2);
8382             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8383             if (!p || !p2) {
8384                 ret = -TARGET_EFAULT;
8385             } else if (is_proc_myself((const char *)p, "exe")) {
8386                 char real[PATH_MAX], *temp;
8387                 temp = realpath(exec_path, real);
8388                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8389                 snprintf((char *)p2, arg4, "%s", real);
8390             } else {
8391                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8392             }
8393             unlock_user(p2, arg3, ret);
8394             unlock_user(p, arg2, 0);
8395         }
8396         return ret;
8397 #endif
8398 #ifdef TARGET_NR_swapon
8399     case TARGET_NR_swapon:
8400         if (!(p = lock_user_string(arg1)))
8401             return -TARGET_EFAULT;
8402         ret = get_errno(swapon(p, arg2));
8403         unlock_user(p, arg1, 0);
8404         return ret;
8405 #endif
8406     case TARGET_NR_reboot:
8407         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8408            /* arg4 must be ignored in all other cases */
8409            p = lock_user_string(arg4);
8410            if (!p) {
8411                return -TARGET_EFAULT;
8412            }
8413            ret = get_errno(reboot(arg1, arg2, arg3, p));
8414            unlock_user(p, arg4, 0);
8415         } else {
8416            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8417         }
8418         return ret;
8419 #ifdef TARGET_NR_mmap
8420     case TARGET_NR_mmap:
8421 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8422     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8423     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8424     || defined(TARGET_S390X)
8425         {
8426             abi_ulong *v;
8427             abi_ulong v1, v2, v3, v4, v5, v6;
8428             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8429                 return -TARGET_EFAULT;
8430             v1 = tswapal(v[0]);
8431             v2 = tswapal(v[1]);
8432             v3 = tswapal(v[2]);
8433             v4 = tswapal(v[3]);
8434             v5 = tswapal(v[4]);
8435             v6 = tswapal(v[5]);
8436             unlock_user(v, arg1, 0);
8437             ret = get_errno(target_mmap(v1, v2, v3,
8438                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8439                                         v5, v6));
8440         }
8441 #else
8442         ret = get_errno(target_mmap(arg1, arg2, arg3,
8443                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8444                                     arg5,
8445                                     arg6));
8446 #endif
8447         return ret;
8448 #endif
8449 #ifdef TARGET_NR_mmap2
8450     case TARGET_NR_mmap2:
8451 #ifndef MMAP_SHIFT
8452 #define MMAP_SHIFT 12
8453 #endif
8454         ret = target_mmap(arg1, arg2, arg3,
8455                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8456                           arg5, arg6 << MMAP_SHIFT);
8457         return get_errno(ret);
8458 #endif
8459     case TARGET_NR_munmap:
8460         return get_errno(target_munmap(arg1, arg2));
8461     case TARGET_NR_mprotect:
8462         {
8463             TaskState *ts = cpu->opaque;
8464             /* Special hack to detect libc making the stack executable.  */
8465             if ((arg3 & PROT_GROWSDOWN)
8466                 && arg1 >= ts->info->stack_limit
8467                 && arg1 <= ts->info->start_stack) {
8468                 arg3 &= ~PROT_GROWSDOWN;
8469                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8470                 arg1 = ts->info->stack_limit;
8471             }
8472         }
8473         return get_errno(target_mprotect(arg1, arg2, arg3));
8474 #ifdef TARGET_NR_mremap
8475     case TARGET_NR_mremap:
8476         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8477 #endif
8478         /* ??? msync/mlock/munlock are broken for softmmu.  */
8479 #ifdef TARGET_NR_msync
8480     case TARGET_NR_msync:
8481         return get_errno(msync(g2h(arg1), arg2, arg3));
8482 #endif
8483 #ifdef TARGET_NR_mlock
8484     case TARGET_NR_mlock:
8485         return get_errno(mlock(g2h(arg1), arg2));
8486 #endif
8487 #ifdef TARGET_NR_munlock
8488     case TARGET_NR_munlock:
8489         return get_errno(munlock(g2h(arg1), arg2));
8490 #endif
8491 #ifdef TARGET_NR_mlockall
8492     case TARGET_NR_mlockall:
8493         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8494 #endif
8495 #ifdef TARGET_NR_munlockall
8496     case TARGET_NR_munlockall:
8497         return get_errno(munlockall());
8498 #endif
8499 #ifdef TARGET_NR_truncate
8500     case TARGET_NR_truncate:
8501         if (!(p = lock_user_string(arg1)))
8502             return -TARGET_EFAULT;
8503         ret = get_errno(truncate(p, arg2));
8504         unlock_user(p, arg1, 0);
8505         return ret;
8506 #endif
8507 #ifdef TARGET_NR_ftruncate
8508     case TARGET_NR_ftruncate:
8509         return get_errno(ftruncate(arg1, arg2));
8510 #endif
8511     case TARGET_NR_fchmod:
8512         return get_errno(fchmod(arg1, arg2));
8513 #if defined(TARGET_NR_fchmodat)
8514     case TARGET_NR_fchmodat:
8515         if (!(p = lock_user_string(arg2)))
8516             return -TARGET_EFAULT;
8517         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8518         unlock_user(p, arg2, 0);
8519         return ret;
8520 #endif
8521     case TARGET_NR_getpriority:
8522         /* Note that negative values are valid for getpriority, so we must
8523            differentiate based on errno settings.  */
8524         errno = 0;
8525         ret = getpriority(arg1, arg2);
8526         if (ret == -1 && errno != 0) {
8527             return -host_to_target_errno(errno);
8528         }
8529 #ifdef TARGET_ALPHA
8530         /* Return value is the unbiased priority.  Signal no error.  */
8531         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8532 #else
8533         /* Return value is a biased priority to avoid negative numbers.  */
8534         ret = 20 - ret;
8535 #endif
8536         return ret;
8537     case TARGET_NR_setpriority:
8538         return get_errno(setpriority(arg1, arg2, arg3));
8539 #ifdef TARGET_NR_statfs
8540     case TARGET_NR_statfs:
8541         if (!(p = lock_user_string(arg1))) {
8542             return -TARGET_EFAULT;
8543         }
8544         ret = get_errno(statfs(path(p), &stfs));
8545         unlock_user(p, arg1, 0);
8546     convert_statfs:
8547         if (!is_error(ret)) {
8548             struct target_statfs *target_stfs;
8549 
8550             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8551                 return -TARGET_EFAULT;
8552             __put_user(stfs.f_type, &target_stfs->f_type);
8553             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8554             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8555             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8556             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8557             __put_user(stfs.f_files, &target_stfs->f_files);
8558             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8559             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8560             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8561             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8562             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8563 #ifdef _STATFS_F_FLAGS
8564             __put_user(stfs.f_flags, &target_stfs->f_flags);
8565 #else
8566             __put_user(0, &target_stfs->f_flags);
8567 #endif
8568             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8569             unlock_user_struct(target_stfs, arg2, 1);
8570         }
8571         return ret;
8572 #endif
8573 #ifdef TARGET_NR_fstatfs
8574     case TARGET_NR_fstatfs:
8575         ret = get_errno(fstatfs(arg1, &stfs));
8576         goto convert_statfs;
8577 #endif
8578 #ifdef TARGET_NR_statfs64
8579     case TARGET_NR_statfs64:
8580         if (!(p = lock_user_string(arg1))) {
8581             return -TARGET_EFAULT;
8582         }
8583         ret = get_errno(statfs(path(p), &stfs));
8584         unlock_user(p, arg1, 0);
8585     convert_statfs64:
8586         if (!is_error(ret)) {
8587             struct target_statfs64 *target_stfs;
8588 
8589             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8590                 return -TARGET_EFAULT;
8591             __put_user(stfs.f_type, &target_stfs->f_type);
8592             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8593             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8594             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8595             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8596             __put_user(stfs.f_files, &target_stfs->f_files);
8597             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8598             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8599             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8600             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8601             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8602             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8603             unlock_user_struct(target_stfs, arg3, 1);
8604         }
8605         return ret;
8606     case TARGET_NR_fstatfs64:
8607         ret = get_errno(fstatfs(arg1, &stfs));
8608         goto convert_statfs64;
8609 #endif
8610 #ifdef TARGET_NR_socketcall
8611     case TARGET_NR_socketcall:
8612         return do_socketcall(arg1, arg2);
8613 #endif
8614 #ifdef TARGET_NR_accept
8615     case TARGET_NR_accept:
8616         return do_accept4(arg1, arg2, arg3, 0);
8617 #endif
8618 #ifdef TARGET_NR_accept4
8619     case TARGET_NR_accept4:
8620         return do_accept4(arg1, arg2, arg3, arg4);
8621 #endif
8622 #ifdef TARGET_NR_bind
8623     case TARGET_NR_bind:
8624         return do_bind(arg1, arg2, arg3);
8625 #endif
8626 #ifdef TARGET_NR_connect
8627     case TARGET_NR_connect:
8628         return do_connect(arg1, arg2, arg3);
8629 #endif
8630 #ifdef TARGET_NR_getpeername
8631     case TARGET_NR_getpeername:
8632         return do_getpeername(arg1, arg2, arg3);
8633 #endif
8634 #ifdef TARGET_NR_getsockname
8635     case TARGET_NR_getsockname:
8636         return do_getsockname(arg1, arg2, arg3);
8637 #endif
8638 #ifdef TARGET_NR_getsockopt
8639     case TARGET_NR_getsockopt:
8640         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8641 #endif
8642 #ifdef TARGET_NR_listen
8643     case TARGET_NR_listen:
8644         return get_errno(listen(arg1, arg2));
8645 #endif
8646 #ifdef TARGET_NR_recv
8647     case TARGET_NR_recv:
8648         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8649 #endif
8650 #ifdef TARGET_NR_recvfrom
8651     case TARGET_NR_recvfrom:
8652         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8653 #endif
8654 #ifdef TARGET_NR_recvmsg
8655     case TARGET_NR_recvmsg:
8656         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8657 #endif
8658 #ifdef TARGET_NR_send
8659     case TARGET_NR_send:
8660         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8661 #endif
8662 #ifdef TARGET_NR_sendmsg
8663     case TARGET_NR_sendmsg:
8664         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8665 #endif
8666 #ifdef TARGET_NR_sendmmsg
8667     case TARGET_NR_sendmmsg:
8668         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8669     case TARGET_NR_recvmmsg:
8670         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8671 #endif
8672 #ifdef TARGET_NR_sendto
8673     case TARGET_NR_sendto:
8674         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8675 #endif
8676 #ifdef TARGET_NR_shutdown
8677     case TARGET_NR_shutdown:
8678         return get_errno(shutdown(arg1, arg2));
8679 #endif
8680 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8681     case TARGET_NR_getrandom:
8682         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8683         if (!p) {
8684             return -TARGET_EFAULT;
8685         }
8686         ret = get_errno(getrandom(p, arg2, arg3));
8687         unlock_user(p, arg1, ret);
8688         return ret;
8689 #endif
8690 #ifdef TARGET_NR_socket
8691     case TARGET_NR_socket:
8692         return do_socket(arg1, arg2, arg3);
8693 #endif
8694 #ifdef TARGET_NR_socketpair
8695     case TARGET_NR_socketpair:
8696         return do_socketpair(arg1, arg2, arg3, arg4);
8697 #endif
8698 #ifdef TARGET_NR_setsockopt
8699     case TARGET_NR_setsockopt:
8700         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8701 #endif
8702 #if defined(TARGET_NR_syslog)
8703     case TARGET_NR_syslog:
8704         {
8705             int len = arg2;
8706 
8707             switch (arg1) {
8708             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8709             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8710             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8711             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8712             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8713             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8714             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8715             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8716                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8717             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8718             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8719             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8720                 {
8721                     if (len < 0) {
8722                         return -TARGET_EINVAL;
8723                     }
8724                     if (len == 0) {
8725                         return 0;
8726                     }
8727                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8728                     if (!p) {
8729                         return -TARGET_EFAULT;
8730                     }
8731                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8732                     unlock_user(p, arg2, arg3);
8733                 }
8734                 return ret;
8735             default:
8736                 return -TARGET_EINVAL;
8737             }
8738         }
8739         break;
8740 #endif
8741     case TARGET_NR_setitimer:
8742         {
8743             struct itimerval value, ovalue, *pvalue;
8744 
8745             if (arg2) {
8746                 pvalue = &value;
8747                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8748                     || copy_from_user_timeval(&pvalue->it_value,
8749                                               arg2 + sizeof(struct target_timeval)))
8750                     return -TARGET_EFAULT;
8751             } else {
8752                 pvalue = NULL;
8753             }
8754             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8755             if (!is_error(ret) && arg3) {
8756                 if (copy_to_user_timeval(arg3,
8757                                          &ovalue.it_interval)
8758                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8759                                             &ovalue.it_value))
8760                     return -TARGET_EFAULT;
8761             }
8762         }
8763         return ret;
8764     case TARGET_NR_getitimer:
8765         {
8766             struct itimerval value;
8767 
8768             ret = get_errno(getitimer(arg1, &value));
8769             if (!is_error(ret) && arg2) {
8770                 if (copy_to_user_timeval(arg2,
8771                                          &value.it_interval)
8772                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8773                                             &value.it_value))
8774                     return -TARGET_EFAULT;
8775             }
8776         }
8777         return ret;
8778 #ifdef TARGET_NR_stat
8779     case TARGET_NR_stat:
8780         if (!(p = lock_user_string(arg1))) {
8781             return -TARGET_EFAULT;
8782         }
8783         ret = get_errno(stat(path(p), &st));
8784         unlock_user(p, arg1, 0);
8785         goto do_stat;
8786 #endif
8787 #ifdef TARGET_NR_lstat
8788     case TARGET_NR_lstat:
8789         if (!(p = lock_user_string(arg1))) {
8790             return -TARGET_EFAULT;
8791         }
8792         ret = get_errno(lstat(path(p), &st));
8793         unlock_user(p, arg1, 0);
8794         goto do_stat;
8795 #endif
8796 #ifdef TARGET_NR_fstat
8797     case TARGET_NR_fstat:
8798         {
8799             ret = get_errno(fstat(arg1, &st));
8800 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8801         do_stat:
8802 #endif
8803             if (!is_error(ret)) {
8804                 struct target_stat *target_st;
8805 
8806                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8807                     return -TARGET_EFAULT;
8808                 memset(target_st, 0, sizeof(*target_st));
8809                 __put_user(st.st_dev, &target_st->st_dev);
8810                 __put_user(st.st_ino, &target_st->st_ino);
8811                 __put_user(st.st_mode, &target_st->st_mode);
8812                 __put_user(st.st_uid, &target_st->st_uid);
8813                 __put_user(st.st_gid, &target_st->st_gid);
8814                 __put_user(st.st_nlink, &target_st->st_nlink);
8815                 __put_user(st.st_rdev, &target_st->st_rdev);
8816                 __put_user(st.st_size, &target_st->st_size);
8817                 __put_user(st.st_blksize, &target_st->st_blksize);
8818                 __put_user(st.st_blocks, &target_st->st_blocks);
8819                 __put_user(st.st_atime, &target_st->target_st_atime);
8820                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8821                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8822                 unlock_user_struct(target_st, arg2, 1);
8823             }
8824         }
8825         return ret;
8826 #endif
8827     case TARGET_NR_vhangup:
8828         return get_errno(vhangup());
8829 #ifdef TARGET_NR_syscall
8830     case TARGET_NR_syscall:
8831         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8832                           arg6, arg7, arg8, 0);
8833 #endif
8834     case TARGET_NR_wait4:
8835         {
8836             int status;
8837             abi_long status_ptr = arg2;
8838             struct rusage rusage, *rusage_ptr;
8839             abi_ulong target_rusage = arg4;
8840             abi_long rusage_err;
8841             if (target_rusage)
8842                 rusage_ptr = &rusage;
8843             else
8844                 rusage_ptr = NULL;
8845             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8846             if (!is_error(ret)) {
8847                 if (status_ptr && ret) {
8848                     status = host_to_target_waitstatus(status);
8849                     if (put_user_s32(status, status_ptr))
8850                         return -TARGET_EFAULT;
8851                 }
8852                 if (target_rusage) {
8853                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8854                     if (rusage_err) {
8855                         ret = rusage_err;
8856                     }
8857                 }
8858             }
8859         }
8860         return ret;
8861 #ifdef TARGET_NR_swapoff
8862     case TARGET_NR_swapoff:
8863         if (!(p = lock_user_string(arg1)))
8864             return -TARGET_EFAULT;
8865         ret = get_errno(swapoff(p));
8866         unlock_user(p, arg1, 0);
8867         return ret;
8868 #endif
8869     case TARGET_NR_sysinfo:
8870         {
8871             struct target_sysinfo *target_value;
8872             struct sysinfo value;
8873             ret = get_errno(sysinfo(&value));
8874             if (!is_error(ret) && arg1)
8875             {
8876                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8877                     return -TARGET_EFAULT;
8878                 __put_user(value.uptime, &target_value->uptime);
8879                 __put_user(value.loads[0], &target_value->loads[0]);
8880                 __put_user(value.loads[1], &target_value->loads[1]);
8881                 __put_user(value.loads[2], &target_value->loads[2]);
8882                 __put_user(value.totalram, &target_value->totalram);
8883                 __put_user(value.freeram, &target_value->freeram);
8884                 __put_user(value.sharedram, &target_value->sharedram);
8885                 __put_user(value.bufferram, &target_value->bufferram);
8886                 __put_user(value.totalswap, &target_value->totalswap);
8887                 __put_user(value.freeswap, &target_value->freeswap);
8888                 __put_user(value.procs, &target_value->procs);
8889                 __put_user(value.totalhigh, &target_value->totalhigh);
8890                 __put_user(value.freehigh, &target_value->freehigh);
8891                 __put_user(value.mem_unit, &target_value->mem_unit);
8892                 unlock_user_struct(target_value, arg1, 1);
8893             }
8894         }
8895         return ret;
8896 #ifdef TARGET_NR_ipc
8897     case TARGET_NR_ipc:
8898         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8899 #endif
8900 #ifdef TARGET_NR_semget
8901     case TARGET_NR_semget:
8902         return get_errno(semget(arg1, arg2, arg3));
8903 #endif
8904 #ifdef TARGET_NR_semop
8905     case TARGET_NR_semop:
8906         return do_semop(arg1, arg2, arg3);
8907 #endif
8908 #ifdef TARGET_NR_semctl
8909     case TARGET_NR_semctl:
8910         return do_semctl(arg1, arg2, arg3, arg4);
8911 #endif
8912 #ifdef TARGET_NR_msgctl
8913     case TARGET_NR_msgctl:
8914         return do_msgctl(arg1, arg2, arg3);
8915 #endif
8916 #ifdef TARGET_NR_msgget
8917     case TARGET_NR_msgget:
8918         return get_errno(msgget(arg1, arg2));
8919 #endif
8920 #ifdef TARGET_NR_msgrcv
8921     case TARGET_NR_msgrcv:
8922         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8923 #endif
8924 #ifdef TARGET_NR_msgsnd
8925     case TARGET_NR_msgsnd:
8926         return do_msgsnd(arg1, arg2, arg3, arg4);
8927 #endif
8928 #ifdef TARGET_NR_shmget
8929     case TARGET_NR_shmget:
8930         return get_errno(shmget(arg1, arg2, arg3));
8931 #endif
8932 #ifdef TARGET_NR_shmctl
8933     case TARGET_NR_shmctl:
8934         return do_shmctl(arg1, arg2, arg3);
8935 #endif
8936 #ifdef TARGET_NR_shmat
8937     case TARGET_NR_shmat:
8938         return do_shmat(cpu_env, arg1, arg2, arg3);
8939 #endif
8940 #ifdef TARGET_NR_shmdt
8941     case TARGET_NR_shmdt:
8942         return do_shmdt(arg1);
8943 #endif
8944     case TARGET_NR_fsync:
8945         return get_errno(fsync(arg1));
8946     case TARGET_NR_clone:
8947         /* Linux manages to have three different orderings for its
8948          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8949          * match the kernel's CONFIG_CLONE_* settings.
8950          * Microblaze is further special in that it uses a sixth
8951          * implicit argument to clone for the TLS pointer.
8952          */
8953 #if defined(TARGET_MICROBLAZE)
8954         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8955 #elif defined(TARGET_CLONE_BACKWARDS)
8956         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8957 #elif defined(TARGET_CLONE_BACKWARDS2)
8958         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8959 #else
8960         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8961 #endif
8962         return ret;
8963 #ifdef __NR_exit_group
8964         /* new thread calls */
8965     case TARGET_NR_exit_group:
8966         preexit_cleanup(cpu_env, arg1);
8967         return get_errno(exit_group(arg1));
8968 #endif
8969     case TARGET_NR_setdomainname:
8970         if (!(p = lock_user_string(arg1)))
8971             return -TARGET_EFAULT;
8972         ret = get_errno(setdomainname(p, arg2));
8973         unlock_user(p, arg1, 0);
8974         return ret;
8975     case TARGET_NR_uname:
8976         /* no need to transcode because we use the linux syscall */
8977         {
8978             struct new_utsname * buf;
8979 
8980             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8981                 return -TARGET_EFAULT;
8982             ret = get_errno(sys_uname(buf));
8983             if (!is_error(ret)) {
8984                 /* Overwrite the native machine name with whatever is being
8985                    emulated. */
8986                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8987                           sizeof(buf->machine));
8988                 /* Allow the user to override the reported release.  */
8989                 if (qemu_uname_release && *qemu_uname_release) {
8990                     g_strlcpy(buf->release, qemu_uname_release,
8991                               sizeof(buf->release));
8992                 }
8993             }
8994             unlock_user_struct(buf, arg1, 1);
8995         }
8996         return ret;
8997 #ifdef TARGET_I386
8998     case TARGET_NR_modify_ldt:
8999         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9000 #if !defined(TARGET_X86_64)
9001     case TARGET_NR_vm86:
9002         return do_vm86(cpu_env, arg1, arg2);
9003 #endif
9004 #endif
9005     case TARGET_NR_adjtimex:
9006         {
9007             struct timex host_buf;
9008 
9009             if (target_to_host_timex(&host_buf, arg1) != 0) {
9010                 return -TARGET_EFAULT;
9011             }
9012             ret = get_errno(adjtimex(&host_buf));
9013             if (!is_error(ret)) {
9014                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9015                     return -TARGET_EFAULT;
9016                 }
9017             }
9018         }
9019         return ret;
9020 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9021     case TARGET_NR_clock_adjtime:
9022         {
9023             struct timex htx, *phtx = &htx;
9024 
9025             if (target_to_host_timex(phtx, arg2) != 0) {
9026                 return -TARGET_EFAULT;
9027             }
9028             ret = get_errno(clock_adjtime(arg1, phtx));
9029             if (!is_error(ret) && phtx) {
9030                 if (host_to_target_timex(arg2, phtx) != 0) {
9031                     return -TARGET_EFAULT;
9032                 }
9033             }
9034         }
9035         return ret;
9036 #endif
9037     case TARGET_NR_getpgid:
9038         return get_errno(getpgid(arg1));
9039     case TARGET_NR_fchdir:
9040         return get_errno(fchdir(arg1));
9041     case TARGET_NR_personality:
9042         return get_errno(personality(arg1));
9043 #ifdef TARGET_NR__llseek /* Not on alpha */
9044     case TARGET_NR__llseek:
9045         {
9046             int64_t res;
9047 #if !defined(__NR_llseek)
9048             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9049             if (res == -1) {
9050                 ret = get_errno(res);
9051             } else {
9052                 ret = 0;
9053             }
9054 #else
9055             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9056 #endif
9057             if ((ret == 0) && put_user_s64(res, arg4)) {
9058                 return -TARGET_EFAULT;
9059             }
9060         }
9061         return ret;
9062 #endif
9063 #ifdef TARGET_NR_getdents
9064     case TARGET_NR_getdents:
9065 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9066 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9067         {
9068             struct target_dirent *target_dirp;
9069             struct linux_dirent *dirp;
9070             abi_long count = arg3;
9071 
9072             dirp = g_try_malloc(count);
9073             if (!dirp) {
9074                 return -TARGET_ENOMEM;
9075             }
9076 
9077             ret = get_errno(sys_getdents(arg1, dirp, count));
9078             if (!is_error(ret)) {
9079                 struct linux_dirent *de;
9080 		struct target_dirent *tde;
9081                 int len = ret;
9082                 int reclen, treclen;
9083 		int count1, tnamelen;
9084 
9085 		count1 = 0;
9086                 de = dirp;
9087                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9088                     return -TARGET_EFAULT;
9089 		tde = target_dirp;
9090                 while (len > 0) {
9091                     reclen = de->d_reclen;
9092                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9093                     assert(tnamelen >= 0);
9094                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9095                     assert(count1 + treclen <= count);
9096                     tde->d_reclen = tswap16(treclen);
9097                     tde->d_ino = tswapal(de->d_ino);
9098                     tde->d_off = tswapal(de->d_off);
9099                     memcpy(tde->d_name, de->d_name, tnamelen);
9100                     de = (struct linux_dirent *)((char *)de + reclen);
9101                     len -= reclen;
9102                     tde = (struct target_dirent *)((char *)tde + treclen);
9103 		    count1 += treclen;
9104                 }
9105 		ret = count1;
9106                 unlock_user(target_dirp, arg2, ret);
9107             }
9108             g_free(dirp);
9109         }
9110 #else
9111         {
9112             struct linux_dirent *dirp;
9113             abi_long count = arg3;
9114 
9115             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9116                 return -TARGET_EFAULT;
9117             ret = get_errno(sys_getdents(arg1, dirp, count));
9118             if (!is_error(ret)) {
9119                 struct linux_dirent *de;
9120                 int len = ret;
9121                 int reclen;
9122                 de = dirp;
9123                 while (len > 0) {
9124                     reclen = de->d_reclen;
9125                     if (reclen > len)
9126                         break;
9127                     de->d_reclen = tswap16(reclen);
9128                     tswapls(&de->d_ino);
9129                     tswapls(&de->d_off);
9130                     de = (struct linux_dirent *)((char *)de + reclen);
9131                     len -= reclen;
9132                 }
9133             }
9134             unlock_user(dirp, arg2, ret);
9135         }
9136 #endif
9137 #else
9138         /* Implement getdents in terms of getdents64 */
9139         {
9140             struct linux_dirent64 *dirp;
9141             abi_long count = arg3;
9142 
9143             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9144             if (!dirp) {
9145                 return -TARGET_EFAULT;
9146             }
9147             ret = get_errno(sys_getdents64(arg1, dirp, count));
9148             if (!is_error(ret)) {
9149                 /* Convert the dirent64 structs to target dirent.  We do this
9150                  * in-place, since we can guarantee that a target_dirent is no
9151                  * larger than a dirent64; however this means we have to be
9152                  * careful to read everything before writing in the new format.
9153                  */
9154                 struct linux_dirent64 *de;
9155                 struct target_dirent *tde;
9156                 int len = ret;
9157                 int tlen = 0;
9158 
9159                 de = dirp;
9160                 tde = (struct target_dirent *)dirp;
9161                 while (len > 0) {
9162                     int namelen, treclen;
9163                     int reclen = de->d_reclen;
9164                     uint64_t ino = de->d_ino;
9165                     int64_t off = de->d_off;
9166                     uint8_t type = de->d_type;
9167 
9168                     namelen = strlen(de->d_name);
9169                     treclen = offsetof(struct target_dirent, d_name)
9170                         + namelen + 2;
9171                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9172 
9173                     memmove(tde->d_name, de->d_name, namelen + 1);
9174                     tde->d_ino = tswapal(ino);
9175                     tde->d_off = tswapal(off);
9176                     tde->d_reclen = tswap16(treclen);
9177                     /* The target_dirent type is in what was formerly a padding
9178                      * byte at the end of the structure:
9179                      */
9180                     *(((char *)tde) + treclen - 1) = type;
9181 
9182                     de = (struct linux_dirent64 *)((char *)de + reclen);
9183                     tde = (struct target_dirent *)((char *)tde + treclen);
9184                     len -= reclen;
9185                     tlen += treclen;
9186                 }
9187                 ret = tlen;
9188             }
9189             unlock_user(dirp, arg2, ret);
9190         }
9191 #endif
9192         return ret;
9193 #endif /* TARGET_NR_getdents */
9194 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9195     case TARGET_NR_getdents64:
9196         {
9197             struct linux_dirent64 *dirp;
9198             abi_long count = arg3;
9199             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9200                 return -TARGET_EFAULT;
9201             ret = get_errno(sys_getdents64(arg1, dirp, count));
9202             if (!is_error(ret)) {
9203                 struct linux_dirent64 *de;
9204                 int len = ret;
9205                 int reclen;
9206                 de = dirp;
9207                 while (len > 0) {
9208                     reclen = de->d_reclen;
9209                     if (reclen > len)
9210                         break;
9211                     de->d_reclen = tswap16(reclen);
9212                     tswap64s((uint64_t *)&de->d_ino);
9213                     tswap64s((uint64_t *)&de->d_off);
9214                     de = (struct linux_dirent64 *)((char *)de + reclen);
9215                     len -= reclen;
9216                 }
9217             }
9218             unlock_user(dirp, arg2, ret);
9219         }
9220         return ret;
9221 #endif /* TARGET_NR_getdents64 */
9222 #if defined(TARGET_NR__newselect)
9223     case TARGET_NR__newselect:
9224         return do_select(arg1, arg2, arg3, arg4, arg5);
9225 #endif
9226 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9227 # ifdef TARGET_NR_poll
9228     case TARGET_NR_poll:
9229 # endif
9230 # ifdef TARGET_NR_ppoll
9231     case TARGET_NR_ppoll:
9232 # endif
9233         {
9234             struct target_pollfd *target_pfd;
9235             unsigned int nfds = arg2;
9236             struct pollfd *pfd;
9237             unsigned int i;
9238 
9239             pfd = NULL;
9240             target_pfd = NULL;
9241             if (nfds) {
9242                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9243                     return -TARGET_EINVAL;
9244                 }
9245 
9246                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9247                                        sizeof(struct target_pollfd) * nfds, 1);
9248                 if (!target_pfd) {
9249                     return -TARGET_EFAULT;
9250                 }
9251 
9252                 pfd = alloca(sizeof(struct pollfd) * nfds);
9253                 for (i = 0; i < nfds; i++) {
9254                     pfd[i].fd = tswap32(target_pfd[i].fd);
9255                     pfd[i].events = tswap16(target_pfd[i].events);
9256                 }
9257             }
9258 
9259             switch (num) {
9260 # ifdef TARGET_NR_ppoll
9261             case TARGET_NR_ppoll:
9262             {
9263                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9264                 target_sigset_t *target_set;
9265                 sigset_t _set, *set = &_set;
9266 
9267                 if (arg3) {
9268                     if (target_to_host_timespec(timeout_ts, arg3)) {
9269                         unlock_user(target_pfd, arg1, 0);
9270                         return -TARGET_EFAULT;
9271                     }
9272                 } else {
9273                     timeout_ts = NULL;
9274                 }
9275 
9276                 if (arg4) {
9277                     if (arg5 != sizeof(target_sigset_t)) {
9278                         unlock_user(target_pfd, arg1, 0);
9279                         return -TARGET_EINVAL;
9280                     }
9281 
9282                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9283                     if (!target_set) {
9284                         unlock_user(target_pfd, arg1, 0);
9285                         return -TARGET_EFAULT;
9286                     }
9287                     target_to_host_sigset(set, target_set);
9288                 } else {
9289                     set = NULL;
9290                 }
9291 
9292                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9293                                            set, SIGSET_T_SIZE));
9294 
9295                 if (!is_error(ret) && arg3) {
9296                     host_to_target_timespec(arg3, timeout_ts);
9297                 }
9298                 if (arg4) {
9299                     unlock_user(target_set, arg4, 0);
9300                 }
9301                 break;
9302             }
9303 # endif
9304 # ifdef TARGET_NR_poll
9305             case TARGET_NR_poll:
9306             {
9307                 struct timespec ts, *pts;
9308 
9309                 if (arg3 >= 0) {
9310                     /* Convert ms to secs, ns */
9311                     ts.tv_sec = arg3 / 1000;
9312                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9313                     pts = &ts;
9314                 } else {
9315                     /* -ve poll() timeout means "infinite" */
9316                     pts = NULL;
9317                 }
9318                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9319                 break;
9320             }
9321 # endif
9322             default:
9323                 g_assert_not_reached();
9324             }
9325 
9326             if (!is_error(ret)) {
9327                 for(i = 0; i < nfds; i++) {
9328                     target_pfd[i].revents = tswap16(pfd[i].revents);
9329                 }
9330             }
9331             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9332         }
9333         return ret;
9334 #endif
9335     case TARGET_NR_flock:
9336         /* NOTE: the flock constant seems to be the same for every
9337            Linux platform */
9338         return get_errno(safe_flock(arg1, arg2));
9339     case TARGET_NR_readv:
9340         {
9341             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9342             if (vec != NULL) {
9343                 ret = get_errno(safe_readv(arg1, vec, arg3));
9344                 unlock_iovec(vec, arg2, arg3, 1);
9345             } else {
9346                 ret = -host_to_target_errno(errno);
9347             }
9348         }
9349         return ret;
9350     case TARGET_NR_writev:
9351         {
9352             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9353             if (vec != NULL) {
9354                 ret = get_errno(safe_writev(arg1, vec, arg3));
9355                 unlock_iovec(vec, arg2, arg3, 0);
9356             } else {
9357                 ret = -host_to_target_errno(errno);
9358             }
9359         }
9360         return ret;
9361 #if defined(TARGET_NR_preadv)
9362     case TARGET_NR_preadv:
9363         {
9364             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9365             if (vec != NULL) {
9366                 unsigned long low, high;
9367 
9368                 target_to_host_low_high(arg4, arg5, &low, &high);
9369                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9370                 unlock_iovec(vec, arg2, arg3, 1);
9371             } else {
9372                 ret = -host_to_target_errno(errno);
9373            }
9374         }
9375         return ret;
9376 #endif
9377 #if defined(TARGET_NR_pwritev)
9378     case TARGET_NR_pwritev:
9379         {
9380             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9381             if (vec != NULL) {
9382                 unsigned long low, high;
9383 
9384                 target_to_host_low_high(arg4, arg5, &low, &high);
9385                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9386                 unlock_iovec(vec, arg2, arg3, 0);
9387             } else {
9388                 ret = -host_to_target_errno(errno);
9389            }
9390         }
9391         return ret;
9392 #endif
9393     case TARGET_NR_getsid:
9394         return get_errno(getsid(arg1));
9395 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9396     case TARGET_NR_fdatasync:
9397         return get_errno(fdatasync(arg1));
9398 #endif
9399 #ifdef TARGET_NR__sysctl
9400     case TARGET_NR__sysctl:
9401         /* We don't implement this, but ENOTDIR is always a safe
9402            return value. */
9403         return -TARGET_ENOTDIR;
9404 #endif
9405     case TARGET_NR_sched_getaffinity:
9406         {
9407             unsigned int mask_size;
9408             unsigned long *mask;
9409 
9410             /*
9411              * sched_getaffinity needs multiples of ulong, so need to take
9412              * care of mismatches between target ulong and host ulong sizes.
9413              */
9414             if (arg2 & (sizeof(abi_ulong) - 1)) {
9415                 return -TARGET_EINVAL;
9416             }
9417             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9418 
9419             mask = alloca(mask_size);
9420             memset(mask, 0, mask_size);
9421             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9422 
9423             if (!is_error(ret)) {
9424                 if (ret > arg2) {
9425                     /* More data returned than the caller's buffer will fit.
9426                      * This only happens if sizeof(abi_long) < sizeof(long)
9427                      * and the caller passed us a buffer holding an odd number
9428                      * of abi_longs. If the host kernel is actually using the
9429                      * extra 4 bytes then fail EINVAL; otherwise we can just
9430                      * ignore them and only copy the interesting part.
9431                      */
9432                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9433                     if (numcpus > arg2 * 8) {
9434                         return -TARGET_EINVAL;
9435                     }
9436                     ret = arg2;
9437                 }
9438 
9439                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9440                     return -TARGET_EFAULT;
9441                 }
9442             }
9443         }
9444         return ret;
9445     case TARGET_NR_sched_setaffinity:
9446         {
9447             unsigned int mask_size;
9448             unsigned long *mask;
9449 
9450             /*
9451              * sched_setaffinity needs multiples of ulong, so need to take
9452              * care of mismatches between target ulong and host ulong sizes.
9453              */
9454             if (arg2 & (sizeof(abi_ulong) - 1)) {
9455                 return -TARGET_EINVAL;
9456             }
9457             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9458             mask = alloca(mask_size);
9459 
9460             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9461             if (ret) {
9462                 return ret;
9463             }
9464 
9465             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9466         }
9467     case TARGET_NR_getcpu:
9468         {
9469             unsigned cpu, node;
9470             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9471                                        arg2 ? &node : NULL,
9472                                        NULL));
9473             if (is_error(ret)) {
9474                 return ret;
9475             }
9476             if (arg1 && put_user_u32(cpu, arg1)) {
9477                 return -TARGET_EFAULT;
9478             }
9479             if (arg2 && put_user_u32(node, arg2)) {
9480                 return -TARGET_EFAULT;
9481             }
9482         }
9483         return ret;
9484     case TARGET_NR_sched_setparam:
9485         {
9486             struct sched_param *target_schp;
9487             struct sched_param schp;
9488 
9489             if (arg2 == 0) {
9490                 return -TARGET_EINVAL;
9491             }
9492             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9493                 return -TARGET_EFAULT;
9494             schp.sched_priority = tswap32(target_schp->sched_priority);
9495             unlock_user_struct(target_schp, arg2, 0);
9496             return get_errno(sched_setparam(arg1, &schp));
9497         }
9498     case TARGET_NR_sched_getparam:
9499         {
9500             struct sched_param *target_schp;
9501             struct sched_param schp;
9502 
9503             if (arg2 == 0) {
9504                 return -TARGET_EINVAL;
9505             }
9506             ret = get_errno(sched_getparam(arg1, &schp));
9507             if (!is_error(ret)) {
9508                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9509                     return -TARGET_EFAULT;
9510                 target_schp->sched_priority = tswap32(schp.sched_priority);
9511                 unlock_user_struct(target_schp, arg2, 1);
9512             }
9513         }
9514         return ret;
9515     case TARGET_NR_sched_setscheduler:
9516         {
9517             struct sched_param *target_schp;
9518             struct sched_param schp;
9519             if (arg3 == 0) {
9520                 return -TARGET_EINVAL;
9521             }
9522             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9523                 return -TARGET_EFAULT;
9524             schp.sched_priority = tswap32(target_schp->sched_priority);
9525             unlock_user_struct(target_schp, arg3, 0);
9526             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9527         }
9528     case TARGET_NR_sched_getscheduler:
9529         return get_errno(sched_getscheduler(arg1));
9530     case TARGET_NR_sched_yield:
9531         return get_errno(sched_yield());
9532     case TARGET_NR_sched_get_priority_max:
9533         return get_errno(sched_get_priority_max(arg1));
9534     case TARGET_NR_sched_get_priority_min:
9535         return get_errno(sched_get_priority_min(arg1));
9536     case TARGET_NR_sched_rr_get_interval:
9537         {
9538             struct timespec ts;
9539             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9540             if (!is_error(ret)) {
9541                 ret = host_to_target_timespec(arg2, &ts);
9542             }
9543         }
9544         return ret;
9545     case TARGET_NR_nanosleep:
9546         {
9547             struct timespec req, rem;
9548             target_to_host_timespec(&req, arg1);
9549             ret = get_errno(safe_nanosleep(&req, &rem));
9550             if (is_error(ret) && arg2) {
9551                 host_to_target_timespec(arg2, &rem);
9552             }
9553         }
9554         return ret;
9555     case TARGET_NR_prctl:
9556         switch (arg1) {
9557         case PR_GET_PDEATHSIG:
9558         {
9559             int deathsig;
9560             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9561             if (!is_error(ret) && arg2
9562                 && put_user_ual(deathsig, arg2)) {
9563                 return -TARGET_EFAULT;
9564             }
9565             return ret;
9566         }
9567 #ifdef PR_GET_NAME
9568         case PR_GET_NAME:
9569         {
9570             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9571             if (!name) {
9572                 return -TARGET_EFAULT;
9573             }
9574             ret = get_errno(prctl(arg1, (unsigned long)name,
9575                                   arg3, arg4, arg5));
9576             unlock_user(name, arg2, 16);
9577             return ret;
9578         }
9579         case PR_SET_NAME:
9580         {
9581             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9582             if (!name) {
9583                 return -TARGET_EFAULT;
9584             }
9585             ret = get_errno(prctl(arg1, (unsigned long)name,
9586                                   arg3, arg4, arg5));
9587             unlock_user(name, arg2, 0);
9588             return ret;
9589         }
9590 #endif
9591 #ifdef TARGET_MIPS
9592         case TARGET_PR_GET_FP_MODE:
9593         {
9594             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9595             ret = 0;
9596             if (env->CP0_Status & (1 << CP0St_FR)) {
9597                 ret |= TARGET_PR_FP_MODE_FR;
9598             }
9599             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9600                 ret |= TARGET_PR_FP_MODE_FRE;
9601             }
9602             return ret;
9603         }
9604         case TARGET_PR_SET_FP_MODE:
9605         {
9606             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9607             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9608             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9609             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9610             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9611 
9612             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9613                                             TARGET_PR_FP_MODE_FRE;
9614 
9615             /* If nothing to change, return right away, successfully.  */
9616             if (old_fr == new_fr && old_fre == new_fre) {
9617                 return 0;
9618             }
9619             /* Check the value is valid */
9620             if (arg2 & ~known_bits) {
9621                 return -TARGET_EOPNOTSUPP;
9622             }
9623             /* Setting FRE without FR is not supported.  */
9624             if (new_fre && !new_fr) {
9625                 return -TARGET_EOPNOTSUPP;
9626             }
9627             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9628                 /* FR1 is not supported */
9629                 return -TARGET_EOPNOTSUPP;
9630             }
9631             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9632                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9633                 /* cannot set FR=0 */
9634                 return -TARGET_EOPNOTSUPP;
9635             }
9636             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9637                 /* Cannot set FRE=1 */
9638                 return -TARGET_EOPNOTSUPP;
9639             }
9640 
9641             int i;
9642             fpr_t *fpr = env->active_fpu.fpr;
9643             for (i = 0; i < 32 ; i += 2) {
9644                 if (!old_fr && new_fr) {
9645                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9646                 } else if (old_fr && !new_fr) {
9647                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9648                 }
9649             }
9650 
9651             if (new_fr) {
9652                 env->CP0_Status |= (1 << CP0St_FR);
9653                 env->hflags |= MIPS_HFLAG_F64;
9654             } else {
9655                 env->CP0_Status &= ~(1 << CP0St_FR);
9656                 env->hflags &= ~MIPS_HFLAG_F64;
9657             }
9658             if (new_fre) {
9659                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9660                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9661                     env->hflags |= MIPS_HFLAG_FRE;
9662                 }
9663             } else {
9664                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9665                 env->hflags &= ~MIPS_HFLAG_FRE;
9666             }
9667 
9668             return 0;
9669         }
9670 #endif /* MIPS */
9671 #ifdef TARGET_AARCH64
9672         case TARGET_PR_SVE_SET_VL:
9673             /*
9674              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9675              * PR_SVE_VL_INHERIT.  Note the kernel definition
9676              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9677              * even though the current architectural maximum is VQ=16.
9678              */
9679             ret = -TARGET_EINVAL;
9680             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9681                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9682                 CPUARMState *env = cpu_env;
9683                 ARMCPU *cpu = arm_env_get_cpu(env);
9684                 uint32_t vq, old_vq;
9685 
9686                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9687                 vq = MAX(arg2 / 16, 1);
9688                 vq = MIN(vq, cpu->sve_max_vq);
9689 
9690                 if (vq < old_vq) {
9691                     aarch64_sve_narrow_vq(env, vq);
9692                 }
9693                 env->vfp.zcr_el[1] = vq - 1;
9694                 ret = vq * 16;
9695             }
9696             return ret;
9697         case TARGET_PR_SVE_GET_VL:
9698             ret = -TARGET_EINVAL;
9699             {
9700                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9701                 if (cpu_isar_feature(aa64_sve, cpu)) {
9702                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9703                 }
9704             }
9705             return ret;
9706         case TARGET_PR_PAC_RESET_KEYS:
9707             {
9708                 CPUARMState *env = cpu_env;
9709                 ARMCPU *cpu = arm_env_get_cpu(env);
9710 
9711                 if (arg3 || arg4 || arg5) {
9712                     return -TARGET_EINVAL;
9713                 }
9714                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9715                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9716                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9717                                TARGET_PR_PAC_APGAKEY);
9718                     if (arg2 == 0) {
9719                         arg2 = all;
9720                     } else if (arg2 & ~all) {
9721                         return -TARGET_EINVAL;
9722                     }
9723                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9724                         arm_init_pauth_key(&env->apia_key);
9725                     }
9726                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9727                         arm_init_pauth_key(&env->apib_key);
9728                     }
9729                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9730                         arm_init_pauth_key(&env->apda_key);
9731                     }
9732                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9733                         arm_init_pauth_key(&env->apdb_key);
9734                     }
9735                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9736                         arm_init_pauth_key(&env->apga_key);
9737                     }
9738                     return 0;
9739                 }
9740             }
9741             return -TARGET_EINVAL;
9742 #endif /* AARCH64 */
9743         case PR_GET_SECCOMP:
9744         case PR_SET_SECCOMP:
9745             /* Disable seccomp to prevent the target disabling syscalls we
9746              * need. */
9747             return -TARGET_EINVAL;
9748         default:
9749             /* Most prctl options have no pointer arguments */
9750             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9751         }
9752         break;
9753 #ifdef TARGET_NR_arch_prctl
9754     case TARGET_NR_arch_prctl:
9755 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9756         return do_arch_prctl(cpu_env, arg1, arg2);
9757 #else
9758 #error unreachable
9759 #endif
9760 #endif
9761 #ifdef TARGET_NR_pread64
9762     case TARGET_NR_pread64:
9763         if (regpairs_aligned(cpu_env, num)) {
9764             arg4 = arg5;
9765             arg5 = arg6;
9766         }
9767         if (arg2 == 0 && arg3 == 0) {
9768             /* Special-case NULL buffer and zero length, which should succeed */
9769             p = 0;
9770         } else {
9771             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9772             if (!p) {
9773                 return -TARGET_EFAULT;
9774             }
9775         }
9776         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9777         unlock_user(p, arg2, ret);
9778         return ret;
9779     case TARGET_NR_pwrite64:
9780         if (regpairs_aligned(cpu_env, num)) {
9781             arg4 = arg5;
9782             arg5 = arg6;
9783         }
9784         if (arg2 == 0 && arg3 == 0) {
9785             /* Special-case NULL buffer and zero length, which should succeed */
9786             p = 0;
9787         } else {
9788             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9789             if (!p) {
9790                 return -TARGET_EFAULT;
9791             }
9792         }
9793         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9794         unlock_user(p, arg2, 0);
9795         return ret;
9796 #endif
9797     case TARGET_NR_getcwd:
9798         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9799             return -TARGET_EFAULT;
9800         ret = get_errno(sys_getcwd1(p, arg2));
9801         unlock_user(p, arg1, ret);
9802         return ret;
9803     case TARGET_NR_capget:
9804     case TARGET_NR_capset:
9805     {
9806         struct target_user_cap_header *target_header;
9807         struct target_user_cap_data *target_data = NULL;
9808         struct __user_cap_header_struct header;
9809         struct __user_cap_data_struct data[2];
9810         struct __user_cap_data_struct *dataptr = NULL;
9811         int i, target_datalen;
9812         int data_items = 1;
9813 
9814         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9815             return -TARGET_EFAULT;
9816         }
9817         header.version = tswap32(target_header->version);
9818         header.pid = tswap32(target_header->pid);
9819 
9820         if (header.version != _LINUX_CAPABILITY_VERSION) {
9821             /* Version 2 and up takes pointer to two user_data structs */
9822             data_items = 2;
9823         }
9824 
9825         target_datalen = sizeof(*target_data) * data_items;
9826 
9827         if (arg2) {
9828             if (num == TARGET_NR_capget) {
9829                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9830             } else {
9831                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9832             }
9833             if (!target_data) {
9834                 unlock_user_struct(target_header, arg1, 0);
9835                 return -TARGET_EFAULT;
9836             }
9837 
9838             if (num == TARGET_NR_capset) {
9839                 for (i = 0; i < data_items; i++) {
9840                     data[i].effective = tswap32(target_data[i].effective);
9841                     data[i].permitted = tswap32(target_data[i].permitted);
9842                     data[i].inheritable = tswap32(target_data[i].inheritable);
9843                 }
9844             }
9845 
9846             dataptr = data;
9847         }
9848 
9849         if (num == TARGET_NR_capget) {
9850             ret = get_errno(capget(&header, dataptr));
9851         } else {
9852             ret = get_errno(capset(&header, dataptr));
9853         }
9854 
9855         /* The kernel always updates version for both capget and capset */
9856         target_header->version = tswap32(header.version);
9857         unlock_user_struct(target_header, arg1, 1);
9858 
9859         if (arg2) {
9860             if (num == TARGET_NR_capget) {
9861                 for (i = 0; i < data_items; i++) {
9862                     target_data[i].effective = tswap32(data[i].effective);
9863                     target_data[i].permitted = tswap32(data[i].permitted);
9864                     target_data[i].inheritable = tswap32(data[i].inheritable);
9865                 }
9866                 unlock_user(target_data, arg2, target_datalen);
9867             } else {
9868                 unlock_user(target_data, arg2, 0);
9869             }
9870         }
9871         return ret;
9872     }
9873     case TARGET_NR_sigaltstack:
9874         return do_sigaltstack(arg1, arg2,
9875                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9876 
9877 #ifdef CONFIG_SENDFILE
9878 #ifdef TARGET_NR_sendfile
9879     case TARGET_NR_sendfile:
9880     {
9881         off_t *offp = NULL;
9882         off_t off;
9883         if (arg3) {
9884             ret = get_user_sal(off, arg3);
9885             if (is_error(ret)) {
9886                 return ret;
9887             }
9888             offp = &off;
9889         }
9890         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9891         if (!is_error(ret) && arg3) {
9892             abi_long ret2 = put_user_sal(off, arg3);
9893             if (is_error(ret2)) {
9894                 ret = ret2;
9895             }
9896         }
9897         return ret;
9898     }
9899 #endif
9900 #ifdef TARGET_NR_sendfile64
9901     case TARGET_NR_sendfile64:
9902     {
9903         off_t *offp = NULL;
9904         off_t off;
9905         if (arg3) {
9906             ret = get_user_s64(off, arg3);
9907             if (is_error(ret)) {
9908                 return ret;
9909             }
9910             offp = &off;
9911         }
9912         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9913         if (!is_error(ret) && arg3) {
9914             abi_long ret2 = put_user_s64(off, arg3);
9915             if (is_error(ret2)) {
9916                 ret = ret2;
9917             }
9918         }
9919         return ret;
9920     }
9921 #endif
9922 #endif
9923 #ifdef TARGET_NR_vfork
9924     case TARGET_NR_vfork:
9925         return get_errno(do_fork(cpu_env,
9926                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9927                          0, 0, 0, 0));
9928 #endif
9929 #ifdef TARGET_NR_ugetrlimit
9930     case TARGET_NR_ugetrlimit:
9931     {
9932 	struct rlimit rlim;
9933 	int resource = target_to_host_resource(arg1);
9934 	ret = get_errno(getrlimit(resource, &rlim));
9935 	if (!is_error(ret)) {
9936 	    struct target_rlimit *target_rlim;
9937             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9938                 return -TARGET_EFAULT;
9939 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9940 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9941             unlock_user_struct(target_rlim, arg2, 1);
9942 	}
9943         return ret;
9944     }
9945 #endif
9946 #ifdef TARGET_NR_truncate64
9947     case TARGET_NR_truncate64:
9948         if (!(p = lock_user_string(arg1)))
9949             return -TARGET_EFAULT;
9950 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9951         unlock_user(p, arg1, 0);
9952         return ret;
9953 #endif
9954 #ifdef TARGET_NR_ftruncate64
9955     case TARGET_NR_ftruncate64:
9956         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9957 #endif
9958 #ifdef TARGET_NR_stat64
9959     case TARGET_NR_stat64:
9960         if (!(p = lock_user_string(arg1))) {
9961             return -TARGET_EFAULT;
9962         }
9963         ret = get_errno(stat(path(p), &st));
9964         unlock_user(p, arg1, 0);
9965         if (!is_error(ret))
9966             ret = host_to_target_stat64(cpu_env, arg2, &st);
9967         return ret;
9968 #endif
9969 #ifdef TARGET_NR_lstat64
9970     case TARGET_NR_lstat64:
9971         if (!(p = lock_user_string(arg1))) {
9972             return -TARGET_EFAULT;
9973         }
9974         ret = get_errno(lstat(path(p), &st));
9975         unlock_user(p, arg1, 0);
9976         if (!is_error(ret))
9977             ret = host_to_target_stat64(cpu_env, arg2, &st);
9978         return ret;
9979 #endif
9980 #ifdef TARGET_NR_fstat64
9981     case TARGET_NR_fstat64:
9982         ret = get_errno(fstat(arg1, &st));
9983         if (!is_error(ret))
9984             ret = host_to_target_stat64(cpu_env, arg2, &st);
9985         return ret;
9986 #endif
9987 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9988 #ifdef TARGET_NR_fstatat64
9989     case TARGET_NR_fstatat64:
9990 #endif
9991 #ifdef TARGET_NR_newfstatat
9992     case TARGET_NR_newfstatat:
9993 #endif
9994         if (!(p = lock_user_string(arg2))) {
9995             return -TARGET_EFAULT;
9996         }
9997         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9998         unlock_user(p, arg2, 0);
9999         if (!is_error(ret))
10000             ret = host_to_target_stat64(cpu_env, arg3, &st);
10001         return ret;
10002 #endif
10003 #ifdef TARGET_NR_lchown
10004     case TARGET_NR_lchown:
10005         if (!(p = lock_user_string(arg1)))
10006             return -TARGET_EFAULT;
10007         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10008         unlock_user(p, arg1, 0);
10009         return ret;
10010 #endif
10011 #ifdef TARGET_NR_getuid
10012     case TARGET_NR_getuid:
10013         return get_errno(high2lowuid(getuid()));
10014 #endif
10015 #ifdef TARGET_NR_getgid
10016     case TARGET_NR_getgid:
10017         return get_errno(high2lowgid(getgid()));
10018 #endif
10019 #ifdef TARGET_NR_geteuid
10020     case TARGET_NR_geteuid:
10021         return get_errno(high2lowuid(geteuid()));
10022 #endif
10023 #ifdef TARGET_NR_getegid
10024     case TARGET_NR_getegid:
10025         return get_errno(high2lowgid(getegid()));
10026 #endif
10027     case TARGET_NR_setreuid:
10028         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10029     case TARGET_NR_setregid:
10030         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10031     case TARGET_NR_getgroups:
10032         {
10033             int gidsetsize = arg1;
10034             target_id *target_grouplist;
10035             gid_t *grouplist;
10036             int i;
10037 
10038             grouplist = alloca(gidsetsize * sizeof(gid_t));
10039             ret = get_errno(getgroups(gidsetsize, grouplist));
10040             if (gidsetsize == 0)
10041                 return ret;
10042             if (!is_error(ret)) {
10043                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10044                 if (!target_grouplist)
10045                     return -TARGET_EFAULT;
10046                 for(i = 0;i < ret; i++)
10047                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10048                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10049             }
10050         }
10051         return ret;
10052     case TARGET_NR_setgroups:
10053         {
10054             int gidsetsize = arg1;
10055             target_id *target_grouplist;
10056             gid_t *grouplist = NULL;
10057             int i;
10058             if (gidsetsize) {
10059                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10060                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10061                 if (!target_grouplist) {
10062                     return -TARGET_EFAULT;
10063                 }
10064                 for (i = 0; i < gidsetsize; i++) {
10065                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10066                 }
10067                 unlock_user(target_grouplist, arg2, 0);
10068             }
10069             return get_errno(setgroups(gidsetsize, grouplist));
10070         }
10071     case TARGET_NR_fchown:
10072         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10073 #if defined(TARGET_NR_fchownat)
10074     case TARGET_NR_fchownat:
10075         if (!(p = lock_user_string(arg2)))
10076             return -TARGET_EFAULT;
10077         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10078                                  low2highgid(arg4), arg5));
10079         unlock_user(p, arg2, 0);
10080         return ret;
10081 #endif
10082 #ifdef TARGET_NR_setresuid
10083     case TARGET_NR_setresuid:
10084         return get_errno(sys_setresuid(low2highuid(arg1),
10085                                        low2highuid(arg2),
10086                                        low2highuid(arg3)));
10087 #endif
10088 #ifdef TARGET_NR_getresuid
10089     case TARGET_NR_getresuid:
10090         {
10091             uid_t ruid, euid, suid;
10092             ret = get_errno(getresuid(&ruid, &euid, &suid));
10093             if (!is_error(ret)) {
10094                 if (put_user_id(high2lowuid(ruid), arg1)
10095                     || put_user_id(high2lowuid(euid), arg2)
10096                     || put_user_id(high2lowuid(suid), arg3))
10097                     return -TARGET_EFAULT;
10098             }
10099         }
10100         return ret;
10101 #endif
10102 #ifdef TARGET_NR_getresgid
10103     case TARGET_NR_setresgid:
10104         return get_errno(sys_setresgid(low2highgid(arg1),
10105                                        low2highgid(arg2),
10106                                        low2highgid(arg3)));
10107 #endif
10108 #ifdef TARGET_NR_getresgid
10109     case TARGET_NR_getresgid:
10110         {
10111             gid_t rgid, egid, sgid;
10112             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10113             if (!is_error(ret)) {
10114                 if (put_user_id(high2lowgid(rgid), arg1)
10115                     || put_user_id(high2lowgid(egid), arg2)
10116                     || put_user_id(high2lowgid(sgid), arg3))
10117                     return -TARGET_EFAULT;
10118             }
10119         }
10120         return ret;
10121 #endif
10122 #ifdef TARGET_NR_chown
10123     case TARGET_NR_chown:
10124         if (!(p = lock_user_string(arg1)))
10125             return -TARGET_EFAULT;
10126         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10127         unlock_user(p, arg1, 0);
10128         return ret;
10129 #endif
10130     case TARGET_NR_setuid:
10131         return get_errno(sys_setuid(low2highuid(arg1)));
10132     case TARGET_NR_setgid:
10133         return get_errno(sys_setgid(low2highgid(arg1)));
10134     case TARGET_NR_setfsuid:
10135         return get_errno(setfsuid(arg1));
10136     case TARGET_NR_setfsgid:
10137         return get_errno(setfsgid(arg1));
10138 
10139 #ifdef TARGET_NR_lchown32
10140     case TARGET_NR_lchown32:
10141         if (!(p = lock_user_string(arg1)))
10142             return -TARGET_EFAULT;
10143         ret = get_errno(lchown(p, arg2, arg3));
10144         unlock_user(p, arg1, 0);
10145         return ret;
10146 #endif
10147 #ifdef TARGET_NR_getuid32
10148     case TARGET_NR_getuid32:
10149         return get_errno(getuid());
10150 #endif
10151 
10152 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10153    /* Alpha specific */
10154     case TARGET_NR_getxuid:
10155          {
10156             uid_t euid;
10157             euid=geteuid();
10158             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10159          }
10160         return get_errno(getuid());
10161 #endif
10162 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10163    /* Alpha specific */
10164     case TARGET_NR_getxgid:
10165          {
10166             uid_t egid;
10167             egid=getegid();
10168             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10169          }
10170         return get_errno(getgid());
10171 #endif
10172 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10173     /* Alpha specific */
10174     case TARGET_NR_osf_getsysinfo:
10175         ret = -TARGET_EOPNOTSUPP;
10176         switch (arg1) {
10177           case TARGET_GSI_IEEE_FP_CONTROL:
10178             {
10179                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10180 
10181                 /* Copied from linux ieee_fpcr_to_swcr.  */
10182                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10183                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10184                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10185                                         | SWCR_TRAP_ENABLE_DZE
10186                                         | SWCR_TRAP_ENABLE_OVF);
10187                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10188                                         | SWCR_TRAP_ENABLE_INE);
10189                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10190                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10191 
10192                 if (put_user_u64 (swcr, arg2))
10193                         return -TARGET_EFAULT;
10194                 ret = 0;
10195             }
10196             break;
10197 
10198           /* case GSI_IEEE_STATE_AT_SIGNAL:
10199              -- Not implemented in linux kernel.
10200              case GSI_UACPROC:
10201              -- Retrieves current unaligned access state; not much used.
10202              case GSI_PROC_TYPE:
10203              -- Retrieves implver information; surely not used.
10204              case GSI_GET_HWRPB:
10205              -- Grabs a copy of the HWRPB; surely not used.
10206           */
10207         }
10208         return ret;
10209 #endif
10210 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10211     /* Alpha specific */
10212     case TARGET_NR_osf_setsysinfo:
10213         ret = -TARGET_EOPNOTSUPP;
10214         switch (arg1) {
10215           case TARGET_SSI_IEEE_FP_CONTROL:
10216             {
10217                 uint64_t swcr, fpcr, orig_fpcr;
10218 
10219                 if (get_user_u64 (swcr, arg2)) {
10220                     return -TARGET_EFAULT;
10221                 }
10222                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10223                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10224 
10225                 /* Copied from linux ieee_swcr_to_fpcr.  */
10226                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10227                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10228                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10229                                   | SWCR_TRAP_ENABLE_DZE
10230                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10231                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10232                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10233                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10234                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10235 
10236                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10237                 ret = 0;
10238             }
10239             break;
10240 
10241           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10242             {
10243                 uint64_t exc, fpcr, orig_fpcr;
10244                 int si_code;
10245 
10246                 if (get_user_u64(exc, arg2)) {
10247                     return -TARGET_EFAULT;
10248                 }
10249 
10250                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10251 
10252                 /* We only add to the exception status here.  */
10253                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10254 
10255                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10256                 ret = 0;
10257 
10258                 /* Old exceptions are not signaled.  */
10259                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10260 
10261                 /* If any exceptions set by this call,
10262                    and are unmasked, send a signal.  */
10263                 si_code = 0;
10264                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10265                     si_code = TARGET_FPE_FLTRES;
10266                 }
10267                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10268                     si_code = TARGET_FPE_FLTUND;
10269                 }
10270                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10271                     si_code = TARGET_FPE_FLTOVF;
10272                 }
10273                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10274                     si_code = TARGET_FPE_FLTDIV;
10275                 }
10276                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10277                     si_code = TARGET_FPE_FLTINV;
10278                 }
10279                 if (si_code != 0) {
10280                     target_siginfo_t info;
10281                     info.si_signo = SIGFPE;
10282                     info.si_errno = 0;
10283                     info.si_code = si_code;
10284                     info._sifields._sigfault._addr
10285                         = ((CPUArchState *)cpu_env)->pc;
10286                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10287                                  QEMU_SI_FAULT, &info);
10288                 }
10289             }
10290             break;
10291 
10292           /* case SSI_NVPAIRS:
10293              -- Used with SSIN_UACPROC to enable unaligned accesses.
10294              case SSI_IEEE_STATE_AT_SIGNAL:
10295              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10296              -- Not implemented in linux kernel
10297           */
10298         }
10299         return ret;
10300 #endif
10301 #ifdef TARGET_NR_osf_sigprocmask
10302     /* Alpha specific.  */
10303     case TARGET_NR_osf_sigprocmask:
10304         {
10305             abi_ulong mask;
10306             int how;
10307             sigset_t set, oldset;
10308 
10309             switch(arg1) {
10310             case TARGET_SIG_BLOCK:
10311                 how = SIG_BLOCK;
10312                 break;
10313             case TARGET_SIG_UNBLOCK:
10314                 how = SIG_UNBLOCK;
10315                 break;
10316             case TARGET_SIG_SETMASK:
10317                 how = SIG_SETMASK;
10318                 break;
10319             default:
10320                 return -TARGET_EINVAL;
10321             }
10322             mask = arg2;
10323             target_to_host_old_sigset(&set, &mask);
10324             ret = do_sigprocmask(how, &set, &oldset);
10325             if (!ret) {
10326                 host_to_target_old_sigset(&mask, &oldset);
10327                 ret = mask;
10328             }
10329         }
10330         return ret;
10331 #endif
10332 
10333 #ifdef TARGET_NR_getgid32
10334     case TARGET_NR_getgid32:
10335         return get_errno(getgid());
10336 #endif
10337 #ifdef TARGET_NR_geteuid32
10338     case TARGET_NR_geteuid32:
10339         return get_errno(geteuid());
10340 #endif
10341 #ifdef TARGET_NR_getegid32
10342     case TARGET_NR_getegid32:
10343         return get_errno(getegid());
10344 #endif
10345 #ifdef TARGET_NR_setreuid32
10346     case TARGET_NR_setreuid32:
10347         return get_errno(setreuid(arg1, arg2));
10348 #endif
10349 #ifdef TARGET_NR_setregid32
10350     case TARGET_NR_setregid32:
10351         return get_errno(setregid(arg1, arg2));
10352 #endif
10353 #ifdef TARGET_NR_getgroups32
10354     case TARGET_NR_getgroups32:
10355         {
10356             int gidsetsize = arg1;
10357             uint32_t *target_grouplist;
10358             gid_t *grouplist;
10359             int i;
10360 
10361             grouplist = alloca(gidsetsize * sizeof(gid_t));
10362             ret = get_errno(getgroups(gidsetsize, grouplist));
10363             if (gidsetsize == 0)
10364                 return ret;
10365             if (!is_error(ret)) {
10366                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10367                 if (!target_grouplist) {
10368                     return -TARGET_EFAULT;
10369                 }
10370                 for(i = 0;i < ret; i++)
10371                     target_grouplist[i] = tswap32(grouplist[i]);
10372                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10373             }
10374         }
10375         return ret;
10376 #endif
10377 #ifdef TARGET_NR_setgroups32
10378     case TARGET_NR_setgroups32:
10379         {
10380             int gidsetsize = arg1;
10381             uint32_t *target_grouplist;
10382             gid_t *grouplist;
10383             int i;
10384 
10385             grouplist = alloca(gidsetsize * sizeof(gid_t));
10386             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10387             if (!target_grouplist) {
10388                 return -TARGET_EFAULT;
10389             }
10390             for(i = 0;i < gidsetsize; i++)
10391                 grouplist[i] = tswap32(target_grouplist[i]);
10392             unlock_user(target_grouplist, arg2, 0);
10393             return get_errno(setgroups(gidsetsize, grouplist));
10394         }
10395 #endif
10396 #ifdef TARGET_NR_fchown32
10397     case TARGET_NR_fchown32:
10398         return get_errno(fchown(arg1, arg2, arg3));
10399 #endif
10400 #ifdef TARGET_NR_setresuid32
10401     case TARGET_NR_setresuid32:
10402         return get_errno(sys_setresuid(arg1, arg2, arg3));
10403 #endif
10404 #ifdef TARGET_NR_getresuid32
10405     case TARGET_NR_getresuid32:
10406         {
10407             uid_t ruid, euid, suid;
10408             ret = get_errno(getresuid(&ruid, &euid, &suid));
10409             if (!is_error(ret)) {
10410                 if (put_user_u32(ruid, arg1)
10411                     || put_user_u32(euid, arg2)
10412                     || put_user_u32(suid, arg3))
10413                     return -TARGET_EFAULT;
10414             }
10415         }
10416         return ret;
10417 #endif
10418 #ifdef TARGET_NR_setresgid32
10419     case TARGET_NR_setresgid32:
10420         return get_errno(sys_setresgid(arg1, arg2, arg3));
10421 #endif
10422 #ifdef TARGET_NR_getresgid32
10423     case TARGET_NR_getresgid32:
10424         {
10425             gid_t rgid, egid, sgid;
10426             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10427             if (!is_error(ret)) {
10428                 if (put_user_u32(rgid, arg1)
10429                     || put_user_u32(egid, arg2)
10430                     || put_user_u32(sgid, arg3))
10431                     return -TARGET_EFAULT;
10432             }
10433         }
10434         return ret;
10435 #endif
10436 #ifdef TARGET_NR_chown32
10437     case TARGET_NR_chown32:
10438         if (!(p = lock_user_string(arg1)))
10439             return -TARGET_EFAULT;
10440         ret = get_errno(chown(p, arg2, arg3));
10441         unlock_user(p, arg1, 0);
10442         return ret;
10443 #endif
10444 #ifdef TARGET_NR_setuid32
10445     case TARGET_NR_setuid32:
10446         return get_errno(sys_setuid(arg1));
10447 #endif
10448 #ifdef TARGET_NR_setgid32
10449     case TARGET_NR_setgid32:
10450         return get_errno(sys_setgid(arg1));
10451 #endif
10452 #ifdef TARGET_NR_setfsuid32
10453     case TARGET_NR_setfsuid32:
10454         return get_errno(setfsuid(arg1));
10455 #endif
10456 #ifdef TARGET_NR_setfsgid32
10457     case TARGET_NR_setfsgid32:
10458         return get_errno(setfsgid(arg1));
10459 #endif
10460 #ifdef TARGET_NR_mincore
10461     case TARGET_NR_mincore:
10462         {
10463             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10464             if (!a) {
10465                 return -TARGET_ENOMEM;
10466             }
10467             p = lock_user_string(arg3);
10468             if (!p) {
10469                 ret = -TARGET_EFAULT;
10470             } else {
10471                 ret = get_errno(mincore(a, arg2, p));
10472                 unlock_user(p, arg3, ret);
10473             }
10474             unlock_user(a, arg1, 0);
10475         }
10476         return ret;
10477 #endif
10478 #ifdef TARGET_NR_arm_fadvise64_64
10479     case TARGET_NR_arm_fadvise64_64:
10480         /* arm_fadvise64_64 looks like fadvise64_64 but
10481          * with different argument order: fd, advice, offset, len
10482          * rather than the usual fd, offset, len, advice.
10483          * Note that offset and len are both 64-bit so appear as
10484          * pairs of 32-bit registers.
10485          */
10486         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10487                             target_offset64(arg5, arg6), arg2);
10488         return -host_to_target_errno(ret);
10489 #endif
10490 
10491 #if TARGET_ABI_BITS == 32
10492 
10493 #ifdef TARGET_NR_fadvise64_64
10494     case TARGET_NR_fadvise64_64:
10495 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10496         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10497         ret = arg2;
10498         arg2 = arg3;
10499         arg3 = arg4;
10500         arg4 = arg5;
10501         arg5 = arg6;
10502         arg6 = ret;
10503 #else
10504         /* 6 args: fd, offset (high, low), len (high, low), advice */
10505         if (regpairs_aligned(cpu_env, num)) {
10506             /* offset is in (3,4), len in (5,6) and advice in 7 */
10507             arg2 = arg3;
10508             arg3 = arg4;
10509             arg4 = arg5;
10510             arg5 = arg6;
10511             arg6 = arg7;
10512         }
10513 #endif
10514         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10515                             target_offset64(arg4, arg5), arg6);
10516         return -host_to_target_errno(ret);
10517 #endif
10518 
10519 #ifdef TARGET_NR_fadvise64
10520     case TARGET_NR_fadvise64:
10521         /* 5 args: fd, offset (high, low), len, advice */
10522         if (regpairs_aligned(cpu_env, num)) {
10523             /* offset is in (3,4), len in 5 and advice in 6 */
10524             arg2 = arg3;
10525             arg3 = arg4;
10526             arg4 = arg5;
10527             arg5 = arg6;
10528         }
10529         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10530         return -host_to_target_errno(ret);
10531 #endif
10532 
10533 #else /* not a 32-bit ABI */
10534 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10535 #ifdef TARGET_NR_fadvise64_64
10536     case TARGET_NR_fadvise64_64:
10537 #endif
10538 #ifdef TARGET_NR_fadvise64
10539     case TARGET_NR_fadvise64:
10540 #endif
10541 #ifdef TARGET_S390X
10542         switch (arg4) {
10543         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10544         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10545         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10546         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10547         default: break;
10548         }
10549 #endif
10550         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10551 #endif
10552 #endif /* end of 64-bit ABI fadvise handling */
10553 
10554 #ifdef TARGET_NR_madvise
10555     case TARGET_NR_madvise:
10556         /* A straight passthrough may not be safe because qemu sometimes
10557            turns private file-backed mappings into anonymous mappings.
10558            This will break MADV_DONTNEED.
10559            This is a hint, so ignoring and returning success is ok.  */
10560         return 0;
10561 #endif
10562 #if TARGET_ABI_BITS == 32
10563     case TARGET_NR_fcntl64:
10564     {
10565 	int cmd;
10566 	struct flock64 fl;
10567         from_flock64_fn *copyfrom = copy_from_user_flock64;
10568         to_flock64_fn *copyto = copy_to_user_flock64;
10569 
10570 #ifdef TARGET_ARM
10571         if (!((CPUARMState *)cpu_env)->eabi) {
10572             copyfrom = copy_from_user_oabi_flock64;
10573             copyto = copy_to_user_oabi_flock64;
10574         }
10575 #endif
10576 
10577 	cmd = target_to_host_fcntl_cmd(arg2);
10578         if (cmd == -TARGET_EINVAL) {
10579             return cmd;
10580         }
10581 
10582         switch(arg2) {
10583         case TARGET_F_GETLK64:
10584             ret = copyfrom(&fl, arg3);
10585             if (ret) {
10586                 break;
10587             }
10588             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10589             if (ret == 0) {
10590                 ret = copyto(arg3, &fl);
10591             }
10592 	    break;
10593 
10594         case TARGET_F_SETLK64:
10595         case TARGET_F_SETLKW64:
10596             ret = copyfrom(&fl, arg3);
10597             if (ret) {
10598                 break;
10599             }
10600             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10601 	    break;
10602         default:
10603             ret = do_fcntl(arg1, arg2, arg3);
10604             break;
10605         }
10606         return ret;
10607     }
10608 #endif
10609 #ifdef TARGET_NR_cacheflush
10610     case TARGET_NR_cacheflush:
10611         /* self-modifying code is handled automatically, so nothing needed */
10612         return 0;
10613 #endif
10614 #ifdef TARGET_NR_getpagesize
10615     case TARGET_NR_getpagesize:
10616         return TARGET_PAGE_SIZE;
10617 #endif
10618     case TARGET_NR_gettid:
10619         return get_errno(gettid());
10620 #ifdef TARGET_NR_readahead
10621     case TARGET_NR_readahead:
10622 #if TARGET_ABI_BITS == 32
10623         if (regpairs_aligned(cpu_env, num)) {
10624             arg2 = arg3;
10625             arg3 = arg4;
10626             arg4 = arg5;
10627         }
10628         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10629 #else
10630         ret = get_errno(readahead(arg1, arg2, arg3));
10631 #endif
10632         return ret;
10633 #endif
10634 #ifdef CONFIG_ATTR
10635 #ifdef TARGET_NR_setxattr
10636     case TARGET_NR_listxattr:
10637     case TARGET_NR_llistxattr:
10638     {
10639         void *p, *b = 0;
10640         if (arg2) {
10641             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10642             if (!b) {
10643                 return -TARGET_EFAULT;
10644             }
10645         }
10646         p = lock_user_string(arg1);
10647         if (p) {
10648             if (num == TARGET_NR_listxattr) {
10649                 ret = get_errno(listxattr(p, b, arg3));
10650             } else {
10651                 ret = get_errno(llistxattr(p, b, arg3));
10652             }
10653         } else {
10654             ret = -TARGET_EFAULT;
10655         }
10656         unlock_user(p, arg1, 0);
10657         unlock_user(b, arg2, arg3);
10658         return ret;
10659     }
10660     case TARGET_NR_flistxattr:
10661     {
10662         void *b = 0;
10663         if (arg2) {
10664             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10665             if (!b) {
10666                 return -TARGET_EFAULT;
10667             }
10668         }
10669         ret = get_errno(flistxattr(arg1, b, arg3));
10670         unlock_user(b, arg2, arg3);
10671         return ret;
10672     }
10673     case TARGET_NR_setxattr:
10674     case TARGET_NR_lsetxattr:
10675         {
10676             void *p, *n, *v = 0;
10677             if (arg3) {
10678                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10679                 if (!v) {
10680                     return -TARGET_EFAULT;
10681                 }
10682             }
10683             p = lock_user_string(arg1);
10684             n = lock_user_string(arg2);
10685             if (p && n) {
10686                 if (num == TARGET_NR_setxattr) {
10687                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10688                 } else {
10689                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10690                 }
10691             } else {
10692                 ret = -TARGET_EFAULT;
10693             }
10694             unlock_user(p, arg1, 0);
10695             unlock_user(n, arg2, 0);
10696             unlock_user(v, arg3, 0);
10697         }
10698         return ret;
10699     case TARGET_NR_fsetxattr:
10700         {
10701             void *n, *v = 0;
10702             if (arg3) {
10703                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10704                 if (!v) {
10705                     return -TARGET_EFAULT;
10706                 }
10707             }
10708             n = lock_user_string(arg2);
10709             if (n) {
10710                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10711             } else {
10712                 ret = -TARGET_EFAULT;
10713             }
10714             unlock_user(n, arg2, 0);
10715             unlock_user(v, arg3, 0);
10716         }
10717         return ret;
10718     case TARGET_NR_getxattr:
10719     case TARGET_NR_lgetxattr:
10720         {
10721             void *p, *n, *v = 0;
10722             if (arg3) {
10723                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10724                 if (!v) {
10725                     return -TARGET_EFAULT;
10726                 }
10727             }
10728             p = lock_user_string(arg1);
10729             n = lock_user_string(arg2);
10730             if (p && n) {
10731                 if (num == TARGET_NR_getxattr) {
10732                     ret = get_errno(getxattr(p, n, v, arg4));
10733                 } else {
10734                     ret = get_errno(lgetxattr(p, n, v, arg4));
10735                 }
10736             } else {
10737                 ret = -TARGET_EFAULT;
10738             }
10739             unlock_user(p, arg1, 0);
10740             unlock_user(n, arg2, 0);
10741             unlock_user(v, arg3, arg4);
10742         }
10743         return ret;
10744     case TARGET_NR_fgetxattr:
10745         {
10746             void *n, *v = 0;
10747             if (arg3) {
10748                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10749                 if (!v) {
10750                     return -TARGET_EFAULT;
10751                 }
10752             }
10753             n = lock_user_string(arg2);
10754             if (n) {
10755                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10756             } else {
10757                 ret = -TARGET_EFAULT;
10758             }
10759             unlock_user(n, arg2, 0);
10760             unlock_user(v, arg3, arg4);
10761         }
10762         return ret;
10763     case TARGET_NR_removexattr:
10764     case TARGET_NR_lremovexattr:
10765         {
10766             void *p, *n;
10767             p = lock_user_string(arg1);
10768             n = lock_user_string(arg2);
10769             if (p && n) {
10770                 if (num == TARGET_NR_removexattr) {
10771                     ret = get_errno(removexattr(p, n));
10772                 } else {
10773                     ret = get_errno(lremovexattr(p, n));
10774                 }
10775             } else {
10776                 ret = -TARGET_EFAULT;
10777             }
10778             unlock_user(p, arg1, 0);
10779             unlock_user(n, arg2, 0);
10780         }
10781         return ret;
10782     case TARGET_NR_fremovexattr:
10783         {
10784             void *n;
10785             n = lock_user_string(arg2);
10786             if (n) {
10787                 ret = get_errno(fremovexattr(arg1, n));
10788             } else {
10789                 ret = -TARGET_EFAULT;
10790             }
10791             unlock_user(n, arg2, 0);
10792         }
10793         return ret;
10794 #endif
10795 #endif /* CONFIG_ATTR */
10796 #ifdef TARGET_NR_set_thread_area
10797     case TARGET_NR_set_thread_area:
10798 #if defined(TARGET_MIPS)
10799       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10800       return 0;
10801 #elif defined(TARGET_CRIS)
10802       if (arg1 & 0xff)
10803           ret = -TARGET_EINVAL;
10804       else {
10805           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10806           ret = 0;
10807       }
10808       return ret;
10809 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10810       return do_set_thread_area(cpu_env, arg1);
10811 #elif defined(TARGET_M68K)
10812       {
10813           TaskState *ts = cpu->opaque;
10814           ts->tp_value = arg1;
10815           return 0;
10816       }
10817 #else
10818       return -TARGET_ENOSYS;
10819 #endif
10820 #endif
10821 #ifdef TARGET_NR_get_thread_area
10822     case TARGET_NR_get_thread_area:
10823 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10824         return do_get_thread_area(cpu_env, arg1);
10825 #elif defined(TARGET_M68K)
10826         {
10827             TaskState *ts = cpu->opaque;
10828             return ts->tp_value;
10829         }
10830 #else
10831         return -TARGET_ENOSYS;
10832 #endif
10833 #endif
10834 #ifdef TARGET_NR_getdomainname
10835     case TARGET_NR_getdomainname:
10836         return -TARGET_ENOSYS;
10837 #endif
10838 
10839 #ifdef TARGET_NR_clock_settime
10840     case TARGET_NR_clock_settime:
10841     {
10842         struct timespec ts;
10843 
10844         ret = target_to_host_timespec(&ts, arg2);
10845         if (!is_error(ret)) {
10846             ret = get_errno(clock_settime(arg1, &ts));
10847         }
10848         return ret;
10849     }
10850 #endif
10851 #ifdef TARGET_NR_clock_gettime
10852     case TARGET_NR_clock_gettime:
10853     {
10854         struct timespec ts;
10855         ret = get_errno(clock_gettime(arg1, &ts));
10856         if (!is_error(ret)) {
10857             ret = host_to_target_timespec(arg2, &ts);
10858         }
10859         return ret;
10860     }
10861 #endif
10862 #ifdef TARGET_NR_clock_getres
10863     case TARGET_NR_clock_getres:
10864     {
10865         struct timespec ts;
10866         ret = get_errno(clock_getres(arg1, &ts));
10867         if (!is_error(ret)) {
10868             host_to_target_timespec(arg2, &ts);
10869         }
10870         return ret;
10871     }
10872 #endif
10873 #ifdef TARGET_NR_clock_nanosleep
10874     case TARGET_NR_clock_nanosleep:
10875     {
10876         struct timespec ts;
10877         target_to_host_timespec(&ts, arg3);
10878         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10879                                              &ts, arg4 ? &ts : NULL));
10880         if (arg4)
10881             host_to_target_timespec(arg4, &ts);
10882 
10883 #if defined(TARGET_PPC)
10884         /* clock_nanosleep is odd in that it returns positive errno values.
10885          * On PPC, CR0 bit 3 should be set in such a situation. */
10886         if (ret && ret != -TARGET_ERESTARTSYS) {
10887             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10888         }
10889 #endif
10890         return ret;
10891     }
10892 #endif
10893 
10894 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10895     case TARGET_NR_set_tid_address:
10896         return get_errno(set_tid_address((int *)g2h(arg1)));
10897 #endif
10898 
10899     case TARGET_NR_tkill:
10900         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10901 
10902     case TARGET_NR_tgkill:
10903         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10904                          target_to_host_signal(arg3)));
10905 
10906 #ifdef TARGET_NR_set_robust_list
10907     case TARGET_NR_set_robust_list:
10908     case TARGET_NR_get_robust_list:
10909         /* The ABI for supporting robust futexes has userspace pass
10910          * the kernel a pointer to a linked list which is updated by
10911          * userspace after the syscall; the list is walked by the kernel
10912          * when the thread exits. Since the linked list in QEMU guest
10913          * memory isn't a valid linked list for the host and we have
10914          * no way to reliably intercept the thread-death event, we can't
10915          * support these. Silently return ENOSYS so that guest userspace
10916          * falls back to a non-robust futex implementation (which should
10917          * be OK except in the corner case of the guest crashing while
10918          * holding a mutex that is shared with another process via
10919          * shared memory).
10920          */
10921         return -TARGET_ENOSYS;
10922 #endif
10923 
10924 #if defined(TARGET_NR_utimensat)
10925     case TARGET_NR_utimensat:
10926         {
10927             struct timespec *tsp, ts[2];
10928             if (!arg3) {
10929                 tsp = NULL;
10930             } else {
10931                 target_to_host_timespec(ts, arg3);
10932                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10933                 tsp = ts;
10934             }
10935             if (!arg2)
10936                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10937             else {
10938                 if (!(p = lock_user_string(arg2))) {
10939                     return -TARGET_EFAULT;
10940                 }
10941                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10942                 unlock_user(p, arg2, 0);
10943             }
10944         }
10945         return ret;
10946 #endif
10947     case TARGET_NR_futex:
10948         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10949 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10950     case TARGET_NR_inotify_init:
10951         ret = get_errno(sys_inotify_init());
10952         if (ret >= 0) {
10953             fd_trans_register(ret, &target_inotify_trans);
10954         }
10955         return ret;
10956 #endif
10957 #ifdef CONFIG_INOTIFY1
10958 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10959     case TARGET_NR_inotify_init1:
10960         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10961                                           fcntl_flags_tbl)));
10962         if (ret >= 0) {
10963             fd_trans_register(ret, &target_inotify_trans);
10964         }
10965         return ret;
10966 #endif
10967 #endif
10968 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10969     case TARGET_NR_inotify_add_watch:
10970         p = lock_user_string(arg2);
10971         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10972         unlock_user(p, arg2, 0);
10973         return ret;
10974 #endif
10975 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10976     case TARGET_NR_inotify_rm_watch:
10977         return get_errno(sys_inotify_rm_watch(arg1, arg2));
10978 #endif
10979 
10980 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10981     case TARGET_NR_mq_open:
10982         {
10983             struct mq_attr posix_mq_attr;
10984             struct mq_attr *pposix_mq_attr;
10985             int host_flags;
10986 
10987             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10988             pposix_mq_attr = NULL;
10989             if (arg4) {
10990                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10991                     return -TARGET_EFAULT;
10992                 }
10993                 pposix_mq_attr = &posix_mq_attr;
10994             }
10995             p = lock_user_string(arg1 - 1);
10996             if (!p) {
10997                 return -TARGET_EFAULT;
10998             }
10999             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11000             unlock_user (p, arg1, 0);
11001         }
11002         return ret;
11003 
11004     case TARGET_NR_mq_unlink:
11005         p = lock_user_string(arg1 - 1);
11006         if (!p) {
11007             return -TARGET_EFAULT;
11008         }
11009         ret = get_errno(mq_unlink(p));
11010         unlock_user (p, arg1, 0);
11011         return ret;
11012 
11013     case TARGET_NR_mq_timedsend:
11014         {
11015             struct timespec ts;
11016 
11017             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11018             if (arg5 != 0) {
11019                 target_to_host_timespec(&ts, arg5);
11020                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11021                 host_to_target_timespec(arg5, &ts);
11022             } else {
11023                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11024             }
11025             unlock_user (p, arg2, arg3);
11026         }
11027         return ret;
11028 
11029     case TARGET_NR_mq_timedreceive:
11030         {
11031             struct timespec ts;
11032             unsigned int prio;
11033 
11034             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11035             if (arg5 != 0) {
11036                 target_to_host_timespec(&ts, arg5);
11037                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11038                                                      &prio, &ts));
11039                 host_to_target_timespec(arg5, &ts);
11040             } else {
11041                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11042                                                      &prio, NULL));
11043             }
11044             unlock_user (p, arg2, arg3);
11045             if (arg4 != 0)
11046                 put_user_u32(prio, arg4);
11047         }
11048         return ret;
11049 
11050     /* Not implemented for now... */
11051 /*     case TARGET_NR_mq_notify: */
11052 /*         break; */
11053 
11054     case TARGET_NR_mq_getsetattr:
11055         {
11056             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11057             ret = 0;
11058             if (arg2 != 0) {
11059                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11060                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11061                                            &posix_mq_attr_out));
11062             } else if (arg3 != 0) {
11063                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11064             }
11065             if (ret == 0 && arg3 != 0) {
11066                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11067             }
11068         }
11069         return ret;
11070 #endif
11071 
11072 #ifdef CONFIG_SPLICE
11073 #ifdef TARGET_NR_tee
11074     case TARGET_NR_tee:
11075         {
11076             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11077         }
11078         return ret;
11079 #endif
11080 #ifdef TARGET_NR_splice
11081     case TARGET_NR_splice:
11082         {
11083             loff_t loff_in, loff_out;
11084             loff_t *ploff_in = NULL, *ploff_out = NULL;
11085             if (arg2) {
11086                 if (get_user_u64(loff_in, arg2)) {
11087                     return -TARGET_EFAULT;
11088                 }
11089                 ploff_in = &loff_in;
11090             }
11091             if (arg4) {
11092                 if (get_user_u64(loff_out, arg4)) {
11093                     return -TARGET_EFAULT;
11094                 }
11095                 ploff_out = &loff_out;
11096             }
11097             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11098             if (arg2) {
11099                 if (put_user_u64(loff_in, arg2)) {
11100                     return -TARGET_EFAULT;
11101                 }
11102             }
11103             if (arg4) {
11104                 if (put_user_u64(loff_out, arg4)) {
11105                     return -TARGET_EFAULT;
11106                 }
11107             }
11108         }
11109         return ret;
11110 #endif
11111 #ifdef TARGET_NR_vmsplice
11112 	case TARGET_NR_vmsplice:
11113         {
11114             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11115             if (vec != NULL) {
11116                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11117                 unlock_iovec(vec, arg2, arg3, 0);
11118             } else {
11119                 ret = -host_to_target_errno(errno);
11120             }
11121         }
11122         return ret;
11123 #endif
11124 #endif /* CONFIG_SPLICE */
11125 #ifdef CONFIG_EVENTFD
11126 #if defined(TARGET_NR_eventfd)
11127     case TARGET_NR_eventfd:
11128         ret = get_errno(eventfd(arg1, 0));
11129         if (ret >= 0) {
11130             fd_trans_register(ret, &target_eventfd_trans);
11131         }
11132         return ret;
11133 #endif
11134 #if defined(TARGET_NR_eventfd2)
11135     case TARGET_NR_eventfd2:
11136     {
11137         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11138         if (arg2 & TARGET_O_NONBLOCK) {
11139             host_flags |= O_NONBLOCK;
11140         }
11141         if (arg2 & TARGET_O_CLOEXEC) {
11142             host_flags |= O_CLOEXEC;
11143         }
11144         ret = get_errno(eventfd(arg1, host_flags));
11145         if (ret >= 0) {
11146             fd_trans_register(ret, &target_eventfd_trans);
11147         }
11148         return ret;
11149     }
11150 #endif
11151 #endif /* CONFIG_EVENTFD  */
11152 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11153     case TARGET_NR_fallocate:
11154 #if TARGET_ABI_BITS == 32
11155         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11156                                   target_offset64(arg5, arg6)));
11157 #else
11158         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11159 #endif
11160         return ret;
11161 #endif
11162 #if defined(CONFIG_SYNC_FILE_RANGE)
11163 #if defined(TARGET_NR_sync_file_range)
11164     case TARGET_NR_sync_file_range:
11165 #if TARGET_ABI_BITS == 32
11166 #if defined(TARGET_MIPS)
11167         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11168                                         target_offset64(arg5, arg6), arg7));
11169 #else
11170         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11171                                         target_offset64(arg4, arg5), arg6));
11172 #endif /* !TARGET_MIPS */
11173 #else
11174         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11175 #endif
11176         return ret;
11177 #endif
11178 #if defined(TARGET_NR_sync_file_range2)
11179     case TARGET_NR_sync_file_range2:
11180         /* This is like sync_file_range but the arguments are reordered */
11181 #if TARGET_ABI_BITS == 32
11182         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11183                                         target_offset64(arg5, arg6), arg2));
11184 #else
11185         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11186 #endif
11187         return ret;
11188 #endif
11189 #endif
11190 #if defined(TARGET_NR_signalfd4)
11191     case TARGET_NR_signalfd4:
11192         return do_signalfd4(arg1, arg2, arg4);
11193 #endif
11194 #if defined(TARGET_NR_signalfd)
11195     case TARGET_NR_signalfd:
11196         return do_signalfd4(arg1, arg2, 0);
11197 #endif
11198 #if defined(CONFIG_EPOLL)
11199 #if defined(TARGET_NR_epoll_create)
11200     case TARGET_NR_epoll_create:
11201         return get_errno(epoll_create(arg1));
11202 #endif
11203 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11204     case TARGET_NR_epoll_create1:
11205         return get_errno(epoll_create1(arg1));
11206 #endif
11207 #if defined(TARGET_NR_epoll_ctl)
11208     case TARGET_NR_epoll_ctl:
11209     {
11210         struct epoll_event ep;
11211         struct epoll_event *epp = 0;
11212         if (arg4) {
11213             struct target_epoll_event *target_ep;
11214             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11215                 return -TARGET_EFAULT;
11216             }
11217             ep.events = tswap32(target_ep->events);
11218             /* The epoll_data_t union is just opaque data to the kernel,
11219              * so we transfer all 64 bits across and need not worry what
11220              * actual data type it is.
11221              */
11222             ep.data.u64 = tswap64(target_ep->data.u64);
11223             unlock_user_struct(target_ep, arg4, 0);
11224             epp = &ep;
11225         }
11226         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11227     }
11228 #endif
11229 
11230 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11231 #if defined(TARGET_NR_epoll_wait)
11232     case TARGET_NR_epoll_wait:
11233 #endif
11234 #if defined(TARGET_NR_epoll_pwait)
11235     case TARGET_NR_epoll_pwait:
11236 #endif
11237     {
11238         struct target_epoll_event *target_ep;
11239         struct epoll_event *ep;
11240         int epfd = arg1;
11241         int maxevents = arg3;
11242         int timeout = arg4;
11243 
11244         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11245             return -TARGET_EINVAL;
11246         }
11247 
11248         target_ep = lock_user(VERIFY_WRITE, arg2,
11249                               maxevents * sizeof(struct target_epoll_event), 1);
11250         if (!target_ep) {
11251             return -TARGET_EFAULT;
11252         }
11253 
11254         ep = g_try_new(struct epoll_event, maxevents);
11255         if (!ep) {
11256             unlock_user(target_ep, arg2, 0);
11257             return -TARGET_ENOMEM;
11258         }
11259 
11260         switch (num) {
11261 #if defined(TARGET_NR_epoll_pwait)
11262         case TARGET_NR_epoll_pwait:
11263         {
11264             target_sigset_t *target_set;
11265             sigset_t _set, *set = &_set;
11266 
11267             if (arg5) {
11268                 if (arg6 != sizeof(target_sigset_t)) {
11269                     ret = -TARGET_EINVAL;
11270                     break;
11271                 }
11272 
11273                 target_set = lock_user(VERIFY_READ, arg5,
11274                                        sizeof(target_sigset_t), 1);
11275                 if (!target_set) {
11276                     ret = -TARGET_EFAULT;
11277                     break;
11278                 }
11279                 target_to_host_sigset(set, target_set);
11280                 unlock_user(target_set, arg5, 0);
11281             } else {
11282                 set = NULL;
11283             }
11284 
11285             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11286                                              set, SIGSET_T_SIZE));
11287             break;
11288         }
11289 #endif
11290 #if defined(TARGET_NR_epoll_wait)
11291         case TARGET_NR_epoll_wait:
11292             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11293                                              NULL, 0));
11294             break;
11295 #endif
11296         default:
11297             ret = -TARGET_ENOSYS;
11298         }
11299         if (!is_error(ret)) {
11300             int i;
11301             for (i = 0; i < ret; i++) {
11302                 target_ep[i].events = tswap32(ep[i].events);
11303                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11304             }
11305             unlock_user(target_ep, arg2,
11306                         ret * sizeof(struct target_epoll_event));
11307         } else {
11308             unlock_user(target_ep, arg2, 0);
11309         }
11310         g_free(ep);
11311         return ret;
11312     }
11313 #endif
11314 #endif
11315 #ifdef TARGET_NR_prlimit64
11316     case TARGET_NR_prlimit64:
11317     {
11318         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11319         struct target_rlimit64 *target_rnew, *target_rold;
11320         struct host_rlimit64 rnew, rold, *rnewp = 0;
11321         int resource = target_to_host_resource(arg2);
11322         if (arg3) {
11323             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11324                 return -TARGET_EFAULT;
11325             }
11326             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11327             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11328             unlock_user_struct(target_rnew, arg3, 0);
11329             rnewp = &rnew;
11330         }
11331 
11332         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11333         if (!is_error(ret) && arg4) {
11334             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11335                 return -TARGET_EFAULT;
11336             }
11337             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11338             target_rold->rlim_max = tswap64(rold.rlim_max);
11339             unlock_user_struct(target_rold, arg4, 1);
11340         }
11341         return ret;
11342     }
11343 #endif
11344 #ifdef TARGET_NR_gethostname
11345     case TARGET_NR_gethostname:
11346     {
11347         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11348         if (name) {
11349             ret = get_errno(gethostname(name, arg2));
11350             unlock_user(name, arg1, arg2);
11351         } else {
11352             ret = -TARGET_EFAULT;
11353         }
11354         return ret;
11355     }
11356 #endif
11357 #ifdef TARGET_NR_atomic_cmpxchg_32
11358     case TARGET_NR_atomic_cmpxchg_32:
11359     {
11360         /* should use start_exclusive from main.c */
11361         abi_ulong mem_value;
11362         if (get_user_u32(mem_value, arg6)) {
11363             target_siginfo_t info;
11364             info.si_signo = SIGSEGV;
11365             info.si_errno = 0;
11366             info.si_code = TARGET_SEGV_MAPERR;
11367             info._sifields._sigfault._addr = arg6;
11368             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11369                          QEMU_SI_FAULT, &info);
11370             ret = 0xdeadbeef;
11371 
11372         }
11373         if (mem_value == arg2)
11374             put_user_u32(arg1, arg6);
11375         return mem_value;
11376     }
11377 #endif
11378 #ifdef TARGET_NR_atomic_barrier
11379     case TARGET_NR_atomic_barrier:
11380         /* Like the kernel implementation and the
11381            qemu arm barrier, no-op this? */
11382         return 0;
11383 #endif
11384 
11385 #ifdef TARGET_NR_timer_create
11386     case TARGET_NR_timer_create:
11387     {
11388         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11389 
11390         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11391 
11392         int clkid = arg1;
11393         int timer_index = next_free_host_timer();
11394 
11395         if (timer_index < 0) {
11396             ret = -TARGET_EAGAIN;
11397         } else {
11398             timer_t *phtimer = g_posix_timers  + timer_index;
11399 
11400             if (arg2) {
11401                 phost_sevp = &host_sevp;
11402                 ret = target_to_host_sigevent(phost_sevp, arg2);
11403                 if (ret != 0) {
11404                     return ret;
11405                 }
11406             }
11407 
11408             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11409             if (ret) {
11410                 phtimer = NULL;
11411             } else {
11412                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11413                     return -TARGET_EFAULT;
11414                 }
11415             }
11416         }
11417         return ret;
11418     }
11419 #endif
11420 
11421 #ifdef TARGET_NR_timer_settime
11422     case TARGET_NR_timer_settime:
11423     {
11424         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11425          * struct itimerspec * old_value */
11426         target_timer_t timerid = get_timer_id(arg1);
11427 
11428         if (timerid < 0) {
11429             ret = timerid;
11430         } else if (arg3 == 0) {
11431             ret = -TARGET_EINVAL;
11432         } else {
11433             timer_t htimer = g_posix_timers[timerid];
11434             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11435 
11436             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11437                 return -TARGET_EFAULT;
11438             }
11439             ret = get_errno(
11440                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11441             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11442                 return -TARGET_EFAULT;
11443             }
11444         }
11445         return ret;
11446     }
11447 #endif
11448 
11449 #ifdef TARGET_NR_timer_gettime
11450     case TARGET_NR_timer_gettime:
11451     {
11452         /* args: timer_t timerid, struct itimerspec *curr_value */
11453         target_timer_t timerid = get_timer_id(arg1);
11454 
11455         if (timerid < 0) {
11456             ret = timerid;
11457         } else if (!arg2) {
11458             ret = -TARGET_EFAULT;
11459         } else {
11460             timer_t htimer = g_posix_timers[timerid];
11461             struct itimerspec hspec;
11462             ret = get_errno(timer_gettime(htimer, &hspec));
11463 
11464             if (host_to_target_itimerspec(arg2, &hspec)) {
11465                 ret = -TARGET_EFAULT;
11466             }
11467         }
11468         return ret;
11469     }
11470 #endif
11471 
11472 #ifdef TARGET_NR_timer_getoverrun
11473     case TARGET_NR_timer_getoverrun:
11474     {
11475         /* args: timer_t timerid */
11476         target_timer_t timerid = get_timer_id(arg1);
11477 
11478         if (timerid < 0) {
11479             ret = timerid;
11480         } else {
11481             timer_t htimer = g_posix_timers[timerid];
11482             ret = get_errno(timer_getoverrun(htimer));
11483         }
11484         fd_trans_unregister(ret);
11485         return ret;
11486     }
11487 #endif
11488 
11489 #ifdef TARGET_NR_timer_delete
11490     case TARGET_NR_timer_delete:
11491     {
11492         /* args: timer_t timerid */
11493         target_timer_t timerid = get_timer_id(arg1);
11494 
11495         if (timerid < 0) {
11496             ret = timerid;
11497         } else {
11498             timer_t htimer = g_posix_timers[timerid];
11499             ret = get_errno(timer_delete(htimer));
11500             g_posix_timers[timerid] = 0;
11501         }
11502         return ret;
11503     }
11504 #endif
11505 
11506 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11507     case TARGET_NR_timerfd_create:
11508         return get_errno(timerfd_create(arg1,
11509                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11510 #endif
11511 
11512 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11513     case TARGET_NR_timerfd_gettime:
11514         {
11515             struct itimerspec its_curr;
11516 
11517             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11518 
11519             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11520                 return -TARGET_EFAULT;
11521             }
11522         }
11523         return ret;
11524 #endif
11525 
11526 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11527     case TARGET_NR_timerfd_settime:
11528         {
11529             struct itimerspec its_new, its_old, *p_new;
11530 
11531             if (arg3) {
11532                 if (target_to_host_itimerspec(&its_new, arg3)) {
11533                     return -TARGET_EFAULT;
11534                 }
11535                 p_new = &its_new;
11536             } else {
11537                 p_new = NULL;
11538             }
11539 
11540             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11541 
11542             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11543                 return -TARGET_EFAULT;
11544             }
11545         }
11546         return ret;
11547 #endif
11548 
11549 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11550     case TARGET_NR_ioprio_get:
11551         return get_errno(ioprio_get(arg1, arg2));
11552 #endif
11553 
11554 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11555     case TARGET_NR_ioprio_set:
11556         return get_errno(ioprio_set(arg1, arg2, arg3));
11557 #endif
11558 
11559 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11560     case TARGET_NR_setns:
11561         return get_errno(setns(arg1, arg2));
11562 #endif
11563 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11564     case TARGET_NR_unshare:
11565         return get_errno(unshare(arg1));
11566 #endif
11567 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11568     case TARGET_NR_kcmp:
11569         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11570 #endif
11571 #ifdef TARGET_NR_swapcontext
11572     case TARGET_NR_swapcontext:
11573         /* PowerPC specific.  */
11574         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11575 #endif
11576 
11577     default:
11578         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11579         return -TARGET_ENOSYS;
11580     }
11581     return ret;
11582 }
11583 
11584 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11585                     abi_long arg2, abi_long arg3, abi_long arg4,
11586                     abi_long arg5, abi_long arg6, abi_long arg7,
11587                     abi_long arg8)
11588 {
11589     CPUState *cpu = ENV_GET_CPU(cpu_env);
11590     abi_long ret;
11591 
11592 #ifdef DEBUG_ERESTARTSYS
11593     /* Debug-only code for exercising the syscall-restart code paths
11594      * in the per-architecture cpu main loops: restart every syscall
11595      * the guest makes once before letting it through.
11596      */
11597     {
11598         static bool flag;
11599         flag = !flag;
11600         if (flag) {
11601             return -TARGET_ERESTARTSYS;
11602         }
11603     }
11604 #endif
11605 
11606     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11607                              arg5, arg6, arg7, arg8);
11608 
11609     if (unlikely(do_strace)) {
11610         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11611         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11612                           arg5, arg6, arg7, arg8);
11613         print_syscall_ret(num, ret);
11614     } else {
11615         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11616                           arg5, arg6, arg7, arg8);
11617     }
11618 
11619     trace_guest_user_syscall_ret(cpu, num, ret);
11620     return ret;
11621 }
11622