xref: /openbmc/qemu/linux-user/syscall.c (revision 49d755d0)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef CONFIG_EVENTFD
63 #include <sys/eventfd.h>
64 #endif
65 #ifdef CONFIG_EPOLL
66 #include <sys/epoll.h>
67 #endif
68 #ifdef CONFIG_ATTR
69 #include "qemu/xattr.h"
70 #endif
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
73 #endif
74 
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
81 
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
87 #include <linux/kd.h>
88 #include <linux/mtio.h>
89 #include <linux/fs.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
92 #endif
93 #include <linux/fb.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
97 #endif
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
113 
114 #ifndef CLONE_IO
115 #define CLONE_IO                0x80000000      /* Clone io context */
116 #endif
117 
118 /* We can't directly call the host clone syscall, because this will
119  * badly confuse libc (breaking mutexes, for example). So we must
120  * divide clone flags into:
121  *  * flag combinations that look like pthread_create()
122  *  * flag combinations that look like fork()
123  *  * flags we can implement within QEMU itself
124  *  * flags we can't support and will return an error for
125  */
126 /* For thread creation, all these flags must be present; for
127  * fork, none must be present.
128  */
129 #define CLONE_THREAD_FLAGS                              \
130     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
131      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
132 
133 /* These flags are ignored:
134  * CLONE_DETACHED is now ignored by the kernel;
135  * CLONE_IO is just an optimisation hint to the I/O scheduler
136  */
137 #define CLONE_IGNORED_FLAGS                     \
138     (CLONE_DETACHED | CLONE_IO)
139 
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS               \
142     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
143      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
144 
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
149 
150 #define CLONE_INVALID_FORK_FLAGS                                        \
151     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
152 
153 #define CLONE_INVALID_THREAD_FLAGS                                      \
154     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
155        CLONE_IGNORED_FLAGS))
156 
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158  * have almost all been allocated. We cannot support any of
159  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161  * The checks against the invalid thread masks above will catch these.
162  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
163  */
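
/*
 * Illustrative sketch only: how the masks above combine.  This helper and
 * its name are hypothetical (do_fork() performs the real checks); it just
 * restates "all thread flags present => pthread_create()-like, otherwise
 * fork()-like, and anything outside the allowed masks is rejected".
 */
static inline bool clone_flags_look_supported(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): only thread-compatible bits allowed */
        return (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
    }
    /* Looks like fork(): CLONE_VM and friends are already part of the
     * invalid-fork mask, so a single test covers "none must be present". */
    return (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}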
164 
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166  * once. This exercises the codepaths for restart.
167  */
168 //#define DEBUG_ERESTARTSYS
169 
170 //#include <linux/msdos_fs.h>
171 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
172 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
173 
174 #undef _syscall0
175 #undef _syscall1
176 #undef _syscall2
177 #undef _syscall3
178 #undef _syscall4
179 #undef _syscall5
180 #undef _syscall6
181 
182 #define _syscall0(type,name)		\
183 static type name (void)			\
184 {					\
185 	return syscall(__NR_##name);	\
186 }
187 
188 #define _syscall1(type,name,type1,arg1)		\
189 static type name (type1 arg1)			\
190 {						\
191 	return syscall(__NR_##name, arg1);	\
192 }
193 
194 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
195 static type name (type1 arg1,type2 arg2)		\
196 {							\
197 	return syscall(__NR_##name, arg1, arg2);	\
198 }
199 
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
201 static type name (type1 arg1,type2 arg2,type3 arg3)		\
202 {								\
203 	return syscall(__NR_##name, arg1, arg2, arg3);		\
204 }
205 
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
208 {										\
209 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
210 }
211 
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
213 		  type5,arg5)							\
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
215 {										\
216 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
217 }
218 
219 
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
221 		  type5,arg5,type6,arg6)					\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
223                   type6 arg6)							\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
226 }
227 
228 
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
244 #endif
245 
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
249 #endif
250 
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid)
253 
254 /* For the 64-bit guest on 32-bit host case we must emulate
255  * getdents using getdents64, because otherwise the host
256  * might hand us back more dirent records than we can fit
257  * into the guest buffer after structure format conversion.
258  * Otherwise we implement guest getdents on top of host getdents, if the
259  * host has it.
259  */
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
262 #endif
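
/* Concretely: a 64-bit guest's struct linux_dirent carries 64-bit
 * d_ino/d_off fields, while a 32-bit host's getdents returns records with
 * 32-bit ones, so every record grows during conversion and data that fit
 * the host's idea of "count" bytes may no longer fit the guest buffer.
 */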
263 
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if (defined(TARGET_NR_getdents) && \
268       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
271 #endif
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
274           loff_t *, res, uint, wh);
275 #endif
276 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
277 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
278           siginfo_t *, uinfo)
279 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group,int,error_code)
282 #endif
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address,int *,tidptr)
285 #endif
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
288           const struct timespec *,timeout,int *,uaddr2,int,val3)
289 #endif
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
292           unsigned long *, user_mask_ptr);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
295           unsigned long *, user_mask_ptr);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
298 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
299           void *, arg);
300 _syscall2(int, capget, struct __user_cap_header_struct *, header,
301           struct __user_cap_data_struct *, data);
302 _syscall2(int, capset, struct __user_cap_header_struct *, header,
303           struct __user_cap_data_struct *, data);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get, int, which, int, who)
306 #endif
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
309 #endif
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
312 #endif
313 
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
316           unsigned long, idx1, unsigned long, idx2)
317 #endif
318 
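/*
 * Each row maps one guest open() flag onto its host equivalent:
 * { target mask, target value, host mask, host value }.  A guest flags word
 * is translated by testing it against each target mask and OR-ing in the
 * matching host value (see target_to_host_bitmask(), defined later in this
 * file).
 */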
319 static bitmask_transtbl fcntl_flags_tbl[] = {
320   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
321   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
322   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
323   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
324   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
325   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
326   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
327   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
328   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
329   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
330   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
331   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
332   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
333 #if defined(O_DIRECT)
334   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
335 #endif
336 #if defined(O_NOATIME)
337   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
338 #endif
339 #if defined(O_CLOEXEC)
340   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
341 #endif
342 #if defined(O_PATH)
343   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
344 #endif
345 #if defined(O_TMPFILE)
346   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
347 #endif
348   /* Don't terminate the list prematurely on 64-bit host+guest.  */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
351 #endif
352   { 0, 0, 0, 0 }
353 };
354 
355 static int sys_getcwd1(char *buf, size_t size)
356 {
357   if (getcwd(buf, size) == NULL) {
358       /* getcwd() sets errno */
359       return (-1);
360   }
361   return strlen(buf)+1;
362 }
363 
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
368           const struct timespec *,tsp,int,flags)
369 #else
370 static int sys_utimensat(int dirfd, const char *pathname,
371                          const struct timespec times[2], int flags)
372 {
373     errno = ENOSYS;
374     return -1;
375 }
376 #endif
377 #endif /* TARGET_NR_utimensat */
378 
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
383           const char *, new, unsigned int, flags)
384 #else
385 static int sys_renameat2(int oldfd, const char *old,
386                          int newfd, const char *new, int flags)
387 {
388     if (flags == 0) {
389         return renameat(oldfd, old, newfd, new);
390     }
391     errno = ENOSYS;
392     return -1;
393 }
394 #endif
395 #endif /* TARGET_NR_renameat2 */
396 
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
399 
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
401 static int sys_inotify_init(void)
402 {
403   return (inotify_init());
404 }
405 #endif
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
407 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
408 {
409   return (inotify_add_watch(fd, pathname, mask));
410 }
411 #endif
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
413 static int sys_inotify_rm_watch(int fd, int32_t wd)
414 {
415   return (inotify_rm_watch(fd, wd));
416 }
417 #endif
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
420 static int sys_inotify_init1(int flags)
421 {
422   return (inotify_init1(flags));
423 }
424 #endif
425 #endif
426 #else
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY  */
433 
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
437 #endif
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not match the one used by the underlying syscall */
440 struct host_rlimit64 {
441     uint64_t rlim_cur;
442     uint64_t rlim_max;
443 };
444 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
445           const struct host_rlimit64 *, new_limit,
446           struct host_rlimit64 *, old_limit)
447 #endif
448 
449 
450 #if defined(TARGET_NR_timer_create)
451 /* Maximum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers[32] = { 0, } ;
453 
454 static inline int next_free_host_timer(void)
455 {
456     int k ;
457     /* FIXME: Does finding the next free slot require a lock? */
458     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
459         if (g_posix_timers[k] == 0) {
460             g_posix_timers[k] = (timer_t) 1;
461             return k;
462         }
463     }
464     return -1;
465 }
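
/*
 * Illustrative sketch only, addressing the FIXME above: the slot could be
 * claimed with a compare-and-swap instead of a plain store.  The helper name
 * and the use of the GCC/Clang __sync builtin are assumptions made for this
 * example; the emulation does not use it.
 */
static inline int next_free_host_timer_atomic(void)
{
    int k;

    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        /* atomically claim slot k if it is still unused (0) */
        if (__sync_bool_compare_and_swap(&g_posix_timers[k],
                                         (timer_t)0, (timer_t)1)) {
            return k;
        }
    }
    return -1;
}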
466 #endif
467 
468 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
469 #ifdef TARGET_ARM
470 static inline int regpairs_aligned(void *cpu_env, int num)
471 {
472     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
473 }
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
478  * of registers, which translates to the same rule as ARM/MIPS because we start
479  * with r3 as arg1 */
480 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     switch (num) {
486     case TARGET_NR_pread64:
487     case TARGET_NR_pwrite64:
488         return 1;
489 
490     default:
491         return 0;
492     }
493 }
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
496 #else
497 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
498 #endif
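
/*
 * Illustrative sketch (hypothetical helper, assuming TARGET_ABI_BITS == 32):
 * when regpairs_aligned() returns 1, a 64-bit argument such as the offset of
 * pread64 arrives in an aligned register pair (skipping one argument slot if
 * necessary) and is then reassembled from its two 32-bit halves:
 */
#if TARGET_ABI_BITS == 32
static inline uint64_t example_regpair_to_u64(abi_ulong word0, abi_ulong word1)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#endif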
499 
500 #define ERRNO_TABLE_SIZE 1200
501 
502 /* target_to_host_errno_table[] is initialized from
503  * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
505 };
506 
507 /*
508  * This list is the union of errno values overridden in asm-<arch>/errno.h
509  * minus the errnos that are not actually generic to all archs.
510  */
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512     [EAGAIN]		= TARGET_EAGAIN,
513     [EIDRM]		= TARGET_EIDRM,
514     [ECHRNG]		= TARGET_ECHRNG,
515     [EL2NSYNC]		= TARGET_EL2NSYNC,
516     [EL3HLT]		= TARGET_EL3HLT,
517     [EL3RST]		= TARGET_EL3RST,
518     [ELNRNG]		= TARGET_ELNRNG,
519     [EUNATCH]		= TARGET_EUNATCH,
520     [ENOCSI]		= TARGET_ENOCSI,
521     [EL2HLT]		= TARGET_EL2HLT,
522     [EDEADLK]		= TARGET_EDEADLK,
523     [ENOLCK]		= TARGET_ENOLCK,
524     [EBADE]		= TARGET_EBADE,
525     [EBADR]		= TARGET_EBADR,
526     [EXFULL]		= TARGET_EXFULL,
527     [ENOANO]		= TARGET_ENOANO,
528     [EBADRQC]		= TARGET_EBADRQC,
529     [EBADSLT]		= TARGET_EBADSLT,
530     [EBFONT]		= TARGET_EBFONT,
531     [ENOSTR]		= TARGET_ENOSTR,
532     [ENODATA]		= TARGET_ENODATA,
533     [ETIME]		= TARGET_ETIME,
534     [ENOSR]		= TARGET_ENOSR,
535     [ENONET]		= TARGET_ENONET,
536     [ENOPKG]		= TARGET_ENOPKG,
537     [EREMOTE]		= TARGET_EREMOTE,
538     [ENOLINK]		= TARGET_ENOLINK,
539     [EADV]		= TARGET_EADV,
540     [ESRMNT]		= TARGET_ESRMNT,
541     [ECOMM]		= TARGET_ECOMM,
542     [EPROTO]		= TARGET_EPROTO,
543     [EDOTDOT]		= TARGET_EDOTDOT,
544     [EMULTIHOP]		= TARGET_EMULTIHOP,
545     [EBADMSG]		= TARGET_EBADMSG,
546     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
547     [EOVERFLOW]		= TARGET_EOVERFLOW,
548     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
549     [EBADFD]		= TARGET_EBADFD,
550     [EREMCHG]		= TARGET_EREMCHG,
551     [ELIBACC]		= TARGET_ELIBACC,
552     [ELIBBAD]		= TARGET_ELIBBAD,
553     [ELIBSCN]		= TARGET_ELIBSCN,
554     [ELIBMAX]		= TARGET_ELIBMAX,
555     [ELIBEXEC]		= TARGET_ELIBEXEC,
556     [EILSEQ]		= TARGET_EILSEQ,
557     [ENOSYS]		= TARGET_ENOSYS,
558     [ELOOP]		= TARGET_ELOOP,
559     [ERESTART]		= TARGET_ERESTART,
560     [ESTRPIPE]		= TARGET_ESTRPIPE,
561     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
562     [EUSERS]		= TARGET_EUSERS,
563     [ENOTSOCK]		= TARGET_ENOTSOCK,
564     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
565     [EMSGSIZE]		= TARGET_EMSGSIZE,
566     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
567     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
568     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
569     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
570     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
571     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
572     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
573     [EADDRINUSE]	= TARGET_EADDRINUSE,
574     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
575     [ENETDOWN]		= TARGET_ENETDOWN,
576     [ENETUNREACH]	= TARGET_ENETUNREACH,
577     [ENETRESET]		= TARGET_ENETRESET,
578     [ECONNABORTED]	= TARGET_ECONNABORTED,
579     [ECONNRESET]	= TARGET_ECONNRESET,
580     [ENOBUFS]		= TARGET_ENOBUFS,
581     [EISCONN]		= TARGET_EISCONN,
582     [ENOTCONN]		= TARGET_ENOTCONN,
583     [EUCLEAN]		= TARGET_EUCLEAN,
584     [ENOTNAM]		= TARGET_ENOTNAM,
585     [ENAVAIL]		= TARGET_ENAVAIL,
586     [EISNAM]		= TARGET_EISNAM,
587     [EREMOTEIO]		= TARGET_EREMOTEIO,
588     [EDQUOT]            = TARGET_EDQUOT,
589     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
590     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
591     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
592     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
593     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
594     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
595     [EALREADY]		= TARGET_EALREADY,
596     [EINPROGRESS]	= TARGET_EINPROGRESS,
597     [ESTALE]		= TARGET_ESTALE,
598     [ECANCELED]		= TARGET_ECANCELED,
599     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
600     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
601 #ifdef ENOKEY
602     [ENOKEY]		= TARGET_ENOKEY,
603 #endif
604 #ifdef EKEYEXPIRED
605     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
606 #endif
607 #ifdef EKEYREVOKED
608     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
609 #endif
610 #ifdef EKEYREJECTED
611     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
612 #endif
613 #ifdef EOWNERDEAD
614     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
615 #endif
616 #ifdef ENOTRECOVERABLE
617     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
618 #endif
619 #ifdef ENOMSG
620     [ENOMSG]            = TARGET_ENOMSG,
621 #endif
622 #ifdef ERFKILL
623     [ERFKILL]           = TARGET_ERFKILL,
624 #endif
625 #ifdef EHWPOISON
626     [EHWPOISON]         = TARGET_EHWPOISON,
627 #endif
628 };
629 
630 static inline int host_to_target_errno(int err)
631 {
632     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
633         host_to_target_errno_table[err]) {
634         return host_to_target_errno_table[err];
635     }
636     return err;
637 }
638 
639 static inline int target_to_host_errno(int err)
640 {
641     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
642         target_to_host_errno_table[err]) {
643         return target_to_host_errno_table[err];
644     }
645     return err;
646 }
647 
648 static inline abi_long get_errno(abi_long ret)
649 {
650     if (ret == -1)
651         return -host_to_target_errno(errno);
652     else
653         return ret;
654 }
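
/*
 * Typical usage, as in do_select() and do_pipe() below (illustrative only;
 * "some_host_syscall" stands for any host libc or syscall wrapper):
 *
 *     abi_long ret = get_errno(some_host_syscall(args));
 *     if (is_error(ret)) {
 *         return ret;   <- already a negated target errno, e.g. -TARGET_ENOENT
 *     }
 */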
655 
656 const char *target_strerror(int err)
657 {
658     if (err == TARGET_ERESTARTSYS) {
659         return "To be restarted";
660     }
661     if (err == TARGET_QEMU_ESIGRETURN) {
662         return "Successful exit from sigreturn";
663     }
664 
665     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
666         return NULL;
667     }
668     return strerror(target_to_host_errno(err));
669 }
670 
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
673 { \
674     return safe_syscall(__NR_##name); \
675 }
676 
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
679 { \
680     return safe_syscall(__NR_##name, arg1); \
681 }
682 
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
685 { \
686     return safe_syscall(__NR_##name, arg1, arg2); \
687 }
688 
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
691 { \
692     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
693 }
694 
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
696     type4, arg4) \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
700 }
701 
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703     type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
705     type5 arg5) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
708 }
709 
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713     type5 arg5, type6 arg6) \
714 { \
715     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
716 }
717 
718 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
719 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
720 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
721               int, flags, mode_t, mode)
722 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
723               struct rusage *, rusage)
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
728               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
729 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
730               struct timespec *, tsp, const sigset_t *, sigmask,
731               size_t, sigsetsize)
732 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
733               int, maxevents, int, timeout, const sigset_t *, sigmask,
734               size_t, sigsetsize)
735 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
736               const struct timespec *,timeout,int *,uaddr2,int,val3)
737 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
738 safe_syscall2(int, kill, pid_t, pid, int, sig)
739 safe_syscall2(int, tkill, int, tid, int, sig)
740 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
741 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
746               unsigned long, pos_l, unsigned long, pos_h)
747 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
748               socklen_t, addrlen)
749 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
750               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
751 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
752               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
753 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
754 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
755 safe_syscall2(int, flock, int, fd, int, operation)
756 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
757               const struct timespec *, uts, size_t, sigsetsize)
758 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
759               int, flags)
760 safe_syscall2(int, nanosleep, const struct timespec *, req,
761               struct timespec *, rem)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
764               const struct timespec *, req, struct timespec *, rem)
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
770               long, msgtype, int, flags)
771 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
772               unsigned, nsops, const struct timespec *, timeout)
773 #else
774 /* This host kernel architecture uses a single ipc syscall; fake up
775  * wrappers for the sub-operations to hide this implementation detail.
776  * Annoyingly we can't include linux/ipc.h to get the constant definitions
777  * for the call parameter because some structs in there conflict with the
778  * sys/ipc.h ones. So we just define them here, and rely on them being
779  * the same for all host architectures.
780  */
781 #define Q_SEMTIMEDOP 4
782 #define Q_MSGSND 11
783 #define Q_MSGRCV 12
784 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
785 
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
789 {
790     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
791 }
792 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
793 {
794     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
795 }
796 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
797                            const struct timespec *timeout)
798 {
799     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
800                     (long)timeout);
801 }
802 #endif
803 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
804 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
805               size_t, len, unsigned, prio, const struct timespec *, timeout)
806 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
807               size_t, len, unsigned *, prio, const struct timespec *, timeout)
808 #endif
809 /* We do ioctl like this rather than via safe_syscall3 to preserve the
810  * "third argument might be integer or pointer or not present" behaviour of
811  * the libc function.
812  */
813 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
814 /* Similarly for fcntl. Note that callers must always:
815  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
816  *  use the flock64 struct rather than unsuffixed flock
817  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
818  */
819 #ifdef __NR_fcntl64
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
821 #else
822 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
823 #endif
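
/*
 * Illustrative sketch (hypothetical helper, not used by the emulation):
 * following the rule above, callers pass F_GETLK64 and struct flock64 so the
 * same code uses 64-bit file offsets on both 32-bit and 64-bit hosts.
 */
static inline int example_safe_getlk(int fd, struct flock64 *fl)
{
    return safe_fcntl(fd, F_GETLK64, fl);
}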
824 
825 static inline int host_to_target_sock_type(int host_type)
826 {
827     int target_type;
828 
829     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
830     case SOCK_DGRAM:
831         target_type = TARGET_SOCK_DGRAM;
832         break;
833     case SOCK_STREAM:
834         target_type = TARGET_SOCK_STREAM;
835         break;
836     default:
837         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
838         break;
839     }
840 
841 #if defined(SOCK_CLOEXEC)
842     if (host_type & SOCK_CLOEXEC) {
843         target_type |= TARGET_SOCK_CLOEXEC;
844     }
845 #endif
846 
847 #if defined(SOCK_NONBLOCK)
848     if (host_type & SOCK_NONBLOCK) {
849         target_type |= TARGET_SOCK_NONBLOCK;
850     }
851 #endif
852 
853     return target_type;
854 }
855 
856 static abi_ulong target_brk;
857 static abi_ulong target_original_brk;
858 static abi_ulong brk_page;
859 
860 void target_set_brk(abi_ulong new_brk)
861 {
862     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
863     brk_page = HOST_PAGE_ALIGN(target_brk);
864 }
865 
866 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
867 #define DEBUGF_BRK(message, args...)
868 
869 /* do_brk() must return target values and target errnos. */
870 abi_long do_brk(abi_ulong new_brk)
871 {
872     abi_long mapped_addr;
873     abi_ulong new_alloc_size;
874 
875     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
876 
877     if (!new_brk) {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
879         return target_brk;
880     }
881     if (new_brk < target_original_brk) {
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
883                    target_brk);
884         return target_brk;
885     }
886 
887     /* If the new brk is less than the highest page reserved to the
888      * target heap allocation, set it and we're almost done...  */
889     if (new_brk <= brk_page) {
890         /* Heap contents are initialized to zero, as for anonymous
891          * mapped pages.  */
892         if (new_brk > target_brk) {
893             memset(g2h(target_brk), 0, new_brk - target_brk);
894         }
895 	target_brk = new_brk;
896         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
897 	return target_brk;
898     }
899 
900     /* We need to allocate more memory after the brk... Note that
901      * we don't use MAP_FIXED because that will map over the top of
902      * any existing mapping (like the one with the host libc or qemu
903      * itself); instead we treat "mapped but at wrong address" as
904      * a failure and unmap again.
905      */
906     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
907     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
908                                         PROT_READ|PROT_WRITE,
909                                         MAP_ANON|MAP_PRIVATE, 0, 0));
910 
911     if (mapped_addr == brk_page) {
912         /* Heap contents are initialized to zero, as for anonymous
913          * mapped pages.  Technically the new pages are already
914          * initialized to zero since they *are* anonymous mapped
915          * pages, however we have to take care with the contents that
916          * come from the remaining part of the previous page: it may
917  * contain garbage data from a previous heap usage (grown
918  * then shrunk).  */
919         memset(g2h(target_brk), 0, brk_page - target_brk);
920 
921         target_brk = new_brk;
922         brk_page = HOST_PAGE_ALIGN(target_brk);
923         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
924             target_brk);
925         return target_brk;
926     } else if (mapped_addr != -1) {
927         /* Mapped but at wrong address, meaning there wasn't actually
928          * enough space for this brk.
929          */
930         target_munmap(mapped_addr, new_alloc_size);
931         mapped_addr = -1;
932         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
933     }
934     else {
935         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
936     }
937 
938 #if defined(TARGET_ALPHA)
939     /* We (partially) emulate OSF/1 on Alpha, which requires we
940        return a proper errno, not an unchanged brk value.  */
941     return -TARGET_ENOMEM;
942 #endif
943     /* For everything else, return the previous break. */
944     return target_brk;
945 }
946 
947 static inline abi_long copy_from_user_fdset(fd_set *fds,
948                                             abi_ulong target_fds_addr,
949                                             int n)
950 {
951     int i, nw, j, k;
952     abi_ulong b, *target_fds;
953 
954     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
955     if (!(target_fds = lock_user(VERIFY_READ,
956                                  target_fds_addr,
957                                  sizeof(abi_ulong) * nw,
958                                  1)))
959         return -TARGET_EFAULT;
960 
961     FD_ZERO(fds);
962     k = 0;
963     for (i = 0; i < nw; i++) {
964         /* grab the abi_ulong */
965         __get_user(b, &target_fds[i]);
966         for (j = 0; j < TARGET_ABI_BITS; j++) {
967             /* check the bit inside the abi_ulong */
968             if ((b >> j) & 1)
969                 FD_SET(k, fds);
970             k++;
971         }
972     }
973 
974     unlock_user(target_fds, target_fds_addr, 0);
975 
976     return 0;
977 }
978 
979 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
980                                                  abi_ulong target_fds_addr,
981                                                  int n)
982 {
983     if (target_fds_addr) {
984         if (copy_from_user_fdset(fds, target_fds_addr, n))
985             return -TARGET_EFAULT;
986         *fds_ptr = fds;
987     } else {
988         *fds_ptr = NULL;
989     }
990     return 0;
991 }
992 
993 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
994                                           const fd_set *fds,
995                                           int n)
996 {
997     int i, nw, j, k;
998     abi_long v;
999     abi_ulong *target_fds;
1000 
1001     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1002     if (!(target_fds = lock_user(VERIFY_WRITE,
1003                                  target_fds_addr,
1004                                  sizeof(abi_ulong) * nw,
1005                                  0)))
1006         return -TARGET_EFAULT;
1007 
1008     k = 0;
1009     for (i = 0; i < nw; i++) {
1010         v = 0;
1011         for (j = 0; j < TARGET_ABI_BITS; j++) {
1012             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1013             k++;
1014         }
1015         __put_user(v, &target_fds[i]);
1016     }
1017 
1018     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1019 
1020     return 0;
1021 }
1022 
1023 #if defined(__alpha__)
1024 #define HOST_HZ 1024
1025 #else
1026 #define HOST_HZ 100
1027 #endif
1028 
1029 static inline abi_long host_to_target_clock_t(long ticks)
1030 {
1031 #if HOST_HZ == TARGET_HZ
1032     return ticks;
1033 #else
1034     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1035 #endif
1036 }
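
/* For example, on an Alpha host (HOST_HZ == 1024) reporting to a target with
 * TARGET_HZ == 100, 2048 host ticks convert to (2048 * 100) / 1024 = 200
 * target ticks.
 */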
1037 
1038 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1039                                              const struct rusage *rusage)
1040 {
1041     struct target_rusage *target_rusage;
1042 
1043     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1044         return -TARGET_EFAULT;
1045     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1046     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1047     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1048     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1049     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1050     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1051     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1052     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1053     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1054     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1055     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1056     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1057     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1058     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1059     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1060     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1061     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1062     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1063     unlock_user_struct(target_rusage, target_addr, 1);
1064 
1065     return 0;
1066 }
1067 
1068 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1069 {
1070     abi_ulong target_rlim_swap;
1071     rlim_t result;
1072 
1073     target_rlim_swap = tswapal(target_rlim);
1074     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1075         return RLIM_INFINITY;
1076 
1077     result = target_rlim_swap;
1078     if (target_rlim_swap != (rlim_t)result)
1079         return RLIM_INFINITY;
1080 
1081     return result;
1082 }
1083 
1084 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1085 {
1086     abi_ulong target_rlim_swap;
1087     abi_ulong result;
1088 
1089     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1090         target_rlim_swap = TARGET_RLIM_INFINITY;
1091     else
1092         target_rlim_swap = rlim;
1093     result = tswapal(target_rlim_swap);
1094 
1095     return result;
1096 }
1097 
1098 static inline int target_to_host_resource(int code)
1099 {
1100     switch (code) {
1101     case TARGET_RLIMIT_AS:
1102         return RLIMIT_AS;
1103     case TARGET_RLIMIT_CORE:
1104         return RLIMIT_CORE;
1105     case TARGET_RLIMIT_CPU:
1106         return RLIMIT_CPU;
1107     case TARGET_RLIMIT_DATA:
1108         return RLIMIT_DATA;
1109     case TARGET_RLIMIT_FSIZE:
1110         return RLIMIT_FSIZE;
1111     case TARGET_RLIMIT_LOCKS:
1112         return RLIMIT_LOCKS;
1113     case TARGET_RLIMIT_MEMLOCK:
1114         return RLIMIT_MEMLOCK;
1115     case TARGET_RLIMIT_MSGQUEUE:
1116         return RLIMIT_MSGQUEUE;
1117     case TARGET_RLIMIT_NICE:
1118         return RLIMIT_NICE;
1119     case TARGET_RLIMIT_NOFILE:
1120         return RLIMIT_NOFILE;
1121     case TARGET_RLIMIT_NPROC:
1122         return RLIMIT_NPROC;
1123     case TARGET_RLIMIT_RSS:
1124         return RLIMIT_RSS;
1125     case TARGET_RLIMIT_RTPRIO:
1126         return RLIMIT_RTPRIO;
1127     case TARGET_RLIMIT_SIGPENDING:
1128         return RLIMIT_SIGPENDING;
1129     case TARGET_RLIMIT_STACK:
1130         return RLIMIT_STACK;
1131     default:
1132         return code;
1133     }
1134 }
1135 
1136 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1137                                               abi_ulong target_tv_addr)
1138 {
1139     struct target_timeval *target_tv;
1140 
1141     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1142         return -TARGET_EFAULT;
1143 
1144     __get_user(tv->tv_sec, &target_tv->tv_sec);
1145     __get_user(tv->tv_usec, &target_tv->tv_usec);
1146 
1147     unlock_user_struct(target_tv, target_tv_addr, 0);
1148 
1149     return 0;
1150 }
1151 
1152 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1153                                             const struct timeval *tv)
1154 {
1155     struct target_timeval *target_tv;
1156 
1157     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1158         return -TARGET_EFAULT;
1159 
1160     __put_user(tv->tv_sec, &target_tv->tv_sec);
1161     __put_user(tv->tv_usec, &target_tv->tv_usec);
1162 
1163     unlock_user_struct(target_tv, target_tv_addr, 1);
1164 
1165     return 0;
1166 }
1167 
1168 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1169                                                abi_ulong target_tz_addr)
1170 {
1171     struct target_timezone *target_tz;
1172 
1173     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1178     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1179 
1180     unlock_user_struct(target_tz, target_tz_addr, 0);
1181 
1182     return 0;
1183 }
1184 
1185 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1186 #include <mqueue.h>
1187 
1188 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1189                                               abi_ulong target_mq_attr_addr)
1190 {
1191     struct target_mq_attr *target_mq_attr;
1192 
1193     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1194                           target_mq_attr_addr, 1))
1195         return -TARGET_EFAULT;
1196 
1197     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1198     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1199     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1200     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1201 
1202     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1203 
1204     return 0;
1205 }
1206 
1207 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1208                                             const struct mq_attr *attr)
1209 {
1210     struct target_mq_attr *target_mq_attr;
1211 
1212     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1213                           target_mq_attr_addr, 0))
1214         return -TARGET_EFAULT;
1215 
1216     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1217     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1218     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1219     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1220 
1221     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1222 
1223     return 0;
1224 }
1225 #endif
1226 
1227 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1228 /* do_select() must return target values and target errnos. */
1229 static abi_long do_select(int n,
1230                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1231                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1232 {
1233     fd_set rfds, wfds, efds;
1234     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1235     struct timeval tv;
1236     struct timespec ts, *ts_ptr;
1237     abi_long ret;
1238 
1239     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1240     if (ret) {
1241         return ret;
1242     }
1243     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1244     if (ret) {
1245         return ret;
1246     }
1247     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1248     if (ret) {
1249         return ret;
1250     }
1251 
1252     if (target_tv_addr) {
1253         if (copy_from_user_timeval(&tv, target_tv_addr))
1254             return -TARGET_EFAULT;
1255         ts.tv_sec = tv.tv_sec;
1256         ts.tv_nsec = tv.tv_usec * 1000;
1257         ts_ptr = &ts;
1258     } else {
1259         ts_ptr = NULL;
1260     }
1261 
1262     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1263                                   ts_ptr, NULL));
1264 
1265     if (!is_error(ret)) {
1266         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1267             return -TARGET_EFAULT;
1268         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1269             return -TARGET_EFAULT;
1270         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1271             return -TARGET_EFAULT;
1272 
1273         if (target_tv_addr) {
1274             tv.tv_sec = ts.tv_sec;
1275             tv.tv_usec = ts.tv_nsec / 1000;
1276             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1277                 return -TARGET_EFAULT;
1278             }
1279         }
1280     }
1281 
1282     return ret;
1283 }
1284 
1285 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1286 static abi_long do_old_select(abi_ulong arg1)
1287 {
1288     struct target_sel_arg_struct *sel;
1289     abi_ulong inp, outp, exp, tvp;
1290     long nsel;
1291 
1292     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1293         return -TARGET_EFAULT;
1294     }
1295 
1296     nsel = tswapal(sel->n);
1297     inp = tswapal(sel->inp);
1298     outp = tswapal(sel->outp);
1299     exp = tswapal(sel->exp);
1300     tvp = tswapal(sel->tvp);
1301 
1302     unlock_user_struct(sel, arg1, 0);
1303 
1304     return do_select(nsel, inp, outp, exp, tvp);
1305 }
1306 #endif
1307 #endif
1308 
1309 static abi_long do_pipe2(int host_pipe[], int flags)
1310 {
1311 #ifdef CONFIG_PIPE2
1312     return pipe2(host_pipe, flags);
1313 #else
1314     return -ENOSYS;
1315 #endif
1316 }
1317 
1318 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1319                         int flags, int is_pipe2)
1320 {
1321     int host_pipe[2];
1322     abi_long ret;
1323     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1324 
1325     if (is_error(ret))
1326         return get_errno(ret);
1327 
1328     /* Several targets have special calling conventions for the original
1329        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1330     if (!is_pipe2) {
1331 #if defined(TARGET_ALPHA)
1332         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1333         return host_pipe[0];
1334 #elif defined(TARGET_MIPS)
1335         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1336         return host_pipe[0];
1337 #elif defined(TARGET_SH4)
1338         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1339         return host_pipe[0];
1340 #elif defined(TARGET_SPARC)
1341         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1342         return host_pipe[0];
1343 #endif
1344     }
1345 
1346     if (put_user_s32(host_pipe[0], pipedes)
1347         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1348         return -TARGET_EFAULT;
1349     return get_errno(ret);
1350 }
1351 
1352 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1353                                               abi_ulong target_addr,
1354                                               socklen_t len)
1355 {
1356     struct target_ip_mreqn *target_smreqn;
1357 
1358     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1359     if (!target_smreqn)
1360         return -TARGET_EFAULT;
1361     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1362     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1363     if (len == sizeof(struct target_ip_mreqn))
1364         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1365     unlock_user(target_smreqn, target_addr, 0);
1366 
1367     return 0;
1368 }
1369 
1370 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1371                                                abi_ulong target_addr,
1372                                                socklen_t len)
1373 {
1374     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1375     sa_family_t sa_family;
1376     struct target_sockaddr *target_saddr;
1377 
1378     if (fd_trans_target_to_host_addr(fd)) {
1379         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1380     }
1381 
1382     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1383     if (!target_saddr)
1384         return -TARGET_EFAULT;
1385 
1386     sa_family = tswap16(target_saddr->sa_family);
1387 
1388     /* Oops. The caller might send an incomplete sun_path; sun_path
1389      * must be terminated by \0 (see the manual page), but
1390      * unfortunately it is quite common to specify sockaddr_un
1391      * length as "strlen(x->sun_path)" while it should be
1392      * "strlen(...) + 1". We'll fix that here if needed.
1393      * Linux kernel has a similar feature.
1394      */
1395 
1396     if (sa_family == AF_UNIX) {
1397         if (len < unix_maxlen && len > 0) {
1398             char *cp = (char*)target_saddr;
1399 
1400             if ( cp[len-1] && !cp[len] )
1401                 len++;
1402         }
1403         if (len > unix_maxlen)
1404             len = unix_maxlen;
1405     }
1406 
1407     memcpy(addr, target_saddr, len);
1408     addr->sa_family = sa_family;
1409     if (sa_family == AF_NETLINK) {
1410         struct sockaddr_nl *nladdr;
1411 
1412         nladdr = (struct sockaddr_nl *)addr;
1413         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1414         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1415     } else if (sa_family == AF_PACKET) {
1416 	struct target_sockaddr_ll *lladdr;
1417 
1418 	lladdr = (struct target_sockaddr_ll *)addr;
1419 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1420 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1421     }
1422     unlock_user(target_saddr, target_addr, 0);
1423 
1424     return 0;
1425 }
1426 
1427 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1428                                                struct sockaddr *addr,
1429                                                socklen_t len)
1430 {
1431     struct target_sockaddr *target_saddr;
1432 
1433     if (len == 0) {
1434         return 0;
1435     }
1436     assert(addr);
1437 
1438     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1439     if (!target_saddr)
1440         return -TARGET_EFAULT;
1441     memcpy(target_saddr, addr, len);
1442     if (len >= offsetof(struct target_sockaddr, sa_family) +
1443         sizeof(target_saddr->sa_family)) {
1444         target_saddr->sa_family = tswap16(addr->sa_family);
1445     }
1446     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1447         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1448         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1449         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1450     } else if (addr->sa_family == AF_PACKET) {
1451         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1452         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1453         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1454     } else if (addr->sa_family == AF_INET6 &&
1455                len >= sizeof(struct target_sockaddr_in6)) {
1456         struct target_sockaddr_in6 *target_in6 =
1457                (struct target_sockaddr_in6 *)target_saddr;
1458         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1459     }
1460     unlock_user(target_saddr, target_addr, len);
1461 
1462     return 0;
1463 }
1464 
1465 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1466                                            struct target_msghdr *target_msgh)
1467 {
1468     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1469     abi_long msg_controllen;
1470     abi_ulong target_cmsg_addr;
1471     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1472     socklen_t space = 0;
1473 
1474     msg_controllen = tswapal(target_msgh->msg_controllen);
1475     if (msg_controllen < sizeof (struct target_cmsghdr))
1476         goto the_end;
1477     target_cmsg_addr = tswapal(target_msgh->msg_control);
1478     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1479     target_cmsg_start = target_cmsg;
1480     if (!target_cmsg)
1481         return -TARGET_EFAULT;
1482 
1483     while (cmsg && target_cmsg) {
1484         void *data = CMSG_DATA(cmsg);
1485         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1486 
1487         int len = tswapal(target_cmsg->cmsg_len)
1488             - sizeof(struct target_cmsghdr);
1489 
1490         space += CMSG_SPACE(len);
1491         if (space > msgh->msg_controllen) {
1492             space -= CMSG_SPACE(len);
1493             /* This is a QEMU bug, since we allocated the payload
1494              * area ourselves (unlike overflow in host-to-target
1495              * conversion, which is just the guest giving us a buffer
1496              * that's too small). It can't happen for the payload types
1497              * we currently support; if it becomes an issue in future
1498              * we would need to improve our allocation strategy to
1499              * something more intelligent than "twice the size of the
1500              * target buffer we're reading from".
1501              */
1502             gemu_log("Host cmsg overflow\n");
1503             break;
1504         }
1505 
1506         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1507             cmsg->cmsg_level = SOL_SOCKET;
1508         } else {
1509             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1510         }
1511         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1512         cmsg->cmsg_len = CMSG_LEN(len);
1513 
1514         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1515             int *fd = (int *)data;
1516             int *target_fd = (int *)target_data;
1517             int i, numfds = len / sizeof(int);
1518 
1519             for (i = 0; i < numfds; i++) {
1520                 __get_user(fd[i], target_fd + i);
1521             }
1522         } else if (cmsg->cmsg_level == SOL_SOCKET
1523                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1524             struct ucred *cred = (struct ucred *)data;
1525             struct target_ucred *target_cred =
1526                 (struct target_ucred *)target_data;
1527 
1528             __get_user(cred->pid, &target_cred->pid);
1529             __get_user(cred->uid, &target_cred->uid);
1530             __get_user(cred->gid, &target_cred->gid);
1531         } else {
1532             gemu_log("Unsupported ancillary data: %d/%d\n",
1533                                         cmsg->cmsg_level, cmsg->cmsg_type);
1534             memcpy(data, target_data, len);
1535         }
1536 
1537         cmsg = CMSG_NXTHDR(msgh, cmsg);
1538         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1539                                          target_cmsg_start);
1540     }
1541     unlock_user(target_cmsg, target_cmsg_addr, 0);
1542  the_end:
1543     msgh->msg_controllen = space;
1544     return 0;
1545 }
1546 
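/* Convert the ancillary data returned by the host's recvmsg() back into
 * the guest's cmsg layout. Payloads whose size differs between host and
 * target (e.g. SO_TIMESTAMP's struct timeval) are converted field by
 * field, and truncation is reported to the guest via MSG_CTRUNC.
 */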
1547 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1548                                            struct msghdr *msgh)
1549 {
1550     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1551     abi_long msg_controllen;
1552     abi_ulong target_cmsg_addr;
1553     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1554     socklen_t space = 0;
1555 
1556     msg_controllen = tswapal(target_msgh->msg_controllen);
1557     if (msg_controllen < sizeof (struct target_cmsghdr))
1558         goto the_end;
1559     target_cmsg_addr = tswapal(target_msgh->msg_control);
1560     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1561     target_cmsg_start = target_cmsg;
1562     if (!target_cmsg)
1563         return -TARGET_EFAULT;
1564 
1565     while (cmsg && target_cmsg) {
1566         void *data = CMSG_DATA(cmsg);
1567         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1568 
1569         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1570         int tgt_len, tgt_space;
1571 
1572         /* We never copy a half-header but may copy half-data;
1573          * this is Linux's behaviour in put_cmsg(). Note that
1574          * truncation here is a guest problem (which we report
1575          * to the guest via the CTRUNC bit), unlike truncation
1576          * in target_to_host_cmsg, which is a QEMU bug.
1577          */
1578         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1579             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1580             break;
1581         }
1582 
1583         if (cmsg->cmsg_level == SOL_SOCKET) {
1584             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1585         } else {
1586             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1587         }
1588         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1589 
1590         /* Payload types which need a different size of payload on
1591          * the target must adjust tgt_len here.
1592          */
1593         tgt_len = len;
1594         switch (cmsg->cmsg_level) {
1595         case SOL_SOCKET:
1596             switch (cmsg->cmsg_type) {
1597             case SO_TIMESTAMP:
1598                 tgt_len = sizeof(struct target_timeval);
1599                 break;
1600             default:
1601                 break;
1602             }
1603             break;
1604         default:
1605             break;
1606         }
1607 
1608         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1609             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1610             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1611         }
1612 
1613         /* We must now copy-and-convert len bytes of payload
1614          * into tgt_len bytes of destination space. Bear in mind
1615          * that in both source and destination we may be dealing
1616          * with a truncated value!
1617          */
1618         switch (cmsg->cmsg_level) {
1619         case SOL_SOCKET:
1620             switch (cmsg->cmsg_type) {
1621             case SCM_RIGHTS:
1622             {
1623                 int *fd = (int *)data;
1624                 int *target_fd = (int *)target_data;
1625                 int i, numfds = tgt_len / sizeof(int);
1626 
1627                 for (i = 0; i < numfds; i++) {
1628                     __put_user(fd[i], target_fd + i);
1629                 }
1630                 break;
1631             }
1632             case SO_TIMESTAMP:
1633             {
1634                 struct timeval *tv = (struct timeval *)data;
1635                 struct target_timeval *target_tv =
1636                     (struct target_timeval *)target_data;
1637 
1638                 if (len != sizeof(struct timeval) ||
1639                     tgt_len != sizeof(struct target_timeval)) {
1640                     goto unimplemented;
1641                 }
1642 
1643                 /* copy struct timeval to target */
1644                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1645                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1646                 break;
1647             }
1648             case SCM_CREDENTIALS:
1649             {
1650                 struct ucred *cred = (struct ucred *)data;
1651                 struct target_ucred *target_cred =
1652                     (struct target_ucred *)target_data;
1653 
1654                 __put_user(cred->pid, &target_cred->pid);
1655                 __put_user(cred->uid, &target_cred->uid);
1656                 __put_user(cred->gid, &target_cred->gid);
1657                 break;
1658             }
1659             default:
1660                 goto unimplemented;
1661             }
1662             break;
1663 
1664         case SOL_IP:
1665             switch (cmsg->cmsg_type) {
1666             case IP_TTL:
1667             {
1668                 uint32_t *v = (uint32_t *)data;
1669                 uint32_t *t_int = (uint32_t *)target_data;
1670 
1671                 if (len != sizeof(uint32_t) ||
1672                     tgt_len != sizeof(uint32_t)) {
1673                     goto unimplemented;
1674                 }
1675                 __put_user(*v, t_int);
1676                 break;
1677             }
1678             case IP_RECVERR:
1679             {
1680                 struct errhdr_t {
1681                    struct sock_extended_err ee;
1682                    struct sockaddr_in offender;
1683                 };
1684                 struct errhdr_t *errh = (struct errhdr_t *)data;
1685                 struct errhdr_t *target_errh =
1686                     (struct errhdr_t *)target_data;
1687 
1688                 if (len != sizeof(struct errhdr_t) ||
1689                     tgt_len != sizeof(struct errhdr_t)) {
1690                     goto unimplemented;
1691                 }
1692                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1693                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1694                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1695                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1696                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1697                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1698                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1699                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1700                     (void *) &errh->offender, sizeof(errh->offender));
1701                 break;
1702             }
1703             default:
1704                 goto unimplemented;
1705             }
1706             break;
1707 
1708         case SOL_IPV6:
1709             switch (cmsg->cmsg_type) {
1710             case IPV6_HOPLIMIT:
1711             {
1712                 uint32_t *v = (uint32_t *)data;
1713                 uint32_t *t_int = (uint32_t *)target_data;
1714 
1715                 if (len != sizeof(uint32_t) ||
1716                     tgt_len != sizeof(uint32_t)) {
1717                     goto unimplemented;
1718                 }
1719                 __put_user(*v, t_int);
1720                 break;
1721             }
1722             case IPV6_RECVERR:
1723             {
1724                 struct errhdr6_t {
1725                    struct sock_extended_err ee;
1726                    struct sockaddr_in6 offender;
1727                 };
1728                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1729                 struct errhdr6_t *target_errh =
1730                     (struct errhdr6_t *)target_data;
1731 
1732                 if (len != sizeof(struct errhdr6_t) ||
1733                     tgt_len != sizeof(struct errhdr6_t)) {
1734                     goto unimplemented;
1735                 }
1736                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1737                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1738                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1739                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1740                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1741                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1742                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1743                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1744                     (void *) &errh->offender, sizeof(errh->offender));
1745                 break;
1746             }
1747             default:
1748                 goto unimplemented;
1749             }
1750             break;
1751 
1752         default:
1753         unimplemented:
1754             gemu_log("Unsupported ancillary data: %d/%d\n",
1755                                         cmsg->cmsg_level, cmsg->cmsg_type);
1756             memcpy(target_data, data, MIN(len, tgt_len));
1757             if (tgt_len > len) {
1758                 memset(target_data + len, 0, tgt_len - len);
1759             }
1760         }
1761 
1762         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1763         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1764         if (msg_controllen < tgt_space) {
1765             tgt_space = msg_controllen;
1766         }
1767         msg_controllen -= tgt_space;
1768         space += tgt_space;
1769         cmsg = CMSG_NXTHDR(msgh, cmsg);
1770         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1771                                          target_cmsg_start);
1772     }
1773     unlock_user(target_cmsg, target_cmsg_addr, space);
1774  the_end:
1775     target_msgh->msg_controllen = tswapal(space);
1776     return 0;
1777 }
1778 
1779 /* do_setsockopt() Must return target values and target errnos. */
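/* Levels other than SOL_SOCKET (SOL_TCP, SOL_IP, SOL_IPV6, ...) use the
 * same numeric level and option values on host and target, so only
 * SOL_SOCKET and its option names are translated explicitly; the option
 * value itself is always copied in from guest memory and converted.
 */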
1780 static abi_long do_setsockopt(int sockfd, int level, int optname,
1781                               abi_ulong optval_addr, socklen_t optlen)
1782 {
1783     abi_long ret;
1784     int val;
1785     struct ip_mreqn *ip_mreq;
1786     struct ip_mreq_source *ip_mreq_source;
1787 
1788     switch(level) {
1789     case SOL_TCP:
1790         /* TCP options all take an 'int' value.  */
1791         if (optlen < sizeof(uint32_t))
1792             return -TARGET_EINVAL;
1793 
1794         if (get_user_u32(val, optval_addr))
1795             return -TARGET_EFAULT;
1796         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1797         break;
1798     case SOL_IP:
1799         switch(optname) {
1800         case IP_TOS:
1801         case IP_TTL:
1802         case IP_HDRINCL:
1803         case IP_ROUTER_ALERT:
1804         case IP_RECVOPTS:
1805         case IP_RETOPTS:
1806         case IP_PKTINFO:
1807         case IP_MTU_DISCOVER:
1808         case IP_RECVERR:
1809         case IP_RECVTTL:
1810         case IP_RECVTOS:
1811 #ifdef IP_FREEBIND
1812         case IP_FREEBIND:
1813 #endif
1814         case IP_MULTICAST_TTL:
1815         case IP_MULTICAST_LOOP:
1816             val = 0;
1817             if (optlen >= sizeof(uint32_t)) {
1818                 if (get_user_u32(val, optval_addr))
1819                     return -TARGET_EFAULT;
1820             } else if (optlen >= 1) {
1821                 if (get_user_u8(val, optval_addr))
1822                     return -TARGET_EFAULT;
1823             }
1824             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1825             break;
1826         case IP_ADD_MEMBERSHIP:
1827         case IP_DROP_MEMBERSHIP:
1828             if (optlen < sizeof (struct target_ip_mreq) ||
1829                 optlen > sizeof (struct target_ip_mreqn))
1830                 return -TARGET_EINVAL;
1831 
1832             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1833             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1834             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1835             break;
1836 
1837         case IP_BLOCK_SOURCE:
1838         case IP_UNBLOCK_SOURCE:
1839         case IP_ADD_SOURCE_MEMBERSHIP:
1840         case IP_DROP_SOURCE_MEMBERSHIP:
1841             if (optlen != sizeof (struct target_ip_mreq_source))
1842                 return -TARGET_EINVAL;
1843 
1844             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1845             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1846             unlock_user (ip_mreq_source, optval_addr, 0);
1847             break;
1848 
1849         default:
1850             goto unimplemented;
1851         }
1852         break;
1853     case SOL_IPV6:
1854         switch (optname) {
1855         case IPV6_MTU_DISCOVER:
1856         case IPV6_MTU:
1857         case IPV6_V6ONLY:
1858         case IPV6_RECVPKTINFO:
1859         case IPV6_UNICAST_HOPS:
1860         case IPV6_MULTICAST_HOPS:
1861         case IPV6_MULTICAST_LOOP:
1862         case IPV6_RECVERR:
1863         case IPV6_RECVHOPLIMIT:
1864         case IPV6_2292HOPLIMIT:
1865         case IPV6_CHECKSUM:
1866         case IPV6_ADDRFORM:
1867         case IPV6_2292PKTINFO:
1868         case IPV6_RECVTCLASS:
1869         case IPV6_RECVRTHDR:
1870         case IPV6_2292RTHDR:
1871         case IPV6_RECVHOPOPTS:
1872         case IPV6_2292HOPOPTS:
1873         case IPV6_RECVDSTOPTS:
1874         case IPV6_2292DSTOPTS:
1875         case IPV6_TCLASS:
1876 #ifdef IPV6_RECVPATHMTU
1877         case IPV6_RECVPATHMTU:
1878 #endif
1879 #ifdef IPV6_TRANSPARENT
1880         case IPV6_TRANSPARENT:
1881 #endif
1882 #ifdef IPV6_FREEBIND
1883         case IPV6_FREEBIND:
1884 #endif
1885 #ifdef IPV6_RECVORIGDSTADDR
1886         case IPV6_RECVORIGDSTADDR:
1887 #endif
1888             val = 0;
1889             if (optlen < sizeof(uint32_t)) {
1890                 return -TARGET_EINVAL;
1891             }
1892             if (get_user_u32(val, optval_addr)) {
1893                 return -TARGET_EFAULT;
1894             }
1895             ret = get_errno(setsockopt(sockfd, level, optname,
1896                                        &val, sizeof(val)));
1897             break;
1898         case IPV6_PKTINFO:
1899         {
1900             struct in6_pktinfo pki;
1901 
1902             if (optlen < sizeof(pki)) {
1903                 return -TARGET_EINVAL;
1904             }
1905 
1906             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1907                 return -TARGET_EFAULT;
1908             }
1909 
1910             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1911 
1912             ret = get_errno(setsockopt(sockfd, level, optname,
1913                                        &pki, sizeof(pki)));
1914             break;
1915         }
1916         default:
1917             goto unimplemented;
1918         }
1919         break;
1920     case SOL_ICMPV6:
1921         switch (optname) {
1922         case ICMPV6_FILTER:
1923         {
1924             struct icmp6_filter icmp6f;
1925 
1926             if (optlen > sizeof(icmp6f)) {
1927                 optlen = sizeof(icmp6f);
1928             }
1929 
1930             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1931                 return -TARGET_EFAULT;
1932             }
1933 
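            /* struct icmp6_filter is an array of eight 32-bit words;
             * byte-swap each word for the host.
             */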
1934             for (val = 0; val < 8; val++) {
1935                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1936             }
1937 
1938             ret = get_errno(setsockopt(sockfd, level, optname,
1939                                        &icmp6f, optlen));
1940             break;
1941         }
1942         default:
1943             goto unimplemented;
1944         }
1945         break;
1946     case SOL_RAW:
1947         switch (optname) {
1948         case ICMP_FILTER:
1949         case IPV6_CHECKSUM:
1950             /* these take a u32 value */
1951             if (optlen < sizeof(uint32_t)) {
1952                 return -TARGET_EINVAL;
1953             }
1954 
1955             if (get_user_u32(val, optval_addr)) {
1956                 return -TARGET_EFAULT;
1957             }
1958             ret = get_errno(setsockopt(sockfd, level, optname,
1959                                        &val, sizeof(val)));
1960             break;
1961 
1962         default:
1963             goto unimplemented;
1964         }
1965         break;
1966     case TARGET_SOL_SOCKET:
1967         switch (optname) {
1968         case TARGET_SO_RCVTIMEO:
1969         {
1970                 struct timeval tv;
1971 
1972                 optname = SO_RCVTIMEO;
1973 
1974 set_timeout:
1975                 if (optlen != sizeof(struct target_timeval)) {
1976                     return -TARGET_EINVAL;
1977                 }
1978 
1979                 if (copy_from_user_timeval(&tv, optval_addr)) {
1980                     return -TARGET_EFAULT;
1981                 }
1982 
1983                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1984                                 &tv, sizeof(tv)));
1985                 return ret;
1986         }
1987         case TARGET_SO_SNDTIMEO:
1988                 optname = SO_SNDTIMEO;
1989                 goto set_timeout;
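        /* SO_ATTACH_FILTER takes a classic BPF program: copy the guest's
         * sock_fprog and each sock_filter instruction, byte-swapping the
         * 16/32-bit fields, before attaching it on the host socket.
         */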
1990         case TARGET_SO_ATTACH_FILTER:
1991         {
1992                 struct target_sock_fprog *tfprog;
1993                 struct target_sock_filter *tfilter;
1994                 struct sock_fprog fprog;
1995                 struct sock_filter *filter;
1996                 int i;
1997 
1998                 if (optlen != sizeof(*tfprog)) {
1999                     return -TARGET_EINVAL;
2000                 }
2001                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2002                     return -TARGET_EFAULT;
2003                 }
2004                 if (!lock_user_struct(VERIFY_READ, tfilter,
2005                                       tswapal(tfprog->filter), 0)) {
2006                     unlock_user_struct(tfprog, optval_addr, 1);
2007                     return -TARGET_EFAULT;
2008                 }
2009 
2010                 fprog.len = tswap16(tfprog->len);
2011                 filter = g_try_new(struct sock_filter, fprog.len);
2012                 if (filter == NULL) {
2013                     unlock_user_struct(tfilter, tfprog->filter, 1);
2014                     unlock_user_struct(tfprog, optval_addr, 1);
2015                     return -TARGET_ENOMEM;
2016                 }
2017                 for (i = 0; i < fprog.len; i++) {
2018                     filter[i].code = tswap16(tfilter[i].code);
2019                     filter[i].jt = tfilter[i].jt;
2020                     filter[i].jf = tfilter[i].jf;
2021                     filter[i].k = tswap32(tfilter[i].k);
2022                 }
2023                 fprog.filter = filter;
2024 
2025                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2026                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2027                 g_free(filter);
2028 
2029                 unlock_user_struct(tfilter, tfprog->filter, 1);
2030                 unlock_user_struct(tfprog, optval_addr, 1);
2031                 return ret;
2032         }
2033 	case TARGET_SO_BINDTODEVICE:
2034 	{
2035 		char *dev_ifname, *addr_ifname;
2036 
2037 		if (optlen > IFNAMSIZ - 1) {
2038 		    optlen = IFNAMSIZ - 1;
2039 		}
2040 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2041 		if (!dev_ifname) {
2042 		    return -TARGET_EFAULT;
2043 		}
2044 		optname = SO_BINDTODEVICE;
2045 		addr_ifname = alloca(IFNAMSIZ);
2046 		memcpy(addr_ifname, dev_ifname, optlen);
2047 		addr_ifname[optlen] = 0;
2048 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2049                                            addr_ifname, optlen));
2050 		unlock_user (dev_ifname, optval_addr, 0);
2051 		return ret;
2052 	}
2053         case TARGET_SO_LINGER:
2054         {
2055                 struct linger lg;
2056                 struct target_linger *tlg;
2057 
2058                 if (optlen != sizeof(struct target_linger)) {
2059                     return -TARGET_EINVAL;
2060                 }
2061                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2062                     return -TARGET_EFAULT;
2063                 }
2064                 __get_user(lg.l_onoff, &tlg->l_onoff);
2065                 __get_user(lg.l_linger, &tlg->l_linger);
2066                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2067                                 &lg, sizeof(lg)));
2068                 unlock_user_struct(tlg, optval_addr, 0);
2069                 return ret;
2070         }
2071             /* Options with 'int' argument.  */
2072         case TARGET_SO_DEBUG:
2073 		optname = SO_DEBUG;
2074 		break;
2075         case TARGET_SO_REUSEADDR:
2076 		optname = SO_REUSEADDR;
2077 		break;
2078 #ifdef SO_REUSEPORT
2079         case TARGET_SO_REUSEPORT:
2080                 optname = SO_REUSEPORT;
2081                 break;
2082 #endif
2083         case TARGET_SO_TYPE:
2084 		optname = SO_TYPE;
2085 		break;
2086         case TARGET_SO_ERROR:
2087 		optname = SO_ERROR;
2088 		break;
2089         case TARGET_SO_DONTROUTE:
2090 		optname = SO_DONTROUTE;
2091 		break;
2092         case TARGET_SO_BROADCAST:
2093 		optname = SO_BROADCAST;
2094 		break;
2095         case TARGET_SO_SNDBUF:
2096 		optname = SO_SNDBUF;
2097 		break;
2098         case TARGET_SO_SNDBUFFORCE:
2099                 optname = SO_SNDBUFFORCE;
2100                 break;
2101         case TARGET_SO_RCVBUF:
2102 		optname = SO_RCVBUF;
2103 		break;
2104         case TARGET_SO_RCVBUFFORCE:
2105                 optname = SO_RCVBUFFORCE;
2106                 break;
2107         case TARGET_SO_KEEPALIVE:
2108 		optname = SO_KEEPALIVE;
2109 		break;
2110         case TARGET_SO_OOBINLINE:
2111 		optname = SO_OOBINLINE;
2112 		break;
2113         case TARGET_SO_NO_CHECK:
2114 		optname = SO_NO_CHECK;
2115 		break;
2116         case TARGET_SO_PRIORITY:
2117 		optname = SO_PRIORITY;
2118 		break;
2119 #ifdef SO_BSDCOMPAT
2120         case TARGET_SO_BSDCOMPAT:
2121 		optname = SO_BSDCOMPAT;
2122 		break;
2123 #endif
2124         case TARGET_SO_PASSCRED:
2125 		optname = SO_PASSCRED;
2126 		break;
2127         case TARGET_SO_PASSSEC:
2128                 optname = SO_PASSSEC;
2129                 break;
2130         case TARGET_SO_TIMESTAMP:
2131 		optname = SO_TIMESTAMP;
2132 		break;
2133         case TARGET_SO_RCVLOWAT:
2134 		optname = SO_RCVLOWAT;
2135 		break;
2136         default:
2137             goto unimplemented;
2138         }
2139 	if (optlen < sizeof(uint32_t))
2140             return -TARGET_EINVAL;
2141 
2142 	if (get_user_u32(val, optval_addr))
2143             return -TARGET_EFAULT;
2144 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2145         break;
2146     default:
2147     unimplemented:
2148         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2149         ret = -TARGET_ENOPROTOOPT;
2150     }
2151     return ret;
2152 }
2153 
2154 /* do_getsockopt() Must return target values and target errnos. */
2155 static abi_long do_getsockopt(int sockfd, int level, int optname,
2156                               abi_ulong optval_addr, abi_ulong optlen)
2157 {
2158     abi_long ret;
2159     int len, val;
2160     socklen_t lv;
2161 
2162     switch(level) {
2163     case TARGET_SOL_SOCKET:
2164         level = SOL_SOCKET;
2165         switch (optname) {
2166         /* These don't just return a single integer */
2167         case TARGET_SO_RCVTIMEO:
2168         case TARGET_SO_SNDTIMEO:
2169         case TARGET_SO_PEERNAME:
2170             goto unimplemented;
2171         case TARGET_SO_PEERCRED: {
2172             struct ucred cr;
2173             socklen_t crlen;
2174             struct target_ucred *tcr;
2175 
2176             if (get_user_u32(len, optlen)) {
2177                 return -TARGET_EFAULT;
2178             }
2179             if (len < 0) {
2180                 return -TARGET_EINVAL;
2181             }
2182 
2183             crlen = sizeof(cr);
2184             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2185                                        &cr, &crlen));
2186             if (ret < 0) {
2187                 return ret;
2188             }
2189             if (len > crlen) {
2190                 len = crlen;
2191             }
2192             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2193                 return -TARGET_EFAULT;
2194             }
2195             __put_user(cr.pid, &tcr->pid);
2196             __put_user(cr.uid, &tcr->uid);
2197             __put_user(cr.gid, &tcr->gid);
2198             unlock_user_struct(tcr, optval_addr, 1);
2199             if (put_user_u32(len, optlen)) {
2200                 return -TARGET_EFAULT;
2201             }
2202             break;
2203         }
2204         case TARGET_SO_LINGER:
2205         {
2206             struct linger lg;
2207             socklen_t lglen;
2208             struct target_linger *tlg;
2209 
2210             if (get_user_u32(len, optlen)) {
2211                 return -TARGET_EFAULT;
2212             }
2213             if (len < 0) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             lglen = sizeof(lg);
2218             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2219                                        &lg, &lglen));
2220             if (ret < 0) {
2221                 return ret;
2222             }
2223             if (len > lglen) {
2224                 len = lglen;
2225             }
2226             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2227                 return -TARGET_EFAULT;
2228             }
2229             __put_user(lg.l_onoff, &tlg->l_onoff);
2230             __put_user(lg.l_linger, &tlg->l_linger);
2231             unlock_user_struct(tlg, optval_addr, 1);
2232             if (put_user_u32(len, optlen)) {
2233                 return -TARGET_EFAULT;
2234             }
2235             break;
2236         }
2237         /* Options with 'int' argument.  */
2238         case TARGET_SO_DEBUG:
2239             optname = SO_DEBUG;
2240             goto int_case;
2241         case TARGET_SO_REUSEADDR:
2242             optname = SO_REUSEADDR;
2243             goto int_case;
2244 #ifdef SO_REUSEPORT
2245         case TARGET_SO_REUSEPORT:
2246             optname = SO_REUSEPORT;
2247             goto int_case;
2248 #endif
2249         case TARGET_SO_TYPE:
2250             optname = SO_TYPE;
2251             goto int_case;
2252         case TARGET_SO_ERROR:
2253             optname = SO_ERROR;
2254             goto int_case;
2255         case TARGET_SO_DONTROUTE:
2256             optname = SO_DONTROUTE;
2257             goto int_case;
2258         case TARGET_SO_BROADCAST:
2259             optname = SO_BROADCAST;
2260             goto int_case;
2261         case TARGET_SO_SNDBUF:
2262             optname = SO_SNDBUF;
2263             goto int_case;
2264         case TARGET_SO_RCVBUF:
2265             optname = SO_RCVBUF;
2266             goto int_case;
2267         case TARGET_SO_KEEPALIVE:
2268             optname = SO_KEEPALIVE;
2269             goto int_case;
2270         case TARGET_SO_OOBINLINE:
2271             optname = SO_OOBINLINE;
2272             goto int_case;
2273         case TARGET_SO_NO_CHECK:
2274             optname = SO_NO_CHECK;
2275             goto int_case;
2276         case TARGET_SO_PRIORITY:
2277             optname = SO_PRIORITY;
2278             goto int_case;
2279 #ifdef SO_BSDCOMPAT
2280         case TARGET_SO_BSDCOMPAT:
2281             optname = SO_BSDCOMPAT;
2282             goto int_case;
2283 #endif
2284         case TARGET_SO_PASSCRED:
2285             optname = SO_PASSCRED;
2286             goto int_case;
2287         case TARGET_SO_TIMESTAMP:
2288             optname = SO_TIMESTAMP;
2289             goto int_case;
2290         case TARGET_SO_RCVLOWAT:
2291             optname = SO_RCVLOWAT;
2292             goto int_case;
2293         case TARGET_SO_ACCEPTCONN:
2294             optname = SO_ACCEPTCONN;
2295             goto int_case;
2296         default:
2297             goto int_case;
2298         }
2299         break;
2300     case SOL_TCP:
2301         /* TCP options all take an 'int' value.  */
2302     int_case:
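        /* Common path for options whose value is a plain int: fetch the
         * host value, then write back either a single byte or a 32-bit
         * word depending on the size of the guest's buffer.
         */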
2303         if (get_user_u32(len, optlen))
2304             return -TARGET_EFAULT;
2305         if (len < 0)
2306             return -TARGET_EINVAL;
2307         lv = sizeof(lv);
2308         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2309         if (ret < 0)
2310             return ret;
2311         if (optname == SO_TYPE) {
2312             val = host_to_target_sock_type(val);
2313         }
2314         if (len > lv)
2315             len = lv;
2316         if (len == 4) {
2317             if (put_user_u32(val, optval_addr))
2318                 return -TARGET_EFAULT;
2319         } else {
2320             if (put_user_u8(val, optval_addr))
2321                 return -TARGET_EFAULT;
2322         }
2323         if (put_user_u32(len, optlen))
2324             return -TARGET_EFAULT;
2325         break;
2326     case SOL_IP:
2327         switch(optname) {
2328         case IP_TOS:
2329         case IP_TTL:
2330         case IP_HDRINCL:
2331         case IP_ROUTER_ALERT:
2332         case IP_RECVOPTS:
2333         case IP_RETOPTS:
2334         case IP_PKTINFO:
2335         case IP_MTU_DISCOVER:
2336         case IP_RECVERR:
2337         case IP_RECVTOS:
2338 #ifdef IP_FREEBIND
2339         case IP_FREEBIND:
2340 #endif
2341         case IP_MULTICAST_TTL:
2342         case IP_MULTICAST_LOOP:
2343             if (get_user_u32(len, optlen))
2344                 return -TARGET_EFAULT;
2345             if (len < 0)
2346                 return -TARGET_EINVAL;
2347             lv = sizeof(lv);
2348             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2349             if (ret < 0)
2350                 return ret;
2351             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2352                 len = 1;
2353                 if (put_user_u32(len, optlen)
2354                     || put_user_u8(val, optval_addr))
2355                     return -TARGET_EFAULT;
2356             } else {
2357                 if (len > sizeof(int))
2358                     len = sizeof(int);
2359                 if (put_user_u32(len, optlen)
2360                     || put_user_u32(val, optval_addr))
2361                     return -TARGET_EFAULT;
2362             }
2363             break;
2364         default:
2365             ret = -TARGET_ENOPROTOOPT;
2366             break;
2367         }
2368         break;
2369     case SOL_IPV6:
2370         switch (optname) {
2371         case IPV6_MTU_DISCOVER:
2372         case IPV6_MTU:
2373         case IPV6_V6ONLY:
2374         case IPV6_RECVPKTINFO:
2375         case IPV6_UNICAST_HOPS:
2376         case IPV6_MULTICAST_HOPS:
2377         case IPV6_MULTICAST_LOOP:
2378         case IPV6_RECVERR:
2379         case IPV6_RECVHOPLIMIT:
2380         case IPV6_2292HOPLIMIT:
2381         case IPV6_CHECKSUM:
2382         case IPV6_ADDRFORM:
2383         case IPV6_2292PKTINFO:
2384         case IPV6_RECVTCLASS:
2385         case IPV6_RECVRTHDR:
2386         case IPV6_2292RTHDR:
2387         case IPV6_RECVHOPOPTS:
2388         case IPV6_2292HOPOPTS:
2389         case IPV6_RECVDSTOPTS:
2390         case IPV6_2292DSTOPTS:
2391         case IPV6_TCLASS:
2392 #ifdef IPV6_RECVPATHMTU
2393         case IPV6_RECVPATHMTU:
2394 #endif
2395 #ifdef IPV6_TRANSPARENT
2396         case IPV6_TRANSPARENT:
2397 #endif
2398 #ifdef IPV6_FREEBIND
2399         case IPV6_FREEBIND:
2400 #endif
2401 #ifdef IPV6_RECVORIGDSTADDR
2402         case IPV6_RECVORIGDSTADDR:
2403 #endif
2404             if (get_user_u32(len, optlen))
2405                 return -TARGET_EFAULT;
2406             if (len < 0)
2407                 return -TARGET_EINVAL;
2408             lv = sizeof(lv);
2409             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2410             if (ret < 0)
2411                 return ret;
2412             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2413                 len = 1;
2414                 if (put_user_u32(len, optlen)
2415                     || put_user_u8(val, optval_addr))
2416                     return -TARGET_EFAULT;
2417             } else {
2418                 if (len > sizeof(int))
2419                     len = sizeof(int);
2420                 if (put_user_u32(len, optlen)
2421                     || put_user_u32(val, optval_addr))
2422                     return -TARGET_EFAULT;
2423             }
2424             break;
2425         default:
2426             ret = -TARGET_ENOPROTOOPT;
2427             break;
2428         }
2429         break;
2430     default:
2431     unimplemented:
2432         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2433                  level, optname);
2434         ret = -TARGET_EOPNOTSUPP;
2435         break;
2436     }
2437     return ret;
2438 }
2439 
2440 /* Convert target low/high pair representing file offset into the host
2441  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2442  * as the kernel doesn't handle them either.
2443  */
2444 static void target_to_host_low_high(abi_ulong tlow,
2445                                     abi_ulong thigh,
2446                                     unsigned long *hlow,
2447                                     unsigned long *hhigh)
2448 {
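    /* Shift in two halves so that when the shift count would equal the
     * width of the type (e.g. TARGET_LONG_BITS == 64) we avoid an
     * undefined full-width shift; the high half then contributes zero.
     */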
2449     uint64_t off = tlow |
2450         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2451         TARGET_LONG_BITS / 2;
2452 
2453     *hlow = off;
2454     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2455 }
2456 
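/* Lock the guest's iovec array and build a host struct iovec array from
 * it. Returns NULL on failure with errno set (errno is 0 for an empty
 * vector); the caller converts errno into a target errno.
 */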
2457 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2458                                 abi_ulong count, int copy)
2459 {
2460     struct target_iovec *target_vec;
2461     struct iovec *vec;
2462     abi_ulong total_len, max_len;
2463     int i;
2464     int err = 0;
2465     bool bad_address = false;
2466 
2467     if (count == 0) {
2468         errno = 0;
2469         return NULL;
2470     }
2471     if (count > IOV_MAX) {
2472         errno = EINVAL;
2473         return NULL;
2474     }
2475 
2476     vec = g_try_new0(struct iovec, count);
2477     if (vec == NULL) {
2478         errno = ENOMEM;
2479         return NULL;
2480     }
2481 
2482     target_vec = lock_user(VERIFY_READ, target_addr,
2483                            count * sizeof(struct target_iovec), 1);
2484     if (target_vec == NULL) {
2485         err = EFAULT;
2486         goto fail2;
2487     }
2488 
2489     /* ??? If host page size > target page size, this will result in a
2490        value larger than what we can actually support.  */
2491     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2492     total_len = 0;
2493 
2494     for (i = 0; i < count; i++) {
2495         abi_ulong base = tswapal(target_vec[i].iov_base);
2496         abi_long len = tswapal(target_vec[i].iov_len);
2497 
2498         if (len < 0) {
2499             err = EINVAL;
2500             goto fail;
2501         } else if (len == 0) {
2502             /* The pointer of a zero-length entry is ignored.  */
2503             vec[i].iov_base = 0;
2504         } else {
2505             vec[i].iov_base = lock_user(type, base, len, copy);
2506             /* If the first buffer pointer is bad, this is a fault.  But
2507              * subsequent bad buffers will result in a partial write; this
2508              * is realized by filling the vector with null pointers and
2509              * zero lengths. */
2510             if (!vec[i].iov_base) {
2511                 if (i == 0) {
2512                     err = EFAULT;
2513                     goto fail;
2514                 } else {
2515                     bad_address = true;
2516                 }
2517             }
2518             if (bad_address) {
2519                 len = 0;
2520             }
2521             if (len > max_len - total_len) {
2522                 len = max_len - total_len;
2523             }
2524         }
2525         vec[i].iov_len = len;
2526         total_len += len;
2527     }
2528 
2529     unlock_user(target_vec, target_addr, 0);
2530     return vec;
2531 
2532  fail:
2533     while (--i >= 0) {
2534         if (tswapal(target_vec[i].iov_len) > 0) {
2535             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2536         }
2537     }
2538     unlock_user(target_vec, target_addr, 0);
2539  fail2:
2540     g_free(vec);
2541     errno = err;
2542     return NULL;
2543 }
2544 
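/* Undo lock_iovec(): unlock every guest buffer, copying data back to the
 * guest when 'copy' is set, and free the host vector.
 */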
2545 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2546                          abi_ulong count, int copy)
2547 {
2548     struct target_iovec *target_vec;
2549     int i;
2550 
2551     target_vec = lock_user(VERIFY_READ, target_addr,
2552                            count * sizeof(struct target_iovec), 1);
2553     if (target_vec) {
2554         for (i = 0; i < count; i++) {
2555             abi_ulong base = tswapal(target_vec[i].iov_base);
2556             abi_long len = tswapal(target_vec[i].iov_len);
2557             if (len < 0) {
2558                 break;
2559             }
2560             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2561         }
2562         unlock_user(target_vec, target_addr, 0);
2563     }
2564 
2565     g_free(vec);
2566 }
2567 
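/* Translate the guest's socket type and type flags (SOCK_CLOEXEC,
 * SOCK_NONBLOCK) into host values; returns -TARGET_EINVAL if a requested
 * flag cannot be expressed on this host.
 */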
2568 static inline int target_to_host_sock_type(int *type)
2569 {
2570     int host_type = 0;
2571     int target_type = *type;
2572 
2573     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2574     case TARGET_SOCK_DGRAM:
2575         host_type = SOCK_DGRAM;
2576         break;
2577     case TARGET_SOCK_STREAM:
2578         host_type = SOCK_STREAM;
2579         break;
2580     default:
2581         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2582         break;
2583     }
2584     if (target_type & TARGET_SOCK_CLOEXEC) {
2585 #if defined(SOCK_CLOEXEC)
2586         host_type |= SOCK_CLOEXEC;
2587 #else
2588         return -TARGET_EINVAL;
2589 #endif
2590     }
2591     if (target_type & TARGET_SOCK_NONBLOCK) {
2592 #if defined(SOCK_NONBLOCK)
2593         host_type |= SOCK_NONBLOCK;
2594 #elif !defined(O_NONBLOCK)
2595         return -TARGET_EINVAL;
2596 #endif
2597     }
2598     *type = host_type;
2599     return 0;
2600 }
2601 
2602 /* Try to emulate socket type flags after socket creation.  */
2603 static int sock_flags_fixup(int fd, int target_type)
2604 {
2605 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2606     if (target_type & TARGET_SOCK_NONBLOCK) {
2607         int flags = fcntl(fd, F_GETFL);
2608         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2609             close(fd);
2610             return -TARGET_EINVAL;
2611         }
2612     }
2613 #endif
2614     return fd;
2615 }
2616 
2617 /* do_socket() Must return target values and target errnos. */
2618 static abi_long do_socket(int domain, int type, int protocol)
2619 {
2620     int target_type = type;
2621     int ret;
2622 
2623     ret = target_to_host_sock_type(&type);
2624     if (ret) {
2625         return ret;
2626     }
2627 
2628     if (domain == PF_NETLINK && !(
2629 #ifdef CONFIG_RTNETLINK
2630          protocol == NETLINK_ROUTE ||
2631 #endif
2632          protocol == NETLINK_KOBJECT_UEVENT ||
2633          protocol == NETLINK_AUDIT)) {
2634         return -EPFNOSUPPORT;
2635     }
2636 
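    /* For packet sockets the protocol is the value the guest produced
     * with its own htons(); swap it so the host kernel sees the network
     * byte order value it expects.
     */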
2637     if (domain == AF_PACKET ||
2638         (domain == AF_INET && type == SOCK_PACKET)) {
2639         protocol = tswap16(protocol);
2640     }
2641 
2642     ret = get_errno(socket(domain, type, protocol));
2643     if (ret >= 0) {
2644         ret = sock_flags_fixup(ret, target_type);
2645         if (type == SOCK_PACKET) {
2646             /* Handle an obsolete case: if the socket type is
2647              * SOCK_PACKET, bind by name.
2648              */
2649             fd_trans_register(ret, &target_packet_trans);
2650         } else if (domain == PF_NETLINK) {
2651             switch (protocol) {
2652 #ifdef CONFIG_RTNETLINK
2653             case NETLINK_ROUTE:
2654                 fd_trans_register(ret, &target_netlink_route_trans);
2655                 break;
2656 #endif
2657             case NETLINK_KOBJECT_UEVENT:
2658                 /* nothing to do: messages are strings */
2659                 break;
2660             case NETLINK_AUDIT:
2661                 fd_trans_register(ret, &target_netlink_audit_trans);
2662                 break;
2663             default:
2664                 g_assert_not_reached();
2665             }
2666         }
2667     }
2668     return ret;
2669 }
2670 
2671 /* do_bind() Must return target values and target errnos. */
2672 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2673                         socklen_t addrlen)
2674 {
2675     void *addr;
2676     abi_long ret;
2677 
2678     if ((int)addrlen < 0) {
2679         return -TARGET_EINVAL;
2680     }
2681 
2682     addr = alloca(addrlen+1);
2683 
2684     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2685     if (ret)
2686         return ret;
2687 
2688     return get_errno(bind(sockfd, addr, addrlen));
2689 }
2690 
2691 /* do_connect() Must return target values and target errnos. */
2692 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2693                            socklen_t addrlen)
2694 {
2695     void *addr;
2696     abi_long ret;
2697 
2698     if ((int)addrlen < 0) {
2699         return -TARGET_EINVAL;
2700     }
2701 
2702     addr = alloca(addrlen+1);
2703 
2704     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2705     if (ret)
2706         return ret;
2707 
2708     return get_errno(safe_connect(sockfd, addr, addrlen));
2709 }
2710 
2711 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2712 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2713                                       int flags, int send)
2714 {
2715     abi_long ret, len;
2716     struct msghdr msg;
2717     abi_ulong count;
2718     struct iovec *vec;
2719     abi_ulong target_vec;
2720 
2721     if (msgp->msg_name) {
2722         msg.msg_namelen = tswap32(msgp->msg_namelen);
2723         msg.msg_name = alloca(msg.msg_namelen+1);
2724         ret = target_to_host_sockaddr(fd, msg.msg_name,
2725                                       tswapal(msgp->msg_name),
2726                                       msg.msg_namelen);
2727         if (ret == -TARGET_EFAULT) {
2728             /* For connected sockets msg_name and msg_namelen must
2729              * be ignored, so returning EFAULT immediately is wrong.
2730              * Instead, pass a bad msg_name to the host kernel, and
2731              * let it decide whether to return EFAULT or not.
2732              */
2733             msg.msg_name = (void *)-1;
2734         } else if (ret) {
2735             goto out2;
2736         }
2737     } else {
2738         msg.msg_name = NULL;
2739         msg.msg_namelen = 0;
2740     }
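    /* Allocate twice the guest's control buffer so that host cmsgs which
     * are larger than their target counterparts still fit; see the
     * overflow comment in target_to_host_cmsg().
     */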
2741     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2742     msg.msg_control = alloca(msg.msg_controllen);
2743     memset(msg.msg_control, 0, msg.msg_controllen);
2744 
2745     msg.msg_flags = tswap32(msgp->msg_flags);
2746 
2747     count = tswapal(msgp->msg_iovlen);
2748     target_vec = tswapal(msgp->msg_iov);
2749 
2750     if (count > IOV_MAX) {
2751         /* sendmsg/recvmsg return a different errno for this condition than
2752          * readv/writev, so we must catch it here before lock_iovec() does.
2753          */
2754         ret = -TARGET_EMSGSIZE;
2755         goto out2;
2756     }
2757 
2758     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2759                      target_vec, count, send);
2760     if (vec == NULL) {
2761         ret = -host_to_target_errno(errno);
2762         goto out2;
2763     }
2764     msg.msg_iovlen = count;
2765     msg.msg_iov = vec;
2766 
2767     if (send) {
2768         if (fd_trans_target_to_host_data(fd)) {
2769             void *host_msg;
2770 
2771             host_msg = g_malloc(msg.msg_iov->iov_len);
2772             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2773             ret = fd_trans_target_to_host_data(fd)(host_msg,
2774                                                    msg.msg_iov->iov_len);
2775             if (ret >= 0) {
2776                 msg.msg_iov->iov_base = host_msg;
2777                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2778             }
2779             g_free(host_msg);
2780         } else {
2781             ret = target_to_host_cmsg(&msg, msgp);
2782             if (ret == 0) {
2783                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2784             }
2785         }
2786     } else {
2787         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2788         if (!is_error(ret)) {
2789             len = ret;
2790             if (fd_trans_host_to_target_data(fd)) {
2791                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2792                                                MIN(msg.msg_iov->iov_len, len));
2793             } else {
2794                 ret = host_to_target_cmsg(msgp, &msg);
2795             }
2796             if (!is_error(ret)) {
2797                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2798                 msgp->msg_flags = tswap32(msg.msg_flags);
2799                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2800                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2801                                     msg.msg_name, msg.msg_namelen);
2802                     if (ret) {
2803                         goto out;
2804                     }
2805                 }
2806 
2807                 ret = len;
2808             }
2809         }
2810     }
2811 
2812 out:
2813     unlock_iovec(vec, target_vec, count, !send);
2814 out2:
2815     return ret;
2816 }
2817 
2818 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2819                                int flags, int send)
2820 {
2821     abi_long ret;
2822     struct target_msghdr *msgp;
2823 
2824     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2825                           msgp,
2826                           target_msg,
2827                           send ? 1 : 0)) {
2828         return -TARGET_EFAULT;
2829     }
2830     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2831     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2832     return ret;
2833 }
2834 
2835 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2836  * so it might not have this *mmsg-specific flag either.
2837  */
2838 #ifndef MSG_WAITFORONE
2839 #define MSG_WAITFORONE 0x10000
2840 #endif
2841 
2842 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2843                                 unsigned int vlen, unsigned int flags,
2844                                 int send)
2845 {
2846     struct target_mmsghdr *mmsgp;
2847     abi_long ret = 0;
2848     int i;
2849 
2850     if (vlen > UIO_MAXIOV) {
2851         vlen = UIO_MAXIOV;
2852     }
2853 
2854     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2855     if (!mmsgp) {
2856         return -TARGET_EFAULT;
2857     }
2858 
2859     for (i = 0; i < vlen; i++) {
2860         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2861         if (is_error(ret)) {
2862             break;
2863         }
2864         mmsgp[i].msg_len = tswap32(ret);
2865         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2866         if (flags & MSG_WAITFORONE) {
2867             flags |= MSG_DONTWAIT;
2868         }
2869     }
2870 
2871     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2872 
2873     /* Return number of datagrams sent if we sent any at all;
2874      * otherwise return the error.
2875      */
2876     if (i) {
2877         return i;
2878     }
2879     return ret;
2880 }
2881 
2882 /* do_accept4() Must return target values and target errnos. */
2883 static abi_long do_accept4(int fd, abi_ulong target_addr,
2884                            abi_ulong target_addrlen_addr, int flags)
2885 {
2886     socklen_t addrlen, ret_addrlen;
2887     void *addr;
2888     abi_long ret;
2889     int host_flags;
2890 
2891     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2892 
2893     if (target_addr == 0) {
2894         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2895     }
2896 
2897     /* Linux returns EINVAL if the addrlen pointer is invalid */
2898     if (get_user_u32(addrlen, target_addrlen_addr))
2899         return -TARGET_EINVAL;
2900 
2901     if ((int)addrlen < 0) {
2902         return -TARGET_EINVAL;
2903     }
2904 
2905     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2906         return -TARGET_EINVAL;
2907 
2908     addr = alloca(addrlen);
2909 
2910     ret_addrlen = addrlen;
2911     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2912     if (!is_error(ret)) {
2913         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2914         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2915             ret = -TARGET_EFAULT;
2916         }
2917     }
2918     return ret;
2919 }
2920 
2921 /* do_getpeername() Must return target values and target errnos. */
2922 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2923                                abi_ulong target_addrlen_addr)
2924 {
2925     socklen_t addrlen, ret_addrlen;
2926     void *addr;
2927     abi_long ret;
2928 
2929     if (get_user_u32(addrlen, target_addrlen_addr))
2930         return -TARGET_EFAULT;
2931 
2932     if ((int)addrlen < 0) {
2933         return -TARGET_EINVAL;
2934     }
2935 
2936     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2937         return -TARGET_EFAULT;
2938 
2939     addr = alloca(addrlen);
2940 
2941     ret_addrlen = addrlen;
2942     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2943     if (!is_error(ret)) {
2944         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2945         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2946             ret = -TARGET_EFAULT;
2947         }
2948     }
2949     return ret;
2950 }
2951 
2952 /* do_getsockname() Must return target values and target errnos. */
2953 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2954                                abi_ulong target_addrlen_addr)
2955 {
2956     socklen_t addrlen, ret_addrlen;
2957     void *addr;
2958     abi_long ret;
2959 
2960     if (get_user_u32(addrlen, target_addrlen_addr))
2961         return -TARGET_EFAULT;
2962 
2963     if ((int)addrlen < 0) {
2964         return -TARGET_EINVAL;
2965     }
2966 
2967     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2968         return -TARGET_EFAULT;
2969 
2970     addr = alloca(addrlen);
2971 
2972     ret_addrlen = addrlen;
2973     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2974     if (!is_error(ret)) {
2975         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2976         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2977             ret = -TARGET_EFAULT;
2978         }
2979     }
2980     return ret;
2981 }
2982 
2983 /* do_socketpair() Must return target values and target errnos. */
2984 static abi_long do_socketpair(int domain, int type, int protocol,
2985                               abi_ulong target_tab_addr)
2986 {
2987     int tab[2];
2988     abi_long ret;
2989 
2990     target_to_host_sock_type(&type);
2991 
2992     ret = get_errno(socketpair(domain, type, protocol, tab));
2993     if (!is_error(ret)) {
2994         if (put_user_s32(tab[0], target_tab_addr)
2995             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2996             ret = -TARGET_EFAULT;
2997     }
2998     return ret;
2999 }
3000 
3001 /* do_sendto() Must return target values and target errnos. */
3002 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3003                           abi_ulong target_addr, socklen_t addrlen)
3004 {
3005     void *addr;
3006     void *host_msg;
3007     void *copy_msg = NULL;
3008     abi_long ret;
3009 
3010     if ((int)addrlen < 0) {
3011         return -TARGET_EINVAL;
3012     }
3013 
3014     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3015     if (!host_msg)
3016         return -TARGET_EFAULT;
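    /* If this fd has a data translator registered (e.g. a netlink
     * socket), run the payload through it on a private copy so the
     * guest's buffer is left untouched.
     */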
3017     if (fd_trans_target_to_host_data(fd)) {
3018         copy_msg = host_msg;
3019         host_msg = g_malloc(len);
3020         memcpy(host_msg, copy_msg, len);
3021         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3022         if (ret < 0) {
3023             goto fail;
3024         }
3025     }
3026     if (target_addr) {
3027         addr = alloca(addrlen+1);
3028         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3029         if (ret) {
3030             goto fail;
3031         }
3032         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3033     } else {
3034         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3035     }
3036 fail:
3037     if (copy_msg) {
3038         g_free(host_msg);
3039         host_msg = copy_msg;
3040     }
3041     unlock_user(host_msg, msg, 0);
3042     return ret;
3043 }
3044 
3045 /* do_recvfrom() Must return target values and target errnos. */
3046 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3047                             abi_ulong target_addr,
3048                             abi_ulong target_addrlen)
3049 {
3050     socklen_t addrlen, ret_addrlen;
3051     void *addr;
3052     void *host_msg;
3053     abi_long ret;
3054 
3055     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3056     if (!host_msg)
3057         return -TARGET_EFAULT;
3058     if (target_addr) {
3059         if (get_user_u32(addrlen, target_addrlen)) {
3060             ret = -TARGET_EFAULT;
3061             goto fail;
3062         }
3063         if ((int)addrlen < 0) {
3064             ret = -TARGET_EINVAL;
3065             goto fail;
3066         }
3067         addr = alloca(addrlen);
3068         ret_addrlen = addrlen;
3069         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3070                                       addr, &ret_addrlen));
3071     } else {
3072         addr = NULL; /* To keep compiler quiet.  */
3073         addrlen = 0; /* To keep compiler quiet.  */
3074         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3075     }
3076     if (!is_error(ret)) {
3077         if (fd_trans_host_to_target_data(fd)) {
3078             abi_long trans;
3079             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3080             if (is_error(trans)) {
3081                 ret = trans;
3082                 goto fail;
3083             }
3084         }
3085         if (target_addr) {
3086             host_to_target_sockaddr(target_addr, addr,
3087                                     MIN(addrlen, ret_addrlen));
3088             if (put_user_u32(ret_addrlen, target_addrlen)) {
3089                 ret = -TARGET_EFAULT;
3090                 goto fail;
3091             }
3092         }
3093         unlock_user(host_msg, msg, len);
3094     } else {
3095 fail:
3096         unlock_user(host_msg, msg, 0);
3097     }
3098     return ret;
3099 }
3100 
3101 #ifdef TARGET_NR_socketcall
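/*
 * On targets whose kernel ABI multiplexes the socket calls (32-bit x86,
 * for example), every socket operation arrives as a single socketcall
 * syscall: 'num' selects the operation and 'vptr' points at an array of
 * abi_long arguments in guest memory.  The nargs[] table below records
 * how many arguments each operation takes so they can be fetched before
 * dispatching to the individual do_*() helpers; e.g. a guest
 * connect(fd, addr, addrlen) shows up as num == TARGET_SYS_CONNECT with
 * vptr pointing at { fd, addr, addrlen }.
 */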
3102 /* do_socketcall() must return target values and target errnos. */
3103 static abi_long do_socketcall(int num, abi_ulong vptr)
3104 {
3105     static const unsigned nargs[] = { /* number of arguments per operation */
3106         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3107         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3108         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3109         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3110         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3111         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3112         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3113         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3114         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3115         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3116         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3117         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3118         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3119         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3120         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3121         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3122         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3123         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3124         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3125         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3126     };
3127     abi_long a[6]; /* max 6 args */
3128     unsigned i;
3129 
3130     /* check the range of the first argument num */
3131     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3132     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3133         return -TARGET_EINVAL;
3134     }
3135     /* ensure we have space for args */
3136     if (nargs[num] > ARRAY_SIZE(a)) {
3137         return -TARGET_EINVAL;
3138     }
3139     /* collect the arguments in a[] according to nargs[] */
3140     for (i = 0; i < nargs[num]; ++i) {
3141         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3142             return -TARGET_EFAULT;
3143         }
3144     }
3145     /* now that we have the args, invoke the appropriate underlying function */
3146     switch (num) {
3147     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3148         return do_socket(a[0], a[1], a[2]);
3149     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3150         return do_bind(a[0], a[1], a[2]);
3151     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3152         return do_connect(a[0], a[1], a[2]);
3153     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3154         return get_errno(listen(a[0], a[1]));
3155     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3156         return do_accept4(a[0], a[1], a[2], 0);
3157     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3158         return do_getsockname(a[0], a[1], a[2]);
3159     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3160         return do_getpeername(a[0], a[1], a[2]);
3161     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3162         return do_socketpair(a[0], a[1], a[2], a[3]);
3163     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3164         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3165     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3166         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3167     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3168         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3169     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3170         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3171     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3172         return get_errno(shutdown(a[0], a[1]));
3173     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3174         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3175     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3176         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3177     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3178         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3179     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3180         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3181     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3182         return do_accept4(a[0], a[1], a[2], a[3]);
3183     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3184         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3185     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3186         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3187     default:
3188         gemu_log("Unsupported socketcall: %d\n", num);
3189         return -TARGET_EINVAL;
3190     }
3191 }
3192 #endif
3193 
3194 #define N_SHM_REGIONS	32
3195 
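/*
 * Bookkeeping for guest shmat() attachments: do_shmat() records the guest
 * address and size of every segment it maps so that do_shmdt() can later
 * clear the corresponding guest page flags again.  A small fixed-size
 * table is sufficient for the handful of segments a guest typically uses.
 */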
3196 static struct shm_region {
3197     abi_ulong start;
3198     abi_ulong size;
3199     bool in_use;
3200 } shm_regions[N_SHM_REGIONS];
3201 
3202 #ifndef TARGET_SEMID64_DS
3203 /* asm-generic version of this struct */
3204 struct target_semid64_ds
3205 {
3206   struct target_ipc_perm sem_perm;
3207   abi_ulong sem_otime;
3208 #if TARGET_ABI_BITS == 32
3209   abi_ulong __unused1;
3210 #endif
3211   abi_ulong sem_ctime;
3212 #if TARGET_ABI_BITS == 32
3213   abi_ulong __unused2;
3214 #endif
3215   abi_ulong sem_nsems;
3216   abi_ulong __unused3;
3217   abi_ulong __unused4;
3218 };
3219 #endif
3220 
3221 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3222                                                abi_ulong target_addr)
3223 {
3224     struct target_ipc_perm *target_ip;
3225     struct target_semid64_ds *target_sd;
3226 
3227     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3228         return -TARGET_EFAULT;
3229     target_ip = &(target_sd->sem_perm);
3230     host_ip->__key = tswap32(target_ip->__key);
3231     host_ip->uid = tswap32(target_ip->uid);
3232     host_ip->gid = tswap32(target_ip->gid);
3233     host_ip->cuid = tswap32(target_ip->cuid);
3234     host_ip->cgid = tswap32(target_ip->cgid);
3235 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3236     host_ip->mode = tswap32(target_ip->mode);
3237 #else
3238     host_ip->mode = tswap16(target_ip->mode);
3239 #endif
3240 #if defined(TARGET_PPC)
3241     host_ip->__seq = tswap32(target_ip->__seq);
3242 #else
3243     host_ip->__seq = tswap16(target_ip->__seq);
3244 #endif
3245     unlock_user_struct(target_sd, target_addr, 0);
3246     return 0;
3247 }
3248 
3249 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3250                                                struct ipc_perm *host_ip)
3251 {
3252     struct target_ipc_perm *target_ip;
3253     struct target_semid64_ds *target_sd;
3254 
3255     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3256         return -TARGET_EFAULT;
3257     target_ip = &(target_sd->sem_perm);
3258     target_ip->__key = tswap32(host_ip->__key);
3259     target_ip->uid = tswap32(host_ip->uid);
3260     target_ip->gid = tswap32(host_ip->gid);
3261     target_ip->cuid = tswap32(host_ip->cuid);
3262     target_ip->cgid = tswap32(host_ip->cgid);
3263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3264     target_ip->mode = tswap32(host_ip->mode);
3265 #else
3266     target_ip->mode = tswap16(host_ip->mode);
3267 #endif
3268 #if defined(TARGET_PPC)
3269     target_ip->__seq = tswap32(host_ip->__seq);
3270 #else
3271     target_ip->__seq = tswap16(host_ip->__seq);
3272 #endif
3273     unlock_user_struct(target_sd, target_addr, 1);
3274     return 0;
3275 }
3276 
3277 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3278                                                abi_ulong target_addr)
3279 {
3280     struct target_semid64_ds *target_sd;
3281 
3282     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3283         return -TARGET_EFAULT;
3284     if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3285         return -TARGET_EFAULT;
3286     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3287     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3288     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3289     unlock_user_struct(target_sd, target_addr, 0);
3290     return 0;
3291 }
3292 
3293 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3294                                                struct semid_ds *host_sd)
3295 {
3296     struct target_semid64_ds *target_sd;
3297 
3298     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3299         return -TARGET_EFAULT;
3300     if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3301         return -TARGET_EFAULT;
3302     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3303     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3304     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3305     unlock_user_struct(target_sd, target_addr, 1);
3306     return 0;
3307 }
3308 
3309 struct target_seminfo {
3310     int semmap;
3311     int semmni;
3312     int semmns;
3313     int semmnu;
3314     int semmsl;
3315     int semopm;
3316     int semume;
3317     int semusz;
3318     int semvmx;
3319     int semaem;
3320 };
3321 
3322 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3323                                               struct seminfo *host_seminfo)
3324 {
3325     struct target_seminfo *target_seminfo;
3326     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3327         return -TARGET_EFAULT;
3328     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3329     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3330     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3331     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3332     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3333     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3334     __put_user(host_seminfo->semume, &target_seminfo->semume);
3335     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3336     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3337     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3338     unlock_user_struct(target_seminfo, target_addr, 1);
3339     return 0;
3340 }
3341 
3342 union semun {
3343     int val;
3344     struct semid_ds *buf;
3345     unsigned short *array;
3346     struct seminfo *__buf;
3347 };
3348 
3349 union target_semun {
3350     int val;
3351     abi_ulong buf;
3352     abi_ulong array;
3353     abi_ulong __buf;
3354 };
3355 
3356 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3357                                                abi_ulong target_addr)
3358 {
3359     int nsems;
3360     unsigned short *array;
3361     union semun semun;
3362     struct semid_ds semid_ds;
3363     int i, ret;
3364 
3365     semun.buf = &semid_ds;
3366 
3367     ret = semctl(semid, 0, IPC_STAT, semun);
3368     if (ret == -1)
3369         return get_errno(ret);
3370 
3371     nsems = semid_ds.sem_nsems;
3372 
3373     *host_array = g_try_new(unsigned short, nsems);
3374     if (!*host_array) {
3375         return -TARGET_ENOMEM;
3376     }
3377     array = lock_user(VERIFY_READ, target_addr,
3378                       nsems*sizeof(unsigned short), 1);
3379     if (!array) {
3380         g_free(*host_array);
3381         return -TARGET_EFAULT;
3382     }
3383 
3384     for (i = 0; i < nsems; i++) {
3385         __get_user((*host_array)[i], &array[i]);
3386     }
3387     unlock_user(array, target_addr, 0);
3388 
3389     return 0;
3390 }
3391 
3392 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3393                                                unsigned short **host_array)
3394 {
3395     int nsems;
3396     unsigned short *array;
3397     union semun semun;
3398     struct semid_ds semid_ds;
3399     int i, ret;
3400 
3401     semun.buf = &semid_ds;
3402 
3403     ret = semctl(semid, 0, IPC_STAT, semun);
3404     if (ret == -1)
3405         return get_errno(ret);
3406 
3407     nsems = semid_ds.sem_nsems;
3408 
3409     array = lock_user(VERIFY_WRITE, target_addr,
3410                       nsems*sizeof(unsigned short), 0);
3411     if (!array)
3412         return -TARGET_EFAULT;
3413 
3414     for (i = 0; i < nsems; i++) {
3415         __put_user((*host_array)[i], &array[i]);
3416     }
3417     g_free(*host_array);
3418     unlock_user(array, target_addr, 1);
3419 
3420     return 0;
3421 }
3422 
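/*
 * Worked example (illustrative only) for the GETVAL/SETVAL handling in
 * do_semctl() below: with a big-endian 64-bit guest on a little-endian
 * host, a guest SETVAL with val == 1 delivers the 8-byte semun union as
 * the value 0x0000000100000000 (val occupies the first, most significant,
 * four bytes).  Reading target_su.val directly on the host would pick up
 * the wrong half and yield 0; byteswapping the whole 8 bytes with
 * tswapal() and then the 4-byte val with tswap32() recovers 1.
 */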
3423 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3424                                  abi_ulong target_arg)
3425 {
3426     union target_semun target_su = { .buf = target_arg };
3427     union semun arg;
3428     struct semid_ds dsarg;
3429     unsigned short *array = NULL;
3430     struct seminfo seminfo;
3431     abi_long ret = -TARGET_EINVAL;
3432     abi_long err;
3433     cmd &= 0xff;
3434 
3435     switch (cmd) {
3436     case GETVAL:
3437     case SETVAL:
3438         /* In 64 bit cross-endian situations, we will erroneously pick up
3439          * the wrong half of the union for the "val" element.  To rectify
3440          * this, the entire 8-byte structure is byteswapped, followed by
3441          * a swap of the 4 byte val field. In other cases, the data is
3442          * already in proper host byte order. */
3443         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3444             target_su.buf = tswapal(target_su.buf);
3445             arg.val = tswap32(target_su.val);
3446         } else {
3447             arg.val = target_su.val;
3448         }
3449         ret = get_errno(semctl(semid, semnum, cmd, arg));
3450         break;
3451     case GETALL:
3452     case SETALL:
3453         err = target_to_host_semarray(semid, &array, target_su.array);
3454         if (err)
3455             return err;
3456         arg.array = array;
3457         ret = get_errno(semctl(semid, semnum, cmd, arg));
3458         err = host_to_target_semarray(semid, target_su.array, &array);
3459         if (err)
3460             return err;
3461         break;
3462     case IPC_STAT:
3463     case IPC_SET:
3464     case SEM_STAT:
3465         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3466         if (err)
3467             return err;
3468         arg.buf = &dsarg;
3469         ret = get_errno(semctl(semid, semnum, cmd, arg));
3470         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3471         if (err)
3472             return err;
3473         break;
3474     case IPC_INFO:
3475     case SEM_INFO:
3476         arg.__buf = &seminfo;
3477         ret = get_errno(semctl(semid, semnum, cmd, arg));
3478         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3479         if (err)
3480             return err;
3481         break;
3482     case IPC_RMID:
3483     case GETPID:
3484     case GETNCNT:
3485     case GETZCNT:
3486         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3487         break;
3488     }
3489 
3490     return ret;
3491 }
3492 
3493 struct target_sembuf {
3494     unsigned short sem_num;
3495     short sem_op;
3496     short sem_flg;
3497 };
3498 
3499 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3500                                              abi_ulong target_addr,
3501                                              unsigned nsops)
3502 {
3503     struct target_sembuf *target_sembuf;
3504     int i;
3505 
3506     target_sembuf = lock_user(VERIFY_READ, target_addr,
3507                               nsops*sizeof(struct target_sembuf), 1);
3508     if (!target_sembuf)
3509         return -TARGET_EFAULT;
3510 
3511     for (i = 0; i < nsops; i++) {
3512         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3513         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3514         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3515     }
3516 
3517     unlock_user(target_sembuf, target_addr, 0);
3518 
3519     return 0;
3520 }
3521 
3522 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3523 {
3524     struct sembuf sops[nsops];
3525 
3526     if (target_to_host_sembuf(sops, ptr, nsops))
3527         return -TARGET_EFAULT;
3528 
3529     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3530 }
3531 
3532 struct target_msqid_ds
3533 {
3534     struct target_ipc_perm msg_perm;
3535     abi_ulong msg_stime;
3536 #if TARGET_ABI_BITS == 32
3537     abi_ulong __unused1;
3538 #endif
3539     abi_ulong msg_rtime;
3540 #if TARGET_ABI_BITS == 32
3541     abi_ulong __unused2;
3542 #endif
3543     abi_ulong msg_ctime;
3544 #if TARGET_ABI_BITS == 32
3545     abi_ulong __unused3;
3546 #endif
3547     abi_ulong __msg_cbytes;
3548     abi_ulong msg_qnum;
3549     abi_ulong msg_qbytes;
3550     abi_ulong msg_lspid;
3551     abi_ulong msg_lrpid;
3552     abi_ulong __unused4;
3553     abi_ulong __unused5;
3554 };
3555 
3556 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3557                                                abi_ulong target_addr)
3558 {
3559     struct target_msqid_ds *target_md;
3560 
3561     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3562         return -TARGET_EFAULT;
3563     if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
3564         return -TARGET_EFAULT;
3565     host_md->msg_stime = tswapal(target_md->msg_stime);
3566     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3567     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3568     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3569     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3570     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3571     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3572     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3573     unlock_user_struct(target_md, target_addr, 0);
3574     return 0;
3575 }
3576 
3577 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3578                                                struct msqid_ds *host_md)
3579 {
3580     struct target_msqid_ds *target_md;
3581 
3582     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3583         return -TARGET_EFAULT;
3584     if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
3585         return -TARGET_EFAULT;
3586     target_md->msg_stime = tswapal(host_md->msg_stime);
3587     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3588     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3589     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3590     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3591     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3592     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3593     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3594     unlock_user_struct(target_md, target_addr, 1);
3595     return 0;
3596 }
3597 
3598 struct target_msginfo {
3599     int msgpool;
3600     int msgmap;
3601     int msgmax;
3602     int msgmnb;
3603     int msgmni;
3604     int msgssz;
3605     int msgtql;
3606     unsigned short int msgseg;
3607 };
3608 
3609 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3610                                               struct msginfo *host_msginfo)
3611 {
3612     struct target_msginfo *target_msginfo;
3613     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3614         return -TARGET_EFAULT;
3615     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3616     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3617     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3618     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3619     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3620     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3621     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3622     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3623     unlock_user_struct(target_msginfo, target_addr, 1);
3624     return 0;
3625 }
3626 
3627 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3628 {
3629     struct msqid_ds dsarg;
3630     struct msginfo msginfo;
3631     abi_long ret = -TARGET_EINVAL;
3632 
3633     cmd &= 0xff;
3634 
3635     switch (cmd) {
3636     case IPC_STAT:
3637     case IPC_SET:
3638     case MSG_STAT:
3639         if (target_to_host_msqid_ds(&dsarg, ptr))
3640             return -TARGET_EFAULT;
3641         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3642         if (host_to_target_msqid_ds(ptr, &dsarg))
3643             return -TARGET_EFAULT;
3644         break;
3645     case IPC_RMID:
3646         ret = get_errno(msgctl(msgid, cmd, NULL));
3647         break;
3648     case IPC_INFO:
3649     case MSG_INFO:
3650         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3651         if (host_to_target_msginfo(ptr, &msginfo))
3652             return -TARGET_EFAULT;
3653         break;
3654     }
3655 
3656     return ret;
3657 }
3658 
3659 struct target_msgbuf {
3660     abi_long mtype;
3661     char mtext[1];
3662 };
3663 
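/*
 * do_msgsnd()/do_msgrcv() convert between the guest msgbuf layout (an
 * abi_long mtype followed by mtext[]) and the host layout (a host 'long'
 * mtype), bouncing the message through a host buffer of
 * msgsz + sizeof(long) bytes.
 */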
3664 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3665                                  ssize_t msgsz, int msgflg)
3666 {
3667     struct target_msgbuf *target_mb;
3668     struct msgbuf *host_mb;
3669     abi_long ret = 0;
3670 
3671     if (msgsz < 0) {
3672         return -TARGET_EINVAL;
3673     }
3674 
3675     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3676         return -TARGET_EFAULT;
3677     host_mb = g_try_malloc(msgsz + sizeof(long));
3678     if (!host_mb) {
3679         unlock_user_struct(target_mb, msgp, 0);
3680         return -TARGET_ENOMEM;
3681     }
3682     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3683     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3684     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3685     g_free(host_mb);
3686     unlock_user_struct(target_mb, msgp, 0);
3687 
3688     return ret;
3689 }
3690 
3691 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3692                                  ssize_t msgsz, abi_long msgtyp,
3693                                  int msgflg)
3694 {
3695     struct target_msgbuf *target_mb;
3696     char *target_mtext;
3697     struct msgbuf *host_mb;
3698     abi_long ret = 0;
3699 
3700     if (msgsz < 0) {
3701         return -TARGET_EINVAL;
3702     }
3703 
3704     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3705         return -TARGET_EFAULT;
3706 
3707     host_mb = g_try_malloc(msgsz + sizeof(long));
3708     if (!host_mb) {
3709         ret = -TARGET_ENOMEM;
3710         goto end;
3711     }
3712     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3713 
3714     if (ret > 0) {
3715         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3716         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3717         if (!target_mtext) {
3718             ret = -TARGET_EFAULT;
3719             goto end;
3720         }
3721         memcpy(target_mb->mtext, host_mb->mtext, ret);
3722         unlock_user(target_mtext, target_mtext_addr, ret);
3723     }
3724 
3725     target_mb->mtype = tswapal(host_mb->mtype);
3726 
3727 end:
3728     if (target_mb)
3729         unlock_user_struct(target_mb, msgp, 1);
3730     g_free(host_mb);
3731     return ret;
3732 }
3733 
3734 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3735                                                abi_ulong target_addr)
3736 {
3737     struct target_shmid_ds *target_sd;
3738 
3739     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3740         return -TARGET_EFAULT;
3741     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3742         return -TARGET_EFAULT;
3743     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3744     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3745     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3746     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3747     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3748     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3749     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3750     unlock_user_struct(target_sd, target_addr, 0);
3751     return 0;
3752 }
3753 
3754 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3755                                                struct shmid_ds *host_sd)
3756 {
3757     struct target_shmid_ds *target_sd;
3758 
3759     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3760         return -TARGET_EFAULT;
3761     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3762         return -TARGET_EFAULT;
3763     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3764     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3765     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3766     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3767     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3768     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3769     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3770     unlock_user_struct(target_sd, target_addr, 1);
3771     return 0;
3772 }
3773 
3774 struct  target_shminfo {
3775     abi_ulong shmmax;
3776     abi_ulong shmmin;
3777     abi_ulong shmmni;
3778     abi_ulong shmseg;
3779     abi_ulong shmall;
3780 };
3781 
3782 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3783                                               struct shminfo *host_shminfo)
3784 {
3785     struct target_shminfo *target_shminfo;
3786     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3787         return -TARGET_EFAULT;
3788     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3789     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3790     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3791     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3792     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3793     unlock_user_struct(target_shminfo, target_addr, 1);
3794     return 0;
3795 }
3796 
3797 struct target_shm_info {
3798     int used_ids;
3799     abi_ulong shm_tot;
3800     abi_ulong shm_rss;
3801     abi_ulong shm_swp;
3802     abi_ulong swap_attempts;
3803     abi_ulong swap_successes;
3804 };
3805 
3806 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3807                                                struct shm_info *host_shm_info)
3808 {
3809     struct target_shm_info *target_shm_info;
3810     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3811         return -TARGET_EFAULT;
3812     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3813     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3814     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3815     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3816     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3817     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3818     unlock_user_struct(target_shm_info, target_addr, 1);
3819     return 0;
3820 }
3821 
3822 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3823 {
3824     struct shmid_ds dsarg;
3825     struct shminfo shminfo;
3826     struct shm_info shm_info;
3827     abi_long ret = -TARGET_EINVAL;
3828 
3829     cmd &= 0xff;
3830 
3831     switch(cmd) {
3832     case IPC_STAT:
3833     case IPC_SET:
3834     case SHM_STAT:
3835         if (target_to_host_shmid_ds(&dsarg, buf))
3836             return -TARGET_EFAULT;
3837         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3838         if (host_to_target_shmid_ds(buf, &dsarg))
3839             return -TARGET_EFAULT;
3840         break;
3841     case IPC_INFO:
3842         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3843         if (host_to_target_shminfo(buf, &shminfo))
3844             return -TARGET_EFAULT;
3845         break;
3846     case SHM_INFO:
3847         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3848         if (host_to_target_shm_info(buf, &shm_info))
3849             return -TARGET_EFAULT;
3850         break;
3851     case IPC_RMID:
3852     case SHM_LOCK:
3853     case SHM_UNLOCK:
3854         ret = get_errno(shmctl(shmid, cmd, NULL));
3855         break;
3856     }
3857 
3858     return ret;
3859 }
3860 
3861 #ifndef TARGET_FORCE_SHMLBA
3862 /* For most architectures, SHMLBA is the same as the page size;
3863  * some architectures have larger values, in which case they should
3864  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3865  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3866  * and defining its own value for SHMLBA.
3867  *
3868  * The kernel also permits SHMLBA to be set by the architecture to a
3869  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3870  * this means that addresses are rounded to the larger size if
3871  * SHM_RND is set, but addresses not aligned to that size are not rejected
3872  * as long as they are at least page-aligned. Since the only architecture
3873  * which uses this is ia64, this code doesn't provide for that oddity.
3874  */
3875 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3876 {
3877     return TARGET_PAGE_SIZE;
3878 }
3879 #endif
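
/*
 * Illustrative sketch only (not taken from any particular port): an
 * architecture with a larger alignment requirement would instead define
 * TARGET_FORCE_SHMLBA in its target headers and provide something like
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * so that do_shmat() below rounds and validates shmaddr against that
 * value instead of the page size.
 */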
3880 
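/*
 * do_shmat(): query the segment size with IPC_STAT, apply the guest's
 * SHMLBA rounding rules, attach via the host shmat() (using
 * mmap_find_vma() to pick an address when the guest did not supply one),
 * then mark the guest pages valid and remember the mapping in
 * shm_regions[] for a later shmdt().
 */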
3881 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3882                                  int shmid, abi_ulong shmaddr, int shmflg)
3883 {
3884     abi_long raddr;
3885     void *host_raddr;
3886     struct shmid_ds shm_info;
3887     int i, ret;
3888     abi_ulong shmlba;
3889 
3890     /* find out the length of the shared memory segment */
3891     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3892     if (is_error(ret)) {
3893         /* can't get length, bail out */
3894         return ret;
3895     }
3896 
3897     shmlba = target_shmlba(cpu_env);
3898 
3899     if (shmaddr & (shmlba - 1)) {
3900         if (shmflg & SHM_RND) {
3901             shmaddr &= ~(shmlba - 1);
3902         } else {
3903             return -TARGET_EINVAL;
3904         }
3905     }
3906     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3907         return -TARGET_EINVAL;
3908     }
3909 
3910     mmap_lock();
3911 
3912     if (shmaddr)
3913         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3914     else {
3915         abi_ulong mmap_start;
3916 
3917         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3918         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3919 
3920         if (mmap_start == -1) {
3921             errno = ENOMEM;
3922             host_raddr = (void *)-1;
3923         } else
3924             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3925     }
3926 
3927     if (host_raddr == (void *)-1) {
3928         mmap_unlock();
3929         return get_errno((long)host_raddr);
3930     }
3931     raddr = h2g((unsigned long)host_raddr);
3932 
3933     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3934                    PAGE_VALID | PAGE_READ |
3935                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3936 
3937     for (i = 0; i < N_SHM_REGIONS; i++) {
3938         if (!shm_regions[i].in_use) {
3939             shm_regions[i].in_use = true;
3940             shm_regions[i].start = raddr;
3941             shm_regions[i].size = shm_info.shm_segsz;
3942             break;
3943         }
3944     }
3945 
3946     mmap_unlock();
3947     return raddr;
3948 
3949 }
3950 
3951 static inline abi_long do_shmdt(abi_ulong shmaddr)
3952 {
3953     int i;
3954     abi_long rv;
3955 
3956     mmap_lock();
3957 
3958     for (i = 0; i < N_SHM_REGIONS; ++i) {
3959         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3960             shm_regions[i].in_use = false;
3961             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3962             break;
3963         }
3964     }
3965     rv = get_errno(shmdt(g2h(shmaddr)));
3966 
3967     mmap_unlock();
3968 
3969     return rv;
3970 }
3971 
3972 #ifdef TARGET_NR_ipc
3973 /* ??? This only works with linear mappings.  */
3974 /* do_ipc() must return target values and target errnos. */
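/* The 'call' number encodes an ABI version in its top 16 bits and the IPC
 * operation in the low 16 bits.  For example, an old-ABI (version 0)
 * msgrcv() passes a pointer to a { msgp, msgtyp } kludge structure,
 * whereas the newer ABI passes msgp and msgtyp directly; both forms are
 * handled below. */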
3975 static abi_long do_ipc(CPUArchState *cpu_env,
3976                        unsigned int call, abi_long first,
3977                        abi_long second, abi_long third,
3978                        abi_long ptr, abi_long fifth)
3979 {
3980     int version;
3981     abi_long ret = 0;
3982 
3983     version = call >> 16;
3984     call &= 0xffff;
3985 
3986     switch (call) {
3987     case IPCOP_semop:
3988         ret = do_semop(first, ptr, second);
3989         break;
3990 
3991     case IPCOP_semget:
3992         ret = get_errno(semget(first, second, third));
3993         break;
3994 
3995     case IPCOP_semctl: {
3996         /* The semun argument to semctl is passed by value, so dereference the
3997          * ptr argument. */
3998         abi_ulong atptr;
3999         get_user_ual(atptr, ptr);
4000         ret = do_semctl(first, second, third, atptr);
4001         break;
4002     }
4003 
4004     case IPCOP_msgget:
4005         ret = get_errno(msgget(first, second));
4006         break;
4007 
4008     case IPCOP_msgsnd:
4009         ret = do_msgsnd(first, ptr, second, third);
4010         break;
4011 
4012     case IPCOP_msgctl:
4013         ret = do_msgctl(first, second, ptr);
4014         break;
4015 
4016     case IPCOP_msgrcv:
4017         switch (version) {
4018         case 0:
4019             {
4020                 struct target_ipc_kludge {
4021                     abi_long msgp;
4022                     abi_long msgtyp;
4023                 } *tmp;
4024 
4025                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4026                     ret = -TARGET_EFAULT;
4027                     break;
4028                 }
4029 
4030                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4031 
4032                 unlock_user_struct(tmp, ptr, 0);
4033                 break;
4034             }
4035         default:
4036             ret = do_msgrcv(first, ptr, second, fifth, third);
4037         }
4038         break;
4039 
4040     case IPCOP_shmat:
4041         switch (version) {
4042         default:
4043         {
4044             abi_ulong raddr;
4045             raddr = do_shmat(cpu_env, first, ptr, second);
4046             if (is_error(raddr))
4047                 return get_errno(raddr);
4048             if (put_user_ual(raddr, third))
4049                 return -TARGET_EFAULT;
4050             break;
4051         }
4052         case 1:
4053             ret = -TARGET_EINVAL;
4054             break;
4055         }
4056         break;
4057     case IPCOP_shmdt:
4058         ret = do_shmdt(ptr);
4059         break;
4060 
4061     case IPCOP_shmget:
4062         /* IPC_* flag values are the same on all linux platforms */
4063         ret = get_errno(shmget(first, second, third));
4064         break;
4065 
4066     /* IPC_* and SHM_* command values are the same on all linux platforms */
4067     case IPCOP_shmctl:
4068         ret = do_shmctl(first, second, ptr);
4069         break;
4070     default:
4071         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4072         ret = -TARGET_ENOSYS;
4073         break;
4074     }
4075     return ret;
4076 }
4077 #endif
4078 
4079 /* kernel structure types definitions */
4080 
4081 #define STRUCT(name, ...) STRUCT_ ## name,
4082 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4083 enum {
4084 #include "syscall_types.h"
4085 STRUCT_MAX
4086 };
4087 #undef STRUCT
4088 #undef STRUCT_SPECIAL
4089 
4090 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4091 #define STRUCT_SPECIAL(name)
4092 #include "syscall_types.h"
4093 #undef STRUCT
4094 #undef STRUCT_SPECIAL
4095 
4096 typedef struct IOCTLEntry IOCTLEntry;
4097 
4098 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4099                              int fd, int cmd, abi_long arg);
4100 
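/*
 * One table entry per supported ioctl: target_cmd and host_cmd are the
 * guest and host request numbers, access gives the copy direction(s)
 * (IOC_R/IOC_W below), arg_type is a thunk description of the argument
 * structure, and do_ioctl, when non-NULL, is a custom handler for
 * requests that need more than a plain structure conversion.
 */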
4101 struct IOCTLEntry {
4102     int target_cmd;
4103     unsigned int host_cmd;
4104     const char *name;
4105     int access;
4106     do_ioctl_fn *do_ioctl;
4107     const argtype arg_type[5];
4108 };
4109 
4110 #define IOC_R 0x0001
4111 #define IOC_W 0x0002
4112 #define IOC_RW (IOC_R | IOC_W)
4113 
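/*
 * Size of the fixed scratch buffer (buf_temp) used for converting ioctl
 * argument structures; handlers whose data can exceed it (fiemap, ifconf,
 * dm, ...) allocate a larger temporary buffer themselves.
 */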
4114 #define MAX_STRUCT_SIZE 4096
4115 
4116 #ifdef CONFIG_FIEMAP
4117 /* So fiemap access checks don't overflow on 32 bit systems.
4118  * This is very slightly smaller than the limit imposed by
4119  * the underlying kernel.
4120  */
4121 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4122                             / sizeof(struct fiemap_extent))
4123 
4124 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4125                                        int fd, int cmd, abi_long arg)
4126 {
4127     /* The parameter for this ioctl is a struct fiemap followed
4128      * by an array of struct fiemap_extent whose size is set
4129      * in fiemap->fm_extent_count. The array is filled in by the
4130      * ioctl.
4131      */
4132     int target_size_in, target_size_out;
4133     struct fiemap *fm;
4134     const argtype *arg_type = ie->arg_type;
4135     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4136     void *argptr, *p;
4137     abi_long ret;
4138     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4139     uint32_t outbufsz;
4140     int free_fm = 0;
4141 
4142     assert(arg_type[0] == TYPE_PTR);
4143     assert(ie->access == IOC_RW);
4144     arg_type++;
4145     target_size_in = thunk_type_size(arg_type, 0);
4146     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4147     if (!argptr) {
4148         return -TARGET_EFAULT;
4149     }
4150     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4151     unlock_user(argptr, arg, 0);
4152     fm = (struct fiemap *)buf_temp;
4153     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4154         return -TARGET_EINVAL;
4155     }
4156 
4157     outbufsz = sizeof (*fm) +
4158         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4159 
4160     if (outbufsz > MAX_STRUCT_SIZE) {
4161         /* We can't fit all the extents into the fixed size buffer.
4162          * Allocate one that is large enough and use it instead.
4163          */
4164         fm = g_try_malloc(outbufsz);
4165         if (!fm) {
4166             return -TARGET_ENOMEM;
4167         }
4168         memcpy(fm, buf_temp, sizeof(struct fiemap));
4169         free_fm = 1;
4170     }
4171     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4172     if (!is_error(ret)) {
4173         target_size_out = target_size_in;
4174         /* An extent_count of 0 means we were only counting the extents
4175          * so there are no structs to copy
4176          */
4177         if (fm->fm_extent_count != 0) {
4178             target_size_out += fm->fm_mapped_extents * extent_size;
4179         }
4180         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4181         if (!argptr) {
4182             ret = -TARGET_EFAULT;
4183         } else {
4184             /* Convert the struct fiemap */
4185             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4186             if (fm->fm_extent_count != 0) {
4187                 p = argptr + target_size_in;
4188                 /* ...and then all the struct fiemap_extents */
4189                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4190                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4191                                   THUNK_TARGET);
4192                     p += extent_size;
4193                 }
4194             }
4195             unlock_user(argptr, arg, target_size_out);
4196         }
4197     }
4198     if (free_fm) {
4199         g_free(fm);
4200     }
4201     return ret;
4202 }
4203 #endif
4204 
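/*
 * SIOCGIFCONF: the target's struct ifreq may differ in size and layout
 * from the host's, so ifc_len is rescaled between target and host units
 * and each ifreq element is converted individually when the result is
 * copied back to the guest.
 */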
4205 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4206                                 int fd, int cmd, abi_long arg)
4207 {
4208     const argtype *arg_type = ie->arg_type;
4209     int target_size;
4210     void *argptr;
4211     int ret;
4212     struct ifconf *host_ifconf;
4213     uint32_t outbufsz;
4214     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4215     int target_ifreq_size;
4216     int nb_ifreq;
4217     int free_buf = 0;
4218     int i;
4219     int target_ifc_len;
4220     abi_long target_ifc_buf;
4221     int host_ifc_len;
4222     char *host_ifc_buf;
4223 
4224     assert(arg_type[0] == TYPE_PTR);
4225     assert(ie->access == IOC_RW);
4226 
4227     arg_type++;
4228     target_size = thunk_type_size(arg_type, 0);
4229 
4230     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4231     if (!argptr)
4232         return -TARGET_EFAULT;
4233     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4234     unlock_user(argptr, arg, 0);
4235 
4236     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4237     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4238     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4239 
4240     if (target_ifc_buf != 0) {
4241         target_ifc_len = host_ifconf->ifc_len;
4242         nb_ifreq = target_ifc_len / target_ifreq_size;
4243         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4244 
4245         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4246         if (outbufsz > MAX_STRUCT_SIZE) {
4247             /*
4248              * We can't fit all the ifreq entries into the fixed size buffer.
4249              * Allocate one that is large enough and use it instead.
4250              */
4251             host_ifconf = malloc(outbufsz);
4252             if (!host_ifconf) {
4253                 return -TARGET_ENOMEM;
4254             }
4255             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4256             free_buf = 1;
4257         }
4258         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4259 
4260         host_ifconf->ifc_len = host_ifc_len;
4261     } else {
4262       host_ifc_buf = NULL;
4263     }
4264     host_ifconf->ifc_buf = host_ifc_buf;
4265 
4266     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4267     if (!is_error(ret)) {
4268         /* convert host ifc_len to target ifc_len */
4269 
4270         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4271         target_ifc_len = nb_ifreq * target_ifreq_size;
4272         host_ifconf->ifc_len = target_ifc_len;
4273 
4274         /* restore target ifc_buf */
4275 
4276         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4277 
4278         /* copy struct ifconf to target user */
4279 
4280         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4281         if (!argptr)
4282             return -TARGET_EFAULT;
4283         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4284         unlock_user(argptr, arg, target_size);
4285 
4286         if (target_ifc_buf != 0) {
4287             /* copy ifreq[] to target user */
4288             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4289             for (i = 0; i < nb_ifreq ; i++) {
4290                 thunk_convert(argptr + i * target_ifreq_size,
4291                               host_ifc_buf + i * sizeof(struct ifreq),
4292                               ifreq_arg_type, THUNK_TARGET);
4293             }
4294             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4295         }
4296     }
4297 
4298     if (free_buf) {
4299         free(host_ifconf);
4300     }
4301 
4302     return ret;
4303 }
4304 
4305 #if defined(CONFIG_USBFS)
4306 #if HOST_LONG_BITS > 64
4307 #error USBDEVFS thunks do not support >64 bit hosts yet.
4308 #endif
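/*
 * Host-side bookkeeping for one asynchronous USBDEVFS_SUBMITURB: the
 * kernel identifies a URB by the host pointer we hand it, so each guest
 * URB gets a live_urb holding the host struct usbdevfs_urb plus the guest
 * addresses needed to write results back and to find the URB again (via
 * the hash table below) on REAPURB/DISCARDURB.
 */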
4309 struct live_urb {
4310     uint64_t target_urb_adr;
4311     uint64_t target_buf_adr;
4312     char *target_buf_ptr;
4313     struct usbdevfs_urb host_urb;
4314 };
4315 
4316 static GHashTable *usbdevfs_urb_hashtable(void)
4317 {
4318     static GHashTable *urb_hashtable;
4319 
4320     if (!urb_hashtable) {
4321         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4322     }
4323     return urb_hashtable;
4324 }
4325 
4326 static void urb_hashtable_insert(struct live_urb *urb)
4327 {
4328     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4329     g_hash_table_insert(urb_hashtable, urb, urb);
4330 }
4331 
4332 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4333 {
4334     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4335     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4336 }
4337 
4338 static void urb_hashtable_remove(struct live_urb *urb)
4339 {
4340     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4341     g_hash_table_remove(urb_hashtable, urb);
4342 }
4343 
4344 static abi_long
4345 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4346                           int fd, int cmd, abi_long arg)
4347 {
4348     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4349     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4350     struct live_urb *lurb;
4351     void *argptr;
4352     uint64_t hurb;
4353     int target_size;
4354     uintptr_t target_urb_adr;
4355     abi_long ret;
4356 
4357     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4358 
4359     memset(buf_temp, 0, sizeof(uint64_t));
4360     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4361     if (is_error(ret)) {
4362         return ret;
4363     }
4364 
4365     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4366     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4367     if (!lurb->target_urb_adr) {
4368         return -TARGET_EFAULT;
4369     }
4370     urb_hashtable_remove(lurb);
4371     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4372         lurb->host_urb.buffer_length);
4373     lurb->target_buf_ptr = NULL;
4374 
4375     /* restore the guest buffer pointer */
4376     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4377 
4378     /* update the guest urb struct */
4379     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4380     if (!argptr) {
4381         g_free(lurb);
4382         return -TARGET_EFAULT;
4383     }
4384     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4385     unlock_user(argptr, lurb->target_urb_adr, target_size);
4386 
4387     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4388     /* write back the urb handle */
4389     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4390     if (!argptr) {
4391         g_free(lurb);
4392         return -TARGET_EFAULT;
4393     }
4394 
4395     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4396     target_urb_adr = lurb->target_urb_adr;
4397     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4398     unlock_user(argptr, arg, target_size);
4399 
4400     g_free(lurb);
4401     return ret;
4402 }
4403 
4404 static abi_long
4405 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4406                              uint8_t *buf_temp __attribute__((unused)),
4407                              int fd, int cmd, abi_long arg)
4408 {
4409     struct live_urb *lurb;
4410 
4411     /* map target address back to host URB with metadata. */
4412     lurb = urb_hashtable_lookup(arg);
4413     if (!lurb) {
4414         return -TARGET_EFAULT;
4415     }
4416     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4417 }
4418 
4419 static abi_long
4420 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4421                             int fd, int cmd, abi_long arg)
4422 {
4423     const argtype *arg_type = ie->arg_type;
4424     int target_size;
4425     abi_long ret;
4426     void *argptr;
4427     int rw_dir;
4428     struct live_urb *lurb;
4429 
4430     /*
4431      * Each submitted URB needs to map to a unique ID for the
4432      * kernel, and that unique ID needs to be a pointer to
4433      * host memory.  Hence, we need to malloc for each URB.
4434      * Isochronous transfers have a variable length struct.
4435      */
4436     arg_type++;
4437     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4438 
4439     /* construct host copy of urb and metadata */
4440     lurb = g_try_malloc0(sizeof(struct live_urb));
4441     if (!lurb) {
4442         return -TARGET_ENOMEM;
4443     }
4444 
4445     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4446     if (!argptr) {
4447         g_free(lurb);
4448         return -TARGET_EFAULT;
4449     }
4450     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4451     unlock_user(argptr, arg, 0);
4452 
4453     lurb->target_urb_adr = arg;
4454     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4455 
4456     /* buffer space used depends on endpoint type so lock the entire buffer */
4457     /* control type urbs should check the buffer contents for true direction */
4458     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4459     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4460         lurb->host_urb.buffer_length, 1);
4461     if (lurb->target_buf_ptr == NULL) {
4462         g_free(lurb);
4463         return -TARGET_EFAULT;
4464     }
4465 
4466     /* update buffer pointer in host copy */
4467     lurb->host_urb.buffer = lurb->target_buf_ptr;
4468 
4469     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4470     if (is_error(ret)) {
4471         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4472         g_free(lurb);
4473     } else {
4474         urb_hashtable_insert(lurb);
4475     }
4476 
4477     return ret;
4478 }
4479 #endif /* CONFIG_USBFS */
4480 
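/*
 * Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized,
 * command-specific payload starting at data_start within a buffer of
 * data_size bytes.  The handler therefore copies the control structure and
 * payload into a large enough host buffer, converts the payload according
 * to the command, issues the ioctl, and converts the results back.
 */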
4481 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4482                             int cmd, abi_long arg)
4483 {
4484     void *argptr;
4485     struct dm_ioctl *host_dm;
4486     abi_long guest_data;
4487     uint32_t guest_data_size;
4488     int target_size;
4489     const argtype *arg_type = ie->arg_type;
4490     abi_long ret;
4491     void *big_buf = NULL;
4492     char *host_data;
4493 
4494     arg_type++;
4495     target_size = thunk_type_size(arg_type, 0);
4496     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4497     if (!argptr) {
4498         ret = -TARGET_EFAULT;
4499         goto out;
4500     }
4501     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4502     unlock_user(argptr, arg, 0);
4503 
4504     /* buf_temp is too small, so fetch things into a bigger buffer */
4505     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4506     memcpy(big_buf, buf_temp, target_size);
4507     buf_temp = big_buf;
4508     host_dm = big_buf;
4509 
4510     guest_data = arg + host_dm->data_start;
4511     if ((guest_data - arg) < 0) {
4512         ret = -TARGET_EINVAL;
4513         goto out;
4514     }
4515     guest_data_size = host_dm->data_size - host_dm->data_start;
4516     host_data = (char*)host_dm + host_dm->data_start;
4517 
4518     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4519     if (!argptr) {
4520         ret = -TARGET_EFAULT;
4521         goto out;
4522     }
4523 
4524     switch (ie->host_cmd) {
4525     case DM_REMOVE_ALL:
4526     case DM_LIST_DEVICES:
4527     case DM_DEV_CREATE:
4528     case DM_DEV_REMOVE:
4529     case DM_DEV_SUSPEND:
4530     case DM_DEV_STATUS:
4531     case DM_DEV_WAIT:
4532     case DM_TABLE_STATUS:
4533     case DM_TABLE_CLEAR:
4534     case DM_TABLE_DEPS:
4535     case DM_LIST_VERSIONS:
4536         /* no input data */
4537         break;
4538     case DM_DEV_RENAME:
4539     case DM_DEV_SET_GEOMETRY:
4540         /* data contains only strings */
4541         memcpy(host_data, argptr, guest_data_size);
4542         break;
4543     case DM_TARGET_MSG:
4544         memcpy(host_data, argptr, guest_data_size);
4545         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4546         break;
4547     case DM_TABLE_LOAD:
4548     {
4549         void *gspec = argptr;
4550         void *cur_data = host_data;
4551         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4552         int spec_size = thunk_type_size(arg_type, 0);
4553         int i;
4554 
4555         for (i = 0; i < host_dm->target_count; i++) {
4556             struct dm_target_spec *spec = cur_data;
4557             uint32_t next;
4558             int slen;
4559 
4560             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4561             slen = strlen((char*)gspec + spec_size) + 1;
4562             next = spec->next;
4563             spec->next = sizeof(*spec) + slen;
4564             strcpy((char*)&spec[1], gspec + spec_size);
4565             gspec += next;
4566             cur_data += spec->next;
4567         }
4568         break;
4569     }
4570     default:
4571         ret = -TARGET_EINVAL;
4572         unlock_user(argptr, guest_data, 0);
4573         goto out;
4574     }
4575     unlock_user(argptr, guest_data, 0);
4576 
4577     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4578     if (!is_error(ret)) {
4579         guest_data = arg + host_dm->data_start;
4580         guest_data_size = host_dm->data_size - host_dm->data_start;
4581         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4582         switch (ie->host_cmd) {
4583         case DM_REMOVE_ALL:
4584         case DM_DEV_CREATE:
4585         case DM_DEV_REMOVE:
4586         case DM_DEV_RENAME:
4587         case DM_DEV_SUSPEND:
4588         case DM_DEV_STATUS:
4589         case DM_TABLE_LOAD:
4590         case DM_TABLE_CLEAR:
4591         case DM_TARGET_MSG:
4592         case DM_DEV_SET_GEOMETRY:
4593             /* no return data */
4594             break;
4595         case DM_LIST_DEVICES:
4596         {
4597             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4598             uint32_t remaining_data = guest_data_size;
4599             void *cur_data = argptr;
4600             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4601             int nl_size = 12; /* can't use thunk_size due to alignment */
4602 
4603             while (1) {
4604                 uint32_t next = nl->next;
4605                 if (next) {
4606                     nl->next = nl_size + (strlen(nl->name) + 1);
4607                 }
4608                 if (remaining_data < nl->next) {
4609                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4610                     break;
4611                 }
4612                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4613                 strcpy(cur_data + nl_size, nl->name);
4614                 cur_data += nl->next;
4615                 remaining_data -= nl->next;
4616                 if (!next) {
4617                     break;
4618                 }
4619                 nl = (void*)nl + next;
4620             }
4621             break;
4622         }
4623         case DM_DEV_WAIT:
4624         case DM_TABLE_STATUS:
4625         {
4626             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4627             void *cur_data = argptr;
4628             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4629             int spec_size = thunk_type_size(arg_type, 0);
4630             int i;
4631 
4632             for (i = 0; i < host_dm->target_count; i++) {
4633                 uint32_t next = spec->next;
4634                 int slen = strlen((char*)&spec[1]) + 1;
4635                 spec->next = (cur_data - argptr) + spec_size + slen;
4636                 if (guest_data_size < spec->next) {
4637                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4638                     break;
4639                 }
4640                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4641                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4642                 cur_data = argptr + spec->next;
4643                 spec = (void*)host_dm + host_dm->data_start + next;
4644             }
4645             break;
4646         }
4647         case DM_TABLE_DEPS:
4648         {
4649             void *hdata = (void*)host_dm + host_dm->data_start;
4650             int count = *(uint32_t*)hdata;
4651             uint64_t *hdev = hdata + 8;
4652             uint64_t *gdev = argptr + 8;
4653             int i;
4654 
4655             *(uint32_t*)argptr = tswap32(count);
4656             for (i = 0; i < count; i++) {
4657                 *gdev = tswap64(*hdev);
4658                 gdev++;
4659                 hdev++;
4660             }
4661             break;
4662         }
4663         case DM_LIST_VERSIONS:
4664         {
4665             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4666             uint32_t remaining_data = guest_data_size;
4667             void *cur_data = argptr;
4668             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4669             int vers_size = thunk_type_size(arg_type, 0);
4670 
4671             while (1) {
4672                 uint32_t next = vers->next;
4673                 if (next) {
4674                     vers->next = vers_size + (strlen(vers->name) + 1);
4675                 }
4676                 if (remaining_data < vers->next) {
4677                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4678                     break;
4679                 }
4680                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4681                 strcpy(cur_data + vers_size, vers->name);
4682                 cur_data += vers->next;
4683                 remaining_data -= vers->next;
4684                 if (!next) {
4685                     break;
4686                 }
4687                 vers = (void*)vers + next;
4688             }
4689             break;
4690         }
4691         default:
4692             unlock_user(argptr, guest_data, 0);
4693             ret = -TARGET_EINVAL;
4694             goto out;
4695         }
4696         unlock_user(argptr, guest_data, guest_data_size);
4697 
4698         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4699         if (!argptr) {
4700             ret = -TARGET_EFAULT;
4701             goto out;
4702         }
4703         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4704         unlock_user(argptr, arg, target_size);
4705     }
4706 out:
4707     g_free(big_buf);
4708     return ret;
4709 }
4710 
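/*
 * BLKPG is a two-level ioctl: the guest hands us a struct blkpg_ioctl_arg
 * whose 'data' member points at a struct blkpg_partition, so the generic
 * single-struct conversion path cannot be used.  Convert the outer struct,
 * then the partition payload, and point the host ioctl at the local copy.
 */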
4711 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4712                                int cmd, abi_long arg)
4713 {
4714     void *argptr;
4715     int target_size;
4716     const argtype *arg_type = ie->arg_type;
4717     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4718     abi_long ret;
4719 
4720     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4721     struct blkpg_partition host_part;
4722 
4723     /* Read and convert blkpg */
4724     arg_type++;
4725     target_size = thunk_type_size(arg_type, 0);
4726     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4727     if (!argptr) {
4728         ret = -TARGET_EFAULT;
4729         goto out;
4730     }
4731     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4732     unlock_user(argptr, arg, 0);
4733 
4734     switch (host_blkpg->op) {
4735     case BLKPG_ADD_PARTITION:
4736     case BLKPG_DEL_PARTITION:
4737         /* payload is struct blkpg_partition */
4738         break;
4739     default:
4740         /* Unknown opcode */
4741         ret = -TARGET_EINVAL;
4742         goto out;
4743     }
4744 
4745     /* Read and convert blkpg->data */
4746     arg = (abi_long)(uintptr_t)host_blkpg->data;
4747     target_size = thunk_type_size(part_arg_type, 0);
4748     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4749     if (!argptr) {
4750         ret = -TARGET_EFAULT;
4751         goto out;
4752     }
4753     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4754     unlock_user(argptr, arg, 0);
4755 
4756     /* Swizzle the data pointer to our local copy and call! */
4757     host_blkpg->data = &host_part;
4758     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4759 
4760 out:
4761     return ret;
4762 }
4763 
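/*
 * Routing-table ioctls (SIOCADDRT/SIOCDELRT) pass a struct rtentry that
 * embeds a string pointer (rt_dev).  Convert the struct field by field so
 * the rt_dev string can be locked in guest memory and handed to the host
 * as a real host pointer, then unlocked again after the ioctl.
 */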
4764 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4765                                 int fd, int cmd, abi_long arg)
4766 {
4767     const argtype *arg_type = ie->arg_type;
4768     const StructEntry *se;
4769     const argtype *field_types;
4770     const int *dst_offsets, *src_offsets;
4771     int target_size;
4772     void *argptr;
4773     abi_ulong *target_rt_dev_ptr = NULL;
4774     unsigned long *host_rt_dev_ptr = NULL;
4775     abi_long ret;
4776     int i;
4777 
4778     assert(ie->access == IOC_W);
4779     assert(*arg_type == TYPE_PTR);
4780     arg_type++;
4781     assert(*arg_type == TYPE_STRUCT);
4782     target_size = thunk_type_size(arg_type, 0);
4783     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4784     if (!argptr) {
4785         return -TARGET_EFAULT;
4786     }
4787     arg_type++;
4788     assert(*arg_type == (int)STRUCT_rtentry);
4789     se = struct_entries + *arg_type++;
4790     assert(se->convert[0] == NULL);
4791     /* Convert the struct here so that we can catch the rt_dev string */
4792     field_types = se->field_types;
4793     dst_offsets = se->field_offsets[THUNK_HOST];
4794     src_offsets = se->field_offsets[THUNK_TARGET];
4795     for (i = 0; i < se->nb_fields; i++) {
4796         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4797             assert(*field_types == TYPE_PTRVOID);
4798             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4799             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4800             if (*target_rt_dev_ptr != 0) {
4801                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4802                                                   tswapal(*target_rt_dev_ptr));
4803                 if (!*host_rt_dev_ptr) {
4804                     unlock_user(argptr, arg, 0);
4805                     return -TARGET_EFAULT;
4806                 }
4807             } else {
4808                 *host_rt_dev_ptr = 0;
4809             }
4810             field_types++;
4811             continue;
4812         }
4813         field_types = thunk_convert(buf_temp + dst_offsets[i],
4814                                     argptr + src_offsets[i],
4815                                     field_types, THUNK_HOST);
4816     }
4817     unlock_user(argptr, arg, 0);
4818 
4819     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4820 
4821     assert(host_rt_dev_ptr != NULL);
4822     assert(target_rt_dev_ptr != NULL);
4823     if (*host_rt_dev_ptr != 0) {
4824         unlock_user((void *)*host_rt_dev_ptr,
4825                     *target_rt_dev_ptr, 0);
4826     }
4827     return ret;
4828 }
4829 
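/*
 * KDSIGACCEPT carries a signal number in the argument itself, so the only
 * conversion needed is target signal number to host signal number.
 */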
4830 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4831                                      int fd, int cmd, abi_long arg)
4832 {
4833     int sig = target_to_host_signal(arg);
4834     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4835 }
4836 
4837 #ifdef TIOCGPTPEER
4838 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4839                                      int fd, int cmd, abi_long arg)
4840 {
4841     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4842     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4843 }
4844 #endif
4845 
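/*
 * Table of supported ioctls, generated from ioctls.h.  As an illustration
 * (FOO is a made-up command name), IOCTL(FOO, IOC_R, MK_PTR(TYPE_INT))
 * expands to
 *     { TARGET_FOO, FOO, "FOO", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 * while IOCTL_IGNORE(FOO) leaves host_cmd zero so that do_ioctl() returns
 * -TARGET_ENOSYS for it instead of calling the host.
 */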
4846 static IOCTLEntry ioctl_entries[] = {
4847 #define IOCTL(cmd, access, ...) \
4848     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4849 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4850     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4851 #define IOCTL_IGNORE(cmd) \
4852     { TARGET_ ## cmd, 0, #cmd },
4853 #include "ioctls.h"
4854     { 0, 0, },
4855 };
4856 
4857 /* ??? Implement proper locking for ioctls.  */
4858 /* do_ioctl() must return target values and target errnos. */
4859 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4860 {
4861     const IOCTLEntry *ie;
4862     const argtype *arg_type;
4863     abi_long ret;
4864     uint8_t buf_temp[MAX_STRUCT_SIZE];
4865     int target_size;
4866     void *argptr;
4867 
4868     ie = ioctl_entries;
4869     for(;;) {
4870         if (ie->target_cmd == 0) {
4871             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4872             return -TARGET_ENOSYS;
4873         }
4874         if (ie->target_cmd == cmd)
4875             break;
4876         ie++;
4877     }
4878     arg_type = ie->arg_type;
4879     if (ie->do_ioctl) {
4880         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4881     } else if (!ie->host_cmd) {
4882         /* Some architectures define BSD ioctls in their headers
4883            that are not implemented in Linux.  */
4884         return -TARGET_ENOSYS;
4885     }
4886 
4887     switch(arg_type[0]) {
4888     case TYPE_NULL:
4889         /* no argument */
4890         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4891         break;
4892     case TYPE_PTRVOID:
4893     case TYPE_INT:
4894         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4895         break;
4896     case TYPE_PTR:
4897         arg_type++;
4898         target_size = thunk_type_size(arg_type, 0);
4899         switch(ie->access) {
4900         case IOC_R:
4901             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4902             if (!is_error(ret)) {
4903                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4904                 if (!argptr)
4905                     return -TARGET_EFAULT;
4906                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4907                 unlock_user(argptr, arg, target_size);
4908             }
4909             break;
4910         case IOC_W:
4911             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4912             if (!argptr)
4913                 return -TARGET_EFAULT;
4914             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4915             unlock_user(argptr, arg, 0);
4916             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4917             break;
4918         default:
4919         case IOC_RW:
4920             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4921             if (!argptr)
4922                 return -TARGET_EFAULT;
4923             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4924             unlock_user(argptr, arg, 0);
4925             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4926             if (!is_error(ret)) {
4927                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4928                 if (!argptr)
4929                     return -TARGET_EFAULT;
4930                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4931                 unlock_user(argptr, arg, target_size);
4932             }
4933             break;
4934         }
4935         break;
4936     default:
4937         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4938                  (long)cmd, arg_type[0]);
4939         ret = -TARGET_ENOSYS;
4940         break;
4941     }
4942     return ret;
4943 }
4944 
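/*
 * Termios flag translation tables.  Each entry has the form
 *     { target_mask, target_bits, host_mask, host_bits }
 * and is applied by target_to_host_bitmask()/host_to_target_bitmask()
 * when converting struct termios between guest and host layouts below.
 */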
4945 static const bitmask_transtbl iflag_tbl[] = {
4946         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4947         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4948         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4949         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4950         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4951         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4952         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4953         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4954         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4955         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4956         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4957         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4958         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4959         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4960         { 0, 0, 0, 0 }
4961 };
4962 
4963 static const bitmask_transtbl oflag_tbl[] = {
4964 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4965 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4966 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4967 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4968 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4969 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4970 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4971 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4972 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4973 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4974 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4975 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4976 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4977 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4978 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4979 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4980 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4981 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4982 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4983 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4984 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4985 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4986 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4987 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4988 	{ 0, 0, 0, 0 }
4989 };
4990 
4991 static const bitmask_transtbl cflag_tbl[] = {
4992 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4993 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4994 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4995 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4996 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4997 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4998 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4999 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5000 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5001 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5002 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5003 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5004 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5005 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5006 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5007 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5008 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5009 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5010 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5011 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5012 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5013 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5014 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5015 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5016 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5017 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5018 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5019 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5020 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5021 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5022 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5023 	{ 0, 0, 0, 0 }
5024 };
5025 
5026 static const bitmask_transtbl lflag_tbl[] = {
5027 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5028 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5029 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5030 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5031 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5032 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5033 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5034 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5035 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5036 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5037 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5038 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5039 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5040 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5041 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5042 	{ 0, 0, 0, 0 }
5043 };
5044 
5045 static void target_to_host_termios (void *dst, const void *src)
5046 {
5047     struct host_termios *host = dst;
5048     const struct target_termios *target = src;
5049 
5050     host->c_iflag =
5051         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5052     host->c_oflag =
5053         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5054     host->c_cflag =
5055         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5056     host->c_lflag =
5057         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5058     host->c_line = target->c_line;
5059 
5060     memset(host->c_cc, 0, sizeof(host->c_cc));
5061     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5062     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5063     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5064     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5065     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5066     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5067     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5068     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5069     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5070     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5071     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5072     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5073     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5074     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5075     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5076     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5077     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5078 }
5079 
5080 static void host_to_target_termios (void *dst, const void *src)
5081 {
5082     struct target_termios *target = dst;
5083     const struct host_termios *host = src;
5084 
5085     target->c_iflag =
5086         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5087     target->c_oflag =
5088         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5089     target->c_cflag =
5090         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5091     target->c_lflag =
5092         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5093     target->c_line = host->c_line;
5094 
5095     memset(target->c_cc, 0, sizeof(target->c_cc));
5096     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5097     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5098     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5099     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5100     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5101     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5102     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5103     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5104     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5105     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5106     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5107     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5108     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5109     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5110     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5111     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5112     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5113 }
5114 
5115 static const StructEntry struct_termios_def = {
5116     .convert = { host_to_target_termios, target_to_host_termios },
5117     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5118     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5119 };
5120 
5121 static bitmask_transtbl mmap_flags_tbl[] = {
5122     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5123     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5124     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5125     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5126       MAP_ANONYMOUS, MAP_ANONYMOUS },
5127     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5128       MAP_GROWSDOWN, MAP_GROWSDOWN },
5129     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5130       MAP_DENYWRITE, MAP_DENYWRITE },
5131     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5132       MAP_EXECUTABLE, MAP_EXECUTABLE },
5133     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5134     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5135       MAP_NORESERVE, MAP_NORESERVE },
5136     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5137     /* MAP_STACK has been ignored by the kernel for quite some time.
5138        Recognize it for the target, but do not pass it through to
5139        the host.  */
5140     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5141     { 0, 0, 0, 0 }
5142 };
5143 
5144 #if defined(TARGET_I386)
5145 
5146 /* NOTE: there is really only one LDT shared by all the threads */
5147 static uint8_t *ldt_table;
5148 
5149 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5150 {
5151     int size;
5152     void *p;
5153 
5154     if (!ldt_table)
5155         return 0;
5156     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5157     if (size > bytecount)
5158         size = bytecount;
5159     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5160     if (!p)
5161         return -TARGET_EFAULT;
5162     /* ??? Should this be byteswapped?  */
5163     memcpy(p, ldt_table, size);
5164     unlock_user(p, ptr, size);
5165     return size;
5166 }
5167 
5168 /* XXX: add locking support */
5169 static abi_long write_ldt(CPUX86State *env,
5170                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5171 {
5172     struct target_modify_ldt_ldt_s ldt_info;
5173     struct target_modify_ldt_ldt_s *target_ldt_info;
5174     int seg_32bit, contents, read_exec_only, limit_in_pages;
5175     int seg_not_present, useable, lm;
5176     uint32_t *lp, entry_1, entry_2;
5177 
5178     if (bytecount != sizeof(ldt_info))
5179         return -TARGET_EINVAL;
5180     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5181         return -TARGET_EFAULT;
5182     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5183     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5184     ldt_info.limit = tswap32(target_ldt_info->limit);
5185     ldt_info.flags = tswap32(target_ldt_info->flags);
5186     unlock_user_struct(target_ldt_info, ptr, 0);
5187 
5188     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5189         return -TARGET_EINVAL;
5190     seg_32bit = ldt_info.flags & 1;
5191     contents = (ldt_info.flags >> 1) & 3;
5192     read_exec_only = (ldt_info.flags >> 3) & 1;
5193     limit_in_pages = (ldt_info.flags >> 4) & 1;
5194     seg_not_present = (ldt_info.flags >> 5) & 1;
5195     useable = (ldt_info.flags >> 6) & 1;
5196 #ifdef TARGET_ABI32
5197     lm = 0;
5198 #else
5199     lm = (ldt_info.flags >> 7) & 1;
5200 #endif
5201     if (contents == 3) {
5202         if (oldmode)
5203             return -TARGET_EINVAL;
5204         if (seg_not_present == 0)
5205             return -TARGET_EINVAL;
5206     }
5207     /* allocate the LDT */
5208     if (!ldt_table) {
5209         env->ldt.base = target_mmap(0,
5210                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5211                                     PROT_READ|PROT_WRITE,
5212                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5213         if (env->ldt.base == -1)
5214             return -TARGET_ENOMEM;
5215         memset(g2h(env->ldt.base), 0,
5216                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5217         env->ldt.limit = 0xffff;
5218         ldt_table = g2h(env->ldt.base);
5219     }
5220 
5221     /* NOTE: same code as Linux kernel */
5222     /* Allow LDTs to be cleared by the user. */
5223     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5224         if (oldmode ||
5225             (contents == 0		&&
5226              read_exec_only == 1	&&
5227              seg_32bit == 0		&&
5228              limit_in_pages == 0	&&
5229              seg_not_present == 1	&&
5230              useable == 0 )) {
5231             entry_1 = 0;
5232             entry_2 = 0;
5233             goto install;
5234         }
5235     }
5236 
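    /* Pack the fields into the two 32-bit words of an x86 segment
     * descriptor: entry_1 holds base[15:0] and limit[15:0]; entry_2 holds
     * base[31:24] and base[23:16], limit[19:16], the access bits derived
     * from the flags decoded above and the constant 0x7000 (S=1, DPL=3).
     */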
5237     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5238         (ldt_info.limit & 0x0ffff);
5239     entry_2 = (ldt_info.base_addr & 0xff000000) |
5240         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5241         (ldt_info.limit & 0xf0000) |
5242         ((read_exec_only ^ 1) << 9) |
5243         (contents << 10) |
5244         ((seg_not_present ^ 1) << 15) |
5245         (seg_32bit << 22) |
5246         (limit_in_pages << 23) |
5247         (lm << 21) |
5248         0x7000;
5249     if (!oldmode)
5250         entry_2 |= (useable << 20);
5251 
5252     /* Install the new entry ...  */
5253 install:
5254     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5255     lp[0] = tswap32(entry_1);
5256     lp[1] = tswap32(entry_2);
5257     return 0;
5258 }
5259 
5260 /* specific and weird i386 syscalls */
5261 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5262                               unsigned long bytecount)
5263 {
5264     abi_long ret;
5265 
5266     switch (func) {
5267     case 0:
5268         ret = read_ldt(ptr, bytecount);
5269         break;
5270     case 1:
5271         ret = write_ldt(env, ptr, bytecount, 1);
5272         break;
5273     case 0x11:
5274         ret = write_ldt(env, ptr, bytecount, 0);
5275         break;
5276     default:
5277         ret = -TARGET_ENOSYS;
5278         break;
5279     }
5280     return ret;
5281 }
5282 
5283 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5284 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5285 {
5286     uint64_t *gdt_table = g2h(env->gdt.base);
5287     struct target_modify_ldt_ldt_s ldt_info;
5288     struct target_modify_ldt_ldt_s *target_ldt_info;
5289     int seg_32bit, contents, read_exec_only, limit_in_pages;
5290     int seg_not_present, useable, lm;
5291     uint32_t *lp, entry_1, entry_2;
5292     int i;
5293 
5294     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5295     if (!target_ldt_info)
5296         return -TARGET_EFAULT;
5297     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5298     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5299     ldt_info.limit = tswap32(target_ldt_info->limit);
5300     ldt_info.flags = tswap32(target_ldt_info->flags);
5301     if (ldt_info.entry_number == -1) {
5302         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5303             if (gdt_table[i] == 0) {
5304                 ldt_info.entry_number = i;
5305                 target_ldt_info->entry_number = tswap32(i);
5306                 break;
5307             }
5308         }
5309     }
5310     unlock_user_struct(target_ldt_info, ptr, 1);
5311 
5312     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5313         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5314            return -TARGET_EINVAL;
5315     seg_32bit = ldt_info.flags & 1;
5316     contents = (ldt_info.flags >> 1) & 3;
5317     read_exec_only = (ldt_info.flags >> 3) & 1;
5318     limit_in_pages = (ldt_info.flags >> 4) & 1;
5319     seg_not_present = (ldt_info.flags >> 5) & 1;
5320     useable = (ldt_info.flags >> 6) & 1;
5321 #ifdef TARGET_ABI32
5322     lm = 0;
5323 #else
5324     lm = (ldt_info.flags >> 7) & 1;
5325 #endif
5326 
5327     if (contents == 3) {
5328         if (seg_not_present == 0)
5329             return -TARGET_EINVAL;
5330     }
5331 
5332     /* NOTE: same code as Linux kernel */
5333     /* Allow LDTs to be cleared by the user. */
5334     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5335         if ((contents == 0             &&
5336              read_exec_only == 1       &&
5337              seg_32bit == 0            &&
5338              limit_in_pages == 0       &&
5339              seg_not_present == 1      &&
5340              useable == 0 )) {
5341             entry_1 = 0;
5342             entry_2 = 0;
5343             goto install;
5344         }
5345     }
5346 
5347     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5348         (ldt_info.limit & 0x0ffff);
5349     entry_2 = (ldt_info.base_addr & 0xff000000) |
5350         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5351         (ldt_info.limit & 0xf0000) |
5352         ((read_exec_only ^ 1) << 9) |
5353         (contents << 10) |
5354         ((seg_not_present ^ 1) << 15) |
5355         (seg_32bit << 22) |
5356         (limit_in_pages << 23) |
5357         (useable << 20) |
5358         (lm << 21) |
5359         0x7000;
5360 
5361     /* Install the new entry ...  */
5362 install:
5363     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5364     lp[0] = tswap32(entry_1);
5365     lp[1] = tswap32(entry_2);
5366     return 0;
5367 }
5368 
5369 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5370 {
5371     struct target_modify_ldt_ldt_s *target_ldt_info;
5372     uint64_t *gdt_table = g2h(env->gdt.base);
5373     uint32_t base_addr, limit, flags;
5374     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5375     int seg_not_present, useable, lm;
5376     uint32_t *lp, entry_1, entry_2;
5377 
5378     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5379     if (!target_ldt_info)
5380         return -TARGET_EFAULT;
5381     idx = tswap32(target_ldt_info->entry_number);
5382     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5383         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5384         unlock_user_struct(target_ldt_info, ptr, 1);
5385         return -TARGET_EINVAL;
5386     }
5387     lp = (uint32_t *)(gdt_table + idx);
5388     entry_1 = tswap32(lp[0]);
5389     entry_2 = tswap32(lp[1]);
5390 
5391     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5392     contents = (entry_2 >> 10) & 3;
5393     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5394     seg_32bit = (entry_2 >> 22) & 1;
5395     limit_in_pages = (entry_2 >> 23) & 1;
5396     useable = (entry_2 >> 20) & 1;
5397 #ifdef TARGET_ABI32
5398     lm = 0;
5399 #else
5400     lm = (entry_2 >> 21) & 1;
5401 #endif
5402     flags = (seg_32bit << 0) | (contents << 1) |
5403         (read_exec_only << 3) | (limit_in_pages << 4) |
5404         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5405     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5406     base_addr = (entry_1 >> 16) |
5407         (entry_2 & 0xff000000) |
5408         ((entry_2 & 0xff) << 16);
5409     target_ldt_info->base_addr = tswapal(base_addr);
5410     target_ldt_info->limit = tswap32(limit);
5411     target_ldt_info->flags = tswap32(flags);
5412     unlock_user_struct(target_ldt_info, ptr, 1);
5413     return 0;
5414 }
5415 #endif /* TARGET_I386 && TARGET_ABI32 */
5416 
5417 #ifndef TARGET_ABI32
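/*
 * arch_prctl() for 64-bit x86 guests: ARCH_SET_FS/ARCH_SET_GS update the
 * segment base in the emulated CPU state, while ARCH_GET_FS/ARCH_GET_GS
 * copy the current base back out to guest memory.
 */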
5418 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5419 {
5420     abi_long ret = 0;
5421     abi_ulong val;
5422     int idx;
5423 
5424     switch(code) {
5425     case TARGET_ARCH_SET_GS:
5426     case TARGET_ARCH_SET_FS:
5427         if (code == TARGET_ARCH_SET_GS)
5428             idx = R_GS;
5429         else
5430             idx = R_FS;
5431         cpu_x86_load_seg(env, idx, 0);
5432         env->segs[idx].base = addr;
5433         break;
5434     case TARGET_ARCH_GET_GS:
5435     case TARGET_ARCH_GET_FS:
5436         if (code == TARGET_ARCH_GET_GS)
5437             idx = R_GS;
5438         else
5439             idx = R_FS;
5440         val = env->segs[idx].base;
5441         if (put_user(val, addr, abi_ulong))
5442             ret = -TARGET_EFAULT;
5443         break;
5444     default:
5445         ret = -TARGET_EINVAL;
5446         break;
5447     }
5448     return ret;
5449 }
5450 #endif
5451 
5452 #endif /* defined(TARGET_I386) */
5453 
5454 #define NEW_STACK_SIZE 0x40000
5455 
5456 
5457 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5458 typedef struct {
5459     CPUArchState *env;
5460     pthread_mutex_t mutex;
5461     pthread_cond_t cond;
5462     pthread_t thread;
5463     uint32_t tid;
5464     abi_ulong child_tidptr;
5465     abi_ulong parent_tidptr;
5466     sigset_t sigmask;
5467 } new_thread_info;
5468 
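/*
 * Start routine for threads created by do_fork() with CLONE_VM: register
 * the new thread with RCU and TCG, publish its TID wherever the guest
 * asked for it, restore the signal mask, wake the waiting parent and then
 * enter cpu_loop(), which never returns.
 */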
5469 static void *clone_func(void *arg)
5470 {
5471     new_thread_info *info = arg;
5472     CPUArchState *env;
5473     CPUState *cpu;
5474     TaskState *ts;
5475 
5476     rcu_register_thread();
5477     tcg_register_thread();
5478     env = info->env;
5479     cpu = ENV_GET_CPU(env);
5480     thread_cpu = cpu;
5481     ts = (TaskState *)cpu->opaque;
5482     info->tid = sys_gettid();
5483     task_settid(ts);
5484     if (info->child_tidptr)
5485         put_user_u32(info->tid, info->child_tidptr);
5486     if (info->parent_tidptr)
5487         put_user_u32(info->tid, info->parent_tidptr);
5488     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5489     /* Enable signals.  */
5490     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5491     /* Signal to the parent that we're ready.  */
5492     pthread_mutex_lock(&info->mutex);
5493     pthread_cond_broadcast(&info->cond);
5494     pthread_mutex_unlock(&info->mutex);
5495     /* Wait until the parent has finished initializing the tls state.  */
5496     pthread_mutex_lock(&clone_lock);
5497     pthread_mutex_unlock(&clone_lock);
5498     cpu_loop(env);
5499     /* never exits */
5500     return NULL;
5501 }
5502 
5503 /* do_fork() must return host values and target errnos (unlike most
5504    do_*() functions). */
5505 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5506                    abi_ulong parent_tidptr, target_ulong newtls,
5507                    abi_ulong child_tidptr)
5508 {
5509     CPUState *cpu = ENV_GET_CPU(env);
5510     int ret;
5511     TaskState *ts;
5512     CPUState *new_cpu;
5513     CPUArchState *new_env;
5514     sigset_t sigmask;
5515 
5516     flags &= ~CLONE_IGNORED_FLAGS;
5517 
5518     /* Emulate vfork() with fork() */
5519     if (flags & CLONE_VFORK)
5520         flags &= ~(CLONE_VFORK | CLONE_VM);
5521 
5522     if (flags & CLONE_VM) {
5523         TaskState *parent_ts = (TaskState *)cpu->opaque;
5524         new_thread_info info;
5525         pthread_attr_t attr;
5526 
5527         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5528             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5529             return -TARGET_EINVAL;
5530         }
5531 
5532         ts = g_new0(TaskState, 1);
5533         init_task_state(ts);
5534 
5535         /* Grab a mutex so that thread setup appears atomic.  */
5536         pthread_mutex_lock(&clone_lock);
5537 
5538         /* we create a new CPU instance. */
5539         new_env = cpu_copy(env);
5540         /* Init regs that differ from the parent.  */
5541         cpu_clone_regs(new_env, newsp);
5542         new_cpu = ENV_GET_CPU(new_env);
5543         new_cpu->opaque = ts;
5544         ts->bprm = parent_ts->bprm;
5545         ts->info = parent_ts->info;
5546         ts->signal_mask = parent_ts->signal_mask;
5547 
5548         if (flags & CLONE_CHILD_CLEARTID) {
5549             ts->child_tidptr = child_tidptr;
5550         }
5551 
5552         if (flags & CLONE_SETTLS) {
5553             cpu_set_tls (new_env, newtls);
5554         }
5555 
5556         memset(&info, 0, sizeof(info));
5557         pthread_mutex_init(&info.mutex, NULL);
5558         pthread_mutex_lock(&info.mutex);
5559         pthread_cond_init(&info.cond, NULL);
5560         info.env = new_env;
5561         if (flags & CLONE_CHILD_SETTID) {
5562             info.child_tidptr = child_tidptr;
5563         }
5564         if (flags & CLONE_PARENT_SETTID) {
5565             info.parent_tidptr = parent_tidptr;
5566         }
5567 
5568         ret = pthread_attr_init(&attr);
5569         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5570         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5571         /* It is not safe to deliver signals until the child has finished
5572            initializing, so temporarily block all signals.  */
5573         sigfillset(&sigmask);
5574         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5575         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5576 
5577         /* If this is our first additional thread, we need to ensure we
5578          * generate code for parallel execution and flush old translations.
5579          */
5580         if (!parallel_cpus) {
5581             parallel_cpus = true;
5582             tb_flush(cpu);
5583         }
5584 
5585         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5586         /* TODO: Free new CPU state if thread creation failed.  */
5587 
5588         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5589         pthread_attr_destroy(&attr);
5590         if (ret == 0) {
5591             /* Wait for the child to initialize.  */
5592             pthread_cond_wait(&info.cond, &info.mutex);
5593             ret = info.tid;
5594         } else {
5595             ret = -1;
5596         }
5597         pthread_mutex_unlock(&info.mutex);
5598         pthread_cond_destroy(&info.cond);
5599         pthread_mutex_destroy(&info.mutex);
5600         pthread_mutex_unlock(&clone_lock);
5601     } else {
5602         /* if there is no CLONE_VM, we consider it a fork */
5603         if (flags & CLONE_INVALID_FORK_FLAGS) {
5604             return -TARGET_EINVAL;
5605         }
5606 
5607         /* We can't support custom termination signals */
5608         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5609             return -TARGET_EINVAL;
5610         }
5611 
5612         if (block_signals()) {
5613             return -TARGET_ERESTARTSYS;
5614         }
5615 
5616         fork_start();
5617         ret = fork();
5618         if (ret == 0) {
5619             /* Child Process.  */
5620             cpu_clone_regs(env, newsp);
5621             fork_end(1);
5622             /* There is a race condition here.  The parent process could
5623                theoretically read the TID in the child process before the child
5624                tid is set.  This would require using either ptrace
5625                (not implemented) or having *_tidptr point at a shared memory
5626                mapping.  We can't repeat the spinlock hack used above because
5627                the child process gets its own copy of the lock.  */
5628             if (flags & CLONE_CHILD_SETTID)
5629                 put_user_u32(sys_gettid(), child_tidptr);
5630             if (flags & CLONE_PARENT_SETTID)
5631                 put_user_u32(sys_gettid(), parent_tidptr);
5632             ts = (TaskState *)cpu->opaque;
5633             if (flags & CLONE_SETTLS)
5634                 cpu_set_tls (env, newtls);
5635             if (flags & CLONE_CHILD_CLEARTID)
5636                 ts->child_tidptr = child_tidptr;
5637         } else {
5638             fork_end(0);
5639         }
5640     }
5641     return ret;
5642 }
5643 
5644 /* warning: does not handle Linux-specific flags... */
5645 static int target_to_host_fcntl_cmd(int cmd)
5646 {
5647     int ret;
5648 
5649     switch(cmd) {
5650     case TARGET_F_DUPFD:
5651     case TARGET_F_GETFD:
5652     case TARGET_F_SETFD:
5653     case TARGET_F_GETFL:
5654     case TARGET_F_SETFL:
5655         ret = cmd;
5656         break;
5657     case TARGET_F_GETLK:
5658         ret = F_GETLK64;
5659         break;
5660     case TARGET_F_SETLK:
5661         ret = F_SETLK64;
5662         break;
5663     case TARGET_F_SETLKW:
5664         ret = F_SETLKW64;
5665         break;
5666     case TARGET_F_GETOWN:
5667         ret = F_GETOWN;
5668         break;
5669     case TARGET_F_SETOWN:
5670         ret = F_SETOWN;
5671         break;
5672     case TARGET_F_GETSIG:
5673         ret = F_GETSIG;
5674         break;
5675     case TARGET_F_SETSIG:
5676         ret = F_SETSIG;
5677         break;
5678 #if TARGET_ABI_BITS == 32
5679     case TARGET_F_GETLK64:
5680         ret = F_GETLK64;
5681         break;
5682     case TARGET_F_SETLK64:
5683         ret = F_SETLK64;
5684         break;
5685     case TARGET_F_SETLKW64:
5686         ret = F_SETLKW64;
5687         break;
5688 #endif
5689     case TARGET_F_SETLEASE:
5690         ret = F_SETLEASE;
5691         break;
5692     case TARGET_F_GETLEASE:
5693         ret = F_GETLEASE;
5694         break;
5695 #ifdef F_DUPFD_CLOEXEC
5696     case TARGET_F_DUPFD_CLOEXEC:
5697         ret = F_DUPFD_CLOEXEC;
5698         break;
5699 #endif
5700     case TARGET_F_NOTIFY:
5701         ret = F_NOTIFY;
5702         break;
5703 #ifdef F_GETOWN_EX
5704     case TARGET_F_GETOWN_EX:
5705         ret = F_GETOWN_EX;
5706         break;
5707 #endif
5708 #ifdef F_SETOWN_EX
5709     case TARGET_F_SETOWN_EX:
5710         ret = F_SETOWN_EX;
5711         break;
5712 #endif
5713 #ifdef F_SETPIPE_SZ
5714     case TARGET_F_SETPIPE_SZ:
5715         ret = F_SETPIPE_SZ;
5716         break;
5717     case TARGET_F_GETPIPE_SZ:
5718         ret = F_GETPIPE_SZ;
5719         break;
5720 #endif
5721     default:
5722         ret = -TARGET_EINVAL;
5723         break;
5724     }
5725 
5726 #if defined(__powerpc64__)
5727     /* On PPC64, the glibc headers have F_*LK* defined as 12, 13 and 14, which
5728      * the kernel does not support. The glibc fcntl call actually adjusts
5729      * them to 5, 6 and 7 before making the syscall(). Since we make the
5730      * syscall directly, adjust to what the kernel supports.
5731      */
5732     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5733         ret -= F_GETLK64 - 5;
5734     }
5735 #endif
5736 
5737     return ret;
5738 }
5739 
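/*
 * X-macro style table of flock lock types: TRANSTBL_CONVERT is redefined
 * before each expansion so that the same list produces both the
 * target-to-host and the host-to-target conversion switches below.
 */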
5740 #define FLOCK_TRANSTBL \
5741     switch (type) { \
5742     TRANSTBL_CONVERT(F_RDLCK); \
5743     TRANSTBL_CONVERT(F_WRLCK); \
5744     TRANSTBL_CONVERT(F_UNLCK); \
5745     TRANSTBL_CONVERT(F_EXLCK); \
5746     TRANSTBL_CONVERT(F_SHLCK); \
5747     }
5748 
5749 static int target_to_host_flock(int type)
5750 {
5751 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5752     FLOCK_TRANSTBL
5753 #undef  TRANSTBL_CONVERT
5754     return -TARGET_EINVAL;
5755 }
5756 
5757 static int host_to_target_flock(int type)
5758 {
5759 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5760     FLOCK_TRANSTBL
5761 #undef  TRANSTBL_CONVERT
5762     /* if we don't know how to convert the value coming
5763      * from the host, we copy it to the target field as-is
5764      */
5765     return type;
5766 }
5767 
5768 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5769                                             abi_ulong target_flock_addr)
5770 {
5771     struct target_flock *target_fl;
5772     int l_type;
5773 
5774     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5775         return -TARGET_EFAULT;
5776     }
5777 
5778     __get_user(l_type, &target_fl->l_type);
5779     l_type = target_to_host_flock(l_type);
5780     if (l_type < 0) {
5781         return l_type;
5782     }
5783     fl->l_type = l_type;
5784     __get_user(fl->l_whence, &target_fl->l_whence);
5785     __get_user(fl->l_start, &target_fl->l_start);
5786     __get_user(fl->l_len, &target_fl->l_len);
5787     __get_user(fl->l_pid, &target_fl->l_pid);
5788     unlock_user_struct(target_fl, target_flock_addr, 0);
5789     return 0;
5790 }
5791 
5792 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5793                                           const struct flock64 *fl)
5794 {
5795     struct target_flock *target_fl;
5796     short l_type;
5797 
5798     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5799         return -TARGET_EFAULT;
5800     }
5801 
5802     l_type = host_to_target_flock(fl->l_type);
5803     __put_user(l_type, &target_fl->l_type);
5804     __put_user(fl->l_whence, &target_fl->l_whence);
5805     __put_user(fl->l_start, &target_fl->l_start);
5806     __put_user(fl->l_len, &target_fl->l_len);
5807     __put_user(fl->l_pid, &target_fl->l_pid);
5808     unlock_user_struct(target_fl, target_flock_addr, 1);
5809     return 0;
5810 }
5811 
5812 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5813 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5814 
5815 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5816 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5817                                                    abi_ulong target_flock_addr)
5818 {
5819     struct target_oabi_flock64 *target_fl;
5820     int l_type;
5821 
5822     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5823         return -TARGET_EFAULT;
5824     }
5825 
5826     __get_user(l_type, &target_fl->l_type);
5827     l_type = target_to_host_flock(l_type);
5828     if (l_type < 0) {
5829         return l_type;
5830     }
5831     fl->l_type = l_type;
5832     __get_user(fl->l_whence, &target_fl->l_whence);
5833     __get_user(fl->l_start, &target_fl->l_start);
5834     __get_user(fl->l_len, &target_fl->l_len);
5835     __get_user(fl->l_pid, &target_fl->l_pid);
5836     unlock_user_struct(target_fl, target_flock_addr, 0);
5837     return 0;
5838 }
5839 
5840 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5841                                                  const struct flock64 *fl)
5842 {
5843     struct target_oabi_flock64 *target_fl;
5844     short l_type;
5845 
5846     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5847         return -TARGET_EFAULT;
5848     }
5849 
5850     l_type = host_to_target_flock(fl->l_type);
5851     __put_user(l_type, &target_fl->l_type);
5852     __put_user(fl->l_whence, &target_fl->l_whence);
5853     __put_user(fl->l_start, &target_fl->l_start);
5854     __put_user(fl->l_len, &target_fl->l_len);
5855     __put_user(fl->l_pid, &target_fl->l_pid);
5856     unlock_user_struct(target_fl, target_flock_addr, 1);
5857     return 0;
5858 }
5859 #endif
5860 
5861 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5862                                               abi_ulong target_flock_addr)
5863 {
5864     struct target_flock64 *target_fl;
5865     int l_type;
5866 
5867     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5868         return -TARGET_EFAULT;
5869     }
5870 
5871     __get_user(l_type, &target_fl->l_type);
5872     l_type = target_to_host_flock(l_type);
5873     if (l_type < 0) {
5874         return l_type;
5875     }
5876     fl->l_type = l_type;
5877     __get_user(fl->l_whence, &target_fl->l_whence);
5878     __get_user(fl->l_start, &target_fl->l_start);
5879     __get_user(fl->l_len, &target_fl->l_len);
5880     __get_user(fl->l_pid, &target_fl->l_pid);
5881     unlock_user_struct(target_fl, target_flock_addr, 0);
5882     return 0;
5883 }
5884 
5885 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5886                                             const struct flock64 *fl)
5887 {
5888     struct target_flock64 *target_fl;
5889     short l_type;
5890 
5891     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5892         return -TARGET_EFAULT;
5893     }
5894 
5895     l_type = host_to_target_flock(fl->l_type);
5896     __put_user(l_type, &target_fl->l_type);
5897     __put_user(fl->l_whence, &target_fl->l_whence);
5898     __put_user(fl->l_start, &target_fl->l_start);
5899     __put_user(fl->l_len, &target_fl->l_len);
5900     __put_user(fl->l_pid, &target_fl->l_pid);
5901     unlock_user_struct(target_fl, target_flock_addr, 1);
5902     return 0;
5903 }
5904 
5905 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5906 {
5907     struct flock64 fl64;
5908 #ifdef F_GETOWN_EX
5909     struct f_owner_ex fox;
5910     struct target_f_owner_ex *target_fox;
5911 #endif
5912     abi_long ret;
5913     int host_cmd = target_to_host_fcntl_cmd(cmd);
5914 
5915     if (host_cmd == -TARGET_EINVAL)
5916 	    return host_cmd;
5917 
5918     switch(cmd) {
5919     case TARGET_F_GETLK:
5920         ret = copy_from_user_flock(&fl64, arg);
5921         if (ret) {
5922             return ret;
5923         }
5924         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5925         if (ret == 0) {
5926             ret = copy_to_user_flock(arg, &fl64);
5927         }
5928         break;
5929 
5930     case TARGET_F_SETLK:
5931     case TARGET_F_SETLKW:
5932         ret = copy_from_user_flock(&fl64, arg);
5933         if (ret) {
5934             return ret;
5935         }
5936         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5937         break;
5938 
5939     case TARGET_F_GETLK64:
5940         ret = copy_from_user_flock64(&fl64, arg);
5941         if (ret) {
5942             return ret;
5943         }
5944         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5945         if (ret == 0) {
5946             ret = copy_to_user_flock64(arg, &fl64);
5947         }
5948         break;
5949     case TARGET_F_SETLK64:
5950     case TARGET_F_SETLKW64:
5951         ret = copy_from_user_flock64(&fl64, arg);
5952         if (ret) {
5953             return ret;
5954         }
5955         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5956         break;
5957 
5958     case TARGET_F_GETFL:
5959         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5960         if (ret >= 0) {
5961             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5962         }
5963         break;
5964 
5965     case TARGET_F_SETFL:
5966         ret = get_errno(safe_fcntl(fd, host_cmd,
5967                                    target_to_host_bitmask(arg,
5968                                                           fcntl_flags_tbl)));
5969         break;
5970 
5971 #ifdef F_GETOWN_EX
5972     case TARGET_F_GETOWN_EX:
5973         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5974         if (ret >= 0) {
5975             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5976                 return -TARGET_EFAULT;
5977             target_fox->type = tswap32(fox.type);
5978             target_fox->pid = tswap32(fox.pid);
5979             unlock_user_struct(target_fox, arg, 1);
5980         }
5981         break;
5982 #endif
5983 
5984 #ifdef F_SETOWN_EX
5985     case TARGET_F_SETOWN_EX:
5986         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5987             return -TARGET_EFAULT;
5988         fox.type = tswap32(target_fox->type);
5989         fox.pid = tswap32(target_fox->pid);
5990         unlock_user_struct(target_fox, arg, 0);
5991         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5992         break;
5993 #endif
5994 
5995     case TARGET_F_SETOWN:
5996     case TARGET_F_GETOWN:
5997     case TARGET_F_SETSIG:
5998     case TARGET_F_GETSIG:
5999     case TARGET_F_SETLEASE:
6000     case TARGET_F_GETLEASE:
6001     case TARGET_F_SETPIPE_SZ:
6002     case TARGET_F_GETPIPE_SZ:
6003         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6004         break;
6005 
6006     default:
6007         ret = get_errno(safe_fcntl(fd, cmd, arg));
6008         break;
6009     }
6010     return ret;
6011 }
6012 
6013 #ifdef USE_UID16
6014 
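/* Legacy 16-bit UID/GID ABI helpers: IDs that do not fit in 16 bits are
 * reported to the guest as 65534 (the kernel's default overflow ID), and
 * a 16-bit -1 coming from the guest is widened back to -1 so that the
 * "leave this ID unchanged" convention of the set*id() calls still works.
 */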
6015 static inline int high2lowuid(int uid)
6016 {
6017     if (uid > 65535)
6018         return 65534;
6019     else
6020         return uid;
6021 }
6022 
6023 static inline int high2lowgid(int gid)
6024 {
6025     if (gid > 65535)
6026         return 65534;
6027     else
6028         return gid;
6029 }
6030 
6031 static inline int low2highuid(int uid)
6032 {
6033     if ((int16_t)uid == -1)
6034         return -1;
6035     else
6036         return uid;
6037 }
6038 
6039 static inline int low2highgid(int gid)
6040 {
6041     if ((int16_t)gid == -1)
6042         return -1;
6043     else
6044         return gid;
6045 }
6046 static inline int tswapid(int id)
6047 {
6048     return tswap16(id);
6049 }
6050 
6051 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6052 
6053 #else /* !USE_UID16 */
6054 static inline int high2lowuid(int uid)
6055 {
6056     return uid;
6057 }
6058 static inline int high2lowgid(int gid)
6059 {
6060     return gid;
6061 }
6062 static inline int low2highuid(int uid)
6063 {
6064     return uid;
6065 }
6066 static inline int low2highgid(int gid)
6067 {
6068     return gid;
6069 }
6070 static inline int tswapid(int id)
6071 {
6072     return tswap32(id);
6073 }
6074 
6075 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6076 
6077 #endif /* USE_UID16 */
6078 
6079 /* We must do direct syscalls for setting UID/GID, because we want to
6080  * implement the Linux system call semantics of "change only for this thread",
6081  * not the libc/POSIX semantics of "change for all threads in process".
6082  * (See http://ewontfix.com/17/ for more details.)
6083  * We use the 32-bit version of the syscalls if present; if it is not
6084  * then either the host architecture supports 32-bit UIDs natively with
6085  * the standard syscall, or the 16-bit UID is the best we can do.
6086  */
6087 #ifdef __NR_setuid32
6088 #define __NR_sys_setuid __NR_setuid32
6089 #else
6090 #define __NR_sys_setuid __NR_setuid
6091 #endif
6092 #ifdef __NR_setgid32
6093 #define __NR_sys_setgid __NR_setgid32
6094 #else
6095 #define __NR_sys_setgid __NR_setgid
6096 #endif
6097 #ifdef __NR_setresuid32
6098 #define __NR_sys_setresuid __NR_setresuid32
6099 #else
6100 #define __NR_sys_setresuid __NR_setresuid
6101 #endif
6102 #ifdef __NR_setresgid32
6103 #define __NR_sys_setresgid __NR_setresgid32
6104 #else
6105 #define __NR_sys_setresgid __NR_setresgid
6106 #endif
6107 
6108 _syscall1(int, sys_setuid, uid_t, uid)
6109 _syscall1(int, sys_setgid, gid_t, gid)
6110 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6111 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6112 
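/*
 * One-time initialisation for this file: register the struct layouts used
 * by the thunk conversion code, build the target-to-host errno table, and
 * patch any ioctl numbers whose size field could not be filled in at
 * compile time (plus a consistency check when host and target match).
 */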
6113 void syscall_init(void)
6114 {
6115     IOCTLEntry *ie;
6116     const argtype *arg_type;
6117     int size;
6118     int i;
6119 
6120     thunk_init(STRUCT_MAX);
6121 
6122 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6123 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6124 #include "syscall_types.h"
6125 #undef STRUCT
6126 #undef STRUCT_SPECIAL
6127 
6128     /* Build target_to_host_errno_table[] from
6129      * host_to_target_errno_table[]. */
6130     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6131         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6132     }
6133 
6134     /* Patch the ioctl size if necessary.  We rely on the fact that
6135        no ioctl has all bits set to '1' in the size field. */
6136     ie = ioctl_entries;
6137     while (ie->target_cmd != 0) {
6138         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6139             TARGET_IOC_SIZEMASK) {
6140             arg_type = ie->arg_type;
6141             if (arg_type[0] != TYPE_PTR) {
6142                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6143                         ie->target_cmd);
6144                 exit(1);
6145             }
6146             arg_type++;
6147             size = thunk_type_size(arg_type, 0);
6148             ie->target_cmd = (ie->target_cmd &
6149                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6150                 (size << TARGET_IOC_SIZESHIFT);
6151         }
6152 
6153         /* automatic consistency check if same arch */
6154 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6155     (defined(__x86_64__) && defined(TARGET_X86_64))
6156         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6157             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6158                     ie->name, ie->target_cmd, ie->host_cmd);
6159         }
6160 #endif
6161         ie++;
6162     }
6163 }
6164 
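/* On 32-bit ABIs a 64-bit file offset is passed as two register-sized
 * halves; target_offset64() reassembles them according to the target's
 * endianness.  For example, on a little-endian target an offset of
 * 0x123456789 arrives as word0 = 0x23456789 and word1 = 0x00000001.
 */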
6165 #if TARGET_ABI_BITS == 32
6166 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6167 {
6168 #ifdef TARGET_WORDS_BIGENDIAN
6169     return ((uint64_t)word0 << 32) | word1;
6170 #else
6171     return ((uint64_t)word1 << 32) | word0;
6172 #endif
6173 }
6174 #else /* TARGET_ABI_BITS == 32 */
6175 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6176 {
6177     return word0;
6178 }
6179 #endif /* TARGET_ABI_BITS != 32 */
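
/*
 * Illustrative example: a 32-bit little-endian guest passing the 64-bit
 * offset 0x100000200 (just above 4 GiB) supplies word0 = 0x00000200 and
 * word1 = 0x00000001, which target_offset64() reassembles as 0x100000200;
 * a big-endian guest passes the halves in the opposite order.  On 64-bit
 * ABIs the offset already fits in one register, so word0 is returned as-is.
 */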
6180 
6181 #ifdef TARGET_NR_truncate64
6182 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6183                                          abi_long arg2,
6184                                          abi_long arg3,
6185                                          abi_long arg4)
6186 {
6187     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6188         arg2 = arg3;
6189         arg3 = arg4;
6190     }
6191     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6192 }
6193 #endif
6194 
6195 #ifdef TARGET_NR_ftruncate64
6196 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6197                                           abi_long arg2,
6198                                           abi_long arg3,
6199                                           abi_long arg4)
6200 {
6201     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6202         arg2 = arg3;
6203         arg3 = arg4;
6204     }
6205     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6206 }
6207 #endif
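
/*
 * Illustrative note: on ABIs where regpairs_aligned() is true (e.g. 32-bit
 * ARM EABI), a 64-bit syscall argument must start in an even-numbered
 * register, so the kernel ABI inserts a padding argument before the offset
 * pair.  The guest's ftruncate64(fd, off) then arrives as
 * (arg1 = fd, arg2 = padding, arg3/arg4 = the two halves of the offset),
 * and the shift above drops the padding before reassembling the offset.
 */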
6208 
6209 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6210                                                abi_ulong target_addr)
6211 {
6212     struct target_timespec *target_ts;
6213 
6214     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6215         return -TARGET_EFAULT;
6216     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6217     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6218     unlock_user_struct(target_ts, target_addr, 0);
6219     return 0;
6220 }
6221 
6222 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6223                                                struct timespec *host_ts)
6224 {
6225     struct target_timespec *target_ts;
6226 
6227     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6228         return -TARGET_EFAULT;
6229     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6230     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6231     unlock_user_struct(target_ts, target_addr, 1);
6232     return 0;
6233 }
6234 
6235 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6236                                                  abi_ulong target_addr)
6237 {
6238     struct target_itimerspec *target_itspec;
6239 
6240     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6241         return -TARGET_EFAULT;
6242     }
6243 
6244     host_itspec->it_interval.tv_sec =
6245                             tswapal(target_itspec->it_interval.tv_sec);
6246     host_itspec->it_interval.tv_nsec =
6247                             tswapal(target_itspec->it_interval.tv_nsec);
6248     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6249     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6250 
6251     unlock_user_struct(target_itspec, target_addr, 1);
6252     return 0;
6253 }
6254 
6255 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6256                                                struct itimerspec *host_its)
6257 {
6258     struct target_itimerspec *target_itspec;
6259 
6260     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6261         return -TARGET_EFAULT;
6262     }
6263 
6264     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6265     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6266 
6267     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6268     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6269 
6270     unlock_user_struct(target_itspec, target_addr, 0);
6271     return 0;
6272 }
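
/*
 * Unlike the timespec helpers above, which let __get_user()/__put_user()
 * handle byte order per field, the itimerspec helpers byte-swap each field
 * explicitly with tswapal(); both approaches assume the target's tv_sec and
 * tv_nsec fields are abi_long sized.
 */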
6273 
6274 static inline abi_long target_to_host_timex(struct timex *host_tx,
6275                                             abi_long target_addr)
6276 {
6277     struct target_timex *target_tx;
6278 
6279     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6280         return -TARGET_EFAULT;
6281     }
6282 
6283     __get_user(host_tx->modes, &target_tx->modes);
6284     __get_user(host_tx->offset, &target_tx->offset);
6285     __get_user(host_tx->freq, &target_tx->freq);
6286     __get_user(host_tx->maxerror, &target_tx->maxerror);
6287     __get_user(host_tx->esterror, &target_tx->esterror);
6288     __get_user(host_tx->status, &target_tx->status);
6289     __get_user(host_tx->constant, &target_tx->constant);
6290     __get_user(host_tx->precision, &target_tx->precision);
6291     __get_user(host_tx->tolerance, &target_tx->tolerance);
6292     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6293     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6294     __get_user(host_tx->tick, &target_tx->tick);
6295     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6296     __get_user(host_tx->jitter, &target_tx->jitter);
6297     __get_user(host_tx->shift, &target_tx->shift);
6298     __get_user(host_tx->stabil, &target_tx->stabil);
6299     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6300     __get_user(host_tx->calcnt, &target_tx->calcnt);
6301     __get_user(host_tx->errcnt, &target_tx->errcnt);
6302     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6303     __get_user(host_tx->tai, &target_tx->tai);
6304 
6305     unlock_user_struct(target_tx, target_addr, 0);
6306     return 0;
6307 }
6308 
6309 static inline abi_long host_to_target_timex(abi_long target_addr,
6310                                             struct timex *host_tx)
6311 {
6312     struct target_timex *target_tx;
6313 
6314     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6315         return -TARGET_EFAULT;
6316     }
6317 
6318     __put_user(host_tx->modes, &target_tx->modes);
6319     __put_user(host_tx->offset, &target_tx->offset);
6320     __put_user(host_tx->freq, &target_tx->freq);
6321     __put_user(host_tx->maxerror, &target_tx->maxerror);
6322     __put_user(host_tx->esterror, &target_tx->esterror);
6323     __put_user(host_tx->status, &target_tx->status);
6324     __put_user(host_tx->constant, &target_tx->constant);
6325     __put_user(host_tx->precision, &target_tx->precision);
6326     __put_user(host_tx->tolerance, &target_tx->tolerance);
6327     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6328     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6329     __put_user(host_tx->tick, &target_tx->tick);
6330     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6331     __put_user(host_tx->jitter, &target_tx->jitter);
6332     __put_user(host_tx->shift, &target_tx->shift);
6333     __put_user(host_tx->stabil, &target_tx->stabil);
6334     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6335     __put_user(host_tx->calcnt, &target_tx->calcnt);
6336     __put_user(host_tx->errcnt, &target_tx->errcnt);
6337     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6338     __put_user(host_tx->tai, &target_tx->tai);
6339 
6340     unlock_user_struct(target_tx, target_addr, 1);
6341     return 0;
6342 }
6343 
6344 
6345 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6346                                                abi_ulong target_addr)
6347 {
6348     struct target_sigevent *target_sevp;
6349 
6350     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6351         return -TARGET_EFAULT;
6352     }
6353 
6354     /* This union is awkward on 64-bit systems because it has a 32-bit
6355      * integer and a pointer in it; we follow the conversion approach
6356      * used for handling sigval types in signal.c so the guest should get
6357      * the correct value back even if we did a 64-bit byteswap and it's
6358      * using the 32-bit integer.
6359      */
6360     host_sevp->sigev_value.sival_ptr =
6361         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6362     host_sevp->sigev_signo =
6363         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6364     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6365     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6366 
6367     unlock_user_struct(target_sevp, target_addr, 1);
6368     return 0;
6369 }
6370 
6371 #if defined(TARGET_NR_mlockall)
6372 static inline int target_to_host_mlockall_arg(int arg)
6373 {
6374     int result = 0;
6375 
6376     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6377         result |= MCL_CURRENT;
6378     }
6379     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6380         result |= MCL_FUTURE;
6381     }
6382     return result;
6383 }
6384 #endif
6385 
6386 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6387      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6388      defined(TARGET_NR_newfstatat))
6389 static inline abi_long host_to_target_stat64(void *cpu_env,
6390                                              abi_ulong target_addr,
6391                                              struct stat *host_st)
6392 {
6393 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6394     if (((CPUARMState *)cpu_env)->eabi) {
6395         struct target_eabi_stat64 *target_st;
6396 
6397         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6398             return -TARGET_EFAULT;
6399         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6400         __put_user(host_st->st_dev, &target_st->st_dev);
6401         __put_user(host_st->st_ino, &target_st->st_ino);
6402 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6403         __put_user(host_st->st_ino, &target_st->__st_ino);
6404 #endif
6405         __put_user(host_st->st_mode, &target_st->st_mode);
6406         __put_user(host_st->st_nlink, &target_st->st_nlink);
6407         __put_user(host_st->st_uid, &target_st->st_uid);
6408         __put_user(host_st->st_gid, &target_st->st_gid);
6409         __put_user(host_st->st_rdev, &target_st->st_rdev);
6410         __put_user(host_st->st_size, &target_st->st_size);
6411         __put_user(host_st->st_blksize, &target_st->st_blksize);
6412         __put_user(host_st->st_blocks, &target_st->st_blocks);
6413         __put_user(host_st->st_atime, &target_st->target_st_atime);
6414         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6415         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6416 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6417         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6418         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6419         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6420 #endif
6421         unlock_user_struct(target_st, target_addr, 1);
6422     } else
6423 #endif
6424     {
6425 #if defined(TARGET_HAS_STRUCT_STAT64)
6426         struct target_stat64 *target_st;
6427 #else
6428         struct target_stat *target_st;
6429 #endif
6430 
6431         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6432             return -TARGET_EFAULT;
6433         memset(target_st, 0, sizeof(*target_st));
6434         __put_user(host_st->st_dev, &target_st->st_dev);
6435         __put_user(host_st->st_ino, &target_st->st_ino);
6436 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6437         __put_user(host_st->st_ino, &target_st->__st_ino);
6438 #endif
6439         __put_user(host_st->st_mode, &target_st->st_mode);
6440         __put_user(host_st->st_nlink, &target_st->st_nlink);
6441         __put_user(host_st->st_uid, &target_st->st_uid);
6442         __put_user(host_st->st_gid, &target_st->st_gid);
6443         __put_user(host_st->st_rdev, &target_st->st_rdev);
6444         /* XXX: better use of kernel struct */
6445         __put_user(host_st->st_size, &target_st->st_size);
6446         __put_user(host_st->st_blksize, &target_st->st_blksize);
6447         __put_user(host_st->st_blocks, &target_st->st_blocks);
6448         __put_user(host_st->st_atime, &target_st->target_st_atime);
6449         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6450         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6451 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6452         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6453         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6454         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6455 #endif
6456         unlock_user_struct(target_st, target_addr, 1);
6457     }
6458 
6459     return 0;
6460 }
6461 #endif
6462 
6463 /* ??? Using host futex calls even when target atomic operations
6464    are not really atomic probably breaks things.  However, implementing
6465    futexes locally would make futexes shared between multiple processes
6466    tricky.  They're probably useless anyway, because guest atomic
6467    operations won't work either.  */
6468 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6469                     target_ulong uaddr2, int val3)
6470 {
6471     struct timespec ts, *pts;
6472     int base_op;
6473 
6474     /* ??? We assume FUTEX_* constants are the same on both host
6475        and target.  */
6476 #ifdef FUTEX_CMD_MASK
6477     base_op = op & FUTEX_CMD_MASK;
6478 #else
6479     base_op = op;
6480 #endif
6481     switch (base_op) {
6482     case FUTEX_WAIT:
6483     case FUTEX_WAIT_BITSET:
6484         if (timeout) {
6485             pts = &ts;
6486             target_to_host_timespec(pts, timeout);
6487         } else {
6488             pts = NULL;
6489         }
6490         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6491                          pts, NULL, val3));
6492     case FUTEX_WAKE:
6493         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6494     case FUTEX_FD:
6495         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6496     case FUTEX_REQUEUE:
6497     case FUTEX_CMP_REQUEUE:
6498     case FUTEX_WAKE_OP:
6499         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6500            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6501            But the prototype takes a `struct timespec *'; insert casts
6502            to satisfy the compiler.  We do not need to tswap TIMEOUT
6503            since it's not compared to guest memory.  */
6504         pts = (struct timespec *)(uintptr_t) timeout;
6505         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6506                                     g2h(uaddr2),
6507                                     (base_op == FUTEX_CMP_REQUEUE
6508                                      ? tswap32(val3)
6509                                      : val3)));
6510     default:
6511         return -TARGET_ENOSYS;
6512     }
6513 }
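
/*
 * Illustrative example: a guest libc typically issues
 * futex(uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, ...).  FUTEX_CMD_MASK
 * strips the PRIVATE/CLOCK_REALTIME modifier bits so the switch above sees
 * plain FUTEX_WAIT, while the unmasked op (modifiers included) is still
 * passed through to the host futex call.  val is byte-swapped for the
 * compare-style operations because the kernel compares it against the
 * guest-endian word at uaddr.
 */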
6514 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6515 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6516                                      abi_long handle, abi_long mount_id,
6517                                      abi_long flags)
6518 {
6519     struct file_handle *target_fh;
6520     struct file_handle *fh;
6521     int mid = 0;
6522     abi_long ret;
6523     char *name;
6524     unsigned int size, total_size;
6525 
6526     if (get_user_s32(size, handle)) {
6527         return -TARGET_EFAULT;
6528     }
6529 
6530     name = lock_user_string(pathname);
6531     if (!name) {
6532         return -TARGET_EFAULT;
6533     }
6534 
6535     total_size = sizeof(struct file_handle) + size;
6536     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6537     if (!target_fh) {
6538         unlock_user(name, pathname, 0);
6539         return -TARGET_EFAULT;
6540     }
6541 
6542     fh = g_malloc0(total_size);
6543     fh->handle_bytes = size;
6544 
6545     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6546     unlock_user(name, pathname, 0);
6547 
6548     /* man name_to_handle_at(2):
6549      * Other than the use of the handle_bytes field, the caller should treat
6550      * the file_handle structure as an opaque data type
6551      */
6552 
6553     memcpy(target_fh, fh, total_size);
6554     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6555     target_fh->handle_type = tswap32(fh->handle_type);
6556     g_free(fh);
6557     unlock_user(target_fh, handle, total_size);
6558 
6559     if (put_user_s32(mid, mount_id)) {
6560         return -TARGET_EFAULT;
6561     }
6562 
6563     return ret;
6564 
6565 }
6566 #endif
6567 
6568 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6569 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6570                                      abi_long flags)
6571 {
6572     struct file_handle *target_fh;
6573     struct file_handle *fh;
6574     unsigned int size, total_size;
6575     abi_long ret;
6576 
6577     if (get_user_s32(size, handle)) {
6578         return -TARGET_EFAULT;
6579     }
6580 
6581     total_size = sizeof(struct file_handle) + size;
6582     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6583     if (!target_fh) {
6584         return -TARGET_EFAULT;
6585     }
6586 
6587     fh = g_memdup(target_fh, total_size);
6588     fh->handle_bytes = size;
6589     fh->handle_type = tswap32(target_fh->handle_type);
6590 
6591     ret = get_errno(open_by_handle_at(mount_fd, fh,
6592                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6593 
6594     g_free(fh);
6595 
6596     unlock_user(target_fh, handle, total_size);
6597 
6598     return ret;
6599 }
6600 #endif
6601 
6602 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6603 
6604 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6605 {
6606     int host_flags;
6607     target_sigset_t *target_mask;
6608     sigset_t host_mask;
6609     abi_long ret;
6610 
6611     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6612         return -TARGET_EINVAL;
6613     }
6614     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6615         return -TARGET_EFAULT;
6616     }
6617 
6618     target_to_host_sigset(&host_mask, target_mask);
6619 
6620     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6621 
6622     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6623     if (ret >= 0) {
6624         fd_trans_register(ret, &target_signalfd_trans);
6625     }
6626 
6627     unlock_user_struct(target_mask, mask, 0);
6628 
6629     return ret;
6630 }
6631 #endif
6632 
6633 /* Map host to target signal numbers for the wait family of syscalls.
6634    Assume all other status bits are the same.  */
6635 int host_to_target_waitstatus(int status)
6636 {
6637     if (WIFSIGNALED(status)) {
6638         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6639     }
6640     if (WIFSTOPPED(status)) {
6641         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6642                | (status & 0xff);
6643     }
6644     return status;
6645 }
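
/*
 * Example of the encoding being translated: a child stopped by SIGSTOP
 * yields a host status of (SIGSTOP << 8) | 0x7f; on a guest whose SIGSTOP
 * number differs from the host's (e.g. MIPS, where it is 23), the stop
 * signal in bits 8-15 is remapped while the 0x7f "stopped" marker in the
 * low byte is preserved.  Exit statuses carry no signal number and pass
 * through unchanged.
 */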
6646 
6647 static int open_self_cmdline(void *cpu_env, int fd)
6648 {
6649     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6650     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6651     int i;
6652 
6653     for (i = 0; i < bprm->argc; i++) {
6654         size_t len = strlen(bprm->argv[i]) + 1;
6655 
6656         if (write(fd, bprm->argv[i], len) != len) {
6657             return -1;
6658         }
6659     }
6660 
6661     return 0;
6662 }
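
/*
 * /proc/<pid>/cmdline is a sequence of NUL-terminated argument strings, so
 * each argv[i] is written including its trailing '\0' (hence the
 * strlen(...) + 1 above) to reproduce that format for the guest.
 */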
6663 
6664 static int open_self_maps(void *cpu_env, int fd)
6665 {
6666     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6667     TaskState *ts = cpu->opaque;
6668     FILE *fp;
6669     char *line = NULL;
6670     size_t len = 0;
6671     ssize_t read;
6672 
6673     fp = fopen("/proc/self/maps", "r");
6674     if (fp == NULL) {
6675         return -1;
6676     }
6677 
6678     while ((read = getline(&line, &len, fp)) != -1) {
6679         int fields, dev_maj, dev_min, inode;
6680         uint64_t min, max, offset;
6681         char flag_r, flag_w, flag_x, flag_p;
6682         char path[512] = "";
6683         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6684                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6685                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6686 
6687         if ((fields < 10) || (fields > 11)) {
6688             continue;
6689         }
6690         if (h2g_valid(min)) {
6691             int flags = page_get_flags(h2g(min));
6692             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6693             if (page_check_range(h2g(min), max - min, flags) == -1) {
6694                 continue;
6695             }
6696             if (h2g(min) == ts->info->stack_limit) {
6697                 pstrcpy(path, sizeof(path), "      [stack]");
6698             }
6699             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6700                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6701                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6702                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6703                     path[0] ? "         " : "", path);
6704         }
6705     }
6706 
6707     free(line);
6708     fclose(fp);
6709 
6710     return 0;
6711 }
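
/*
 * The host's /proc/self/maps describes host virtual addresses, so each
 * mapping is filtered to the part that is valid guest memory and rewritten
 * with h2g() so the guest sees addresses in its own address space; the
 * guest stack mapping is additionally labelled "[stack]".
 */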
6712 
6713 static int open_self_stat(void *cpu_env, int fd)
6714 {
6715     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6716     TaskState *ts = cpu->opaque;
6717     abi_ulong start_stack = ts->info->start_stack;
6718     int i;
6719 
6720     for (i = 0; i < 44; i++) {
6721       char buf[128];
6722       int len;
6723       uint64_t val = 0;
6724 
6725       if (i == 0) {
6726         /* pid */
6727         val = getpid();
6728         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6729       } else if (i == 1) {
6730         /* app name */
6731         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6732       } else if (i == 27) {
6733         /* stack bottom */
6734         val = start_stack;
6735         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6736       } else {
6737         /* for the rest, there is MasterCard */
6738         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6739       }
6740 
6741       len = strlen(buf);
6742       if (write(fd, buf, len) != len) {
6743           return -1;
6744       }
6745     }
6746 
6747     return 0;
6748 }
6749 
6750 static int open_self_auxv(void *cpu_env, int fd)
6751 {
6752     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6753     TaskState *ts = cpu->opaque;
6754     abi_ulong auxv = ts->info->saved_auxv;
6755     abi_ulong len = ts->info->auxv_len;
6756     char *ptr;
6757 
6758     /*
6759      * The auxiliary vector is stored on the target process's stack;
6760      * read the whole auxv vector and copy it to the file.
6761      */
6762     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6763     if (ptr != NULL) {
6764         while (len > 0) {
6765             ssize_t r;
6766             r = write(fd, ptr, len);
6767             if (r <= 0) {
6768                 break;
6769             }
6770             len -= r;
6771             ptr += r;
6772         }
6773         lseek(fd, 0, SEEK_SET);
6774         unlock_user(ptr, auxv, len);
6775     }
6776 
6777     return 0;
6778 }
6779 
6780 static int is_proc_myself(const char *filename, const char *entry)
6781 {
6782     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6783         filename += strlen("/proc/");
6784         if (!strncmp(filename, "self/", strlen("self/"))) {
6785             filename += strlen("self/");
6786         } else if (*filename >= '1' && *filename <= '9') {
6787             char myself[80];
6788             snprintf(myself, sizeof(myself), "%d/", getpid());
6789             if (!strncmp(filename, myself, strlen(myself))) {
6790                 filename += strlen(myself);
6791             } else {
6792                 return 0;
6793             }
6794         } else {
6795             return 0;
6796         }
6797         if (!strcmp(filename, entry)) {
6798             return 1;
6799         }
6800     }
6801     return 0;
6802 }
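
/*
 * Examples (assuming the emulator's own pid is 1234):
 *   is_proc_myself("/proc/self/maps", "maps")  -> 1
 *   is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *   is_proc_myself("/proc/4321/maps", "maps")  -> 0   (some other process)
 *   is_proc_myself("/proc/meminfo",   "maps")  -> 0
 */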
6803 
6804 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6805     defined(TARGET_SPARC) || defined(TARGET_M68K)
6806 static int is_proc(const char *filename, const char *entry)
6807 {
6808     return strcmp(filename, entry) == 0;
6809 }
6810 #endif
6811 
6812 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6813 static int open_net_route(void *cpu_env, int fd)
6814 {
6815     FILE *fp;
6816     char *line = NULL;
6817     size_t len = 0;
6818     ssize_t read;
6819 
6820     fp = fopen("/proc/net/route", "r");
6821     if (fp == NULL) {
6822         return -1;
6823     }
6824 
6825     /* read header */
6826 
6827     read = getline(&line, &len, fp);
6828     dprintf(fd, "%s", line);
6829 
6830     /* read routes */
6831 
6832     while ((read = getline(&line, &len, fp)) != -1) {
6833         char iface[16];
6834         uint32_t dest, gw, mask;
6835         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6836         int fields;
6837 
6838         fields = sscanf(line,
6839                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6840                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6841                         &mask, &mtu, &window, &irtt);
6842         if (fields != 11) {
6843             continue;
6844         }
6845         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6846                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6847                 metric, tswap32(mask), mtu, window, irtt);
6848     }
6849 
6850     free(line);
6851     fclose(fp);
6852 
6853     return 0;
6854 }
6855 #endif
6856 
6857 #if defined(TARGET_SPARC)
6858 static int open_cpuinfo(void *cpu_env, int fd)
6859 {
6860     dprintf(fd, "type\t\t: sun4u\n");
6861     return 0;
6862 }
6863 #endif
6864 
6865 #if defined(TARGET_M68K)
6866 static int open_hardware(void *cpu_env, int fd)
6867 {
6868     dprintf(fd, "Model:\t\tqemu-m68k\n");
6869     return 0;
6870 }
6871 #endif
6872 
6873 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6874 {
6875     struct fake_open {
6876         const char *filename;
6877         int (*fill)(void *cpu_env, int fd);
6878         int (*cmp)(const char *s1, const char *s2);
6879     };
6880     const struct fake_open *fake_open;
6881     static const struct fake_open fakes[] = {
6882         { "maps", open_self_maps, is_proc_myself },
6883         { "stat", open_self_stat, is_proc_myself },
6884         { "auxv", open_self_auxv, is_proc_myself },
6885         { "cmdline", open_self_cmdline, is_proc_myself },
6886 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6887         { "/proc/net/route", open_net_route, is_proc },
6888 #endif
6889 #if defined(TARGET_SPARC)
6890         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6891 #endif
6892 #if defined(TARGET_M68K)
6893         { "/proc/hardware", open_hardware, is_proc },
6894 #endif
6895         { NULL, NULL, NULL }
6896     };
6897 
6898     if (is_proc_myself(pathname, "exe")) {
6899         int execfd = qemu_getauxval(AT_EXECFD);
6900         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6901     }
6902 
6903     for (fake_open = fakes; fake_open->filename; fake_open++) {
6904         if (fake_open->cmp(pathname, fake_open->filename)) {
6905             break;
6906         }
6907     }
6908 
6909     if (fake_open->filename) {
6910         const char *tmpdir;
6911         char filename[PATH_MAX];
6912         int fd, r;
6913 
6914         /* create a temporary file to back the faked /proc entry */
6915         tmpdir = getenv("TMPDIR");
6916         if (!tmpdir)
6917             tmpdir = "/tmp";
6918         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6919         fd = mkstemp(filename);
6920         if (fd < 0) {
6921             return fd;
6922         }
6923         unlink(filename);
6924 
6925         if ((r = fake_open->fill(cpu_env, fd))) {
6926             int e = errno;
6927             close(fd);
6928             errno = e;
6929             return r;
6930         }
6931         lseek(fd, 0, SEEK_SET);
6932 
6933         return fd;
6934     }
6935 
6936     return safe_openat(dirfd, path(pathname), flags, mode);
6937 }
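
/*
 * In short: opens of the current process's /proc entries listed in fakes[]
 * (and a few whole-path entries such as /proc/net/route on cross-endian
 * hosts) never reach the host file.  Instead an unlinked temporary file is
 * filled with guest-appropriate contents by the matching fill() callback
 * and its descriptor is returned; everything else falls through to a
 * normal openat() on the host.
 */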
6938 
6939 #define TIMER_MAGIC 0x0caf0000
6940 #define TIMER_MAGIC_MASK 0xffff0000
6941 
6942 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6943 static target_timer_t get_timer_id(abi_long arg)
6944 {
6945     target_timer_t timerid = arg;
6946 
6947     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6948         return -TARGET_EINVAL;
6949     }
6950 
6951     timerid &= 0xffff;
6952 
6953     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6954         return -TARGET_EINVAL;
6955     }
6956 
6957     return timerid;
6958 }
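
/*
 * Illustrative example: the timer in slot 3 of g_posix_timers[] is handed
 * to the guest as 0x0caf0003; get_timer_id(0x0caf0003) recovers index 3,
 * while a value without the 0x0caf0000 magic (or with an index beyond the
 * table) is rejected with -TARGET_EINVAL.
 */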
6959 
6960 static int target_to_host_cpu_mask(unsigned long *host_mask,
6961                                    size_t host_size,
6962                                    abi_ulong target_addr,
6963                                    size_t target_size)
6964 {
6965     unsigned target_bits = sizeof(abi_ulong) * 8;
6966     unsigned host_bits = sizeof(*host_mask) * 8;
6967     abi_ulong *target_mask;
6968     unsigned i, j;
6969 
6970     assert(host_size >= target_size);
6971 
6972     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6973     if (!target_mask) {
6974         return -TARGET_EFAULT;
6975     }
6976     memset(host_mask, 0, host_size);
6977 
6978     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6979         unsigned bit = i * target_bits;
6980         abi_ulong val;
6981 
6982         __get_user(val, &target_mask[i]);
6983         for (j = 0; j < target_bits; j++, bit++) {
6984             if (val & (1UL << j)) {
6985                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6986             }
6987         }
6988     }
6989 
6990     unlock_user(target_mask, target_addr, 0);
6991     return 0;
6992 }
6993 
6994 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6995                                    size_t host_size,
6996                                    abi_ulong target_addr,
6997                                    size_t target_size)
6998 {
6999     unsigned target_bits = sizeof(abi_ulong) * 8;
7000     unsigned host_bits = sizeof(*host_mask) * 8;
7001     abi_ulong *target_mask;
7002     unsigned i, j;
7003 
7004     assert(host_size >= target_size);
7005 
7006     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7007     if (!target_mask) {
7008         return -TARGET_EFAULT;
7009     }
7010 
7011     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7012         unsigned bit = i * target_bits;
7013         abi_ulong val = 0;
7014 
7015         for (j = 0; j < target_bits; j++, bit++) {
7016             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7017                 val |= 1UL << j;
7018             }
7019         }
7020         __put_user(val, &target_mask[i]);
7021     }
7022 
7023     unlock_user(target_mask, target_addr, target_size);
7024     return 0;
7025 }
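
/*
 * Illustrative example: with a 32-bit guest (abi_ulong = 32 bits) on a
 * 64-bit host (unsigned long = 64 bits), guest CPU 35 lives in bit 3 of
 * target word 1; the loops above move it to bit 35 of host word 0, and
 * host_to_target_cpu_mask() performs the inverse mapping when returning an
 * affinity mask to the guest.
 */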
7026 
7027 /* This is an internal helper for do_syscall so that it is easier
7028  * to have a single return point, so that actions, such as logging
7029  * of syscall results, can be performed.
7030  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7031  */
7032 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7033                             abi_long arg2, abi_long arg3, abi_long arg4,
7034                             abi_long arg5, abi_long arg6, abi_long arg7,
7035                             abi_long arg8)
7036 {
7037     CPUState *cpu = ENV_GET_CPU(cpu_env);
7038     abi_long ret;
7039 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7040     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7041     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7042     struct stat st;
7043 #endif
7044 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7045     || defined(TARGET_NR_fstatfs)
7046     struct statfs stfs;
7047 #endif
7048     void *p;
7049 
7050     switch(num) {
7051     case TARGET_NR_exit:
7052         /* In old applications this may be used to implement _exit(2).
7053            However, in threaded applications it is used for thread termination,
7054            and _exit_group is used for application termination.
7055            Do thread termination if we have more than one thread.  */
7056 
7057         if (block_signals()) {
7058             return -TARGET_ERESTARTSYS;
7059         }
7060 
7061         cpu_list_lock();
7062 
7063         if (CPU_NEXT(first_cpu)) {
7064             TaskState *ts;
7065 
7066             /* Remove the CPU from the list.  */
7067             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7068 
7069             cpu_list_unlock();
7070 
7071             ts = cpu->opaque;
7072             if (ts->child_tidptr) {
7073                 put_user_u32(0, ts->child_tidptr);
7074                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7075                           NULL, NULL, 0);
7076             }
7077             thread_cpu = NULL;
7078             object_unref(OBJECT(cpu));
7079             g_free(ts);
7080             rcu_unregister_thread();
7081             pthread_exit(NULL);
7082         }
7083 
7084         cpu_list_unlock();
7085         preexit_cleanup(cpu_env, arg1);
7086         _exit(arg1);
7087         return 0; /* avoid warning */
7088     case TARGET_NR_read:
7089         if (arg2 == 0 && arg3 == 0) {
7090             return get_errno(safe_read(arg1, 0, 0));
7091         } else {
7092             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7093                 return -TARGET_EFAULT;
7094             ret = get_errno(safe_read(arg1, p, arg3));
7095             if (ret >= 0 &&
7096                 fd_trans_host_to_target_data(arg1)) {
7097                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7098             }
7099             unlock_user(p, arg2, ret);
7100         }
7101         return ret;
7102     case TARGET_NR_write:
7103         if (arg2 == 0 && arg3 == 0) {
7104             return get_errno(safe_write(arg1, 0, 0));
7105         }
7106         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7107             return -TARGET_EFAULT;
7108         if (fd_trans_target_to_host_data(arg1)) {
7109             void *copy = g_malloc(arg3);
7110             memcpy(copy, p, arg3);
7111             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7112             if (ret >= 0) {
7113                 ret = get_errno(safe_write(arg1, copy, ret));
7114             }
7115             g_free(copy);
7116         } else {
7117             ret = get_errno(safe_write(arg1, p, arg3));
7118         }
7119         unlock_user(p, arg2, 0);
7120         return ret;
7121 
7122 #ifdef TARGET_NR_open
7123     case TARGET_NR_open:
7124         if (!(p = lock_user_string(arg1)))
7125             return -TARGET_EFAULT;
7126         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7127                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7128                                   arg3));
7129         fd_trans_unregister(ret);
7130         unlock_user(p, arg1, 0);
7131         return ret;
7132 #endif
7133     case TARGET_NR_openat:
7134         if (!(p = lock_user_string(arg2)))
7135             return -TARGET_EFAULT;
7136         ret = get_errno(do_openat(cpu_env, arg1, p,
7137                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7138                                   arg4));
7139         fd_trans_unregister(ret);
7140         unlock_user(p, arg2, 0);
7141         return ret;
7142 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7143     case TARGET_NR_name_to_handle_at:
7144         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7145         return ret;
7146 #endif
7147 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7148     case TARGET_NR_open_by_handle_at:
7149         ret = do_open_by_handle_at(arg1, arg2, arg3);
7150         fd_trans_unregister(ret);
7151         return ret;
7152 #endif
7153     case TARGET_NR_close:
7154         fd_trans_unregister(arg1);
7155         return get_errno(close(arg1));
7156 
7157     case TARGET_NR_brk:
7158         return do_brk(arg1);
7159 #ifdef TARGET_NR_fork
7160     case TARGET_NR_fork:
7161         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7162 #endif
7163 #ifdef TARGET_NR_waitpid
7164     case TARGET_NR_waitpid:
7165         {
7166             int status;
7167             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7168             if (!is_error(ret) && arg2 && ret
7169                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7170                 return -TARGET_EFAULT;
7171         }
7172         return ret;
7173 #endif
7174 #ifdef TARGET_NR_waitid
7175     case TARGET_NR_waitid:
7176         {
7177             siginfo_t info;
7178             info.si_pid = 0;
7179             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7180             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7181                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7182                     return -TARGET_EFAULT;
7183                 host_to_target_siginfo(p, &info);
7184                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7185             }
7186         }
7187         return ret;
7188 #endif
7189 #ifdef TARGET_NR_creat /* not on alpha */
7190     case TARGET_NR_creat:
7191         if (!(p = lock_user_string(arg1)))
7192             return -TARGET_EFAULT;
7193         ret = get_errno(creat(p, arg2));
7194         fd_trans_unregister(ret);
7195         unlock_user(p, arg1, 0);
7196         return ret;
7197 #endif
7198 #ifdef TARGET_NR_link
7199     case TARGET_NR_link:
7200         {
7201             void * p2;
7202             p = lock_user_string(arg1);
7203             p2 = lock_user_string(arg2);
7204             if (!p || !p2)
7205                 ret = -TARGET_EFAULT;
7206             else
7207                 ret = get_errno(link(p, p2));
7208             unlock_user(p2, arg2, 0);
7209             unlock_user(p, arg1, 0);
7210         }
7211         return ret;
7212 #endif
7213 #if defined(TARGET_NR_linkat)
7214     case TARGET_NR_linkat:
7215         {
7216             void * p2 = NULL;
7217             if (!arg2 || !arg4)
7218                 return -TARGET_EFAULT;
7219             p  = lock_user_string(arg2);
7220             p2 = lock_user_string(arg4);
7221             if (!p || !p2)
7222                 ret = -TARGET_EFAULT;
7223             else
7224                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7225             unlock_user(p, arg2, 0);
7226             unlock_user(p2, arg4, 0);
7227         }
7228         return ret;
7229 #endif
7230 #ifdef TARGET_NR_unlink
7231     case TARGET_NR_unlink:
7232         if (!(p = lock_user_string(arg1)))
7233             return -TARGET_EFAULT;
7234         ret = get_errno(unlink(p));
7235         unlock_user(p, arg1, 0);
7236         return ret;
7237 #endif
7238 #if defined(TARGET_NR_unlinkat)
7239     case TARGET_NR_unlinkat:
7240         if (!(p = lock_user_string(arg2)))
7241             return -TARGET_EFAULT;
7242         ret = get_errno(unlinkat(arg1, p, arg3));
7243         unlock_user(p, arg2, 0);
7244         return ret;
7245 #endif
7246     case TARGET_NR_execve:
7247         {
7248             char **argp, **envp;
7249             int argc, envc;
7250             abi_ulong gp;
7251             abi_ulong guest_argp;
7252             abi_ulong guest_envp;
7253             abi_ulong addr;
7254             char **q;
7255             int total_size = 0;
7256 
7257             argc = 0;
7258             guest_argp = arg2;
7259             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7260                 if (get_user_ual(addr, gp))
7261                     return -TARGET_EFAULT;
7262                 if (!addr)
7263                     break;
7264                 argc++;
7265             }
7266             envc = 0;
7267             guest_envp = arg3;
7268             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7269                 if (get_user_ual(addr, gp))
7270                     return -TARGET_EFAULT;
7271                 if (!addr)
7272                     break;
7273                 envc++;
7274             }
7275 
7276             argp = g_new0(char *, argc + 1);
7277             envp = g_new0(char *, envc + 1);
7278 
7279             for (gp = guest_argp, q = argp; gp;
7280                   gp += sizeof(abi_ulong), q++) {
7281                 if (get_user_ual(addr, gp))
7282                     goto execve_efault;
7283                 if (!addr)
7284                     break;
7285                 if (!(*q = lock_user_string(addr)))
7286                     goto execve_efault;
7287                 total_size += strlen(*q) + 1;
7288             }
7289             *q = NULL;
7290 
7291             for (gp = guest_envp, q = envp; gp;
7292                   gp += sizeof(abi_ulong), q++) {
7293                 if (get_user_ual(addr, gp))
7294                     goto execve_efault;
7295                 if (!addr)
7296                     break;
7297                 if (!(*q = lock_user_string(addr)))
7298                     goto execve_efault;
7299                 total_size += strlen(*q) + 1;
7300             }
7301             *q = NULL;
7302 
7303             if (!(p = lock_user_string(arg1)))
7304                 goto execve_efault;
7305             /* Although execve() is not an interruptible syscall, it is
7306              * a special case where we must use the safe_syscall wrapper:
7307              * if we allow a signal to happen before we make the host
7308              * syscall then we will 'lose' it, because at the point of
7309              * execve the process leaves QEMU's control. So we use the
7310              * safe syscall wrapper to ensure that we either take the
7311              * signal as a guest signal, or else it does not happen
7312              * before the execve completes and makes it the other
7313              * program's problem.
7314              */
7315             ret = get_errno(safe_execve(p, argp, envp));
7316             unlock_user(p, arg1, 0);
7317 
7318             goto execve_end;
7319 
7320         execve_efault:
7321             ret = -TARGET_EFAULT;
7322 
7323         execve_end:
7324             for (gp = guest_argp, q = argp; *q;
7325                   gp += sizeof(abi_ulong), q++) {
7326                 if (get_user_ual(addr, gp)
7327                     || !addr)
7328                     break;
7329                 unlock_user(*q, addr, 0);
7330             }
7331             for (gp = guest_envp, q = envp; *q;
7332                   gp += sizeof(abi_ulong), q++) {
7333                 if (get_user_ual(addr, gp)
7334                     || !addr)
7335                     break;
7336                 unlock_user(*q, addr, 0);
7337             }
7338 
7339             g_free(argp);
7340             g_free(envp);
7341         }
7342         return ret;
7343     case TARGET_NR_chdir:
7344         if (!(p = lock_user_string(arg1)))
7345             return -TARGET_EFAULT;
7346         ret = get_errno(chdir(p));
7347         unlock_user(p, arg1, 0);
7348         return ret;
7349 #ifdef TARGET_NR_time
7350     case TARGET_NR_time:
7351         {
7352             time_t host_time;
7353             ret = get_errno(time(&host_time));
7354             if (!is_error(ret)
7355                 && arg1
7356                 && put_user_sal(host_time, arg1))
7357                 return -TARGET_EFAULT;
7358         }
7359         return ret;
7360 #endif
7361 #ifdef TARGET_NR_mknod
7362     case TARGET_NR_mknod:
7363         if (!(p = lock_user_string(arg1)))
7364             return -TARGET_EFAULT;
7365         ret = get_errno(mknod(p, arg2, arg3));
7366         unlock_user(p, arg1, 0);
7367         return ret;
7368 #endif
7369 #if defined(TARGET_NR_mknodat)
7370     case TARGET_NR_mknodat:
7371         if (!(p = lock_user_string(arg2)))
7372             return -TARGET_EFAULT;
7373         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7374         unlock_user(p, arg2, 0);
7375         return ret;
7376 #endif
7377 #ifdef TARGET_NR_chmod
7378     case TARGET_NR_chmod:
7379         if (!(p = lock_user_string(arg1)))
7380             return -TARGET_EFAULT;
7381         ret = get_errno(chmod(p, arg2));
7382         unlock_user(p, arg1, 0);
7383         return ret;
7384 #endif
7385 #ifdef TARGET_NR_lseek
7386     case TARGET_NR_lseek:
7387         return get_errno(lseek(arg1, arg2, arg3));
7388 #endif
7389 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7390     /* Alpha specific */
7391     case TARGET_NR_getxpid:
7392         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7393         return get_errno(getpid());
7394 #endif
7395 #ifdef TARGET_NR_getpid
7396     case TARGET_NR_getpid:
7397         return get_errno(getpid());
7398 #endif
7399     case TARGET_NR_mount:
7400         {
7401             /* need to look at the data field */
7402             void *p2, *p3;
7403 
7404             if (arg1) {
7405                 p = lock_user_string(arg1);
7406                 if (!p) {
7407                     return -TARGET_EFAULT;
7408                 }
7409             } else {
7410                 p = NULL;
7411             }
7412 
7413             p2 = lock_user_string(arg2);
7414             if (!p2) {
7415                 if (arg1) {
7416                     unlock_user(p, arg1, 0);
7417                 }
7418                 return -TARGET_EFAULT;
7419             }
7420 
7421             if (arg3) {
7422                 p3 = lock_user_string(arg3);
7423                 if (!p3) {
7424                     if (arg1) {
7425                         unlock_user(p, arg1, 0);
7426                     }
7427                     unlock_user(p2, arg2, 0);
7428                     return -TARGET_EFAULT;
7429                 }
7430             } else {
7431                 p3 = NULL;
7432             }
7433 
7434             /* FIXME - arg5 should be locked, but it isn't clear how to
7435              * do that since it's not guaranteed to be a NULL-terminated
7436              * string.
7437              */
7438             if (!arg5) {
7439                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7440             } else {
7441                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7442             }
7443             ret = get_errno(ret);
7444 
7445             if (arg1) {
7446                 unlock_user(p, arg1, 0);
7447             }
7448             unlock_user(p2, arg2, 0);
7449             if (arg3) {
7450                 unlock_user(p3, arg3, 0);
7451             }
7452         }
7453         return ret;
7454 #ifdef TARGET_NR_umount
7455     case TARGET_NR_umount:
7456         if (!(p = lock_user_string(arg1)))
7457             return -TARGET_EFAULT;
7458         ret = get_errno(umount(p));
7459         unlock_user(p, arg1, 0);
7460         return ret;
7461 #endif
7462 #ifdef TARGET_NR_stime /* not on alpha */
7463     case TARGET_NR_stime:
7464         {
7465             time_t host_time;
7466             if (get_user_sal(host_time, arg1))
7467                 return -TARGET_EFAULT;
7468             return get_errno(stime(&host_time));
7469         }
7470 #endif
7471 #ifdef TARGET_NR_alarm /* not on alpha */
7472     case TARGET_NR_alarm:
7473         return alarm(arg1);
7474 #endif
7475 #ifdef TARGET_NR_pause /* not on alpha */
7476     case TARGET_NR_pause:
7477         if (!block_signals()) {
7478             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7479         }
7480         return -TARGET_EINTR;
7481 #endif
7482 #ifdef TARGET_NR_utime
7483     case TARGET_NR_utime:
7484         {
7485             struct utimbuf tbuf, *host_tbuf;
7486             struct target_utimbuf *target_tbuf;
7487             if (arg2) {
7488                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7489                     return -TARGET_EFAULT;
7490                 tbuf.actime = tswapal(target_tbuf->actime);
7491                 tbuf.modtime = tswapal(target_tbuf->modtime);
7492                 unlock_user_struct(target_tbuf, arg2, 0);
7493                 host_tbuf = &tbuf;
7494             } else {
7495                 host_tbuf = NULL;
7496             }
7497             if (!(p = lock_user_string(arg1)))
7498                 return -TARGET_EFAULT;
7499             ret = get_errno(utime(p, host_tbuf));
7500             unlock_user(p, arg1, 0);
7501         }
7502         return ret;
7503 #endif
7504 #ifdef TARGET_NR_utimes
7505     case TARGET_NR_utimes:
7506         {
7507             struct timeval *tvp, tv[2];
7508             if (arg2) {
7509                 if (copy_from_user_timeval(&tv[0], arg2)
7510                     || copy_from_user_timeval(&tv[1],
7511                                               arg2 + sizeof(struct target_timeval)))
7512                     return -TARGET_EFAULT;
7513                 tvp = tv;
7514             } else {
7515                 tvp = NULL;
7516             }
7517             if (!(p = lock_user_string(arg1)))
7518                 return -TARGET_EFAULT;
7519             ret = get_errno(utimes(p, tvp));
7520             unlock_user(p, arg1, 0);
7521         }
7522         return ret;
7523 #endif
7524 #if defined(TARGET_NR_futimesat)
7525     case TARGET_NR_futimesat:
7526         {
7527             struct timeval *tvp, tv[2];
7528             if (arg3) {
7529                 if (copy_from_user_timeval(&tv[0], arg3)
7530                     || copy_from_user_timeval(&tv[1],
7531                                               arg3 + sizeof(struct target_timeval)))
7532                     return -TARGET_EFAULT;
7533                 tvp = tv;
7534             } else {
7535                 tvp = NULL;
7536             }
7537             if (!(p = lock_user_string(arg2))) {
7538                 return -TARGET_EFAULT;
7539             }
7540             ret = get_errno(futimesat(arg1, path(p), tvp));
7541             unlock_user(p, arg2, 0);
7542         }
7543         return ret;
7544 #endif
7545 #ifdef TARGET_NR_access
7546     case TARGET_NR_access:
7547         if (!(p = lock_user_string(arg1))) {
7548             return -TARGET_EFAULT;
7549         }
7550         ret = get_errno(access(path(p), arg2));
7551         unlock_user(p, arg1, 0);
7552         return ret;
7553 #endif
7554 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7555     case TARGET_NR_faccessat:
7556         if (!(p = lock_user_string(arg2))) {
7557             return -TARGET_EFAULT;
7558         }
7559         ret = get_errno(faccessat(arg1, p, arg3, 0));
7560         unlock_user(p, arg2, 0);
7561         return ret;
7562 #endif
7563 #ifdef TARGET_NR_nice /* not on alpha */
7564     case TARGET_NR_nice:
7565         return get_errno(nice(arg1));
7566 #endif
7567     case TARGET_NR_sync:
7568         sync();
7569         return 0;
7570 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7571     case TARGET_NR_syncfs:
7572         return get_errno(syncfs(arg1));
7573 #endif
7574     case TARGET_NR_kill:
7575         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7576 #ifdef TARGET_NR_rename
7577     case TARGET_NR_rename:
7578         {
7579             void *p2;
7580             p = lock_user_string(arg1);
7581             p2 = lock_user_string(arg2);
7582             if (!p || !p2)
7583                 ret = -TARGET_EFAULT;
7584             else
7585                 ret = get_errno(rename(p, p2));
7586             unlock_user(p2, arg2, 0);
7587             unlock_user(p, arg1, 0);
7588         }
7589         return ret;
7590 #endif
7591 #if defined(TARGET_NR_renameat)
7592     case TARGET_NR_renameat:
7593         {
7594             void *p2;
7595             p  = lock_user_string(arg2);
7596             p2 = lock_user_string(arg4);
7597             if (!p || !p2)
7598                 ret = -TARGET_EFAULT;
7599             else
7600                 ret = get_errno(renameat(arg1, p, arg3, p2));
7601             unlock_user(p2, arg4, 0);
7602             unlock_user(p, arg2, 0);
7603         }
7604         return ret;
7605 #endif
7606 #if defined(TARGET_NR_renameat2)
7607     case TARGET_NR_renameat2:
7608         {
7609             void *p2;
7610             p  = lock_user_string(arg2);
7611             p2 = lock_user_string(arg4);
7612             if (!p || !p2) {
7613                 ret = -TARGET_EFAULT;
7614             } else {
7615                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7616             }
7617             unlock_user(p2, arg4, 0);
7618             unlock_user(p, arg2, 0);
7619         }
7620         return ret;
7621 #endif
7622 #ifdef TARGET_NR_mkdir
7623     case TARGET_NR_mkdir:
7624         if (!(p = lock_user_string(arg1)))
7625             return -TARGET_EFAULT;
7626         ret = get_errno(mkdir(p, arg2));
7627         unlock_user(p, arg1, 0);
7628         return ret;
7629 #endif
7630 #if defined(TARGET_NR_mkdirat)
7631     case TARGET_NR_mkdirat:
7632         if (!(p = lock_user_string(arg2)))
7633             return -TARGET_EFAULT;
7634         ret = get_errno(mkdirat(arg1, p, arg3));
7635         unlock_user(p, arg2, 0);
7636         return ret;
7637 #endif
7638 #ifdef TARGET_NR_rmdir
7639     case TARGET_NR_rmdir:
7640         if (!(p = lock_user_string(arg1)))
7641             return -TARGET_EFAULT;
7642         ret = get_errno(rmdir(p));
7643         unlock_user(p, arg1, 0);
7644         return ret;
7645 #endif
7646     case TARGET_NR_dup:
7647         ret = get_errno(dup(arg1));
7648         if (ret >= 0) {
7649             fd_trans_dup(arg1, ret);
7650         }
7651         return ret;
7652 #ifdef TARGET_NR_pipe
7653     case TARGET_NR_pipe:
7654         return do_pipe(cpu_env, arg1, 0, 0);
7655 #endif
7656 #ifdef TARGET_NR_pipe2
7657     case TARGET_NR_pipe2:
7658         return do_pipe(cpu_env, arg1,
7659                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7660 #endif
7661     case TARGET_NR_times:
7662         {
7663             struct target_tms *tmsp;
7664             struct tms tms;
7665             ret = get_errno(times(&tms));
7666             if (arg1) {
7667                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7668                 if (!tmsp)
7669                     return -TARGET_EFAULT;
7670                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7671                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7672                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7673                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7674             }
7675             if (!is_error(ret))
7676                 ret = host_to_target_clock_t(ret);
7677         }
7678         return ret;
7679     case TARGET_NR_acct:
7680         if (arg1 == 0) {
7681             ret = get_errno(acct(NULL));
7682         } else {
7683             if (!(p = lock_user_string(arg1))) {
7684                 return -TARGET_EFAULT;
7685             }
7686             ret = get_errno(acct(path(p)));
7687             unlock_user(p, arg1, 0);
7688         }
7689         return ret;
7690 #ifdef TARGET_NR_umount2
7691     case TARGET_NR_umount2:
7692         if (!(p = lock_user_string(arg1)))
7693             return -TARGET_EFAULT;
7694         ret = get_errno(umount2(p, arg2));
7695         unlock_user(p, arg1, 0);
7696         return ret;
7697 #endif
7698     case TARGET_NR_ioctl:
7699         return do_ioctl(arg1, arg2, arg3);
7700 #ifdef TARGET_NR_fcntl
7701     case TARGET_NR_fcntl:
7702         return do_fcntl(arg1, arg2, arg3);
7703 #endif
7704     case TARGET_NR_setpgid:
7705         return get_errno(setpgid(arg1, arg2));
7706     case TARGET_NR_umask:
7707         return get_errno(umask(arg1));
7708     case TARGET_NR_chroot:
7709         if (!(p = lock_user_string(arg1)))
7710             return -TARGET_EFAULT;
7711         ret = get_errno(chroot(p));
7712         unlock_user(p, arg1, 0);
7713         return ret;
7714 #ifdef TARGET_NR_dup2
7715     case TARGET_NR_dup2:
7716         ret = get_errno(dup2(arg1, arg2));
7717         if (ret >= 0) {
7718             fd_trans_dup(arg1, arg2);
7719         }
7720         return ret;
7721 #endif
7722 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7723     case TARGET_NR_dup3:
7724     {
7725         int host_flags;
7726 
7727         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7728             return -EINVAL;
7729         }
7730         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7731         ret = get_errno(dup3(arg1, arg2, host_flags));
7732         if (ret >= 0) {
7733             fd_trans_dup(arg1, arg2);
7734         }
7735         return ret;
7736     }
7737 #endif
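    /*
     * Note on the fd_trans_dup() calls in the dup/dup2/dup3 cases above:
     * QEMU keeps per-descriptor translation state (see the fd-trans code)
     * for fds whose contents need conversion between guest and host layout,
     * such as netlink sockets. Duplicating a descriptor must also duplicate
     * that state so data read or written through the new fd keeps being
     * translated.
     */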
7738 #ifdef TARGET_NR_getppid /* not on alpha */
7739     case TARGET_NR_getppid:
7740         return get_errno(getppid());
7741 #endif
7742 #ifdef TARGET_NR_getpgrp
7743     case TARGET_NR_getpgrp:
7744         return get_errno(getpgrp());
7745 #endif
7746     case TARGET_NR_setsid:
7747         return get_errno(setsid());
7748 #ifdef TARGET_NR_sigaction
7749     case TARGET_NR_sigaction:
7750         {
7751 #if defined(TARGET_ALPHA)
7752             struct target_sigaction act, oact, *pact = 0;
7753             struct target_old_sigaction *old_act;
7754             if (arg2) {
7755                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7756                     return -TARGET_EFAULT;
7757                 act._sa_handler = old_act->_sa_handler;
7758                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7759                 act.sa_flags = old_act->sa_flags;
7760                 act.sa_restorer = 0;
7761                 unlock_user_struct(old_act, arg2, 0);
7762                 pact = &act;
7763             }
7764             ret = get_errno(do_sigaction(arg1, pact, &oact));
7765             if (!is_error(ret) && arg3) {
7766                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7767                     return -TARGET_EFAULT;
7768                 old_act->_sa_handler = oact._sa_handler;
7769                 old_act->sa_mask = oact.sa_mask.sig[0];
7770                 old_act->sa_flags = oact.sa_flags;
7771                 unlock_user_struct(old_act, arg3, 1);
7772             }
7773 #elif defined(TARGET_MIPS)
7774             struct target_sigaction act, oact, *pact, *old_act;
7775 
7776             if (arg2) {
7777                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7778                     return -TARGET_EFAULT;
7779                 act._sa_handler = old_act->_sa_handler;
7780                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7781                 act.sa_flags = old_act->sa_flags;
7782                 unlock_user_struct(old_act, arg2, 0);
7783                 pact = &act;
7784             } else {
7785                 pact = NULL;
7786             }
7787 
7788             ret = get_errno(do_sigaction(arg1, pact, &oact));
7789 
7790             if (!is_error(ret) && arg3) {
7791                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7792                     return -TARGET_EFAULT;
7793                 old_act->_sa_handler = oact._sa_handler;
7794                 old_act->sa_flags = oact.sa_flags;
7795                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7796                 old_act->sa_mask.sig[1] = 0;
7797                 old_act->sa_mask.sig[2] = 0;
7798                 old_act->sa_mask.sig[3] = 0;
7799                 unlock_user_struct(old_act, arg3, 1);
7800             }
7801 #else
7802             struct target_old_sigaction *old_act;
7803             struct target_sigaction act, oact, *pact;
7804             if (arg2) {
7805                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7806                     return -TARGET_EFAULT;
7807                 act._sa_handler = old_act->_sa_handler;
7808                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7809                 act.sa_flags = old_act->sa_flags;
7810                 act.sa_restorer = old_act->sa_restorer;
7811 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7812                 act.ka_restorer = 0;
7813 #endif
7814                 unlock_user_struct(old_act, arg2, 0);
7815                 pact = &act;
7816             } else {
7817                 pact = NULL;
7818             }
7819             ret = get_errno(do_sigaction(arg1, pact, &oact));
7820             if (!is_error(ret) && arg3) {
7821                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7822                     return -TARGET_EFAULT;
7823                 old_act->_sa_handler = oact._sa_handler;
7824                 old_act->sa_mask = oact.sa_mask.sig[0];
7825                 old_act->sa_flags = oact.sa_flags;
7826                 old_act->sa_restorer = oact.sa_restorer;
7827                 unlock_user_struct(old_act, arg3, 1);
7828             }
7829 #endif
7830         }
7831         return ret;
7832 #endif
7833     case TARGET_NR_rt_sigaction:
7834         {
7835 #if defined(TARGET_ALPHA)
7836             /* For Alpha and SPARC this is a 5 argument syscall, with
7837              * a 'restorer' parameter which must be copied into the
7838              * sa_restorer field of the sigaction struct.
7839              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7840              * and arg5 is the sigsetsize.
7841              * Alpha also has a separate rt_sigaction struct that it uses
7842              * here; SPARC uses the usual sigaction struct.
7843              */
7844             struct target_rt_sigaction *rt_act;
7845             struct target_sigaction act, oact, *pact = 0;
7846 
7847             if (arg4 != sizeof(target_sigset_t)) {
7848                 return -TARGET_EINVAL;
7849             }
7850             if (arg2) {
7851                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7852                     return -TARGET_EFAULT;
7853                 act._sa_handler = rt_act->_sa_handler;
7854                 act.sa_mask = rt_act->sa_mask;
7855                 act.sa_flags = rt_act->sa_flags;
7856                 act.sa_restorer = arg5;
7857                 unlock_user_struct(rt_act, arg2, 0);
7858                 pact = &act;
7859             }
7860             ret = get_errno(do_sigaction(arg1, pact, &oact));
7861             if (!is_error(ret) && arg3) {
7862                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7863                     return -TARGET_EFAULT;
7864                 rt_act->_sa_handler = oact._sa_handler;
7865                 rt_act->sa_mask = oact.sa_mask;
7866                 rt_act->sa_flags = oact.sa_flags;
7867                 unlock_user_struct(rt_act, arg3, 1);
7868             }
7869 #else
7870 #ifdef TARGET_SPARC
7871             target_ulong restorer = arg4;
7872             target_ulong sigsetsize = arg5;
7873 #else
7874             target_ulong sigsetsize = arg4;
7875 #endif
7876             struct target_sigaction *act;
7877             struct target_sigaction *oact;
7878 
7879             if (sigsetsize != sizeof(target_sigset_t)) {
7880                 return -TARGET_EINVAL;
7881             }
7882             if (arg2) {
7883                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7884                     return -TARGET_EFAULT;
7885                 }
7886 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7887                 act->ka_restorer = restorer;
7888 #endif
7889             } else {
7890                 act = NULL;
7891             }
7892             if (arg3) {
7893                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7894                     ret = -TARGET_EFAULT;
7895                     goto rt_sigaction_fail;
7896                 }
7897             } else
7898                 oact = NULL;
7899             ret = get_errno(do_sigaction(arg1, act, oact));
7900         rt_sigaction_fail:
7901             if (act)
7902                 unlock_user_struct(act, arg2, 0);
7903             if (oact)
7904                 unlock_user_struct(oact, arg3, 1);
7905 #endif
7906         }
7907         return ret;
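    /*
     * Rough summary of the rt_sigaction argument layouts handled above, as
     * implied by the code paths (illustrative, not authoritative):
     *
     *   generic: rt_sigaction(sig, act, oact, sigsetsize)
     *   SPARC:   rt_sigaction(sig, act, oact, restorer, sigsetsize)
     *   Alpha:   rt_sigaction(sig, act, oact, sigsetsize, restorer)
     *
     * In all cases the sigsetsize must match sizeof(target_sigset_t) or the
     * call fails with -TARGET_EINVAL.
     */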
7908 #ifdef TARGET_NR_sgetmask /* not on alpha */
7909     case TARGET_NR_sgetmask:
7910         {
7911             sigset_t cur_set;
7912             abi_ulong target_set;
7913             ret = do_sigprocmask(0, NULL, &cur_set);
7914             if (!ret) {
7915                 host_to_target_old_sigset(&target_set, &cur_set);
7916                 ret = target_set;
7917             }
7918         }
7919         return ret;
7920 #endif
7921 #ifdef TARGET_NR_ssetmask /* not on alpha */
7922     case TARGET_NR_ssetmask:
7923         {
7924             sigset_t set, oset;
7925             abi_ulong target_set = arg1;
7926             target_to_host_old_sigset(&set, &target_set);
7927             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7928             if (!ret) {
7929                 host_to_target_old_sigset(&target_set, &oset);
7930                 ret = target_set;
7931             }
7932         }
7933         return ret;
7934 #endif
7935 #ifdef TARGET_NR_sigprocmask
7936     case TARGET_NR_sigprocmask:
7937         {
7938 #if defined(TARGET_ALPHA)
7939             sigset_t set, oldset;
7940             abi_ulong mask;
7941             int how;
7942 
7943             switch (arg1) {
7944             case TARGET_SIG_BLOCK:
7945                 how = SIG_BLOCK;
7946                 break;
7947             case TARGET_SIG_UNBLOCK:
7948                 how = SIG_UNBLOCK;
7949                 break;
7950             case TARGET_SIG_SETMASK:
7951                 how = SIG_SETMASK;
7952                 break;
7953             default:
7954                 return -TARGET_EINVAL;
7955             }
7956             mask = arg2;
7957             target_to_host_old_sigset(&set, &mask);
7958 
7959             ret = do_sigprocmask(how, &set, &oldset);
7960             if (!is_error(ret)) {
7961                 host_to_target_old_sigset(&mask, &oldset);
7962                 ret = mask;
7963                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7964             }
7965 #else
7966             sigset_t set, oldset, *set_ptr;
7967             int how;
7968 
7969             if (arg2) {
7970                 switch (arg1) {
7971                 case TARGET_SIG_BLOCK:
7972                     how = SIG_BLOCK;
7973                     break;
7974                 case TARGET_SIG_UNBLOCK:
7975                     how = SIG_UNBLOCK;
7976                     break;
7977                 case TARGET_SIG_SETMASK:
7978                     how = SIG_SETMASK;
7979                     break;
7980                 default:
7981                     return -TARGET_EINVAL;
7982                 }
7983                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7984                     return -TARGET_EFAULT;
7985                 target_to_host_old_sigset(&set, p);
7986                 unlock_user(p, arg2, 0);
7987                 set_ptr = &set;
7988             } else {
7989                 how = 0;
7990                 set_ptr = NULL;
7991             }
7992             ret = do_sigprocmask(how, set_ptr, &oldset);
7993             if (!is_error(ret) && arg3) {
7994                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7995                     return -TARGET_EFAULT;
7996                 host_to_target_old_sigset(p, &oldset);
7997                 unlock_user(p, arg3, sizeof(target_sigset_t));
7998             }
7999 #endif
8000         }
8001         return ret;
8002 #endif
8003     case TARGET_NR_rt_sigprocmask:
8004         {
8005             int how = arg1;
8006             sigset_t set, oldset, *set_ptr;
8007 
8008             if (arg4 != sizeof(target_sigset_t)) {
8009                 return -TARGET_EINVAL;
8010             }
8011 
8012             if (arg2) {
8013                 switch(how) {
8014                 case TARGET_SIG_BLOCK:
8015                     how = SIG_BLOCK;
8016                     break;
8017                 case TARGET_SIG_UNBLOCK:
8018                     how = SIG_UNBLOCK;
8019                     break;
8020                 case TARGET_SIG_SETMASK:
8021                     how = SIG_SETMASK;
8022                     break;
8023                 default:
8024                     return -TARGET_EINVAL;
8025                 }
8026                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8027                     return -TARGET_EFAULT;
8028                 target_to_host_sigset(&set, p);
8029                 unlock_user(p, arg2, 0);
8030                 set_ptr = &set;
8031             } else {
8032                 how = 0;
8033                 set_ptr = NULL;
8034             }
8035             ret = do_sigprocmask(how, set_ptr, &oldset);
8036             if (!is_error(ret) && arg3) {
8037                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8038                     return -TARGET_EFAULT;
8039                 host_to_target_sigset(p, &oldset);
8040                 unlock_user(p, arg3, sizeof(target_sigset_t));
8041             }
8042         }
8043         return ret;
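    /*
     * For reference, a guest sequence that ends up in the rt_sigprocmask
     * case above would look roughly like this (illustrative only):
     *
     *     sigset_t set, old;
     *     sigemptyset(&set);
     *     sigaddset(&set, SIGUSR1);
     *     sigprocmask(SIG_BLOCK, &set, &old);
     *
     * The TARGET_SIG_* "how" value and the target sigset layout are converted
     * to their host equivalents before calling do_sigprocmask(), and the old
     * mask is converted back into guest memory on success.
     */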
8044 #ifdef TARGET_NR_sigpending
8045     case TARGET_NR_sigpending:
8046         {
8047             sigset_t set;
8048             ret = get_errno(sigpending(&set));
8049             if (!is_error(ret)) {
8050                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8051                     return -TARGET_EFAULT;
8052                 host_to_target_old_sigset(p, &set);
8053                 unlock_user(p, arg1, sizeof(target_sigset_t));
8054             }
8055         }
8056         return ret;
8057 #endif
8058     case TARGET_NR_rt_sigpending:
8059         {
8060             sigset_t set;
8061 
8062             /* Yes, this check is >, not != like most. We follow the kernel's
8063              * logic and it does it like this because it implements
8064              * NR_sigpending through the same code path, and in that case
8065              * the old_sigset_t is smaller in size.
8066              */
8067             if (arg2 > sizeof(target_sigset_t)) {
8068                 return -TARGET_EINVAL;
8069             }
8070 
8071             ret = get_errno(sigpending(&set));
8072             if (!is_error(ret)) {
8073                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8074                     return -TARGET_EFAULT;
8075                 host_to_target_sigset(p, &set);
8076                 unlock_user(p, arg1, sizeof(target_sigset_t));
8077             }
8078         }
8079         return ret;
8080 #ifdef TARGET_NR_sigsuspend
8081     case TARGET_NR_sigsuspend:
8082         {
8083             TaskState *ts = cpu->opaque;
8084 #if defined(TARGET_ALPHA)
8085             abi_ulong mask = arg1;
8086             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8087 #else
8088             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8089                 return -TARGET_EFAULT;
8090             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8091             unlock_user(p, arg1, 0);
8092 #endif
8093             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8094                                                SIGSET_T_SIZE));
8095             if (ret != -TARGET_ERESTARTSYS) {
8096                 ts->in_sigsuspend = 1;
8097             }
8098         }
8099         return ret;
8100 #endif
8101     case TARGET_NR_rt_sigsuspend:
8102         {
8103             TaskState *ts = cpu->opaque;
8104 
8105             if (arg2 != sizeof(target_sigset_t)) {
8106                 return -TARGET_EINVAL;
8107             }
8108             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8109                 return -TARGET_EFAULT;
8110             target_to_host_sigset(&ts->sigsuspend_mask, p);
8111             unlock_user(p, arg1, 0);
8112             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8113                                                SIGSET_T_SIZE));
8114             if (ret != -TARGET_ERESTARTSYS) {
8115                 ts->in_sigsuspend = 1;
8116             }
8117         }
8118         return ret;
8119     case TARGET_NR_rt_sigtimedwait:
8120         {
8121             sigset_t set;
8122             struct timespec uts, *puts;
8123             siginfo_t uinfo;
8124 
8125             if (arg4 != sizeof(target_sigset_t)) {
8126                 return -TARGET_EINVAL;
8127             }
8128 
8129             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8130                 return -TARGET_EFAULT;
8131             target_to_host_sigset(&set, p);
8132             unlock_user(p, arg1, 0);
8133             if (arg3) {
8134                 puts = &uts;
8135                 target_to_host_timespec(puts, arg3);
8136             } else {
8137                 puts = NULL;
8138             }
8139             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8140                                                  SIGSET_T_SIZE));
8141             if (!is_error(ret)) {
8142                 if (arg2) {
8143                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8144                                   0);
8145                     if (!p) {
8146                         return -TARGET_EFAULT;
8147                     }
8148                     host_to_target_siginfo(p, &uinfo);
8149                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8150                 }
8151                 ret = host_to_target_signal(ret);
8152             }
8153         }
8154         return ret;
8155     case TARGET_NR_rt_sigqueueinfo:
8156         {
8157             siginfo_t uinfo;
8158 
8159             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8160             if (!p) {
8161                 return -TARGET_EFAULT;
8162             }
8163             target_to_host_siginfo(&uinfo, p);
8164             unlock_user(p, arg3, 0);
8165             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8166         }
8167         return ret;
8168     case TARGET_NR_rt_tgsigqueueinfo:
8169         {
8170             siginfo_t uinfo;
8171 
8172             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8173             if (!p) {
8174                 return -TARGET_EFAULT;
8175             }
8176             target_to_host_siginfo(&uinfo, p);
8177             unlock_user(p, arg4, 0);
8178             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8179         }
8180         return ret;
8181 #ifdef TARGET_NR_sigreturn
8182     case TARGET_NR_sigreturn:
8183         if (block_signals()) {
8184             return -TARGET_ERESTARTSYS;
8185         }
8186         return do_sigreturn(cpu_env);
8187 #endif
8188     case TARGET_NR_rt_sigreturn:
8189         if (block_signals()) {
8190             return -TARGET_ERESTARTSYS;
8191         }
8192         return do_rt_sigreturn(cpu_env);
8193     case TARGET_NR_sethostname:
8194         if (!(p = lock_user_string(arg1)))
8195             return -TARGET_EFAULT;
8196         ret = get_errno(sethostname(p, arg2));
8197         unlock_user(p, arg1, 0);
8198         return ret;
8199 #ifdef TARGET_NR_setrlimit
8200     case TARGET_NR_setrlimit:
8201         {
8202             int resource = target_to_host_resource(arg1);
8203             struct target_rlimit *target_rlim;
8204             struct rlimit rlim;
8205             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8206                 return -TARGET_EFAULT;
8207             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8208             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8209             unlock_user_struct(target_rlim, arg2, 0);
8210             /*
8211              * If we just passed through resource limit settings for memory then
8212              * they would also apply to QEMU's own allocations, and QEMU will
8213              * crash or hang or die if its allocations fail. Ideally we would
8214              * track the guest allocations in QEMU and apply the limits ourselves.
8215              * For now, just tell the guest the call succeeded but don't actually
8216              * limit anything.
8217              */
8218             if (resource != RLIMIT_AS &&
8219                 resource != RLIMIT_DATA &&
8220                 resource != RLIMIT_STACK) {
8221                 return get_errno(setrlimit(resource, &rlim));
8222             } else {
8223                 return 0;
8224             }
8225         }
8226 #endif
8227 #ifdef TARGET_NR_getrlimit
8228     case TARGET_NR_getrlimit:
8229         {
8230             int resource = target_to_host_resource(arg1);
8231             struct target_rlimit *target_rlim;
8232             struct rlimit rlim;
8233 
8234             ret = get_errno(getrlimit(resource, &rlim));
8235             if (!is_error(ret)) {
8236                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8237                     return -TARGET_EFAULT;
8238                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8239                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8240                 unlock_user_struct(target_rlim, arg2, 1);
8241             }
8242         }
8243         return ret;
8244 #endif
8245     case TARGET_NR_getrusage:
8246         {
8247             struct rusage rusage;
8248             ret = get_errno(getrusage(arg1, &rusage));
8249             if (!is_error(ret)) {
8250                 ret = host_to_target_rusage(arg2, &rusage);
8251             }
8252         }
8253         return ret;
8254     case TARGET_NR_gettimeofday:
8255         {
8256             struct timeval tv;
8257             ret = get_errno(gettimeofday(&tv, NULL));
8258             if (!is_error(ret)) {
8259                 if (copy_to_user_timeval(arg1, &tv))
8260                     return -TARGET_EFAULT;
8261             }
8262         }
8263         return ret;
8264     case TARGET_NR_settimeofday:
8265         {
8266             struct timeval tv, *ptv = NULL;
8267             struct timezone tz, *ptz = NULL;
8268 
8269             if (arg1) {
8270                 if (copy_from_user_timeval(&tv, arg1)) {
8271                     return -TARGET_EFAULT;
8272                 }
8273                 ptv = &tv;
8274             }
8275 
8276             if (arg2) {
8277                 if (copy_from_user_timezone(&tz, arg2)) {
8278                     return -TARGET_EFAULT;
8279                 }
8280                 ptz = &tz;
8281             }
8282 
8283             return get_errno(settimeofday(ptv, ptz));
8284         }
8285 #if defined(TARGET_NR_select)
8286     case TARGET_NR_select:
8287 #if defined(TARGET_WANT_NI_OLD_SELECT)
8288         /* some architectures used to have old_select here
8289          * but now return ENOSYS for it.
8290          */
8291         ret = -TARGET_ENOSYS;
8292 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8293         ret = do_old_select(arg1);
8294 #else
8295         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8296 #endif
8297         return ret;
8298 #endif
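    /*
     * On targets that define TARGET_WANT_OLD_SYS_SELECT, arg1 above is not
     * the nfds count but a guest pointer to a block holding all five select()
     * arguments, which do_old_select() unpacks. A sketch of the assumed
     * layout (matching the old i386-style sys_old_select convention):
     *
     *     struct { abi_long n; abi_long inp, outp, exp; abi_long tvp; };
     */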
8299 #ifdef TARGET_NR_pselect6
8300     case TARGET_NR_pselect6:
8301         {
8302             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8303             fd_set rfds, wfds, efds;
8304             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8305             struct timespec ts, *ts_ptr;
8306 
8307             /*
8308              * The 6th arg is actually two args smashed together,
8309              * so we cannot use the C library.
8310              */
8311             sigset_t set;
8312             struct {
8313                 sigset_t *set;
8314                 size_t size;
8315             } sig, *sig_ptr;
8316 
8317             abi_ulong arg_sigset, arg_sigsize, *arg7;
8318             target_sigset_t *target_sigset;
8319 
8320             n = arg1;
8321             rfd_addr = arg2;
8322             wfd_addr = arg3;
8323             efd_addr = arg4;
8324             ts_addr = arg5;
8325 
8326             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8327             if (ret) {
8328                 return ret;
8329             }
8330             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8331             if (ret) {
8332                 return ret;
8333             }
8334             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8335             if (ret) {
8336                 return ret;
8337             }
8338 
8339             /*
8340              * This takes a timespec, and not a timeval, so we cannot
8341              * use the do_select() helper ...
8342              */
8343             if (ts_addr) {
8344                 if (target_to_host_timespec(&ts, ts_addr)) {
8345                     return -TARGET_EFAULT;
8346                 }
8347                 ts_ptr = &ts;
8348             } else {
8349                 ts_ptr = NULL;
8350             }
8351 
8352             /* Extract the two packed args for the sigset */
8353             if (arg6) {
8354                 sig_ptr = &sig;
8355                 sig.size = SIGSET_T_SIZE;
8356 
8357                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8358                 if (!arg7) {
8359                     return -TARGET_EFAULT;
8360                 }
8361                 arg_sigset = tswapal(arg7[0]);
8362                 arg_sigsize = tswapal(arg7[1]);
8363                 unlock_user(arg7, arg6, 0);
8364 
8365                 if (arg_sigset) {
8366                     sig.set = &set;
8367                     if (arg_sigsize != sizeof(*target_sigset)) {
8368                         /* Like the kernel, we enforce correct size sigsets */
8369                         return -TARGET_EINVAL;
8370                     }
8371                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8372                                               sizeof(*target_sigset), 1);
8373                     if (!target_sigset) {
8374                         return -TARGET_EFAULT;
8375                     }
8376                     target_to_host_sigset(&set, target_sigset);
8377                     unlock_user(target_sigset, arg_sigset, 0);
8378                 } else {
8379                     sig.set = NULL;
8380                 }
8381             } else {
8382                 sig_ptr = NULL;
8383             }
8384 
8385             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8386                                           ts_ptr, sig_ptr));
8387 
8388             if (!is_error(ret)) {
8389                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8390                     return -TARGET_EFAULT;
8391                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8392                     return -TARGET_EFAULT;
8393                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8394                     return -TARGET_EFAULT;
8395 
8396                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8397                     return -TARGET_EFAULT;
8398             }
8399         }
8400         return ret;
8401 #endif
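    /*
     * The packed sixth pselect6 argument unpacked above is, conceptually, a
     * two-word block in guest memory (illustrative layout):
     *
     *     struct {
     *         abi_ulong sigset_addr;   // guest pointer to the sigset, may be 0
     *         abi_ulong sigset_size;   // must equal sizeof(target_sigset_t)
     *     };
     *
     * which is why two abi_ulongs are read from arg6 and the size is checked
     * before the host sigset is constructed.
     */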
8402 #ifdef TARGET_NR_symlink
8403     case TARGET_NR_symlink:
8404         {
8405             void *p2;
8406             p = lock_user_string(arg1);
8407             p2 = lock_user_string(arg2);
8408             if (!p || !p2)
8409                 ret = -TARGET_EFAULT;
8410             else
8411                 ret = get_errno(symlink(p, p2));
8412             unlock_user(p2, arg2, 0);
8413             unlock_user(p, arg1, 0);
8414         }
8415         return ret;
8416 #endif
8417 #if defined(TARGET_NR_symlinkat)
8418     case TARGET_NR_symlinkat:
8419         {
8420             void *p2;
8421             p  = lock_user_string(arg1);
8422             p2 = lock_user_string(arg3);
8423             if (!p || !p2)
8424                 ret = -TARGET_EFAULT;
8425             else
8426                 ret = get_errno(symlinkat(p, arg2, p2));
8427             unlock_user(p2, arg3, 0);
8428             unlock_user(p, arg1, 0);
8429         }
8430         return ret;
8431 #endif
8432 #ifdef TARGET_NR_readlink
8433     case TARGET_NR_readlink:
8434         {
8435             void *p2;
8436             p = lock_user_string(arg1);
8437             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8438             if (!p || !p2) {
8439                 ret = -TARGET_EFAULT;
8440             } else if (!arg3) {
8441                 /* Short circuit this for the magic exe check. */
8442                 ret = -TARGET_EINVAL;
8443             } else if (is_proc_myself((const char *)p, "exe")) {
8444                 char real[PATH_MAX], *temp;
8445                 temp = realpath(exec_path, real);
8446                 /* Return value is # of bytes that we wrote to the buffer. */
8447                 if (temp == NULL) {
8448                     ret = get_errno(-1);
8449                 } else {
8450                     /* Don't worry about sign mismatch as earlier mapping
8451                      * logic would have thrown a bad address error. */
8452                     ret = MIN(strlen(real), arg3);
8453                     /* We cannot NUL terminate the string. */
8454                     memcpy(p2, real, ret);
8455                 }
8456             } else {
8457                 ret = get_errno(readlink(path(p), p2, arg3));
8458             }
8459             unlock_user(p2, arg2, ret);
8460             unlock_user(p, arg1, 0);
8461         }
8462         return ret;
8463 #endif
8464 #if defined(TARGET_NR_readlinkat)
8465     case TARGET_NR_readlinkat:
8466         {
8467             void *p2;
8468             p  = lock_user_string(arg2);
8469             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8470             if (!p || !p2) {
8471                 ret = -TARGET_EFAULT;
8472             } else if (is_proc_myself((const char *)p, "exe")) {
8473                 char real[PATH_MAX], *temp;
8474                 temp = realpath(exec_path, real);
8475                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8476                 snprintf((char *)p2, arg4, "%s", real);
8477             } else {
8478                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8479             }
8480             unlock_user(p2, arg3, ret);
8481             unlock_user(p, arg2, 0);
8482         }
8483         return ret;
8484 #endif
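    /*
     * Both readlink and readlinkat above special-case /proc/self/exe (and
     * /proc/<our pid>/exe, via is_proc_myself()): the guest must see the
     * path of the emulated binary, not the path of QEMU itself, so the
     * answer is built from exec_path with realpath() instead of following
     * the host's symlink. Illustrative guest code that hits this path:
     *
     *     char buf[PATH_MAX];
     *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
     *
     * As with the real syscall, the readlink variant truncates to the buffer
     * size and does not NUL-terminate the result.
     */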
8485 #ifdef TARGET_NR_swapon
8486     case TARGET_NR_swapon:
8487         if (!(p = lock_user_string(arg1)))
8488             return -TARGET_EFAULT;
8489         ret = get_errno(swapon(p, arg2));
8490         unlock_user(p, arg1, 0);
8491         return ret;
8492 #endif
8493     case TARGET_NR_reboot:
8494         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8495            /* arg4 must be ignored in all other cases */
8496            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; it must be ignored in all other cases */
8497            if (!p) {
8498                return -TARGET_EFAULT;
8499            }
8500            ret = get_errno(reboot(arg1, arg2, arg3, p));
8501            unlock_user(p, arg4, 0);
8502         } else {
8503            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8504         }
8505         return ret;
8506 #ifdef TARGET_NR_mmap
8507     case TARGET_NR_mmap:
8508 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8509     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8510     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8511     || defined(TARGET_S390X)
8512         {
8513             abi_ulong *v;
8514             abi_ulong v1, v2, v3, v4, v5, v6;
8515             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8516                 return -TARGET_EFAULT;
8517             v1 = tswapal(v[0]);
8518             v2 = tswapal(v[1]);
8519             v3 = tswapal(v[2]);
8520             v4 = tswapal(v[3]);
8521             v5 = tswapal(v[4]);
8522             v6 = tswapal(v[5]);
8523             unlock_user(v, arg1, 0);
8524             ret = get_errno(target_mmap(v1, v2, v3,
8525                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8526                                         v5, v6));
8527         }
8528 #else
8529         ret = get_errno(target_mmap(arg1, arg2, arg3,
8530                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8531                                     arg5,
8532                                     arg6));
8533 #endif
8534         return ret;
8535 #endif
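    /*
     * On the 32-bit ABIs listed in the #if above, the original mmap syscall
     * takes a single guest pointer to a block of six arguments rather than
     * six separate register arguments, which is why the values are loaded
     * from guest memory and byte-swapped with tswapal() before the
     * target_mmap() call.
     */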
8536 #ifdef TARGET_NR_mmap2
8537     case TARGET_NR_mmap2:
8538 #ifndef MMAP_SHIFT
8539 #define MMAP_SHIFT 12
8540 #endif
8541         ret = target_mmap(arg1, arg2, arg3,
8542                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8543                           arg5, arg6 << MMAP_SHIFT);
8544         return get_errno(ret);
8545 #endif
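    /*
     * mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes (4096 by
     * default) rather than in bytes, so the byte offset handed to
     * target_mmap() is arg6 << MMAP_SHIFT. For example, a guest mapping at
     * file offset 8192 passes arg6 == 2.
     */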
8546     case TARGET_NR_munmap:
8547         return get_errno(target_munmap(arg1, arg2));
8548     case TARGET_NR_mprotect:
8549         {
8550             TaskState *ts = cpu->opaque;
8551             /* Special hack to detect libc making the stack executable.  */
8552             if ((arg3 & PROT_GROWSDOWN)
8553                 && arg1 >= ts->info->stack_limit
8554                 && arg1 <= ts->info->start_stack) {
8555                 arg3 &= ~PROT_GROWSDOWN;
8556                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8557                 arg1 = ts->info->stack_limit;
8558             }
8559         }
8560         return get_errno(target_mprotect(arg1, arg2, arg3));
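    /*
     * The PROT_GROWSDOWN handling above covers the case where the guest's
     * libc makes its stack executable (typically when dlopen()ing a library
     * built with an executable stack): the guest passes an address inside the
     * stack plus PROT_GROWSDOWN, expecting the kernel to extend the change
     * down to the stack limit. Since QEMU knows the guest stack bounds from
     * the image info, it widens the range itself and strips the flag before
     * calling target_mprotect().
     */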
8561 #ifdef TARGET_NR_mremap
8562     case TARGET_NR_mremap:
8563         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8564 #endif
8565         /* ??? msync/mlock/munlock are broken for softmmu.  */
8566 #ifdef TARGET_NR_msync
8567     case TARGET_NR_msync:
8568         return get_errno(msync(g2h(arg1), arg2, arg3));
8569 #endif
8570 #ifdef TARGET_NR_mlock
8571     case TARGET_NR_mlock:
8572         return get_errno(mlock(g2h(arg1), arg2));
8573 #endif
8574 #ifdef TARGET_NR_munlock
8575     case TARGET_NR_munlock:
8576         return get_errno(munlock(g2h(arg1), arg2));
8577 #endif
8578 #ifdef TARGET_NR_mlockall
8579     case TARGET_NR_mlockall:
8580         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8581 #endif
8582 #ifdef TARGET_NR_munlockall
8583     case TARGET_NR_munlockall:
8584         return get_errno(munlockall());
8585 #endif
8586 #ifdef TARGET_NR_truncate
8587     case TARGET_NR_truncate:
8588         if (!(p = lock_user_string(arg1)))
8589             return -TARGET_EFAULT;
8590         ret = get_errno(truncate(p, arg2));
8591         unlock_user(p, arg1, 0);
8592         return ret;
8593 #endif
8594 #ifdef TARGET_NR_ftruncate
8595     case TARGET_NR_ftruncate:
8596         return get_errno(ftruncate(arg1, arg2));
8597 #endif
8598     case TARGET_NR_fchmod:
8599         return get_errno(fchmod(arg1, arg2));
8600 #if defined(TARGET_NR_fchmodat)
8601     case TARGET_NR_fchmodat:
8602         if (!(p = lock_user_string(arg2)))
8603             return -TARGET_EFAULT;
8604         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8605         unlock_user(p, arg2, 0);
8606         return ret;
8607 #endif
8608     case TARGET_NR_getpriority:
8609         /* Note that negative values are valid for getpriority, so we must
8610            differentiate based on errno settings.  */
8611         errno = 0;
8612         ret = getpriority(arg1, arg2);
8613         if (ret == -1 && errno != 0) {
8614             return -host_to_target_errno(errno);
8615         }
8616 #ifdef TARGET_ALPHA
8617         /* Return value is the unbiased priority.  Signal no error.  */
8618         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8619 #else
8620         /* Return value is a biased priority to avoid negative numbers.  */
8621         ret = 20 - ret;
8622 #endif
8623         return ret;
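    /*
     * Background for the "20 - ret" above: the raw value returned by
     * getpriority() is the nice value in [-20, 19], but a negative syscall
     * return would be interpreted as an error, so (as the kernel does) the
     * result is biased into [1, 40] and the guest's libc un-biases it again.
     * Alpha instead reports the unbiased value and clears the error flag.
     */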
8624     case TARGET_NR_setpriority:
8625         return get_errno(setpriority(arg1, arg2, arg3));
8626 #ifdef TARGET_NR_statfs
8627     case TARGET_NR_statfs:
8628         if (!(p = lock_user_string(arg1))) {
8629             return -TARGET_EFAULT;
8630         }
8631         ret = get_errno(statfs(path(p), &stfs));
8632         unlock_user(p, arg1, 0);
8633     convert_statfs:
8634         if (!is_error(ret)) {
8635             struct target_statfs *target_stfs;
8636 
8637             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8638                 return -TARGET_EFAULT;
8639             __put_user(stfs.f_type, &target_stfs->f_type);
8640             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8641             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8642             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8643             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8644             __put_user(stfs.f_files, &target_stfs->f_files);
8645             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8646             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8647             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8648             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8649             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8650 #ifdef _STATFS_F_FLAGS
8651             __put_user(stfs.f_flags, &target_stfs->f_flags);
8652 #else
8653             __put_user(0, &target_stfs->f_flags);
8654 #endif
8655             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8656             unlock_user_struct(target_stfs, arg2, 1);
8657         }
8658         return ret;
8659 #endif
8660 #ifdef TARGET_NR_fstatfs
8661     case TARGET_NR_fstatfs:
8662         ret = get_errno(fstatfs(arg1, &stfs));
8663         goto convert_statfs;
8664 #endif
8665 #ifdef TARGET_NR_statfs64
8666     case TARGET_NR_statfs64:
8667         if (!(p = lock_user_string(arg1))) {
8668             return -TARGET_EFAULT;
8669         }
8670         ret = get_errno(statfs(path(p), &stfs));
8671         unlock_user(p, arg1, 0);
8672     convert_statfs64:
8673         if (!is_error(ret)) {
8674             struct target_statfs64 *target_stfs;
8675 
8676             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8677                 return -TARGET_EFAULT;
8678             __put_user(stfs.f_type, &target_stfs->f_type);
8679             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8680             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8681             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8682             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8683             __put_user(stfs.f_files, &target_stfs->f_files);
8684             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8685             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8686             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8687             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8688             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8689             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8690             unlock_user_struct(target_stfs, arg3, 1);
8691         }
8692         return ret;
8693     case TARGET_NR_fstatfs64:
8694         ret = get_errno(fstatfs(arg1, &stfs));
8695         goto convert_statfs64;
8696 #endif
8697 #ifdef TARGET_NR_socketcall
8698     case TARGET_NR_socketcall:
8699         return do_socketcall(arg1, arg2);
8700 #endif
8701 #ifdef TARGET_NR_accept
8702     case TARGET_NR_accept:
8703         return do_accept4(arg1, arg2, arg3, 0);
8704 #endif
8705 #ifdef TARGET_NR_accept4
8706     case TARGET_NR_accept4:
8707         return do_accept4(arg1, arg2, arg3, arg4);
8708 #endif
8709 #ifdef TARGET_NR_bind
8710     case TARGET_NR_bind:
8711         return do_bind(arg1, arg2, arg3);
8712 #endif
8713 #ifdef TARGET_NR_connect
8714     case TARGET_NR_connect:
8715         return do_connect(arg1, arg2, arg3);
8716 #endif
8717 #ifdef TARGET_NR_getpeername
8718     case TARGET_NR_getpeername:
8719         return do_getpeername(arg1, arg2, arg3);
8720 #endif
8721 #ifdef TARGET_NR_getsockname
8722     case TARGET_NR_getsockname:
8723         return do_getsockname(arg1, arg2, arg3);
8724 #endif
8725 #ifdef TARGET_NR_getsockopt
8726     case TARGET_NR_getsockopt:
8727         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8728 #endif
8729 #ifdef TARGET_NR_listen
8730     case TARGET_NR_listen:
8731         return get_errno(listen(arg1, arg2));
8732 #endif
8733 #ifdef TARGET_NR_recv
8734     case TARGET_NR_recv:
8735         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8736 #endif
8737 #ifdef TARGET_NR_recvfrom
8738     case TARGET_NR_recvfrom:
8739         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8740 #endif
8741 #ifdef TARGET_NR_recvmsg
8742     case TARGET_NR_recvmsg:
8743         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8744 #endif
8745 #ifdef TARGET_NR_send
8746     case TARGET_NR_send:
8747         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8748 #endif
8749 #ifdef TARGET_NR_sendmsg
8750     case TARGET_NR_sendmsg:
8751         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8752 #endif
8753 #ifdef TARGET_NR_sendmmsg
8754     case TARGET_NR_sendmmsg:
8755         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8756     case TARGET_NR_recvmmsg:
8757         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8758 #endif
8759 #ifdef TARGET_NR_sendto
8760     case TARGET_NR_sendto:
8761         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8762 #endif
8763 #ifdef TARGET_NR_shutdown
8764     case TARGET_NR_shutdown:
8765         return get_errno(shutdown(arg1, arg2));
8766 #endif
8767 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8768     case TARGET_NR_getrandom:
8769         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8770         if (!p) {
8771             return -TARGET_EFAULT;
8772         }
8773         ret = get_errno(getrandom(p, arg2, arg3));
8774         unlock_user(p, arg1, ret);
8775         return ret;
8776 #endif
8777 #ifdef TARGET_NR_socket
8778     case TARGET_NR_socket:
8779         return do_socket(arg1, arg2, arg3);
8780 #endif
8781 #ifdef TARGET_NR_socketpair
8782     case TARGET_NR_socketpair:
8783         return do_socketpair(arg1, arg2, arg3, arg4);
8784 #endif
8785 #ifdef TARGET_NR_setsockopt
8786     case TARGET_NR_setsockopt:
8787         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8788 #endif
8789 #if defined(TARGET_NR_syslog)
8790     case TARGET_NR_syslog:
8791         {
8792             int len = arg2;
8793 
8794             switch (arg1) {
8795             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8796             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8797             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8798             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8799             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8800             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8801             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8802             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8803                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8804             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8805             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8806             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8807                 {
8808                     if (len < 0) {
8809                         return -TARGET_EINVAL;
8810                     }
8811                     if (len == 0) {
8812                         return 0;
8813                     }
8814                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8815                     if (!p) {
8816                         return -TARGET_EFAULT;
8817                     }
8818                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8819                     unlock_user(p, arg2, arg3);
8820                 }
8821                 return ret;
8822             default:
8823                 return -TARGET_EINVAL;
8824             }
8825         }
8826         break;
8827 #endif
8828     case TARGET_NR_setitimer:
8829         {
8830             struct itimerval value, ovalue, *pvalue;
8831 
8832             if (arg2) {
8833                 pvalue = &value;
8834                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8835                     || copy_from_user_timeval(&pvalue->it_value,
8836                                               arg2 + sizeof(struct target_timeval)))
8837                     return -TARGET_EFAULT;
8838             } else {
8839                 pvalue = NULL;
8840             }
8841             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8842             if (!is_error(ret) && arg3) {
8843                 if (copy_to_user_timeval(arg3,
8844                                          &ovalue.it_interval)
8845                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8846                                             &ovalue.it_value))
8847                     return -TARGET_EFAULT;
8848             }
8849         }
8850         return ret;
8851     case TARGET_NR_getitimer:
8852         {
8853             struct itimerval value;
8854 
8855             ret = get_errno(getitimer(arg1, &value));
8856             if (!is_error(ret) && arg2) {
8857                 if (copy_to_user_timeval(arg2,
8858                                          &value.it_interval)
8859                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8860                                             &value.it_value))
8861                     return -TARGET_EFAULT;
8862             }
8863         }
8864         return ret;
8865 #ifdef TARGET_NR_stat
8866     case TARGET_NR_stat:
8867         if (!(p = lock_user_string(arg1))) {
8868             return -TARGET_EFAULT;
8869         }
8870         ret = get_errno(stat(path(p), &st));
8871         unlock_user(p, arg1, 0);
8872         goto do_stat;
8873 #endif
8874 #ifdef TARGET_NR_lstat
8875     case TARGET_NR_lstat:
8876         if (!(p = lock_user_string(arg1))) {
8877             return -TARGET_EFAULT;
8878         }
8879         ret = get_errno(lstat(path(p), &st));
8880         unlock_user(p, arg1, 0);
8881         goto do_stat;
8882 #endif
8883 #ifdef TARGET_NR_fstat
8884     case TARGET_NR_fstat:
8885         {
8886             ret = get_errno(fstat(arg1, &st));
8887 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8888         do_stat:
8889 #endif
8890             if (!is_error(ret)) {
8891                 struct target_stat *target_st;
8892 
8893                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8894                     return -TARGET_EFAULT;
8895                 memset(target_st, 0, sizeof(*target_st));
8896                 __put_user(st.st_dev, &target_st->st_dev);
8897                 __put_user(st.st_ino, &target_st->st_ino);
8898                 __put_user(st.st_mode, &target_st->st_mode);
8899                 __put_user(st.st_uid, &target_st->st_uid);
8900                 __put_user(st.st_gid, &target_st->st_gid);
8901                 __put_user(st.st_nlink, &target_st->st_nlink);
8902                 __put_user(st.st_rdev, &target_st->st_rdev);
8903                 __put_user(st.st_size, &target_st->st_size);
8904                 __put_user(st.st_blksize, &target_st->st_blksize);
8905                 __put_user(st.st_blocks, &target_st->st_blocks);
8906                 __put_user(st.st_atime, &target_st->target_st_atime);
8907                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8908                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8909 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8910     defined(TARGET_STAT_HAVE_NSEC)
8911                 __put_user(st.st_atim.tv_nsec,
8912                            &target_st->target_st_atime_nsec);
8913                 __put_user(st.st_mtim.tv_nsec,
8914                            &target_st->target_st_mtime_nsec);
8915                 __put_user(st.st_ctim.tv_nsec,
8916                            &target_st->target_st_ctime_nsec);
8917 #endif
8918                 unlock_user_struct(target_st, arg2, 1);
8919             }
8920         }
8921         return ret;
8922 #endif
8923     case TARGET_NR_vhangup:
8924         return get_errno(vhangup());
8925 #ifdef TARGET_NR_syscall
8926     case TARGET_NR_syscall:
8927         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8928                           arg6, arg7, arg8, 0);
8929 #endif
8930     case TARGET_NR_wait4:
8931         {
8932             int status;
8933             abi_long status_ptr = arg2;
8934             struct rusage rusage, *rusage_ptr;
8935             abi_ulong target_rusage = arg4;
8936             abi_long rusage_err;
8937             if (target_rusage)
8938                 rusage_ptr = &rusage;
8939             else
8940                 rusage_ptr = NULL;
8941             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8942             if (!is_error(ret)) {
8943                 if (status_ptr && ret) {
8944                     status = host_to_target_waitstatus(status);
8945                     if (put_user_s32(status, status_ptr))
8946                         return -TARGET_EFAULT;
8947                 }
8948                 if (target_rusage) {
8949                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8950                     if (rusage_err) {
8951                         ret = rusage_err;
8952                     }
8953                 }
8954             }
8955         }
8956         return ret;
8957 #ifdef TARGET_NR_swapoff
8958     case TARGET_NR_swapoff:
8959         if (!(p = lock_user_string(arg1)))
8960             return -TARGET_EFAULT;
8961         ret = get_errno(swapoff(p));
8962         unlock_user(p, arg1, 0);
8963         return ret;
8964 #endif
8965     case TARGET_NR_sysinfo:
8966         {
8967             struct target_sysinfo *target_value;
8968             struct sysinfo value;
8969             ret = get_errno(sysinfo(&value));
8970             if (!is_error(ret) && arg1)
8971             {
8972                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8973                     return -TARGET_EFAULT;
8974                 __put_user(value.uptime, &target_value->uptime);
8975                 __put_user(value.loads[0], &target_value->loads[0]);
8976                 __put_user(value.loads[1], &target_value->loads[1]);
8977                 __put_user(value.loads[2], &target_value->loads[2]);
8978                 __put_user(value.totalram, &target_value->totalram);
8979                 __put_user(value.freeram, &target_value->freeram);
8980                 __put_user(value.sharedram, &target_value->sharedram);
8981                 __put_user(value.bufferram, &target_value->bufferram);
8982                 __put_user(value.totalswap, &target_value->totalswap);
8983                 __put_user(value.freeswap, &target_value->freeswap);
8984                 __put_user(value.procs, &target_value->procs);
8985                 __put_user(value.totalhigh, &target_value->totalhigh);
8986                 __put_user(value.freehigh, &target_value->freehigh);
8987                 __put_user(value.mem_unit, &target_value->mem_unit);
8988                 unlock_user_struct(target_value, arg1, 1);
8989             }
8990         }
8991         return ret;
8992 #ifdef TARGET_NR_ipc
8993     case TARGET_NR_ipc:
8994         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8995 #endif
8996 #ifdef TARGET_NR_semget
8997     case TARGET_NR_semget:
8998         return get_errno(semget(arg1, arg2, arg3));
8999 #endif
9000 #ifdef TARGET_NR_semop
9001     case TARGET_NR_semop:
9002         return do_semop(arg1, arg2, arg3);
9003 #endif
9004 #ifdef TARGET_NR_semctl
9005     case TARGET_NR_semctl:
9006         return do_semctl(arg1, arg2, arg3, arg4);
9007 #endif
9008 #ifdef TARGET_NR_msgctl
9009     case TARGET_NR_msgctl:
9010         return do_msgctl(arg1, arg2, arg3);
9011 #endif
9012 #ifdef TARGET_NR_msgget
9013     case TARGET_NR_msgget:
9014         return get_errno(msgget(arg1, arg2));
9015 #endif
9016 #ifdef TARGET_NR_msgrcv
9017     case TARGET_NR_msgrcv:
9018         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9019 #endif
9020 #ifdef TARGET_NR_msgsnd
9021     case TARGET_NR_msgsnd:
9022         return do_msgsnd(arg1, arg2, arg3, arg4);
9023 #endif
9024 #ifdef TARGET_NR_shmget
9025     case TARGET_NR_shmget:
9026         return get_errno(shmget(arg1, arg2, arg3));
9027 #endif
9028 #ifdef TARGET_NR_shmctl
9029     case TARGET_NR_shmctl:
9030         return do_shmctl(arg1, arg2, arg3);
9031 #endif
9032 #ifdef TARGET_NR_shmat
9033     case TARGET_NR_shmat:
9034         return do_shmat(cpu_env, arg1, arg2, arg3);
9035 #endif
9036 #ifdef TARGET_NR_shmdt
9037     case TARGET_NR_shmdt:
9038         return do_shmdt(arg1);
9039 #endif
9040     case TARGET_NR_fsync:
9041         return get_errno(fsync(arg1));
9042     case TARGET_NR_clone:
9043         /* Linux manages to have three different orderings for its
9044          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9045          * match the kernel's CONFIG_CLONE_* settings.
9046          * Microblaze is further special in that it uses a sixth
9047          * implicit argument to clone for the TLS pointer.
9048          */
9049 #if defined(TARGET_MICROBLAZE)
9050         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9051 #elif defined(TARGET_CLONE_BACKWARDS)
9052         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9053 #elif defined(TARGET_CLONE_BACKWARDS2)
9054         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9055 #else
9056         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9057 #endif
9058         return ret;
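    /*
     * Guest-register orderings implied by the do_fork() calls above, a sketch
     * inferred from the code and assuming do_fork's parameters are
     * (flags, newsp, parent_tidptr, tls, child_tidptr):
     *
     *   default:           clone(flags, newsp, parent_tid, child_tid, tls)
     *   CLONE_BACKWARDS:   clone(flags, newsp, parent_tid, tls, child_tid)
     *   CLONE_BACKWARDS2:  clone(newsp, flags, parent_tid, child_tid, tls)
     *   MicroBlaze:        clone(flags, newsp, <unused>, parent_tid,
     *                            child_tid, tls)
     */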
9059 #ifdef __NR_exit_group
9060         /* new thread calls */
9061     case TARGET_NR_exit_group:
9062         preexit_cleanup(cpu_env, arg1);
9063         return get_errno(exit_group(arg1));
9064 #endif
9065     case TARGET_NR_setdomainname:
9066         if (!(p = lock_user_string(arg1)))
9067             return -TARGET_EFAULT;
9068         ret = get_errno(setdomainname(p, arg2));
9069         unlock_user(p, arg1, 0);
9070         return ret;
9071     case TARGET_NR_uname:
9072         /* no need to transcode because we use the linux syscall */
9073         {
9074             struct new_utsname * buf;
9075 
9076             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9077                 return -TARGET_EFAULT;
9078             ret = get_errno(sys_uname(buf));
9079             if (!is_error(ret)) {
9080                 /* Overwrite the native machine name with whatever is being
9081                    emulated. */
9082                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9083                           sizeof(buf->machine));
9084                 /* Allow the user to override the reported release.  */
9085                 if (qemu_uname_release && *qemu_uname_release) {
9086                     g_strlcpy(buf->release, qemu_uname_release,
9087                               sizeof(buf->release));
9088                 }
9089             }
9090             unlock_user_struct(buf, arg1, 1);
9091         }
9092         return ret;
9093 #ifdef TARGET_I386
9094     case TARGET_NR_modify_ldt:
9095         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9096 #if !defined(TARGET_X86_64)
9097     case TARGET_NR_vm86:
9098         return do_vm86(cpu_env, arg1, arg2);
9099 #endif
9100 #endif
9101     case TARGET_NR_adjtimex:
9102         {
9103             struct timex host_buf;
9104 
9105             if (target_to_host_timex(&host_buf, arg1) != 0) {
9106                 return -TARGET_EFAULT;
9107             }
9108             ret = get_errno(adjtimex(&host_buf));
9109             if (!is_error(ret)) {
9110                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9111                     return -TARGET_EFAULT;
9112                 }
9113             }
9114         }
9115         return ret;
9116 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9117     case TARGET_NR_clock_adjtime:
9118         {
9119             struct timex htx, *phtx = &htx;
9120 
9121             if (target_to_host_timex(phtx, arg2) != 0) {
9122                 return -TARGET_EFAULT;
9123             }
9124             ret = get_errno(clock_adjtime(arg1, phtx));
9125             if (!is_error(ret) && phtx) {
9126                 if (host_to_target_timex(arg2, phtx) != 0) {
9127                     return -TARGET_EFAULT;
9128                 }
9129             }
9130         }
9131         return ret;
9132 #endif
9133     case TARGET_NR_getpgid:
9134         return get_errno(getpgid(arg1));
9135     case TARGET_NR_fchdir:
9136         return get_errno(fchdir(arg1));
9137     case TARGET_NR_personality:
9138         return get_errno(personality(arg1));
9139 #ifdef TARGET_NR__llseek /* Not on alpha */
9140     case TARGET_NR__llseek:
9141         {
9142             int64_t res;
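            /*
             * On hosts without a separate _llseek syscall (typically 64-bit
             * hosts), combine the two 32-bit offset halves and use plain
             * lseek, storing the result through the guest's loff_t pointer.
             */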
9143 #if !defined(__NR_llseek)
9144             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9145             if (res == -1) {
9146                 ret = get_errno(res);
9147             } else {
9148                 ret = 0;
9149             }
9150 #else
9151             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9152 #endif
9153             if ((ret == 0) && put_user_s64(res, arg4)) {
9154                 return -TARGET_EFAULT;
9155             }
9156         }
9157         return ret;
9158 #endif
9159 #ifdef TARGET_NR_getdents
9160     case TARGET_NR_getdents:
9161 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9162 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9163         {
9164             struct target_dirent *target_dirp;
9165             struct linux_dirent *dirp;
9166             abi_long count = arg3;
9167 
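            /*
             * The host's linux_dirent records are larger than the target's
             * (64-bit d_ino/d_off versus 32-bit here), so read into a
             * temporary host buffer and repack each record into the guest
             * buffer.
             */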
9168             dirp = g_try_malloc(count);
9169             if (!dirp) {
9170                 return -TARGET_ENOMEM;
9171             }
9172 
9173             ret = get_errno(sys_getdents(arg1, dirp, count));
9174             if (!is_error(ret)) {
9175                 struct linux_dirent *de;
9176                 struct target_dirent *tde;
9177                 int len = ret;
9178                 int reclen, treclen;
9179                 int count1, tnamelen;
9180
9181                 count1 = 0;
9182                 de = dirp;
9183                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9184                     return -TARGET_EFAULT;
9185                 tde = target_dirp;
9186                 while (len > 0) {
9187                     reclen = de->d_reclen;
9188                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9189                     assert(tnamelen >= 0);
9190                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9191                     assert(count1 + treclen <= count);
9192                     tde->d_reclen = tswap16(treclen);
9193                     tde->d_ino = tswapal(de->d_ino);
9194                     tde->d_off = tswapal(de->d_off);
9195                     memcpy(tde->d_name, de->d_name, tnamelen);
9196                     de = (struct linux_dirent *)((char *)de + reclen);
9197                     len -= reclen;
9198                     tde = (struct target_dirent *)((char *)tde + treclen);
9199                     count1 += treclen;
9200                 }
9201                 ret = count1;
9202                 unlock_user(target_dirp, arg2, ret);
9203             }
9204             g_free(dirp);
9205         }
9206 #else
9207         {
9208             struct linux_dirent *dirp;
9209             abi_long count = arg3;
9210 
9211             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9212                 return -TARGET_EFAULT;
9213             ret = get_errno(sys_getdents(arg1, dirp, count));
9214             if (!is_error(ret)) {
9215                 struct linux_dirent *de;
9216                 int len = ret;
9217                 int reclen;
9218                 de = dirp;
9219                 while (len > 0) {
9220                     reclen = de->d_reclen;
9221                     if (reclen > len)
9222                         break;
9223                     de->d_reclen = tswap16(reclen);
9224                     tswapls(&de->d_ino);
9225                     tswapls(&de->d_off);
9226                     de = (struct linux_dirent *)((char *)de + reclen);
9227                     len -= reclen;
9228                 }
9229             }
9230             unlock_user(dirp, arg2, ret);
9231         }
9232 #endif
9233 #else
9234         /* Implement getdents in terms of getdents64 */
9235         {
9236             struct linux_dirent64 *dirp;
9237             abi_long count = arg3;
9238 
9239             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9240             if (!dirp) {
9241                 return -TARGET_EFAULT;
9242             }
9243             ret = get_errno(sys_getdents64(arg1, dirp, count));
9244             if (!is_error(ret)) {
9245                 /* Convert the dirent64 structs to target dirent.  We do this
9246                  * in-place, since we can guarantee that a target_dirent is no
9247                  * larger than a dirent64; however this means we have to be
9248                  * careful to read everything before writing in the new format.
9249                  */
9250                 struct linux_dirent64 *de;
9251                 struct target_dirent *tde;
9252                 int len = ret;
9253                 int tlen = 0;
9254 
9255                 de = dirp;
9256                 tde = (struct target_dirent *)dirp;
9257                 while (len > 0) {
9258                     int namelen, treclen;
9259                     int reclen = de->d_reclen;
9260                     uint64_t ino = de->d_ino;
9261                     int64_t off = de->d_off;
9262                     uint8_t type = de->d_type;
9263 
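                    /* The target record is the header plus the name, its NUL
                     * terminator and a trailing d_type byte, rounded up to
                     * abi_long alignment. */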
9264                     namelen = strlen(de->d_name);
9265                     treclen = offsetof(struct target_dirent, d_name)
9266                         + namelen + 2;
9267                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9268 
9269                     memmove(tde->d_name, de->d_name, namelen + 1);
9270                     tde->d_ino = tswapal(ino);
9271                     tde->d_off = tswapal(off);
9272                     tde->d_reclen = tswap16(treclen);
9273                     /* The target_dirent d_type field lives in what was
9274                      * formerly a padding byte at the end of the structure.
9275                      */
9276                     *(((char *)tde) + treclen - 1) = type;
9277 
9278                     de = (struct linux_dirent64 *)((char *)de + reclen);
9279                     tde = (struct target_dirent *)((char *)tde + treclen);
9280                     len -= reclen;
9281                     tlen += treclen;
9282                 }
9283                 ret = tlen;
9284             }
9285             unlock_user(dirp, arg2, ret);
9286         }
9287 #endif
9288         return ret;
9289 #endif /* TARGET_NR_getdents */
9290 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9291     case TARGET_NR_getdents64:
9292         {
9293             struct linux_dirent64 *dirp;
9294             abi_long count = arg3;
9295             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9296                 return -TARGET_EFAULT;
9297             ret = get_errno(sys_getdents64(arg1, dirp, count));
9298             if (!is_error(ret)) {
9299                 struct linux_dirent64 *de;
9300                 int len = ret;
9301                 int reclen;
9302                 de = dirp;
9303                 while (len > 0) {
9304                     reclen = de->d_reclen;
9305                     if (reclen > len)
9306                         break;
9307                     de->d_reclen = tswap16(reclen);
9308                     tswap64s((uint64_t *)&de->d_ino);
9309                     tswap64s((uint64_t *)&de->d_off);
9310                     de = (struct linux_dirent64 *)((char *)de + reclen);
9311                     len -= reclen;
9312                 }
9313             }
9314             unlock_user(dirp, arg2, ret);
9315         }
9316         return ret;
9317 #endif /* TARGET_NR_getdents64 */
9318 #if defined(TARGET_NR__newselect)
9319     case TARGET_NR__newselect:
9320         return do_select(arg1, arg2, arg3, arg4, arg5);
9321 #endif
9322 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9323 # ifdef TARGET_NR_poll
9324     case TARGET_NR_poll:
9325 # endif
9326 # ifdef TARGET_NR_ppoll
9327     case TARGET_NR_ppoll:
9328 # endif
9329         {
9330             struct target_pollfd *target_pfd;
9331             unsigned int nfds = arg2;
9332             struct pollfd *pfd;
9333             unsigned int i;
9334 
9335             pfd = NULL;
9336             target_pfd = NULL;
9337             if (nfds) {
9338                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9339                     return -TARGET_EINVAL;
9340                 }
9341 
9342                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9343                                        sizeof(struct target_pollfd) * nfds, 1);
9344                 if (!target_pfd) {
9345                     return -TARGET_EFAULT;
9346                 }
9347 
9348                 pfd = alloca(sizeof(struct pollfd) * nfds);
9349                 for (i = 0; i < nfds; i++) {
9350                     pfd[i].fd = tswap32(target_pfd[i].fd);
9351                     pfd[i].events = tswap16(target_pfd[i].events);
9352                 }
9353             }
9354 
9355             switch (num) {
9356 # ifdef TARGET_NR_ppoll
9357             case TARGET_NR_ppoll:
9358             {
9359                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9360                 target_sigset_t *target_set;
9361                 sigset_t _set, *set = &_set;
9362 
9363                 if (arg3) {
9364                     if (target_to_host_timespec(timeout_ts, arg3)) {
9365                         unlock_user(target_pfd, arg1, 0);
9366                         return -TARGET_EFAULT;
9367                     }
9368                 } else {
9369                     timeout_ts = NULL;
9370                 }
9371 
9372                 if (arg4) {
9373                     if (arg5 != sizeof(target_sigset_t)) {
9374                         unlock_user(target_pfd, arg1, 0);
9375                         return -TARGET_EINVAL;
9376                     }
9377 
9378                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9379                     if (!target_set) {
9380                         unlock_user(target_pfd, arg1, 0);
9381                         return -TARGET_EFAULT;
9382                     }
9383                     target_to_host_sigset(set, target_set);
9384                 } else {
9385                     set = NULL;
9386                 }
9387 
9388                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9389                                            set, SIGSET_T_SIZE));
9390 
9391                 if (!is_error(ret) && arg3) {
9392                     host_to_target_timespec(arg3, timeout_ts);
9393                 }
9394                 if (arg4) {
9395                     unlock_user(target_set, arg4, 0);
9396                 }
9397                 break;
9398             }
9399 # endif
9400 # ifdef TARGET_NR_poll
9401             case TARGET_NR_poll:
9402             {
9403                 struct timespec ts, *pts;
9404 
9405                 if (arg3 >= 0) {
9406                     /* Convert ms to secs, ns */
9407                     ts.tv_sec = arg3 / 1000;
9408                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9409                     pts = &ts;
9410                 } else {
9411                 /* A negative poll() timeout means "infinite" */
9412                     pts = NULL;
9413                 }
9414                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9415                 break;
9416             }
9417 # endif
9418             default:
9419                 g_assert_not_reached();
9420             }
9421 
9422             if (!is_error(ret)) {
9423                 for (i = 0; i < nfds; i++) {
9424                     target_pfd[i].revents = tswap16(pfd[i].revents);
9425                 }
9426             }
9427             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9428         }
9429         return ret;
9430 #endif
9431     case TARGET_NR_flock:
9432         /* NOTE: the flock constant seems to be the same for every
9433            Linux platform */
9434         return get_errno(safe_flock(arg1, arg2));
9435     case TARGET_NR_readv:
9436         {
9437             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9438             if (vec != NULL) {
9439                 ret = get_errno(safe_readv(arg1, vec, arg3));
9440                 unlock_iovec(vec, arg2, arg3, 1);
9441             } else {
9442                 ret = -host_to_target_errno(errno);
9443             }
9444         }
9445         return ret;
9446     case TARGET_NR_writev:
9447         {
9448             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9449             if (vec != NULL) {
9450                 ret = get_errno(safe_writev(arg1, vec, arg3));
9451                 unlock_iovec(vec, arg2, arg3, 0);
9452             } else {
9453                 ret = -host_to_target_errno(errno);
9454             }
9455         }
9456         return ret;
9457 #if defined(TARGET_NR_preadv)
9458     case TARGET_NR_preadv:
9459         {
9460             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9461             if (vec != NULL) {
9462                 unsigned long low, high;
9463 
9464                 target_to_host_low_high(arg4, arg5, &low, &high);
9465                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9466                 unlock_iovec(vec, arg2, arg3, 1);
9467             } else {
9468                 ret = -host_to_target_errno(errno);
9469             }
9470         }
9471         return ret;
9472 #endif
9473 #if defined(TARGET_NR_pwritev)
9474     case TARGET_NR_pwritev:
9475         {
9476             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9477             if (vec != NULL) {
9478                 unsigned long low, high;
9479 
9480                 target_to_host_low_high(arg4, arg5, &low, &high);
9481                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9482                 unlock_iovec(vec, arg2, arg3, 0);
9483             } else {
9484                 ret = -host_to_target_errno(errno);
9485             }
9486         }
9487         return ret;
9488 #endif
9489     case TARGET_NR_getsid:
9490         return get_errno(getsid(arg1));
9491 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9492     case TARGET_NR_fdatasync:
9493         return get_errno(fdatasync(arg1));
9494 #endif
9495 #ifdef TARGET_NR__sysctl
9496     case TARGET_NR__sysctl:
9497         /* We don't implement this, but ENOTDIR is always a safe
9498            return value. */
9499         return -TARGET_ENOTDIR;
9500 #endif
9501     case TARGET_NR_sched_getaffinity:
9502         {
9503             unsigned int mask_size;
9504             unsigned long *mask;
9505 
9506             /*
9507              * sched_getaffinity needs multiples of ulong, so we need to take
9508              * care of mismatches between target ulong and host ulong sizes.
9509              */
9510             if (arg2 & (sizeof(abi_ulong) - 1)) {
9511                 return -TARGET_EINVAL;
9512             }
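            /* Round the guest's buffer size up to a whole number of host longs. */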
9513             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9514 
9515             mask = alloca(mask_size);
9516             memset(mask, 0, mask_size);
9517             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9518 
9519             if (!is_error(ret)) {
9520                 if (ret > arg2) {
9521                     /* More data returned than the caller's buffer will fit.
9522                      * This only happens if sizeof(abi_long) < sizeof(long)
9523                      * and the caller passed us a buffer holding an odd number
9524                      * of abi_longs. If the host kernel is actually using the
9525                      * extra 4 bytes then fail EINVAL; otherwise we can just
9526                      * ignore them and only copy the interesting part.
9527                      */
9528                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9529                     if (numcpus > arg2 * 8) {
9530                         return -TARGET_EINVAL;
9531                     }
9532                     ret = arg2;
9533                 }
9534 
9535                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9536                     return -TARGET_EFAULT;
9537                 }
9538             }
9539         }
9540         return ret;
9541     case TARGET_NR_sched_setaffinity:
9542         {
9543             unsigned int mask_size;
9544             unsigned long *mask;
9545 
9546             /*
9547              * sched_setaffinity needs multiples of ulong, so we need to take
9548              * care of mismatches between target ulong and host ulong sizes.
9549              */
9550             if (arg2 & (sizeof(abi_ulong) - 1)) {
9551                 return -TARGET_EINVAL;
9552             }
9553             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9554             mask = alloca(mask_size);
9555 
9556             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9557             if (ret) {
9558                 return ret;
9559             }
9560 
9561             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9562         }
9563     case TARGET_NR_getcpu:
9564         {
9565             unsigned cpu, node;
9566             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9567                                        arg2 ? &node : NULL,
9568                                        NULL));
9569             if (is_error(ret)) {
9570                 return ret;
9571             }
9572             if (arg1 && put_user_u32(cpu, arg1)) {
9573                 return -TARGET_EFAULT;
9574             }
9575             if (arg2 && put_user_u32(node, arg2)) {
9576                 return -TARGET_EFAULT;
9577             }
9578         }
9579         return ret;
9580     case TARGET_NR_sched_setparam:
9581         {
9582             struct sched_param *target_schp;
9583             struct sched_param schp;
9584 
9585             if (arg2 == 0) {
9586                 return -TARGET_EINVAL;
9587             }
9588             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9589                 return -TARGET_EFAULT;
9590             schp.sched_priority = tswap32(target_schp->sched_priority);
9591             unlock_user_struct(target_schp, arg2, 0);
9592             return get_errno(sched_setparam(arg1, &schp));
9593         }
9594     case TARGET_NR_sched_getparam:
9595         {
9596             struct sched_param *target_schp;
9597             struct sched_param schp;
9598 
9599             if (arg2 == 0) {
9600                 return -TARGET_EINVAL;
9601             }
9602             ret = get_errno(sched_getparam(arg1, &schp));
9603             if (!is_error(ret)) {
9604                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9605                     return -TARGET_EFAULT;
9606                 target_schp->sched_priority = tswap32(schp.sched_priority);
9607                 unlock_user_struct(target_schp, arg2, 1);
9608             }
9609         }
9610         return ret;
9611     case TARGET_NR_sched_setscheduler:
9612         {
9613             struct sched_param *target_schp;
9614             struct sched_param schp;
9615             if (arg3 == 0) {
9616                 return -TARGET_EINVAL;
9617             }
9618             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9619                 return -TARGET_EFAULT;
9620             schp.sched_priority = tswap32(target_schp->sched_priority);
9621             unlock_user_struct(target_schp, arg3, 0);
9622             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9623         }
9624     case TARGET_NR_sched_getscheduler:
9625         return get_errno(sched_getscheduler(arg1));
9626     case TARGET_NR_sched_yield:
9627         return get_errno(sched_yield());
9628     case TARGET_NR_sched_get_priority_max:
9629         return get_errno(sched_get_priority_max(arg1));
9630     case TARGET_NR_sched_get_priority_min:
9631         return get_errno(sched_get_priority_min(arg1));
9632     case TARGET_NR_sched_rr_get_interval:
9633         {
9634             struct timespec ts;
9635             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9636             if (!is_error(ret)) {
9637                 ret = host_to_target_timespec(arg2, &ts);
9638             }
9639         }
9640         return ret;
9641     case TARGET_NR_nanosleep:
9642         {
9643             struct timespec req, rem;
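            /* If the sleep fails (e.g. it is interrupted) and the guest
             * supplied a rem pointer, copy back the remaining time. */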
9644             target_to_host_timespec(&req, arg1);
9645             ret = get_errno(safe_nanosleep(&req, &rem));
9646             if (is_error(ret) && arg2) {
9647                 host_to_target_timespec(arg2, &rem);
9648             }
9649         }
9650         return ret;
9651     case TARGET_NR_prctl:
9652         switch (arg1) {
9653         case PR_GET_PDEATHSIG:
9654         {
9655             int deathsig;
9656             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9657             if (!is_error(ret) && arg2
9658                 && put_user_ual(deathsig, arg2)) {
9659                 return -TARGET_EFAULT;
9660             }
9661             return ret;
9662         }
9663 #ifdef PR_GET_NAME
9664         case PR_GET_NAME:
9665         {
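            /* Task names are at most 16 bytes, including the trailing NUL
             * (the kernel's TASK_COMM_LEN). */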
9666             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9667             if (!name) {
9668                 return -TARGET_EFAULT;
9669             }
9670             ret = get_errno(prctl(arg1, (unsigned long)name,
9671                                   arg3, arg4, arg5));
9672             unlock_user(name, arg2, 16);
9673             return ret;
9674         }
9675         case PR_SET_NAME:
9676         {
9677             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9678             if (!name) {
9679                 return -TARGET_EFAULT;
9680             }
9681             ret = get_errno(prctl(arg1, (unsigned long)name,
9682                                   arg3, arg4, arg5));
9683             unlock_user(name, arg2, 0);
9684             return ret;
9685         }
9686 #endif
9687 #ifdef TARGET_MIPS
9688         case TARGET_PR_GET_FP_MODE:
9689         {
9690             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9691             ret = 0;
9692             if (env->CP0_Status & (1 << CP0St_FR)) {
9693                 ret |= TARGET_PR_FP_MODE_FR;
9694             }
9695             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9696                 ret |= TARGET_PR_FP_MODE_FRE;
9697             }
9698             return ret;
9699         }
9700         case TARGET_PR_SET_FP_MODE:
9701         {
9702             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9703             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9704             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9705             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9706             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9707 
9708             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9709                                             TARGET_PR_FP_MODE_FRE;
9710 
9711             /* If nothing to change, return right away, successfully.  */
9712             if (old_fr == new_fr && old_fre == new_fre) {
9713                 return 0;
9714             }
9715             /* Check the value is valid */
9716             if (arg2 & ~known_bits) {
9717                 return -TARGET_EOPNOTSUPP;
9718             }
9719             /* Setting FRE without FR is not supported.  */
9720             if (new_fre && !new_fr) {
9721                 return -TARGET_EOPNOTSUPP;
9722             }
9723             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9724                 /* FR1 is not supported */
9725                 return -TARGET_EOPNOTSUPP;
9726             }
9727             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9728                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9729                 /* cannot set FR=0 */
9730                 return -TARGET_EOPNOTSUPP;
9731             }
9732             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9733                 /* Cannot set FRE=1 */
9734                 return -TARGET_EOPNOTSUPP;
9735             }
9736 
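            /* Repack the FP registers for the new mode: with FR=0 a 64-bit
             * value is split across an even/odd register pair, with FR=1 it
             * occupies a single 64-bit register, so move the odd halves
             * accordingly (mirroring the kernel's handling). */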
9737             int i;
9738             fpr_t *fpr = env->active_fpu.fpr;
9739             for (i = 0; i < 32 ; i += 2) {
9740                 if (!old_fr && new_fr) {
9741                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9742                 } else if (old_fr && !new_fr) {
9743                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9744                 }
9745             }
9746 
9747             if (new_fr) {
9748                 env->CP0_Status |= (1 << CP0St_FR);
9749                 env->hflags |= MIPS_HFLAG_F64;
9750             } else {
9751                 env->CP0_Status &= ~(1 << CP0St_FR);
9752                 env->hflags &= ~MIPS_HFLAG_F64;
9753             }
9754             if (new_fre) {
9755                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9756                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9757                     env->hflags |= MIPS_HFLAG_FRE;
9758                 }
9759             } else {
9760                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9761                 env->hflags &= ~MIPS_HFLAG_FRE;
9762             }
9763 
9764             return 0;
9765         }
9766 #endif /* MIPS */
9767 #ifdef TARGET_AARCH64
9768         case TARGET_PR_SVE_SET_VL:
9769             /*
9770              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9771              * PR_SVE_VL_INHERIT.  Note the kernel definition
9772              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9773              * even though the current architectural maximum is VQ=16.
9774              */
9775             ret = -TARGET_EINVAL;
9776             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9777                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9778                 CPUARMState *env = cpu_env;
9779                 ARMCPU *cpu = arm_env_get_cpu(env);
9780                 uint32_t vq, old_vq;
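                /* ZCR_EL1.LEN holds (vq - 1); arg2 is the requested vector
                 * length in bytes, 16 bytes per quadword, and the prctl
                 * returns the new length in bytes. */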
9781 
9782                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9783                 vq = MAX(arg2 / 16, 1);
9784                 vq = MIN(vq, cpu->sve_max_vq);
9785 
9786                 if (vq < old_vq) {
9787                     aarch64_sve_narrow_vq(env, vq);
9788                 }
9789                 env->vfp.zcr_el[1] = vq - 1;
9790                 ret = vq * 16;
9791             }
9792             return ret;
9793         case TARGET_PR_SVE_GET_VL:
9794             ret = -TARGET_EINVAL;
9795             {
9796                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9797                 if (cpu_isar_feature(aa64_sve, cpu)) {
9798                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9799                 }
9800             }
9801             return ret;
9802         case TARGET_PR_PAC_RESET_KEYS:
9803             {
9804                 CPUARMState *env = cpu_env;
9805                 ARMCPU *cpu = arm_env_get_cpu(env);
9806 
9807                 if (arg3 || arg4 || arg5) {
9808                     return -TARGET_EINVAL;
9809                 }
9810                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9811                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9812                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9813                                TARGET_PR_PAC_APGAKEY);
9814                     int ret = 0;
9815                     Error *err = NULL;
9816 
9817                     if (arg2 == 0) {
9818                         arg2 = all;
9819                     } else if (arg2 & ~all) {
9820                         return -TARGET_EINVAL;
9821                     }
9822                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9823                         ret |= qemu_guest_getrandom(&env->keys.apia,
9824                                                     sizeof(ARMPACKey), &err);
9825                     }
9826                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9827                         ret |= qemu_guest_getrandom(&env->keys.apib,
9828                                                     sizeof(ARMPACKey), &err);
9829                     }
9830                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9831                         ret |= qemu_guest_getrandom(&env->keys.apda,
9832                                                     sizeof(ARMPACKey), &err);
9833                     }
9834                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9835                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9836                                                     sizeof(ARMPACKey), &err);
9837                     }
9838                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9839                         ret |= qemu_guest_getrandom(&env->keys.apga,
9840                                                     sizeof(ARMPACKey), &err);
9841                     }
9842                     if (ret != 0) {
9843                         /*
9844                          * Some unknown failure in the crypto.  The best
9845                          * we can do is log it and fail the syscall.
9846                          * The real syscall cannot fail this way.
9847                          */
9848                         qemu_log_mask(LOG_UNIMP,
9849                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9850                                       error_get_pretty(err));
9851                         error_free(err);
9852                         return -TARGET_EIO;
9853                     }
9854                     return 0;
9855                 }
9856             }
9857             return -TARGET_EINVAL;
9858 #endif /* AARCH64 */
9859         case PR_GET_SECCOMP:
9860         case PR_SET_SECCOMP:
9861             /* Disable seccomp to prevent the target from disabling syscalls
9862              * that we need. */
9863             return -TARGET_EINVAL;
9864         default:
9865             /* Most prctl options have no pointer arguments */
9866             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9867         }
9868         break;
9869 #ifdef TARGET_NR_arch_prctl
9870     case TARGET_NR_arch_prctl:
9871 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9872         return do_arch_prctl(cpu_env, arg1, arg2);
9873 #else
9874 #error unreachable
9875 #endif
9876 #endif
9877 #ifdef TARGET_NR_pread64
9878     case TARGET_NR_pread64:
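        /* Targets that pass 64-bit arguments in aligned register pairs skip
         * a padding slot, so the offset halves shift up by one argument. */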
9879         if (regpairs_aligned(cpu_env, num)) {
9880             arg4 = arg5;
9881             arg5 = arg6;
9882         }
9883         if (arg2 == 0 && arg3 == 0) {
9884             /* Special-case NULL buffer and zero length, which should succeed */
9885             p = 0;
9886         } else {
9887             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9888             if (!p) {
9889                 return -TARGET_EFAULT;
9890             }
9891         }
9892         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9893         unlock_user(p, arg2, ret);
9894         return ret;
9895     case TARGET_NR_pwrite64:
9896         if (regpairs_aligned(cpu_env, num)) {
9897             arg4 = arg5;
9898             arg5 = arg6;
9899         }
9900         if (arg2 == 0 && arg3 == 0) {
9901             /* Special-case NULL buffer and zero length, which should succeed */
9902             p = 0;
9903         } else {
9904             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9905             if (!p) {
9906                 return -TARGET_EFAULT;
9907             }
9908         }
9909         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9910         unlock_user(p, arg2, 0);
9911         return ret;
9912 #endif
9913     case TARGET_NR_getcwd:
9914         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9915             return -TARGET_EFAULT;
9916         ret = get_errno(sys_getcwd1(p, arg2));
9917         unlock_user(p, arg1, ret);
9918         return ret;
9919     case TARGET_NR_capget:
9920     case TARGET_NR_capset:
9921     {
9922         struct target_user_cap_header *target_header;
9923         struct target_user_cap_data *target_data = NULL;
9924         struct __user_cap_header_struct header;
9925         struct __user_cap_data_struct data[2];
9926         struct __user_cap_data_struct *dataptr = NULL;
9927         int i, target_datalen;
9928         int data_items = 1;
9929 
9930         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9931             return -TARGET_EFAULT;
9932         }
9933         header.version = tswap32(target_header->version);
9934         header.pid = tswap32(target_header->pid);
9935 
9936         if (header.version != _LINUX_CAPABILITY_VERSION) {
9937             /* Version 2 and up takes pointer to two user_data structs */
9938             data_items = 2;
9939         }
9940 
9941         target_datalen = sizeof(*target_data) * data_items;
9942 
9943         if (arg2) {
9944             if (num == TARGET_NR_capget) {
9945                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9946             } else {
9947                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9948             }
9949             if (!target_data) {
9950                 unlock_user_struct(target_header, arg1, 0);
9951                 return -TARGET_EFAULT;
9952             }
9953 
9954             if (num == TARGET_NR_capset) {
9955                 for (i = 0; i < data_items; i++) {
9956                     data[i].effective = tswap32(target_data[i].effective);
9957                     data[i].permitted = tswap32(target_data[i].permitted);
9958                     data[i].inheritable = tswap32(target_data[i].inheritable);
9959                 }
9960             }
9961 
9962             dataptr = data;
9963         }
9964 
9965         if (num == TARGET_NR_capget) {
9966             ret = get_errno(capget(&header, dataptr));
9967         } else {
9968             ret = get_errno(capset(&header, dataptr));
9969         }
9970 
9971         /* The kernel always updates version for both capget and capset */
9972         target_header->version = tswap32(header.version);
9973         unlock_user_struct(target_header, arg1, 1);
9974 
9975         if (arg2) {
9976             if (num == TARGET_NR_capget) {
9977                 for (i = 0; i < data_items; i++) {
9978                     target_data[i].effective = tswap32(data[i].effective);
9979                     target_data[i].permitted = tswap32(data[i].permitted);
9980                     target_data[i].inheritable = tswap32(data[i].inheritable);
9981                 }
9982                 unlock_user(target_data, arg2, target_datalen);
9983             } else {
9984                 unlock_user(target_data, arg2, 0);
9985             }
9986         }
9987         return ret;
9988     }
9989     case TARGET_NR_sigaltstack:
9990         return do_sigaltstack(arg1, arg2,
9991                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9992 
9993 #ifdef CONFIG_SENDFILE
9994 #ifdef TARGET_NR_sendfile
9995     case TARGET_NR_sendfile:
9996     {
9997         off_t *offp = NULL;
9998         off_t off;
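        /* sendfile() updates the offset in place, so read it from guest
         * memory first and write the new value back on success. */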
9999         if (arg3) {
10000             ret = get_user_sal(off, arg3);
10001             if (is_error(ret)) {
10002                 return ret;
10003             }
10004             offp = &off;
10005         }
10006         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10007         if (!is_error(ret) && arg3) {
10008             abi_long ret2 = put_user_sal(off, arg3);
10009             if (is_error(ret2)) {
10010                 ret = ret2;
10011             }
10012         }
10013         return ret;
10014     }
10015 #endif
10016 #ifdef TARGET_NR_sendfile64
10017     case TARGET_NR_sendfile64:
10018     {
10019         off_t *offp = NULL;
10020         off_t off;
10021         if (arg3) {
10022             ret = get_user_s64(off, arg3);
10023             if (is_error(ret)) {
10024                 return ret;
10025             }
10026             offp = &off;
10027         }
10028         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10029         if (!is_error(ret) && arg3) {
10030             abi_long ret2 = put_user_s64(off, arg3);
10031             if (is_error(ret2)) {
10032                 ret = ret2;
10033             }
10034         }
10035         return ret;
10036     }
10037 #endif
10038 #endif
10039 #ifdef TARGET_NR_vfork
10040     case TARGET_NR_vfork:
10041         return get_errno(do_fork(cpu_env,
10042                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10043                          0, 0, 0, 0));
10044 #endif
10045 #ifdef TARGET_NR_ugetrlimit
10046     case TARGET_NR_ugetrlimit:
10047     {
10048         struct rlimit rlim;
10049         int resource = target_to_host_resource(arg1);
10050         ret = get_errno(getrlimit(resource, &rlim));
10051         if (!is_error(ret)) {
10052             struct target_rlimit *target_rlim;
10053             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10054                 return -TARGET_EFAULT;
10055             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10056             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10057             unlock_user_struct(target_rlim, arg2, 1);
10058         }
10059         return ret;
10060     }
10061 #endif
10062 #ifdef TARGET_NR_truncate64
10063     case TARGET_NR_truncate64:
10064         if (!(p = lock_user_string(arg1)))
10065             return -TARGET_EFAULT;
10066         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10067         unlock_user(p, arg1, 0);
10068         return ret;
10069 #endif
10070 #ifdef TARGET_NR_ftruncate64
10071     case TARGET_NR_ftruncate64:
10072         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10073 #endif
10074 #ifdef TARGET_NR_stat64
10075     case TARGET_NR_stat64:
10076         if (!(p = lock_user_string(arg1))) {
10077             return -TARGET_EFAULT;
10078         }
10079         ret = get_errno(stat(path(p), &st));
10080         unlock_user(p, arg1, 0);
10081         if (!is_error(ret))
10082             ret = host_to_target_stat64(cpu_env, arg2, &st);
10083         return ret;
10084 #endif
10085 #ifdef TARGET_NR_lstat64
10086     case TARGET_NR_lstat64:
10087         if (!(p = lock_user_string(arg1))) {
10088             return -TARGET_EFAULT;
10089         }
10090         ret = get_errno(lstat(path(p), &st));
10091         unlock_user(p, arg1, 0);
10092         if (!is_error(ret))
10093             ret = host_to_target_stat64(cpu_env, arg2, &st);
10094         return ret;
10095 #endif
10096 #ifdef TARGET_NR_fstat64
10097     case TARGET_NR_fstat64:
10098         ret = get_errno(fstat(arg1, &st));
10099         if (!is_error(ret))
10100             ret = host_to_target_stat64(cpu_env, arg2, &st);
10101         return ret;
10102 #endif
10103 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10104 #ifdef TARGET_NR_fstatat64
10105     case TARGET_NR_fstatat64:
10106 #endif
10107 #ifdef TARGET_NR_newfstatat
10108     case TARGET_NR_newfstatat:
10109 #endif
10110         if (!(p = lock_user_string(arg2))) {
10111             return -TARGET_EFAULT;
10112         }
10113         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10114         unlock_user(p, arg2, 0);
10115         if (!is_error(ret))
10116             ret = host_to_target_stat64(cpu_env, arg3, &st);
10117         return ret;
10118 #endif
10119 #ifdef TARGET_NR_lchown
10120     case TARGET_NR_lchown:
10121         if (!(p = lock_user_string(arg1)))
10122             return -TARGET_EFAULT;
10123         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10124         unlock_user(p, arg1, 0);
10125         return ret;
10126 #endif
10127 #ifdef TARGET_NR_getuid
10128     case TARGET_NR_getuid:
10129         return get_errno(high2lowuid(getuid()));
10130 #endif
10131 #ifdef TARGET_NR_getgid
10132     case TARGET_NR_getgid:
10133         return get_errno(high2lowgid(getgid()));
10134 #endif
10135 #ifdef TARGET_NR_geteuid
10136     case TARGET_NR_geteuid:
10137         return get_errno(high2lowuid(geteuid()));
10138 #endif
10139 #ifdef TARGET_NR_getegid
10140     case TARGET_NR_getegid:
10141         return get_errno(high2lowgid(getegid()));
10142 #endif
10143     case TARGET_NR_setreuid:
10144         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10145     case TARGET_NR_setregid:
10146         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10147     case TARGET_NR_getgroups:
10148         {
10149             int gidsetsize = arg1;
10150             target_id *target_grouplist;
10151             gid_t *grouplist;
10152             int i;
10153 
10154             grouplist = alloca(gidsetsize * sizeof(gid_t));
10155             ret = get_errno(getgroups(gidsetsize, grouplist));
10156             if (gidsetsize == 0)
10157                 return ret;
10158             if (!is_error(ret)) {
10159                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10160                 if (!target_grouplist)
10161                     return -TARGET_EFAULT;
10162                 for (i = 0; i < ret; i++)
10163                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10164                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10165             }
10166         }
10167         return ret;
10168     case TARGET_NR_setgroups:
10169         {
10170             int gidsetsize = arg1;
10171             target_id *target_grouplist;
10172             gid_t *grouplist = NULL;
10173             int i;
10174             if (gidsetsize) {
10175                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10176                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10177                 if (!target_grouplist) {
10178                     return -TARGET_EFAULT;
10179                 }
10180                 for (i = 0; i < gidsetsize; i++) {
10181                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10182                 }
10183                 unlock_user(target_grouplist, arg2, 0);
10184             }
10185             return get_errno(setgroups(gidsetsize, grouplist));
10186         }
10187     case TARGET_NR_fchown:
10188         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10189 #if defined(TARGET_NR_fchownat)
10190     case TARGET_NR_fchownat:
10191         if (!(p = lock_user_string(arg2)))
10192             return -TARGET_EFAULT;
10193         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10194                                  low2highgid(arg4), arg5));
10195         unlock_user(p, arg2, 0);
10196         return ret;
10197 #endif
10198 #ifdef TARGET_NR_setresuid
10199     case TARGET_NR_setresuid:
10200         return get_errno(sys_setresuid(low2highuid(arg1),
10201                                        low2highuid(arg2),
10202                                        low2highuid(arg3)));
10203 #endif
10204 #ifdef TARGET_NR_getresuid
10205     case TARGET_NR_getresuid:
10206         {
10207             uid_t ruid, euid, suid;
10208             ret = get_errno(getresuid(&ruid, &euid, &suid));
10209             if (!is_error(ret)) {
10210                 if (put_user_id(high2lowuid(ruid), arg1)
10211                     || put_user_id(high2lowuid(euid), arg2)
10212                     || put_user_id(high2lowuid(suid), arg3))
10213                     return -TARGET_EFAULT;
10214             }
10215         }
10216         return ret;
10217 #endif
10218 #ifdef TARGET_NR_getresgid
10219     case TARGET_NR_setresgid:
10220         return get_errno(sys_setresgid(low2highgid(arg1),
10221                                        low2highgid(arg2),
10222                                        low2highgid(arg3)));
10223 #endif
10224 #ifdef TARGET_NR_getresgid
10225     case TARGET_NR_getresgid:
10226         {
10227             gid_t rgid, egid, sgid;
10228             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10229             if (!is_error(ret)) {
10230                 if (put_user_id(high2lowgid(rgid), arg1)
10231                     || put_user_id(high2lowgid(egid), arg2)
10232                     || put_user_id(high2lowgid(sgid), arg3))
10233                     return -TARGET_EFAULT;
10234             }
10235         }
10236         return ret;
10237 #endif
10238 #ifdef TARGET_NR_chown
10239     case TARGET_NR_chown:
10240         if (!(p = lock_user_string(arg1)))
10241             return -TARGET_EFAULT;
10242         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10243         unlock_user(p, arg1, 0);
10244         return ret;
10245 #endif
10246     case TARGET_NR_setuid:
10247         return get_errno(sys_setuid(low2highuid(arg1)));
10248     case TARGET_NR_setgid:
10249         return get_errno(sys_setgid(low2highgid(arg1)));
10250     case TARGET_NR_setfsuid:
10251         return get_errno(setfsuid(arg1));
10252     case TARGET_NR_setfsgid:
10253         return get_errno(setfsgid(arg1));
10254 
10255 #ifdef TARGET_NR_lchown32
10256     case TARGET_NR_lchown32:
10257         if (!(p = lock_user_string(arg1)))
10258             return -TARGET_EFAULT;
10259         ret = get_errno(lchown(p, arg2, arg3));
10260         unlock_user(p, arg1, 0);
10261         return ret;
10262 #endif
10263 #ifdef TARGET_NR_getuid32
10264     case TARGET_NR_getuid32:
10265         return get_errno(getuid());
10266 #endif
10267 
10268 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10269    /* Alpha specific */
10270     case TARGET_NR_getxuid:
10271          {
10272             uid_t euid;
10273             euid = geteuid();
10274             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10275          }
10276         return get_errno(getuid());
10277 #endif
10278 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10279    /* Alpha specific */
10280     case TARGET_NR_getxgid:
10281          {
10282             gid_t egid;
10283             egid = getegid();
10284             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10285          }
10286         return get_errno(getgid());
10287 #endif
10288 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10289     /* Alpha specific */
10290     case TARGET_NR_osf_getsysinfo:
10291         ret = -TARGET_EOPNOTSUPP;
10292         switch (arg1) {
10293           case TARGET_GSI_IEEE_FP_CONTROL:
10294             {
10295                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10296                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10297 
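                /* Fold the current exception status bits from the FPCR into
                 * the saved software control word before returning it. */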
10298                 swcr &= ~SWCR_STATUS_MASK;
10299                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10300 
10301                 if (put_user_u64(swcr, arg2))
10302                     return -TARGET_EFAULT;
10303                 ret = 0;
10304             }
10305             break;
10306 
10307           /* case GSI_IEEE_STATE_AT_SIGNAL:
10308              -- Not implemented in linux kernel.
10309              case GSI_UACPROC:
10310              -- Retrieves current unaligned access state; not much used.
10311              case GSI_PROC_TYPE:
10312              -- Retrieves implver information; surely not used.
10313              case GSI_GET_HWRPB:
10314              -- Grabs a copy of the HWRPB; surely not used.
10315           */
10316         }
10317         return ret;
10318 #endif
10319 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10320     /* Alpha specific */
10321     case TARGET_NR_osf_setsysinfo:
10322         ret = -TARGET_EOPNOTSUPP;
10323         switch (arg1) {
10324           case TARGET_SSI_IEEE_FP_CONTROL:
10325             {
10326                 uint64_t swcr, fpcr;
10327 
10328                 if (get_user_u64 (swcr, arg2)) {
10329                     return -TARGET_EFAULT;
10330                 }
10331 
10332                 /*
10333                  * The kernel calls swcr_update_status to update the
10334                  * status bits from the fpcr at every point that it
10335                  * could be queried.  Therefore, we store the status
10336                  * bits only in FPCR.
10337                  */
10338                 ((CPUAlphaState *)cpu_env)->swcr
10339                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10340 
10341                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10342                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10343                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10344                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10345                 ret = 0;
10346             }
10347             break;
10348 
10349           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10350             {
10351                 uint64_t exc, fpcr, fex;
10352 
10353                 if (get_user_u64(exc, arg2)) {
10354                     return -TARGET_EFAULT;
10355                 }
10356                 exc &= SWCR_STATUS_MASK;
10357                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10358 
10359                 /* Old exceptions are not signaled.  */
10360                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10361                 fex = exc & ~fex;
10362                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10363                 fex &= ((CPUArchState *)cpu_env)->swcr;
10364 
10365                 /* Update the hardware fpcr.  */
10366                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10367                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10368 
10369                 if (fex) {
10370                     int si_code = TARGET_FPE_FLTUNK;
10371                     target_siginfo_t info;
10372 
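                    /* Pick an si_code for the signal; when several exception
                     * bits are set, the later tests take precedence. */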
10373                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10374                         si_code = TARGET_FPE_FLTUND;
10375                     }
10376                     if (fex & SWCR_TRAP_ENABLE_INE) {
10377                         si_code = TARGET_FPE_FLTRES;
10378                     }
10379                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10380                         si_code = TARGET_FPE_FLTUND;
10381                     }
10382                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10383                         si_code = TARGET_FPE_FLTOVF;
10384                     }
10385                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10386                         si_code = TARGET_FPE_FLTDIV;
10387                     }
10388                     if (fex & SWCR_TRAP_ENABLE_INV) {
10389                         si_code = TARGET_FPE_FLTINV;
10390                     }
10391 
10392                     info.si_signo = SIGFPE;
10393                     info.si_errno = 0;
10394                     info.si_code = si_code;
10395                     info._sifields._sigfault._addr
10396                         = ((CPUArchState *)cpu_env)->pc;
10397                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10398                                  QEMU_SI_FAULT, &info);
10399                 }
10400                 ret = 0;
10401             }
10402             break;
10403 
10404           /* case SSI_NVPAIRS:
10405              -- Used with SSIN_UACPROC to enable unaligned accesses.
10406              case SSI_IEEE_STATE_AT_SIGNAL:
10407              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10408              -- Not implemented in linux kernel
10409           */
10410         }
10411         return ret;
10412 #endif
10413 #ifdef TARGET_NR_osf_sigprocmask
10414     /* Alpha specific.  */
10415     case TARGET_NR_osf_sigprocmask:
10416         {
10417             abi_ulong mask;
10418             int how;
10419             sigset_t set, oldset;
10420 
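            /* Unlike sigprocmask(2), the old mask is returned as the
             * syscall result rather than through a pointer argument. */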
10421             switch (arg1) {
10422             case TARGET_SIG_BLOCK:
10423                 how = SIG_BLOCK;
10424                 break;
10425             case TARGET_SIG_UNBLOCK:
10426                 how = SIG_UNBLOCK;
10427                 break;
10428             case TARGET_SIG_SETMASK:
10429                 how = SIG_SETMASK;
10430                 break;
10431             default:
10432                 return -TARGET_EINVAL;
10433             }
10434             mask = arg2;
10435             target_to_host_old_sigset(&set, &mask);
10436             ret = do_sigprocmask(how, &set, &oldset);
10437             if (!ret) {
10438                 host_to_target_old_sigset(&mask, &oldset);
10439                 ret = mask;
10440             }
10441         }
10442         return ret;
10443 #endif
10444 
10445 #ifdef TARGET_NR_getgid32
10446     case TARGET_NR_getgid32:
10447         return get_errno(getgid());
10448 #endif
10449 #ifdef TARGET_NR_geteuid32
10450     case TARGET_NR_geteuid32:
10451         return get_errno(geteuid());
10452 #endif
10453 #ifdef TARGET_NR_getegid32
10454     case TARGET_NR_getegid32:
10455         return get_errno(getegid());
10456 #endif
10457 #ifdef TARGET_NR_setreuid32
10458     case TARGET_NR_setreuid32:
10459         return get_errno(setreuid(arg1, arg2));
10460 #endif
10461 #ifdef TARGET_NR_setregid32
10462     case TARGET_NR_setregid32:
10463         return get_errno(setregid(arg1, arg2));
10464 #endif
10465 #ifdef TARGET_NR_getgroups32
10466     case TARGET_NR_getgroups32:
10467         {
10468             int gidsetsize = arg1;
10469             uint32_t *target_grouplist;
10470             gid_t *grouplist;
10471             int i;
10472 
10473             grouplist = alloca(gidsetsize * sizeof(gid_t));
10474             ret = get_errno(getgroups(gidsetsize, grouplist));
10475             if (gidsetsize == 0)
10476                 return ret;
10477             if (!is_error(ret)) {
10478                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10479                 if (!target_grouplist) {
10480                     return -TARGET_EFAULT;
10481                 }
10482                 for (i = 0; i < ret; i++)
10483                     target_grouplist[i] = tswap32(grouplist[i]);
10484                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10485             }
10486         }
10487         return ret;
10488 #endif
10489 #ifdef TARGET_NR_setgroups32
10490     case TARGET_NR_setgroups32:
10491         {
10492             int gidsetsize = arg1;
10493             uint32_t *target_grouplist;
10494             gid_t *grouplist;
10495             int i;
10496 
10497             grouplist = alloca(gidsetsize * sizeof(gid_t));
10498             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10499             if (!target_grouplist) {
10500                 return -TARGET_EFAULT;
10501             }
10502             for (i = 0; i < gidsetsize; i++)
10503                 grouplist[i] = tswap32(target_grouplist[i]);
10504             unlock_user(target_grouplist, arg2, 0);
10505             return get_errno(setgroups(gidsetsize, grouplist));
10506         }
10507 #endif
10508 #ifdef TARGET_NR_fchown32
10509     case TARGET_NR_fchown32:
10510         return get_errno(fchown(arg1, arg2, arg3));
10511 #endif
10512 #ifdef TARGET_NR_setresuid32
10513     case TARGET_NR_setresuid32:
10514         return get_errno(sys_setresuid(arg1, arg2, arg3));
10515 #endif
10516 #ifdef TARGET_NR_getresuid32
10517     case TARGET_NR_getresuid32:
10518         {
10519             uid_t ruid, euid, suid;
10520             ret = get_errno(getresuid(&ruid, &euid, &suid));
10521             if (!is_error(ret)) {
10522                 if (put_user_u32(ruid, arg1)
10523                     || put_user_u32(euid, arg2)
10524                     || put_user_u32(suid, arg3))
10525                     return -TARGET_EFAULT;
10526             }
10527         }
10528         return ret;
10529 #endif
10530 #ifdef TARGET_NR_setresgid32
10531     case TARGET_NR_setresgid32:
10532         return get_errno(sys_setresgid(arg1, arg2, arg3));
10533 #endif
10534 #ifdef TARGET_NR_getresgid32
10535     case TARGET_NR_getresgid32:
10536         {
10537             gid_t rgid, egid, sgid;
10538             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10539             if (!is_error(ret)) {
10540                 if (put_user_u32(rgid, arg1)
10541                     || put_user_u32(egid, arg2)
10542                     || put_user_u32(sgid, arg3))
10543                     return -TARGET_EFAULT;
10544             }
10545         }
10546         return ret;
10547 #endif
10548 #ifdef TARGET_NR_chown32
10549     case TARGET_NR_chown32:
10550         if (!(p = lock_user_string(arg1)))
10551             return -TARGET_EFAULT;
10552         ret = get_errno(chown(p, arg2, arg3));
10553         unlock_user(p, arg1, 0);
10554         return ret;
10555 #endif
10556 #ifdef TARGET_NR_setuid32
10557     case TARGET_NR_setuid32:
10558         return get_errno(sys_setuid(arg1));
10559 #endif
10560 #ifdef TARGET_NR_setgid32
10561     case TARGET_NR_setgid32:
10562         return get_errno(sys_setgid(arg1));
10563 #endif
10564 #ifdef TARGET_NR_setfsuid32
10565     case TARGET_NR_setfsuid32:
10566         return get_errno(setfsuid(arg1));
10567 #endif
10568 #ifdef TARGET_NR_setfsgid32
10569     case TARGET_NR_setfsgid32:
10570         return get_errno(setfsgid(arg1));
10571 #endif
10572 #ifdef TARGET_NR_mincore
10573     case TARGET_NR_mincore:
10574         {
10575             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10576             if (!a) {
10577                 return -TARGET_ENOMEM;
10578             }
10579             p = lock_user_string(arg3);
10580             if (!p) {
10581                 ret = -TARGET_EFAULT;
10582             } else {
10583                 ret = get_errno(mincore(a, arg2, p));
10584                 unlock_user(p, arg3, ret);
10585             }
10586             unlock_user(a, arg1, 0);
10587         }
10588         return ret;
10589 #endif
10590 #ifdef TARGET_NR_arm_fadvise64_64
10591     case TARGET_NR_arm_fadvise64_64:
10592         /* arm_fadvise64_64 looks like fadvise64_64 but
10593          * with different argument order: fd, advice, offset, len
10594          * rather than the usual fd, offset, len, advice.
10595          * Note that offset and len are both 64-bit so appear as
10596          * pairs of 32-bit registers.
10597          */
10598         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10599                             target_offset64(arg5, arg6), arg2);
10600         return -host_to_target_errno(ret);
10601 #endif
10602 
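          /*
           * target_offset64() (defined earlier in this file) reassembles such
           * a 32-bit register pair into a 64-bit value; roughly:
           *
           *   #ifdef TARGET_WORDS_BIGENDIAN
           *       off = ((uint64_t)word0 << 32) | word1;
           *   #else
           *       off = ((uint64_t)word1 << 32) | word0;
           *   #endif
           */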
10603 #if TARGET_ABI_BITS == 32
10604 
10605 #ifdef TARGET_NR_fadvise64_64
10606     case TARGET_NR_fadvise64_64:
10607 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10608         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10609         ret = arg2;
10610         arg2 = arg3;
10611         arg3 = arg4;
10612         arg4 = arg5;
10613         arg5 = arg6;
10614         arg6 = ret;
10615 #else
10616         /* 6 args: fd, offset (high, low), len (high, low), advice */
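              /* regpairs_aligned() is true for ABIs that require 64-bit
               * syscall arguments to start in an even-numbered register;
               * the unused padding slot shifts all later arguments up by
               * one. */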
10617         if (regpairs_aligned(cpu_env, num)) {
10618             /* offset is in (3,4), len in (5,6) and advice in 7 */
10619             arg2 = arg3;
10620             arg3 = arg4;
10621             arg4 = arg5;
10622             arg5 = arg6;
10623             arg6 = arg7;
10624         }
10625 #endif
10626         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10627                             target_offset64(arg4, arg5), arg6);
10628         return -host_to_target_errno(ret);
10629 #endif
10630 
10631 #ifdef TARGET_NR_fadvise64
10632     case TARGET_NR_fadvise64:
10633         /* 5 args: fd, offset (high, low), len, advice */
10634         if (regpairs_aligned(cpu_env, num)) {
10635             /* offset is in (3,4), len in 5 and advice in 6 */
10636             arg2 = arg3;
10637             arg3 = arg4;
10638             arg4 = arg5;
10639             arg5 = arg6;
10640         }
10641         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10642         return -host_to_target_errno(ret);
10643 #endif
10644 
10645 #else /* not a 32-bit ABI */
10646 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10647 #ifdef TARGET_NR_fadvise64_64
10648     case TARGET_NR_fadvise64_64:
10649 #endif
10650 #ifdef TARGET_NR_fadvise64
10651     case TARGET_NR_fadvise64:
10652 #endif
10653 #ifdef TARGET_S390X
10654         switch (arg4) {
10655         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10656         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10657         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10658         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10659         default: break;
10660         }
10661 #endif
10662         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10663 #endif
10664 #endif /* end of 64-bit ABI fadvise handling */
10665 
10666 #ifdef TARGET_NR_madvise
10667     case TARGET_NR_madvise:
10668         /* A straight passthrough may not be safe because qemu sometimes
10669            turns private file-backed mappings into anonymous mappings.
10670            This will break MADV_DONTNEED.
10671            This is a hint, so ignoring and returning success is ok.  */
10672         return 0;
10673 #endif
10674 #if TARGET_ABI_BITS == 32
10675     case TARGET_NR_fcntl64:
10676     {
10677         int cmd;
10678         struct flock64 fl;
10679         from_flock64_fn *copyfrom = copy_from_user_flock64;
10680         to_flock64_fn *copyto = copy_to_user_flock64;
10681 
10682 #ifdef TARGET_ARM
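              /* The old ARM OABI only aligns 64-bit fields to 4 bytes, so
               * its struct flock64 layout differs from EABI and needs
               * separate copy helpers. */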
10683         if (!((CPUARMState *)cpu_env)->eabi) {
10684             copyfrom = copy_from_user_oabi_flock64;
10685             copyto = copy_to_user_oabi_flock64;
10686         }
10687 #endif
10688 
10689         cmd = target_to_host_fcntl_cmd(arg2);
10690         if (cmd == -TARGET_EINVAL) {
10691             return cmd;
10692         }
10693 
10694         switch (arg2) {
10695         case TARGET_F_GETLK64:
10696             ret = copyfrom(&fl, arg3);
10697             if (ret) {
10698                 break;
10699             }
10700             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10701             if (ret == 0) {
10702                 ret = copyto(arg3, &fl);
10703             }
10704             break;
10705 
10706         case TARGET_F_SETLK64:
10707         case TARGET_F_SETLKW64:
10708             ret = copyfrom(&fl, arg3);
10709             if (ret) {
10710                 break;
10711             }
10712             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10713             break;
10714         default:
10715             ret = do_fcntl(arg1, arg2, arg3);
10716             break;
10717         }
10718         return ret;
10719     }
10720 #endif
10721 #ifdef TARGET_NR_cacheflush
10722     case TARGET_NR_cacheflush:
10723         /* self-modifying code is handled automatically, so nothing needed */
10724         return 0;
10725 #endif
10726 #ifdef TARGET_NR_getpagesize
10727     case TARGET_NR_getpagesize:
10728         return TARGET_PAGE_SIZE;
10729 #endif
10730     case TARGET_NR_gettid:
10731         return get_errno(sys_gettid());
10732 #ifdef TARGET_NR_readahead
10733     case TARGET_NR_readahead:
10734 #if TARGET_ABI_BITS == 32
10735         if (regpairs_aligned(cpu_env, num)) {
10736             arg2 = arg3;
10737             arg3 = arg4;
10738             arg4 = arg5;
10739         }
10740         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10741 #else
10742         ret = get_errno(readahead(arg1, arg2, arg3));
10743 #endif
10744         return ret;
10745 #endif
10746 #ifdef CONFIG_ATTR
10747 #ifdef TARGET_NR_setxattr
10748     case TARGET_NR_listxattr:
10749     case TARGET_NR_llistxattr:
10750     {
10751         void *p, *b = 0;
10752         if (arg2) {
10753             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10754             if (!b) {
10755                 return -TARGET_EFAULT;
10756             }
10757         }
10758         p = lock_user_string(arg1);
10759         if (p) {
10760             if (num == TARGET_NR_listxattr) {
10761                 ret = get_errno(listxattr(p, b, arg3));
10762             } else {
10763                 ret = get_errno(llistxattr(p, b, arg3));
10764             }
10765         } else {
10766             ret = -TARGET_EFAULT;
10767         }
10768         unlock_user(p, arg1, 0);
10769         unlock_user(b, arg2, arg3);
10770         return ret;
10771     }
10772     case TARGET_NR_flistxattr:
10773     {
10774         void *b = 0;
10775         if (arg2) {
10776             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10777             if (!b) {
10778                 return -TARGET_EFAULT;
10779             }
10780         }
10781         ret = get_errno(flistxattr(arg1, b, arg3));
10782         unlock_user(b, arg2, arg3);
10783         return ret;
10784     }
10785     case TARGET_NR_setxattr:
10786     case TARGET_NR_lsetxattr:
10787         {
10788             void *p, *n, *v = 0;
10789             if (arg3) {
10790                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10791                 if (!v) {
10792                     return -TARGET_EFAULT;
10793                 }
10794             }
10795             p = lock_user_string(arg1);
10796             n = lock_user_string(arg2);
10797             if (p && n) {
10798                 if (num == TARGET_NR_setxattr) {
10799                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10800                 } else {
10801                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10802                 }
10803             } else {
10804                 ret = -TARGET_EFAULT;
10805             }
10806             unlock_user(p, arg1, 0);
10807             unlock_user(n, arg2, 0);
10808             unlock_user(v, arg3, 0);
10809         }
10810         return ret;
10811     case TARGET_NR_fsetxattr:
10812         {
10813             void *n, *v = 0;
10814             if (arg3) {
10815                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10816                 if (!v) {
10817                     return -TARGET_EFAULT;
10818                 }
10819             }
10820             n = lock_user_string(arg2);
10821             if (n) {
10822                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10823             } else {
10824                 ret = -TARGET_EFAULT;
10825             }
10826             unlock_user(n, arg2, 0);
10827             unlock_user(v, arg3, 0);
10828         }
10829         return ret;
10830     case TARGET_NR_getxattr:
10831     case TARGET_NR_lgetxattr:
10832         {
10833             void *p, *n, *v = 0;
10834             if (arg3) {
10835                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10836                 if (!v) {
10837                     return -TARGET_EFAULT;
10838                 }
10839             }
10840             p = lock_user_string(arg1);
10841             n = lock_user_string(arg2);
10842             if (p && n) {
10843                 if (num == TARGET_NR_getxattr) {
10844                     ret = get_errno(getxattr(p, n, v, arg4));
10845                 } else {
10846                     ret = get_errno(lgetxattr(p, n, v, arg4));
10847                 }
10848             } else {
10849                 ret = -TARGET_EFAULT;
10850             }
10851             unlock_user(p, arg1, 0);
10852             unlock_user(n, arg2, 0);
10853             unlock_user(v, arg3, arg4);
10854         }
10855         return ret;
10856     case TARGET_NR_fgetxattr:
10857         {
10858             void *n, *v = 0;
10859             if (arg3) {
10860                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10861                 if (!v) {
10862                     return -TARGET_EFAULT;
10863                 }
10864             }
10865             n = lock_user_string(arg2);
10866             if (n) {
10867                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10868             } else {
10869                 ret = -TARGET_EFAULT;
10870             }
10871             unlock_user(n, arg2, 0);
10872             unlock_user(v, arg3, arg4);
10873         }
10874         return ret;
10875     case TARGET_NR_removexattr:
10876     case TARGET_NR_lremovexattr:
10877         {
10878             void *p, *n;
10879             p = lock_user_string(arg1);
10880             n = lock_user_string(arg2);
10881             if (p && n) {
10882                 if (num == TARGET_NR_removexattr) {
10883                     ret = get_errno(removexattr(p, n));
10884                 } else {
10885                     ret = get_errno(lremovexattr(p, n));
10886                 }
10887             } else {
10888                 ret = -TARGET_EFAULT;
10889             }
10890             unlock_user(p, arg1, 0);
10891             unlock_user(n, arg2, 0);
10892         }
10893         return ret;
10894     case TARGET_NR_fremovexattr:
10895         {
10896             void *n;
10897             n = lock_user_string(arg2);
10898             if (n) {
10899                 ret = get_errno(fremovexattr(arg1, n));
10900             } else {
10901                 ret = -TARGET_EFAULT;
10902             }
10903             unlock_user(n, arg2, 0);
10904         }
10905         return ret;
10906 #endif
10907 #endif /* CONFIG_ATTR */
10908 #ifdef TARGET_NR_set_thread_area
10909     case TARGET_NR_set_thread_area:
10910 #if defined(TARGET_MIPS)
10911       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10912       return 0;
10913 #elif defined(TARGET_CRIS)
10914       if (arg1 & 0xff)
10915           ret = -TARGET_EINVAL;
10916       else {
10917           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10918           ret = 0;
10919       }
10920       return ret;
10921 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10922       return do_set_thread_area(cpu_env, arg1);
10923 #elif defined(TARGET_M68K)
10924       {
10925           TaskState *ts = cpu->opaque;
10926           ts->tp_value = arg1;
10927           return 0;
10928       }
10929 #else
10930       return -TARGET_ENOSYS;
10931 #endif
10932 #endif
10933 #ifdef TARGET_NR_get_thread_area
10934     case TARGET_NR_get_thread_area:
10935 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10936         return do_get_thread_area(cpu_env, arg1);
10937 #elif defined(TARGET_M68K)
10938         {
10939             TaskState *ts = cpu->opaque;
10940             return ts->tp_value;
10941         }
10942 #else
10943         return -TARGET_ENOSYS;
10944 #endif
10945 #endif
10946 #ifdef TARGET_NR_getdomainname
10947     case TARGET_NR_getdomainname:
10948         return -TARGET_ENOSYS;
10949 #endif
10950 
10951 #ifdef TARGET_NR_clock_settime
10952     case TARGET_NR_clock_settime:
10953     {
10954         struct timespec ts;
10955 
10956         ret = target_to_host_timespec(&ts, arg2);
10957         if (!is_error(ret)) {
10958             ret = get_errno(clock_settime(arg1, &ts));
10959         }
10960         return ret;
10961     }
10962 #endif
10963 #ifdef TARGET_NR_clock_gettime
10964     case TARGET_NR_clock_gettime:
10965     {
10966         struct timespec ts;
10967         ret = get_errno(clock_gettime(arg1, &ts));
10968         if (!is_error(ret)) {
10969             ret = host_to_target_timespec(arg2, &ts);
10970         }
10971         return ret;
10972     }
10973 #endif
10974 #ifdef TARGET_NR_clock_getres
10975     case TARGET_NR_clock_getres:
10976     {
10977         struct timespec ts;
10978         ret = get_errno(clock_getres(arg1, &ts));
10979         if (!is_error(ret)) {
10980             host_to_target_timespec(arg2, &ts);
10981         }
10982         return ret;
10983     }
10984 #endif
10985 #ifdef TARGET_NR_clock_nanosleep
10986     case TARGET_NR_clock_nanosleep:
10987     {
10988         struct timespec ts;
10989         target_to_host_timespec(&ts, arg3);
10990         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10991                                              &ts, arg4 ? &ts : NULL));
10992         if (arg4)
10993             host_to_target_timespec(arg4, &ts);
10994 
10995 #if defined(TARGET_PPC)
10996         /* clock_nanosleep is odd in that it returns positive errno values.
10997          * On PPC, CR0 bit 3 should be set in such a situation. */
10998         if (ret && ret != -TARGET_ERESTARTSYS) {
10999             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11000         }
11001 #endif
11002         return ret;
11003     }
11004 #endif
11005 
11006 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11007     case TARGET_NR_set_tid_address:
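              /* g2h() converts the guest virtual address into the
               * corresponding pointer in the host address space. */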
11008         return get_errno(set_tid_address((int *)g2h(arg1)));
11009 #endif
11010 
11011     case TARGET_NR_tkill:
11012         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11013 
11014     case TARGET_NR_tgkill:
11015         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11016                          target_to_host_signal(arg3)));
11017 
11018 #ifdef TARGET_NR_set_robust_list
11019     case TARGET_NR_set_robust_list:
11020     case TARGET_NR_get_robust_list:
11021         /* The ABI for supporting robust futexes has userspace pass
11022          * the kernel a pointer to a linked list which is updated by
11023          * userspace after the syscall; the list is walked by the kernel
11024          * when the thread exits. Since the linked list in QEMU guest
11025          * memory isn't a valid linked list for the host and we have
11026          * no way to reliably intercept the thread-death event, we can't
11027          * support these. Silently return ENOSYS so that guest userspace
11028          * falls back to a non-robust futex implementation (which should
11029          * be OK except in the corner case of the guest crashing while
11030          * holding a mutex that is shared with another process via
11031          * shared memory).
11032          */
11033         return -TARGET_ENOSYS;
11034 #endif
11035 
11036 #if defined(TARGET_NR_utimensat)
11037     case TARGET_NR_utimensat:
11038         {
11039             struct timespec *tsp, ts[2];
11040             if (!arg3) {
11041                 tsp = NULL;
11042             } else {
11043                 target_to_host_timespec(ts, arg3);
11044                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11045                 tsp = ts;
11046             }
11047             if (!arg2)
11048                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11049             else {
11050                 if (!(p = lock_user_string(arg2))) {
11051                     return -TARGET_EFAULT;
11052                 }
11053                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11054                 unlock_user(p, arg2, 0);
11055             }
11056         }
11057         return ret;
11058 #endif
11059     case TARGET_NR_futex:
11060         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11061 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11062     case TARGET_NR_inotify_init:
11063         ret = get_errno(sys_inotify_init());
11064         if (ret >= 0) {
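                  /* Register the fd translator so that struct inotify_event
                   * data read from this descriptor is converted to the guest
                   * layout. */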
11065             fd_trans_register(ret, &target_inotify_trans);
11066         }
11067         return ret;
11068 #endif
11069 #ifdef CONFIG_INOTIFY1
11070 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11071     case TARGET_NR_inotify_init1:
11072         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11073                                           fcntl_flags_tbl)));
11074         if (ret >= 0) {
11075             fd_trans_register(ret, &target_inotify_trans);
11076         }
11077         return ret;
11078 #endif
11079 #endif
11080 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11081     case TARGET_NR_inotify_add_watch:
11082         p = lock_user_string(arg2);
11083         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11084         unlock_user(p, arg2, 0);
11085         return ret;
11086 #endif
11087 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11088     case TARGET_NR_inotify_rm_watch:
11089         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11090 #endif
11091 
11092 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11093     case TARGET_NR_mq_open:
11094         {
11095             struct mq_attr posix_mq_attr;
11096             struct mq_attr *pposix_mq_attr;
11097             int host_flags;
11098 
11099             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11100             pposix_mq_attr = NULL;
11101             if (arg4) {
11102                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11103                     return -TARGET_EFAULT;
11104                 }
11105                 pposix_mq_attr = &posix_mq_attr;
11106             }
11107             p = lock_user_string(arg1 - 1);
11108             if (!p) {
11109                 return -TARGET_EFAULT;
11110             }
11111             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11112             unlock_user(p, arg1, 0);
11113         }
11114         return ret;
11115 
11116     case TARGET_NR_mq_unlink:
11117         p = lock_user_string(arg1 - 1);
11118         if (!p) {
11119             return -TARGET_EFAULT;
11120         }
11121         ret = get_errno(mq_unlink(p));
11122         unlock_user(p, arg1, 0);
11123         return ret;
11124 
11125     case TARGET_NR_mq_timedsend:
11126         {
11127             struct timespec ts;
11128 
11129             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11130             if (arg5 != 0) {
11131                 target_to_host_timespec(&ts, arg5);
11132                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11133                 host_to_target_timespec(arg5, &ts);
11134             } else {
11135                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11136             }
11137             unlock_user(p, arg2, arg3);
11138         }
11139         return ret;
11140 
11141     case TARGET_NR_mq_timedreceive:
11142         {
11143             struct timespec ts;
11144             unsigned int prio;
11145 
11146             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11147             if (arg5 != 0) {
11148                 target_to_host_timespec(&ts, arg5);
11149                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11150                                                      &prio, &ts));
11151                 host_to_target_timespec(arg5, &ts);
11152             } else {
11153                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11154                                                      &prio, NULL));
11155             }
11156             unlock_user(p, arg2, arg3);
11157             if (arg4 != 0)
11158                 put_user_u32(prio, arg4);
11159         }
11160         return ret;
11161 
11162     /* Not implemented for now... */
11163 /*     case TARGET_NR_mq_notify: */
11164 /*         break; */
11165 
11166     case TARGET_NR_mq_getsetattr:
11167         {
11168             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11169             ret = 0;
11170             if (arg2 != 0) {
11171                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11172                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11173                                            &posix_mq_attr_out));
11174             } else if (arg3 != 0) {
11175                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11176             }
11177             if (ret == 0 && arg3 != 0) {
11178                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11179             }
11180         }
11181         return ret;
11182 #endif
11183 
11184 #ifdef CONFIG_SPLICE
11185 #ifdef TARGET_NR_tee
11186     case TARGET_NR_tee:
11187         {
11188             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11189         }
11190         return ret;
11191 #endif
11192 #ifdef TARGET_NR_splice
11193     case TARGET_NR_splice:
11194         {
11195             loff_t loff_in, loff_out;
11196             loff_t *ploff_in = NULL, *ploff_out = NULL;
11197             if (arg2) {
11198                 if (get_user_u64(loff_in, arg2)) {
11199                     return -TARGET_EFAULT;
11200                 }
11201                 ploff_in = &loff_in;
11202             }
11203             if (arg4) {
11204                 if (get_user_u64(loff_out, arg4)) {
11205                     return -TARGET_EFAULT;
11206                 }
11207                 ploff_out = &loff_out;
11208             }
11209             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11210             if (arg2) {
11211                 if (put_user_u64(loff_in, arg2)) {
11212                     return -TARGET_EFAULT;
11213                 }
11214             }
11215             if (arg4) {
11216                 if (put_user_u64(loff_out, arg4)) {
11217                     return -TARGET_EFAULT;
11218                 }
11219             }
11220         }
11221         return ret;
11222 #endif
11223 #ifdef TARGET_NR_vmsplice
11224     case TARGET_NR_vmsplice:
11225         {
11226             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11227             if (vec != NULL) {
11228                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11229                 unlock_iovec(vec, arg2, arg3, 0);
11230             } else {
11231                 ret = -host_to_target_errno(errno);
11232             }
11233         }
11234         return ret;
11235 #endif
11236 #endif /* CONFIG_SPLICE */
11237 #ifdef CONFIG_EVENTFD
11238 #if defined(TARGET_NR_eventfd)
11239     case TARGET_NR_eventfd:
11240         ret = get_errno(eventfd(arg1, 0));
11241         if (ret >= 0) {
11242             fd_trans_register(ret, &target_eventfd_trans);
11243         }
11244         return ret;
11245 #endif
11246 #if defined(TARGET_NR_eventfd2)
11247     case TARGET_NR_eventfd2:
11248     {
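              /* TARGET_O_NONBLOCK/TARGET_O_CLOEXEC vary between target
               * architectures, so translate them by hand; on the host the
               * EFD_* flags share the O_* bit values. */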
11249         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11250         if (arg2 & TARGET_O_NONBLOCK) {
11251             host_flags |= O_NONBLOCK;
11252         }
11253         if (arg2 & TARGET_O_CLOEXEC) {
11254             host_flags |= O_CLOEXEC;
11255         }
11256         ret = get_errno(eventfd(arg1, host_flags));
11257         if (ret >= 0) {
11258             fd_trans_register(ret, &target_eventfd_trans);
11259         }
11260         return ret;
11261     }
11262 #endif
11263 #endif /* CONFIG_EVENTFD  */
11264 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11265     case TARGET_NR_fallocate:
11266 #if TARGET_ABI_BITS == 32
11267         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11268                                   target_offset64(arg5, arg6)));
11269 #else
11270         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11271 #endif
11272         return ret;
11273 #endif
11274 #if defined(CONFIG_SYNC_FILE_RANGE)
11275 #if defined(TARGET_NR_sync_file_range)
11276     case TARGET_NR_sync_file_range:
11277 #if TARGET_ABI_BITS == 32
11278 #if defined(TARGET_MIPS)
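              /* On MIPS o32 the 64-bit offset must start in an aligned
               * register pair, so a padding argument follows the fd:
               * offset/len arrive in arg3..arg6 and the flags in arg7. */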
11279         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11280                                         target_offset64(arg5, arg6), arg7));
11281 #else
11282         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11283                                         target_offset64(arg4, arg5), arg6));
11284 #endif /* !TARGET_MIPS */
11285 #else
11286         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11287 #endif
11288         return ret;
11289 #endif
11290 #if defined(TARGET_NR_sync_file_range2)
11291     case TARGET_NR_sync_file_range2:
11292         /* This is like sync_file_range but the arguments are reordered */
11293 #if TARGET_ABI_BITS == 32
11294         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11295                                         target_offset64(arg5, arg6), arg2));
11296 #else
11297         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11298 #endif
11299         return ret;
11300 #endif
11301 #endif
11302 #if defined(TARGET_NR_signalfd4)
11303     case TARGET_NR_signalfd4:
11304         return do_signalfd4(arg1, arg2, arg4);
11305 #endif
11306 #if defined(TARGET_NR_signalfd)
11307     case TARGET_NR_signalfd:
11308         return do_signalfd4(arg1, arg2, 0);
11309 #endif
11310 #if defined(CONFIG_EPOLL)
11311 #if defined(TARGET_NR_epoll_create)
11312     case TARGET_NR_epoll_create:
11313         return get_errno(epoll_create(arg1));
11314 #endif
11315 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11316     case TARGET_NR_epoll_create1:
11317         return get_errno(epoll_create1(arg1));
11318 #endif
11319 #if defined(TARGET_NR_epoll_ctl)
11320     case TARGET_NR_epoll_ctl:
11321     {
11322         struct epoll_event ep;
11323         struct epoll_event *epp = 0;
11324         if (arg4) {
11325             struct target_epoll_event *target_ep;
11326             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11327                 return -TARGET_EFAULT;
11328             }
11329             ep.events = tswap32(target_ep->events);
11330             /* The epoll_data_t union is just opaque data to the kernel,
11331              * so we transfer all 64 bits across and need not worry what
11332              * actual data type it is.
11333              */
11334             ep.data.u64 = tswap64(target_ep->data.u64);
11335             unlock_user_struct(target_ep, arg4, 0);
11336             epp = &ep;
11337         }
11338         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11339     }
11340 #endif
11341 
11342 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11343 #if defined(TARGET_NR_epoll_wait)
11344     case TARGET_NR_epoll_wait:
11345 #endif
11346 #if defined(TARGET_NR_epoll_pwait)
11347     case TARGET_NR_epoll_pwait:
11348 #endif
11349     {
11350         struct target_epoll_event *target_ep;
11351         struct epoll_event *ep;
11352         int epfd = arg1;
11353         int maxevents = arg3;
11354         int timeout = arg4;
11355 
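              /* Bound maxevents so the guest cannot request an oversized
               * lock_user() mapping or host allocation below. */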
11356         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11357             return -TARGET_EINVAL;
11358         }
11359 
11360         target_ep = lock_user(VERIFY_WRITE, arg2,
11361                               maxevents * sizeof(struct target_epoll_event), 1);
11362         if (!target_ep) {
11363             return -TARGET_EFAULT;
11364         }
11365 
11366         ep = g_try_new(struct epoll_event, maxevents);
11367         if (!ep) {
11368             unlock_user(target_ep, arg2, 0);
11369             return -TARGET_ENOMEM;
11370         }
11371 
11372         switch (num) {
11373 #if defined(TARGET_NR_epoll_pwait)
11374         case TARGET_NR_epoll_pwait:
11375         {
11376             target_sigset_t *target_set;
11377             sigset_t _set, *set = &_set;
11378 
11379             if (arg5) {
11380                 if (arg6 != sizeof(target_sigset_t)) {
11381                     ret = -TARGET_EINVAL;
11382                     break;
11383                 }
11384 
11385                 target_set = lock_user(VERIFY_READ, arg5,
11386                                        sizeof(target_sigset_t), 1);
11387                 if (!target_set) {
11388                     ret = -TARGET_EFAULT;
11389                     break;
11390                 }
11391                 target_to_host_sigset(set, target_set);
11392                 unlock_user(target_set, arg5, 0);
11393             } else {
11394                 set = NULL;
11395             }
11396 
11397             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11398                                              set, SIGSET_T_SIZE));
11399             break;
11400         }
11401 #endif
11402 #if defined(TARGET_NR_epoll_wait)
11403         case TARGET_NR_epoll_wait:
11404             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11405                                              NULL, 0));
11406             break;
11407 #endif
11408         default:
11409             ret = -TARGET_ENOSYS;
11410         }
11411         if (!is_error(ret)) {
11412             int i;
11413             for (i = 0; i < ret; i++) {
11414                 target_ep[i].events = tswap32(ep[i].events);
11415                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11416             }
11417             unlock_user(target_ep, arg2,
11418                         ret * sizeof(struct target_epoll_event));
11419         } else {
11420             unlock_user(target_ep, arg2, 0);
11421         }
11422         g_free(ep);
11423         return ret;
11424     }
11425 #endif
11426 #endif
11427 #ifdef TARGET_NR_prlimit64
11428     case TARGET_NR_prlimit64:
11429     {
11430         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11431         struct target_rlimit64 *target_rnew, *target_rold;
11432         struct host_rlimit64 rnew, rold, *rnewp = 0;
11433         int resource = target_to_host_resource(arg2);
11434         if (arg3) {
11435             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11436                 return -TARGET_EFAULT;
11437             }
11438             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11439             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11440             unlock_user_struct(target_rnew, arg3, 0);
11441             rnewp = &rnew;
11442         }
11443 
11444         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11445         if (!is_error(ret) && arg4) {
11446             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11447                 return -TARGET_EFAULT;
11448             }
11449             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11450             target_rold->rlim_max = tswap64(rold.rlim_max);
11451             unlock_user_struct(target_rold, arg4, 1);
11452         }
11453         return ret;
11454     }
11455 #endif
11456 #ifdef TARGET_NR_gethostname
11457     case TARGET_NR_gethostname:
11458     {
11459         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11460         if (name) {
11461             ret = get_errno(gethostname(name, arg2));
11462             unlock_user(name, arg1, arg2);
11463         } else {
11464             ret = -TARGET_EFAULT;
11465         }
11466         return ret;
11467     }
11468 #endif
11469 #ifdef TARGET_NR_atomic_cmpxchg_32
11470     case TARGET_NR_atomic_cmpxchg_32:
11471     {
11472         /* should use start_exclusive from main.c */
11473         abi_ulong mem_value;
11474         if (get_user_u32(mem_value, arg6)) {
11475             target_siginfo_t info;
11476             info.si_signo = SIGSEGV;
11477             info.si_errno = 0;
11478             info.si_code = TARGET_SEGV_MAPERR;
11479             info._sifields._sigfault._addr = arg6;
11480             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11481                          QEMU_SI_FAULT, &info);
11482             ret = 0xdeadbeef;
11483 
11484         }
11485         if (mem_value == arg2)
11486             put_user_u32(arg1, arg6);
11487         return mem_value;
11488     }
11489 #endif
11490 #ifdef TARGET_NR_atomic_barrier
11491     case TARGET_NR_atomic_barrier:
11492         /* Like the kernel implementation and the
11493            qemu arm barrier, treat this as a no-op. */
11494         return 0;
11495 #endif
11496 
11497 #ifdef TARGET_NR_timer_create
11498     case TARGET_NR_timer_create:
11499     {
11500         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11501 
11502         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11503 
11504         int clkid = arg1;
11505         int timer_index = next_free_host_timer();
11506 
11507         if (timer_index < 0) {
11508             ret = -TARGET_EAGAIN;
11509         } else {
11510             timer_t *phtimer = g_posix_timers + timer_index;
11511 
11512             if (arg2) {
11513                 phost_sevp = &host_sevp;
11514                 ret = target_to_host_sigevent(phost_sevp, arg2);
11515                 if (ret != 0) {
11516                     return ret;
11517                 }
11518             }
11519 
11520             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11521             if (ret) {
11522                 phtimer = NULL;
11523             } else {
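                      /* The id handed back to the guest is not the host
                       * timer_t but TIMER_MAGIC ORed with the index into
                       * g_posix_timers; get_timer_id() validates and decodes
                       * it in the other timer_* cases below. */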
11524                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11525                     return -TARGET_EFAULT;
11526                 }
11527             }
11528         }
11529         return ret;
11530     }
11531 #endif
11532 
11533 #ifdef TARGET_NR_timer_settime
11534     case TARGET_NR_timer_settime:
11535     {
11536         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11537          * struct itimerspec * old_value */
11538         target_timer_t timerid = get_timer_id(arg1);
11539 
11540         if (timerid < 0) {
11541             ret = timerid;
11542         } else if (arg3 == 0) {
11543             ret = -TARGET_EINVAL;
11544         } else {
11545             timer_t htimer = g_posix_timers[timerid];
11546             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11547 
11548             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11549                 return -TARGET_EFAULT;
11550             }
11551             ret = get_errno(
11552                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11553             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11554                 return -TARGET_EFAULT;
11555             }
11556         }
11557         return ret;
11558     }
11559 #endif
11560 
11561 #ifdef TARGET_NR_timer_gettime
11562     case TARGET_NR_timer_gettime:
11563     {
11564         /* args: timer_t timerid, struct itimerspec *curr_value */
11565         target_timer_t timerid = get_timer_id(arg1);
11566 
11567         if (timerid < 0) {
11568             ret = timerid;
11569         } else if (!arg2) {
11570             ret = -TARGET_EFAULT;
11571         } else {
11572             timer_t htimer = g_posix_timers[timerid];
11573             struct itimerspec hspec;
11574             ret = get_errno(timer_gettime(htimer, &hspec));
11575 
11576             if (host_to_target_itimerspec(arg2, &hspec)) {
11577                 ret = -TARGET_EFAULT;
11578             }
11579         }
11580         return ret;
11581     }
11582 #endif
11583 
11584 #ifdef TARGET_NR_timer_getoverrun
11585     case TARGET_NR_timer_getoverrun:
11586     {
11587         /* args: timer_t timerid */
11588         target_timer_t timerid = get_timer_id(arg1);
11589 
11590         if (timerid < 0) {
11591             ret = timerid;
11592         } else {
11593             timer_t htimer = g_posix_timers[timerid];
11594             ret = get_errno(timer_getoverrun(htimer));
11595         }
11596         fd_trans_unregister(ret);
11597         return ret;
11598     }
11599 #endif
11600 
11601 #ifdef TARGET_NR_timer_delete
11602     case TARGET_NR_timer_delete:
11603     {
11604         /* args: timer_t timerid */
11605         target_timer_t timerid = get_timer_id(arg1);
11606 
11607         if (timerid < 0) {
11608             ret = timerid;
11609         } else {
11610             timer_t htimer = g_posix_timers[timerid];
11611             ret = get_errno(timer_delete(htimer));
11612             g_posix_timers[timerid] = 0;
11613         }
11614         return ret;
11615     }
11616 #endif
11617 
11618 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11619     case TARGET_NR_timerfd_create:
11620         return get_errno(timerfd_create(arg1,
11621                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11622 #endif
11623 
11624 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11625     case TARGET_NR_timerfd_gettime:
11626         {
11627             struct itimerspec its_curr;
11628 
11629             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11630 
11631             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11632                 return -TARGET_EFAULT;
11633             }
11634         }
11635         return ret;
11636 #endif
11637 
11638 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11639     case TARGET_NR_timerfd_settime:
11640         {
11641             struct itimerspec its_new, its_old, *p_new;
11642 
11643             if (arg3) {
11644                 if (target_to_host_itimerspec(&its_new, arg3)) {
11645                     return -TARGET_EFAULT;
11646                 }
11647                 p_new = &its_new;
11648             } else {
11649                 p_new = NULL;
11650             }
11651 
11652             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11653 
11654             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11655                 return -TARGET_EFAULT;
11656             }
11657         }
11658         return ret;
11659 #endif
11660 
11661 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11662     case TARGET_NR_ioprio_get:
11663         return get_errno(ioprio_get(arg1, arg2));
11664 #endif
11665 
11666 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11667     case TARGET_NR_ioprio_set:
11668         return get_errno(ioprio_set(arg1, arg2, arg3));
11669 #endif
11670 
11671 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11672     case TARGET_NR_setns:
11673         return get_errno(setns(arg1, arg2));
11674 #endif
11675 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11676     case TARGET_NR_unshare:
11677         return get_errno(unshare(arg1));
11678 #endif
11679 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11680     case TARGET_NR_kcmp:
11681         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11682 #endif
11683 #ifdef TARGET_NR_swapcontext
11684     case TARGET_NR_swapcontext:
11685         /* PowerPC specific.  */
11686         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11687 #endif
11688 
11689     default:
11690         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11691         return -TARGET_ENOSYS;
11692     }
11693     return ret;
11694 }
11695 
11696 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11697                     abi_long arg2, abi_long arg3, abi_long arg4,
11698                     abi_long arg5, abi_long arg6, abi_long arg7,
11699                     abi_long arg8)
11700 {
11701     CPUState *cpu = ENV_GET_CPU(cpu_env);
11702     abi_long ret;
11703 
11704 #ifdef DEBUG_ERESTARTSYS
11705     /* Debug-only code for exercising the syscall-restart code paths
11706      * in the per-architecture cpu main loops: restart every syscall
11707      * the guest makes once before letting it through.
11708      */
11709     {
11710         static bool flag;
11711         flag = !flag;
11712         if (flag) {
11713             return -TARGET_ERESTARTSYS;
11714         }
11715     }
11716 #endif
11717 
11718     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11719                              arg5, arg6, arg7, arg8);
11720 
11721     if (unlikely(do_strace)) {
11722         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11723         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11724                           arg5, arg6, arg7, arg8);
11725         print_syscall_ret(num, ret);
11726     } else {
11727         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11728                           arg5, arg6, arg7, arg8);
11729     }
11730 
11731     trace_guest_user_syscall_ret(cpu, num, ret);
11732     return ret;
11733 }
11734