xref: /openbmc/qemu/linux-user/syscall.c (revision b9cce6d7)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
111 
112 #include "qemu.h"
113 #include "fd-trans.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
229 
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #define __NR_sys_gettid __NR_gettid
253 _syscall0(int, sys_gettid)
254 
255 /* For the 64-bit guest on 32-bit host case we must emulate
256  * getdents using getdents64, because otherwise the host
257  * might hand us back more dirent records than we can fit
258  * into the guest buffer after structure format conversion.
259  * Otherwise we emulate getdents with getdents if the host has it.
260  */
261 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
262 #define EMULATE_GETDENTS_WITH_GETDENTS
263 #endif
264 
265 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
266 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
267 #endif
268 #if (defined(TARGET_NR_getdents) && \
269       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
270     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
272 #endif
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
275           loff_t *, res, uint, wh);
276 #endif
277 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
278 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
279           siginfo_t *, uinfo)
280 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
281 #ifdef __NR_exit_group
282 _syscall1(int,exit_group,int,error_code)
283 #endif
284 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
285 _syscall1(int,set_tid_address,int *,tidptr)
286 #endif
287 #if defined(TARGET_NR_futex) && defined(__NR_futex)
288 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
289           const struct timespec *,timeout,int *,uaddr2,int,val3)
290 #endif
291 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
292 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
293           unsigned long *, user_mask_ptr);
294 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
295 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
296           unsigned long *, user_mask_ptr);
297 #define __NR_sys_getcpu __NR_getcpu
298 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
299 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
300           void *, arg);
301 _syscall2(int, capget, struct __user_cap_header_struct *, header,
302           struct __user_cap_data_struct *, data);
303 _syscall2(int, capset, struct __user_cap_header_struct *, header,
304           struct __user_cap_data_struct *, data);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get, int, which, int, who)
307 #endif
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
310 #endif
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
313 #endif
314 
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
317           unsigned long, idx1, unsigned long, idx2)
318 #endif
319 
320 static bitmask_transtbl fcntl_flags_tbl[] = {
321   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
322   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
323   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
324   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
325   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
326   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
327   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
328   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
329   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
330   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
331   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
332   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
333   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
334 #if defined(O_DIRECT)
335   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
336 #endif
337 #if defined(O_NOATIME)
338   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
339 #endif
340 #if defined(O_CLOEXEC)
341   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
342 #endif
343 #if defined(O_PATH)
344   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
345 #endif
346 #if defined(O_TMPFILE)
347   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
348 #endif
349   /* Don't terminate the list prematurely on 64-bit host+guest.  */
350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
351   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
352 #endif
353   { 0, 0, 0, 0 }
354 };
355 
356 static int sys_getcwd1(char *buf, size_t size)
357 {
358   if (getcwd(buf, size) == NULL) {
359       /* getcwd() sets errno */
360       return (-1);
361   }
362   return strlen(buf)+1;
363 }
364 
365 #ifdef TARGET_NR_utimensat
366 #if defined(__NR_utimensat)
367 #define __NR_sys_utimensat __NR_utimensat
368 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
369           const struct timespec *,tsp,int,flags)
370 #else
371 static int sys_utimensat(int dirfd, const char *pathname,
372                          const struct timespec times[2], int flags)
373 {
374     errno = ENOSYS;
375     return -1;
376 }
377 #endif
378 #endif /* TARGET_NR_utimensat */
379 
380 #ifdef TARGET_NR_renameat2
381 #if defined(__NR_renameat2)
382 #define __NR_sys_renameat2 __NR_renameat2
383 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
384           const char *, new, unsigned int, flags)
385 #else
386 static int sys_renameat2(int oldfd, const char *old,
387                          int newfd, const char *new, int flags)
388 {
389     if (flags == 0) {
390         return renameat(oldfd, old, newfd, new);
391     }
392     errno = ENOSYS;
393     return -1;
394 }
395 #endif
396 #endif /* TARGET_NR_renameat2 */
397 
398 #ifdef CONFIG_INOTIFY
399 #include <sys/inotify.h>
400 
401 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
402 static int sys_inotify_init(void)
403 {
404   return (inotify_init());
405 }
406 #endif
407 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
408 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
409 {
410   return (inotify_add_watch(fd, pathname, mask));
411 }
412 #endif
413 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
414 static int sys_inotify_rm_watch(int fd, int32_t wd)
415 {
416   return (inotify_rm_watch(fd, wd));
417 }
418 #endif
419 #ifdef CONFIG_INOTIFY1
420 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
421 static int sys_inotify_init1(int flags)
422 {
423   return (inotify_init1(flags));
424 }
425 #endif
426 #endif
427 #else
428 /* Userspace can usually survive runtime without inotify */
429 #undef TARGET_NR_inotify_init
430 #undef TARGET_NR_inotify_init1
431 #undef TARGET_NR_inotify_add_watch
432 #undef TARGET_NR_inotify_rm_watch
433 #endif /* CONFIG_INOTIFY  */
434 
435 #if defined(TARGET_NR_prlimit64)
436 #ifndef __NR_prlimit64
437 # define __NR_prlimit64 -1
438 #endif
439 #define __NR_sys_prlimit64 __NR_prlimit64
440 /* The glibc rlimit structure may not be that used by the underlying syscall */
441 struct host_rlimit64 {
442     uint64_t rlim_cur;
443     uint64_t rlim_max;
444 };
445 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
446           const struct host_rlimit64 *, new_limit,
447           struct host_rlimit64 *, old_limit)
448 #endif
449 
450 
451 #if defined(TARGET_NR_timer_create)
452 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
453 static timer_t g_posix_timers[32] = { 0, } ;
454 
455 static inline int next_free_host_timer(void)
456 {
457     int k ;
458     /* FIXME: Does finding the next free slot require a lock? */
459     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
460         if (g_posix_timers[k] == 0) {
461             g_posix_timers[k] = (timer_t) 1;
462             return k;
463         }
464     }
465     return -1;
466 }
467 #endif
468 
469 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
470 #ifdef TARGET_ARM
471 static inline int regpairs_aligned(void *cpu_env, int num)
472 {
473     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
474 }
475 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
476 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
477 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
478 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
479  * of registers which translates to the same as ARM/MIPS, because we start with
480  * r3 as arg1 */
481 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
482 #elif defined(TARGET_SH4)
483 /* SH4 doesn't align register pairs, except for p{read,write}64 */
484 static inline int regpairs_aligned(void *cpu_env, int num)
485 {
486     switch (num) {
487     case TARGET_NR_pread64:
488     case TARGET_NR_pwrite64:
489         return 1;
490 
491     default:
492         return 0;
493     }
494 }
495 #elif defined(TARGET_XTENSA)
496 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
497 #else
498 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
499 #endif
500 
501 #define ERRNO_TABLE_SIZE 1200
502 
503 /* target_to_host_errno_table[] is initialized from
504  * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 };
507 
508 /*
509  * This list is the union of errno values overridden in asm-<arch>/errno.h
510  * minus the errnos that are not actually generic to all archs.
511  */
512 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
513     [EAGAIN]		= TARGET_EAGAIN,
514     [EIDRM]		= TARGET_EIDRM,
515     [ECHRNG]		= TARGET_ECHRNG,
516     [EL2NSYNC]		= TARGET_EL2NSYNC,
517     [EL3HLT]		= TARGET_EL3HLT,
518     [EL3RST]		= TARGET_EL3RST,
519     [ELNRNG]		= TARGET_ELNRNG,
520     [EUNATCH]		= TARGET_EUNATCH,
521     [ENOCSI]		= TARGET_ENOCSI,
522     [EL2HLT]		= TARGET_EL2HLT,
523     [EDEADLK]		= TARGET_EDEADLK,
524     [ENOLCK]		= TARGET_ENOLCK,
525     [EBADE]		= TARGET_EBADE,
526     [EBADR]		= TARGET_EBADR,
527     [EXFULL]		= TARGET_EXFULL,
528     [ENOANO]		= TARGET_ENOANO,
529     [EBADRQC]		= TARGET_EBADRQC,
530     [EBADSLT]		= TARGET_EBADSLT,
531     [EBFONT]		= TARGET_EBFONT,
532     [ENOSTR]		= TARGET_ENOSTR,
533     [ENODATA]		= TARGET_ENODATA,
534     [ETIME]		= TARGET_ETIME,
535     [ENOSR]		= TARGET_ENOSR,
536     [ENONET]		= TARGET_ENONET,
537     [ENOPKG]		= TARGET_ENOPKG,
538     [EREMOTE]		= TARGET_EREMOTE,
539     [ENOLINK]		= TARGET_ENOLINK,
540     [EADV]		= TARGET_EADV,
541     [ESRMNT]		= TARGET_ESRMNT,
542     [ECOMM]		= TARGET_ECOMM,
543     [EPROTO]		= TARGET_EPROTO,
544     [EDOTDOT]		= TARGET_EDOTDOT,
545     [EMULTIHOP]		= TARGET_EMULTIHOP,
546     [EBADMSG]		= TARGET_EBADMSG,
547     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
548     [EOVERFLOW]		= TARGET_EOVERFLOW,
549     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
550     [EBADFD]		= TARGET_EBADFD,
551     [EREMCHG]		= TARGET_EREMCHG,
552     [ELIBACC]		= TARGET_ELIBACC,
553     [ELIBBAD]		= TARGET_ELIBBAD,
554     [ELIBSCN]		= TARGET_ELIBSCN,
555     [ELIBMAX]		= TARGET_ELIBMAX,
556     [ELIBEXEC]		= TARGET_ELIBEXEC,
557     [EILSEQ]		= TARGET_EILSEQ,
558     [ENOSYS]		= TARGET_ENOSYS,
559     [ELOOP]		= TARGET_ELOOP,
560     [ERESTART]		= TARGET_ERESTART,
561     [ESTRPIPE]		= TARGET_ESTRPIPE,
562     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
563     [EUSERS]		= TARGET_EUSERS,
564     [ENOTSOCK]		= TARGET_ENOTSOCK,
565     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
566     [EMSGSIZE]		= TARGET_EMSGSIZE,
567     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
568     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
569     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
570     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
571     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
572     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
573     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
574     [EADDRINUSE]	= TARGET_EADDRINUSE,
575     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
576     [ENETDOWN]		= TARGET_ENETDOWN,
577     [ENETUNREACH]	= TARGET_ENETUNREACH,
578     [ENETRESET]		= TARGET_ENETRESET,
579     [ECONNABORTED]	= TARGET_ECONNABORTED,
580     [ECONNRESET]	= TARGET_ECONNRESET,
581     [ENOBUFS]		= TARGET_ENOBUFS,
582     [EISCONN]		= TARGET_EISCONN,
583     [ENOTCONN]		= TARGET_ENOTCONN,
584     [EUCLEAN]		= TARGET_EUCLEAN,
585     [ENOTNAM]		= TARGET_ENOTNAM,
586     [ENAVAIL]		= TARGET_ENAVAIL,
587     [EISNAM]		= TARGET_EISNAM,
588     [EREMOTEIO]		= TARGET_EREMOTEIO,
589     [EDQUOT]            = TARGET_EDQUOT,
590     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
591     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
592     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
593     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
594     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
595     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
596     [EALREADY]		= TARGET_EALREADY,
597     [EINPROGRESS]	= TARGET_EINPROGRESS,
598     [ESTALE]		= TARGET_ESTALE,
599     [ECANCELED]		= TARGET_ECANCELED,
600     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
601     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
602 #ifdef ENOKEY
603     [ENOKEY]		= TARGET_ENOKEY,
604 #endif
605 #ifdef EKEYEXPIRED
606     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
607 #endif
608 #ifdef EKEYREVOKED
609     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
610 #endif
611 #ifdef EKEYREJECTED
612     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
613 #endif
614 #ifdef EOWNERDEAD
615     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
616 #endif
617 #ifdef ENOTRECOVERABLE
618     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
619 #endif
620 #ifdef ENOMSG
621     [ENOMSG]            = TARGET_ENOMSG,
622 #endif
623 #ifdef ERKFILL
624     [ERFKILL]           = TARGET_ERFKILL,
625 #endif
626 #ifdef EHWPOISON
627     [EHWPOISON]         = TARGET_EHWPOISON,
628 #endif
629 };
630 
631 static inline int host_to_target_errno(int err)
632 {
633     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
634         host_to_target_errno_table[err]) {
635         return host_to_target_errno_table[err];
636     }
637     return err;
638 }
639 
640 static inline int target_to_host_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         target_to_host_errno_table[err]) {
644         return target_to_host_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline abi_long get_errno(abi_long ret)
650 {
651     if (ret == -1)
652         return -host_to_target_errno(errno);
653     else
654         return ret;
655 }
656 
657 const char *target_strerror(int err)
658 {
659     if (err == TARGET_ERESTARTSYS) {
660         return "To be restarted";
661     }
662     if (err == TARGET_QEMU_ESIGRETURN) {
663         return "Successful exit from sigreturn";
664     }
665 
666     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
667         return NULL;
668     }
669     return strerror(target_to_host_errno(err));
670 }
671 
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
674 { \
675     return safe_syscall(__NR_##name); \
676 }
677 
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
680 { \
681     return safe_syscall(__NR_##name, arg1); \
682 }
683 
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
686 { \
687     return safe_syscall(__NR_##name, arg1, arg2); \
688 }
689 
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
692 { \
693     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
694 }
695 
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
697     type4, arg4) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
701 }
702 
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
706     type5 arg5) \
707 { \
708     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
709 }
710 
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712     type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714     type5 arg5, type6 arg6) \
715 { \
716     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
717 }
718 
719 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
720 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
721 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
722               int, flags, mode_t, mode)
723 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
724               struct rusage *, rusage)
725 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
726               int, options, struct rusage *, rusage)
727 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
728 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
729               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
730 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
731               struct timespec *, tsp, const sigset_t *, sigmask,
732               size_t, sigsetsize)
733 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
734               int, maxevents, int, timeout, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
737               const struct timespec *,timeout,int *,uaddr2,int,val3)
738 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
739 safe_syscall2(int, kill, pid_t, pid, int, sig)
740 safe_syscall2(int, tkill, int, tid, int, sig)
741 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
742 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
744 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
745               unsigned long, pos_l, unsigned long, pos_h)
746 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
747               unsigned long, pos_l, unsigned long, pos_h)
748 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
749               socklen_t, addrlen)
750 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
751               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
752 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
753               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
754 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
755 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
756 safe_syscall2(int, flock, int, fd, int, operation)
757 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
758               const struct timespec *, uts, size_t, sigsetsize)
759 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
760               int, flags)
761 safe_syscall2(int, nanosleep, const struct timespec *, req,
762               struct timespec *, rem)
763 #ifdef TARGET_NR_clock_nanosleep
764 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
765               const struct timespec *, req, struct timespec *, rem)
766 #endif
767 #ifdef __NR_msgsnd
768 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
769               int, flags)
770 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
771               long, msgtype, int, flags)
772 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
773               unsigned, nsops, const struct timespec *, timeout)
774 #else
775 /* This host kernel architecture uses a single ipc syscall; fake up
776  * wrappers for the sub-operations to hide this implementation detail.
777  * Annoyingly we can't include linux/ipc.h to get the constant definitions
778  * for the call parameter because some structs in there conflict with the
779  * sys/ipc.h ones. So we just define them here, and rely on them being
780  * the same for all host architectures.
781  */
782 #define Q_SEMTIMEDOP 4
783 #define Q_MSGSND 11
784 #define Q_MSGRCV 12
785 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
786 
787 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
788               void *, ptr, long, fifth)
789 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
790 {
791     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
792 }
793 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
794 {
795     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
796 }
797 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
798                            const struct timespec *timeout)
799 {
800     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
801                     (long)timeout);
802 }
803 #endif
804 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
805 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
806               size_t, len, unsigned, prio, const struct timespec *, timeout)
807 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
808               size_t, len, unsigned *, prio, const struct timespec *, timeout)
809 #endif
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811  * "third argument might be integer or pointer or not present" behaviour of
812  * the libc function.
813  */
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
817  *  use the flock64 struct rather than unsuffixed flock
818  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
819  */
820 #ifdef __NR_fcntl64
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
822 #else
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
824 #endif
825 
826 static inline int host_to_target_sock_type(int host_type)
827 {
828     int target_type;
829 
830     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
831     case SOCK_DGRAM:
832         target_type = TARGET_SOCK_DGRAM;
833         break;
834     case SOCK_STREAM:
835         target_type = TARGET_SOCK_STREAM;
836         break;
837     default:
838         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
839         break;
840     }
841 
842 #if defined(SOCK_CLOEXEC)
843     if (host_type & SOCK_CLOEXEC) {
844         target_type |= TARGET_SOCK_CLOEXEC;
845     }
846 #endif
847 
848 #if defined(SOCK_NONBLOCK)
849     if (host_type & SOCK_NONBLOCK) {
850         target_type |= TARGET_SOCK_NONBLOCK;
851     }
852 #endif
853 
854     return target_type;
855 }
856 
857 static abi_ulong target_brk;
858 static abi_ulong target_original_brk;
859 static abi_ulong brk_page;
860 
861 void target_set_brk(abi_ulong new_brk)
862 {
863     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
864     brk_page = HOST_PAGE_ALIGN(target_brk);
865 }
866 
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
869 
870 /* do_brk() must return target values and target errnos. */
871 abi_long do_brk(abi_ulong new_brk)
872 {
873     abi_long mapped_addr;
874     abi_ulong new_alloc_size;
875 
876     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
877 
878     if (!new_brk) {
879         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
880         return target_brk;
881     }
882     if (new_brk < target_original_brk) {
883         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
884                    target_brk);
885         return target_brk;
886     }
887 
888     /* If the new brk is less than the highest page reserved to the
889      * target heap allocation, set it and we're almost done...  */
890     if (new_brk <= brk_page) {
891         /* Heap contents are initialized to zero, as for anonymous
892          * mapped pages.  */
893         if (new_brk > target_brk) {
894             memset(g2h(target_brk), 0, new_brk - target_brk);
895         }
896 	target_brk = new_brk;
897         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
898 	return target_brk;
899     }
900 
901     /* We need to allocate more memory after the brk... Note that
902      * we don't use MAP_FIXED because that will map over the top of
903      * any existing mapping (like the one with the host libc or qemu
904      * itself); instead we treat "mapped but at wrong address" as
905      * a failure and unmap again.
906      */
907     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
908     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
909                                         PROT_READ|PROT_WRITE,
910                                         MAP_ANON|MAP_PRIVATE, 0, 0));
911 
912     if (mapped_addr == brk_page) {
913         /* Heap contents are initialized to zero, as for anonymous
914          * mapped pages.  Technically the new pages are already
915          * initialized to zero since they *are* anonymous mapped
916          * pages, however we have to take care with the contents that
917          * come from the remaining part of the previous page: it may
918          * contains garbage data due to a previous heap usage (grown
919          * then shrunken).  */
920         memset(g2h(target_brk), 0, brk_page - target_brk);
921 
922         target_brk = new_brk;
923         brk_page = HOST_PAGE_ALIGN(target_brk);
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
925             target_brk);
926         return target_brk;
927     } else if (mapped_addr != -1) {
928         /* Mapped but at wrong address, meaning there wasn't actually
929          * enough space for this brk.
930          */
931         target_munmap(mapped_addr, new_alloc_size);
932         mapped_addr = -1;
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
934     }
935     else {
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
937     }
938 
939 #if defined(TARGET_ALPHA)
940     /* We (partially) emulate OSF/1 on Alpha, which requires we
941        return a proper errno, not an unchanged brk value.  */
942     return -TARGET_ENOMEM;
943 #endif
944     /* For everything else, return the previous break. */
945     return target_brk;
946 }
947 
948 static inline abi_long copy_from_user_fdset(fd_set *fds,
949                                             abi_ulong target_fds_addr,
950                                             int n)
951 {
952     int i, nw, j, k;
953     abi_ulong b, *target_fds;
954 
955     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
956     if (!(target_fds = lock_user(VERIFY_READ,
957                                  target_fds_addr,
958                                  sizeof(abi_ulong) * nw,
959                                  1)))
960         return -TARGET_EFAULT;
961 
962     FD_ZERO(fds);
963     k = 0;
964     for (i = 0; i < nw; i++) {
965         /* grab the abi_ulong */
966         __get_user(b, &target_fds[i]);
967         for (j = 0; j < TARGET_ABI_BITS; j++) {
968             /* check the bit inside the abi_ulong */
969             if ((b >> j) & 1)
970                 FD_SET(k, fds);
971             k++;
972         }
973     }
974 
975     unlock_user(target_fds, target_fds_addr, 0);
976 
977     return 0;
978 }
979 
980 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
981                                                  abi_ulong target_fds_addr,
982                                                  int n)
983 {
984     if (target_fds_addr) {
985         if (copy_from_user_fdset(fds, target_fds_addr, n))
986             return -TARGET_EFAULT;
987         *fds_ptr = fds;
988     } else {
989         *fds_ptr = NULL;
990     }
991     return 0;
992 }
993 
994 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
995                                           const fd_set *fds,
996                                           int n)
997 {
998     int i, nw, j, k;
999     abi_long v;
1000     abi_ulong *target_fds;
1001 
1002     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1003     if (!(target_fds = lock_user(VERIFY_WRITE,
1004                                  target_fds_addr,
1005                                  sizeof(abi_ulong) * nw,
1006                                  0)))
1007         return -TARGET_EFAULT;
1008 
1009     k = 0;
1010     for (i = 0; i < nw; i++) {
1011         v = 0;
1012         for (j = 0; j < TARGET_ABI_BITS; j++) {
1013             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1014             k++;
1015         }
1016         __put_user(v, &target_fds[i]);
1017     }
1018 
1019     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1020 
1021     return 0;
1022 }
1023 
1024 #if defined(__alpha__)
1025 #define HOST_HZ 1024
1026 #else
1027 #define HOST_HZ 100
1028 #endif
1029 
1030 static inline abi_long host_to_target_clock_t(long ticks)
1031 {
1032 #if HOST_HZ == TARGET_HZ
1033     return ticks;
1034 #else
1035     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1036 #endif
1037 }
1038 
1039 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1040                                              const struct rusage *rusage)
1041 {
1042     struct target_rusage *target_rusage;
1043 
1044     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1045         return -TARGET_EFAULT;
1046     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1047     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1048     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1049     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1050     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1051     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1052     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1053     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1054     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1055     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1056     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1057     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1058     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1059     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1060     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1061     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1062     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1063     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1064     unlock_user_struct(target_rusage, target_addr, 1);
1065 
1066     return 0;
1067 }
1068 
1069 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1070 {
1071     abi_ulong target_rlim_swap;
1072     rlim_t result;
1073 
1074     target_rlim_swap = tswapal(target_rlim);
1075     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1076         return RLIM_INFINITY;
1077 
1078     result = target_rlim_swap;
1079     if (target_rlim_swap != (rlim_t)result)
1080         return RLIM_INFINITY;
1081 
1082     return result;
1083 }
1084 
1085 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1086 {
1087     abi_ulong target_rlim_swap;
1088     abi_ulong result;
1089 
1090     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1091         target_rlim_swap = TARGET_RLIM_INFINITY;
1092     else
1093         target_rlim_swap = rlim;
1094     result = tswapal(target_rlim_swap);
1095 
1096     return result;
1097 }
1098 
1099 static inline int target_to_host_resource(int code)
1100 {
1101     switch (code) {
1102     case TARGET_RLIMIT_AS:
1103         return RLIMIT_AS;
1104     case TARGET_RLIMIT_CORE:
1105         return RLIMIT_CORE;
1106     case TARGET_RLIMIT_CPU:
1107         return RLIMIT_CPU;
1108     case TARGET_RLIMIT_DATA:
1109         return RLIMIT_DATA;
1110     case TARGET_RLIMIT_FSIZE:
1111         return RLIMIT_FSIZE;
1112     case TARGET_RLIMIT_LOCKS:
1113         return RLIMIT_LOCKS;
1114     case TARGET_RLIMIT_MEMLOCK:
1115         return RLIMIT_MEMLOCK;
1116     case TARGET_RLIMIT_MSGQUEUE:
1117         return RLIMIT_MSGQUEUE;
1118     case TARGET_RLIMIT_NICE:
1119         return RLIMIT_NICE;
1120     case TARGET_RLIMIT_NOFILE:
1121         return RLIMIT_NOFILE;
1122     case TARGET_RLIMIT_NPROC:
1123         return RLIMIT_NPROC;
1124     case TARGET_RLIMIT_RSS:
1125         return RLIMIT_RSS;
1126     case TARGET_RLIMIT_RTPRIO:
1127         return RLIMIT_RTPRIO;
1128     case TARGET_RLIMIT_SIGPENDING:
1129         return RLIMIT_SIGPENDING;
1130     case TARGET_RLIMIT_STACK:
1131         return RLIMIT_STACK;
1132     default:
1133         return code;
1134     }
1135 }
1136 
1137 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1138                                               abi_ulong target_tv_addr)
1139 {
1140     struct target_timeval *target_tv;
1141 
1142     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1143         return -TARGET_EFAULT;
1144 
1145     __get_user(tv->tv_sec, &target_tv->tv_sec);
1146     __get_user(tv->tv_usec, &target_tv->tv_usec);
1147 
1148     unlock_user_struct(target_tv, target_tv_addr, 0);
1149 
1150     return 0;
1151 }
1152 
1153 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1154                                             const struct timeval *tv)
1155 {
1156     struct target_timeval *target_tv;
1157 
1158     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1159         return -TARGET_EFAULT;
1160 
1161     __put_user(tv->tv_sec, &target_tv->tv_sec);
1162     __put_user(tv->tv_usec, &target_tv->tv_usec);
1163 
1164     unlock_user_struct(target_tv, target_tv_addr, 1);
1165 
1166     return 0;
1167 }
1168 
1169 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1170                                                abi_ulong target_tz_addr)
1171 {
1172     struct target_timezone *target_tz;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177 
1178     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1179     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1180 
1181     unlock_user_struct(target_tz, target_tz_addr, 0);
1182 
1183     return 0;
1184 }
1185 
1186 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1187 #include <mqueue.h>
1188 
1189 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1190                                               abi_ulong target_mq_attr_addr)
1191 {
1192     struct target_mq_attr *target_mq_attr;
1193 
1194     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1195                           target_mq_attr_addr, 1))
1196         return -TARGET_EFAULT;
1197 
1198     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1199     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1200     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1201     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1202 
1203     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1204 
1205     return 0;
1206 }
1207 
1208 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1209                                             const struct mq_attr *attr)
1210 {
1211     struct target_mq_attr *target_mq_attr;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1214                           target_mq_attr_addr, 0))
1215         return -TARGET_EFAULT;
1216 
1217     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1218     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1219     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1220     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1221 
1222     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1223 
1224     return 0;
1225 }
1226 #endif
1227 
1228 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1229 /* do_select() must return target values and target errnos. */
1230 static abi_long do_select(int n,
1231                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1232                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1233 {
1234     fd_set rfds, wfds, efds;
1235     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1236     struct timeval tv;
1237     struct timespec ts, *ts_ptr;
1238     abi_long ret;
1239 
1240     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1241     if (ret) {
1242         return ret;
1243     }
1244     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1245     if (ret) {
1246         return ret;
1247     }
1248     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1249     if (ret) {
1250         return ret;
1251     }
1252 
1253     if (target_tv_addr) {
1254         if (copy_from_user_timeval(&tv, target_tv_addr))
1255             return -TARGET_EFAULT;
1256         ts.tv_sec = tv.tv_sec;
1257         ts.tv_nsec = tv.tv_usec * 1000;
1258         ts_ptr = &ts;
1259     } else {
1260         ts_ptr = NULL;
1261     }
1262 
1263     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1264                                   ts_ptr, NULL));
1265 
1266     if (!is_error(ret)) {
1267         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1268             return -TARGET_EFAULT;
1269         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1270             return -TARGET_EFAULT;
1271         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1272             return -TARGET_EFAULT;
1273 
1274         if (target_tv_addr) {
1275             tv.tv_sec = ts.tv_sec;
1276             tv.tv_usec = ts.tv_nsec / 1000;
1277             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1278                 return -TARGET_EFAULT;
1279             }
1280         }
1281     }
1282 
1283     return ret;
1284 }
1285 
1286 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1287 static abi_long do_old_select(abi_ulong arg1)
1288 {
1289     struct target_sel_arg_struct *sel;
1290     abi_ulong inp, outp, exp, tvp;
1291     long nsel;
1292 
1293     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1294         return -TARGET_EFAULT;
1295     }
1296 
1297     nsel = tswapal(sel->n);
1298     inp = tswapal(sel->inp);
1299     outp = tswapal(sel->outp);
1300     exp = tswapal(sel->exp);
1301     tvp = tswapal(sel->tvp);
1302 
1303     unlock_user_struct(sel, arg1, 0);
1304 
1305     return do_select(nsel, inp, outp, exp, tvp);
1306 }
1307 #endif
1308 #endif
1309 
1310 static abi_long do_pipe2(int host_pipe[], int flags)
1311 {
1312 #ifdef CONFIG_PIPE2
1313     return pipe2(host_pipe, flags);
1314 #else
1315     return -ENOSYS;
1316 #endif
1317 }
1318 
1319 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1320                         int flags, int is_pipe2)
1321 {
1322     int host_pipe[2];
1323     abi_long ret;
1324     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1325 
1326     if (is_error(ret))
1327         return get_errno(ret);
1328 
1329     /* Several targets have special calling conventions for the original
1330        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1331     if (!is_pipe2) {
1332 #if defined(TARGET_ALPHA)
1333         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1334         return host_pipe[0];
1335 #elif defined(TARGET_MIPS)
1336         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1337         return host_pipe[0];
1338 #elif defined(TARGET_SH4)
1339         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1340         return host_pipe[0];
1341 #elif defined(TARGET_SPARC)
1342         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1343         return host_pipe[0];
1344 #endif
1345     }
1346 
1347     if (put_user_s32(host_pipe[0], pipedes)
1348         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1349         return -TARGET_EFAULT;
1350     return get_errno(ret);
1351 }
1352 
1353 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1354                                               abi_ulong target_addr,
1355                                               socklen_t len)
1356 {
1357     struct target_ip_mreqn *target_smreqn;
1358 
1359     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1360     if (!target_smreqn)
1361         return -TARGET_EFAULT;
1362     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1363     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1364     if (len == sizeof(struct target_ip_mreqn))
1365         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1366     unlock_user(target_smreqn, target_addr, 0);
1367 
1368     return 0;
1369 }
1370 
1371 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1372                                                abi_ulong target_addr,
1373                                                socklen_t len)
1374 {
1375     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1376     sa_family_t sa_family;
1377     struct target_sockaddr *target_saddr;
1378 
1379     if (fd_trans_target_to_host_addr(fd)) {
1380         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1381     }
1382 
1383     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1384     if (!target_saddr)
1385         return -TARGET_EFAULT;
1386 
1387     sa_family = tswap16(target_saddr->sa_family);
1388 
1389     /* Oops. The caller might send a incomplete sun_path; sun_path
1390      * must be terminated by \0 (see the manual page), but
1391      * unfortunately it is quite common to specify sockaddr_un
1392      * length as "strlen(x->sun_path)" while it should be
1393      * "strlen(...) + 1". We'll fix that here if needed.
1394      * Linux kernel has a similar feature.
1395      */
1396 
1397     if (sa_family == AF_UNIX) {
1398         if (len < unix_maxlen && len > 0) {
1399             char *cp = (char*)target_saddr;
1400 
1401             if ( cp[len-1] && !cp[len] )
1402                 len++;
1403         }
1404         if (len > unix_maxlen)
1405             len = unix_maxlen;
1406     }
1407 
1408     memcpy(addr, target_saddr, len);
1409     addr->sa_family = sa_family;
1410     if (sa_family == AF_NETLINK) {
1411         struct sockaddr_nl *nladdr;
1412 
1413         nladdr = (struct sockaddr_nl *)addr;
1414         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1415         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1416     } else if (sa_family == AF_PACKET) {
1417 	struct target_sockaddr_ll *lladdr;
1418 
1419 	lladdr = (struct target_sockaddr_ll *)addr;
1420 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1421 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1422     }
1423     unlock_user(target_saddr, target_addr, 0);
1424 
1425     return 0;
1426 }
1427 
1428 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1429                                                struct sockaddr *addr,
1430                                                socklen_t len)
1431 {
1432     struct target_sockaddr *target_saddr;
1433 
1434     if (len == 0) {
1435         return 0;
1436     }
1437     assert(addr);
1438 
1439     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1440     if (!target_saddr)
1441         return -TARGET_EFAULT;
1442     memcpy(target_saddr, addr, len);
1443     if (len >= offsetof(struct target_sockaddr, sa_family) +
1444         sizeof(target_saddr->sa_family)) {
1445         target_saddr->sa_family = tswap16(addr->sa_family);
1446     }
1447     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1448         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1449         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1450         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1451     } else if (addr->sa_family == AF_PACKET) {
1452         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1453         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1454         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1455     } else if (addr->sa_family == AF_INET6 &&
1456                len >= sizeof(struct target_sockaddr_in6)) {
1457         struct target_sockaddr_in6 *target_in6 =
1458                (struct target_sockaddr_in6 *)target_saddr;
1459         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1460     }
1461     unlock_user(target_saddr, target_addr, len);
1462 
1463     return 0;
1464 }
1465 
1466 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1467                                            struct target_msghdr *target_msgh)
1468 {
1469     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1470     abi_long msg_controllen;
1471     abi_ulong target_cmsg_addr;
1472     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1473     socklen_t space = 0;
1474 
1475     msg_controllen = tswapal(target_msgh->msg_controllen);
1476     if (msg_controllen < sizeof (struct target_cmsghdr))
1477         goto the_end;
1478     target_cmsg_addr = tswapal(target_msgh->msg_control);
1479     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1480     target_cmsg_start = target_cmsg;
1481     if (!target_cmsg)
1482         return -TARGET_EFAULT;
1483 
1484     while (cmsg && target_cmsg) {
1485         void *data = CMSG_DATA(cmsg);
1486         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1487 
1488         int len = tswapal(target_cmsg->cmsg_len)
1489             - sizeof(struct target_cmsghdr);
1490 
1491         space += CMSG_SPACE(len);
1492         if (space > msgh->msg_controllen) {
1493             space -= CMSG_SPACE(len);
1494             /* This is a QEMU bug, since we allocated the payload
1495              * area ourselves (unlike overflow in host-to-target
1496              * conversion, which is just the guest giving us a buffer
1497              * that's too small). It can't happen for the payload types
1498              * we currently support; if it becomes an issue in future
1499              * we would need to improve our allocation strategy to
1500              * something more intelligent than "twice the size of the
1501              * target buffer we're reading from".
1502              */
1503             gemu_log("Host cmsg overflow\n");
1504             break;
1505         }
1506 
1507         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1508             cmsg->cmsg_level = SOL_SOCKET;
1509         } else {
1510             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1511         }
1512         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1513         cmsg->cmsg_len = CMSG_LEN(len);
1514 
1515         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1516             int *fd = (int *)data;
1517             int *target_fd = (int *)target_data;
1518             int i, numfds = len / sizeof(int);
1519 
1520             for (i = 0; i < numfds; i++) {
1521                 __get_user(fd[i], target_fd + i);
1522             }
1523         } else if (cmsg->cmsg_level == SOL_SOCKET
1524                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1525             struct ucred *cred = (struct ucred *)data;
1526             struct target_ucred *target_cred =
1527                 (struct target_ucred *)target_data;
1528 
1529             __get_user(cred->pid, &target_cred->pid);
1530             __get_user(cred->uid, &target_cred->uid);
1531             __get_user(cred->gid, &target_cred->gid);
1532         } else {
1533             gemu_log("Unsupported ancillary data: %d/%d\n",
1534                                         cmsg->cmsg_level, cmsg->cmsg_type);
1535             memcpy(data, target_data, len);
1536         }
1537 
1538         cmsg = CMSG_NXTHDR(msgh, cmsg);
1539         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1540                                          target_cmsg_start);
1541     }
1542     unlock_user(target_cmsg, target_cmsg_addr, 0);
1543  the_end:
1544     msgh->msg_controllen = space;
1545     return 0;
1546 }
1547 
1548 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1549                                            struct msghdr *msgh)
1550 {
1551     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1552     abi_long msg_controllen;
1553     abi_ulong target_cmsg_addr;
1554     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1555     socklen_t space = 0;
1556 
1557     msg_controllen = tswapal(target_msgh->msg_controllen);
1558     if (msg_controllen < sizeof (struct target_cmsghdr))
1559         goto the_end;
1560     target_cmsg_addr = tswapal(target_msgh->msg_control);
1561     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1562     target_cmsg_start = target_cmsg;
1563     if (!target_cmsg)
1564         return -TARGET_EFAULT;
1565 
1566     while (cmsg && target_cmsg) {
1567         void *data = CMSG_DATA(cmsg);
1568         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1569 
1570         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1571         int tgt_len, tgt_space;
1572 
1573         /* We never copy a half-header but may copy half-data;
1574          * this is Linux's behaviour in put_cmsg(). Note that
1575          * truncation here is a guest problem (which we report
1576          * to the guest via the CTRUNC bit), unlike truncation
1577          * in target_to_host_cmsg, which is a QEMU bug.
1578          */
1579         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1580             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1581             break;
1582         }
1583 
1584         if (cmsg->cmsg_level == SOL_SOCKET) {
1585             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1586         } else {
1587             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1588         }
1589         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1590 
1591         /* Payload types which need a different size of payload on
1592          * the target must adjust tgt_len here.
1593          */
1594         tgt_len = len;
1595         switch (cmsg->cmsg_level) {
1596         case SOL_SOCKET:
1597             switch (cmsg->cmsg_type) {
1598             case SO_TIMESTAMP:
1599                 tgt_len = sizeof(struct target_timeval);
1600                 break;
1601             default:
1602                 break;
1603             }
1604             break;
1605         default:
1606             break;
1607         }
1608 
1609         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1610             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1611             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1612         }
1613 
1614         /* We must now copy-and-convert len bytes of payload
1615          * into tgt_len bytes of destination space. Bear in mind
1616          * that in both source and destination we may be dealing
1617          * with a truncated value!
1618          */
1619         switch (cmsg->cmsg_level) {
1620         case SOL_SOCKET:
1621             switch (cmsg->cmsg_type) {
1622             case SCM_RIGHTS:
1623             {
1624                 int *fd = (int *)data;
1625                 int *target_fd = (int *)target_data;
1626                 int i, numfds = tgt_len / sizeof(int);
1627 
1628                 for (i = 0; i < numfds; i++) {
1629                     __put_user(fd[i], target_fd + i);
1630                 }
1631                 break;
1632             }
1633             case SO_TIMESTAMP:
1634             {
1635                 struct timeval *tv = (struct timeval *)data;
1636                 struct target_timeval *target_tv =
1637                     (struct target_timeval *)target_data;
1638 
1639                 if (len != sizeof(struct timeval) ||
1640                     tgt_len != sizeof(struct target_timeval)) {
1641                     goto unimplemented;
1642                 }
1643 
1644                 /* copy struct timeval to target */
1645                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1646                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1647                 break;
1648             }
1649             case SCM_CREDENTIALS:
1650             {
1651                 struct ucred *cred = (struct ucred *)data;
1652                 struct target_ucred *target_cred =
1653                     (struct target_ucred *)target_data;
1654 
1655                 __put_user(cred->pid, &target_cred->pid);
1656                 __put_user(cred->uid, &target_cred->uid);
1657                 __put_user(cred->gid, &target_cred->gid);
1658                 break;
1659             }
1660             default:
1661                 goto unimplemented;
1662             }
1663             break;
1664 
1665         case SOL_IP:
1666             switch (cmsg->cmsg_type) {
1667             case IP_TTL:
1668             {
1669                 uint32_t *v = (uint32_t *)data;
1670                 uint32_t *t_int = (uint32_t *)target_data;
1671 
1672                 if (len != sizeof(uint32_t) ||
1673                     tgt_len != sizeof(uint32_t)) {
1674                     goto unimplemented;
1675                 }
1676                 __put_user(*v, t_int);
1677                 break;
1678             }
1679             case IP_RECVERR:
1680             {
1681                 struct errhdr_t {
1682                    struct sock_extended_err ee;
1683                    struct sockaddr_in offender;
1684                 };
1685                 struct errhdr_t *errh = (struct errhdr_t *)data;
1686                 struct errhdr_t *target_errh =
1687                     (struct errhdr_t *)target_data;
1688 
1689                 if (len != sizeof(struct errhdr_t) ||
1690                     tgt_len != sizeof(struct errhdr_t)) {
1691                     goto unimplemented;
1692                 }
1693                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1694                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1695                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1696                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1697                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1698                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1699                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1700                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1701                     (void *) &errh->offender, sizeof(errh->offender));
1702                 break;
1703             }
1704             default:
1705                 goto unimplemented;
1706             }
1707             break;
1708 
1709         case SOL_IPV6:
1710             switch (cmsg->cmsg_type) {
1711             case IPV6_HOPLIMIT:
1712             {
1713                 uint32_t *v = (uint32_t *)data;
1714                 uint32_t *t_int = (uint32_t *)target_data;
1715 
1716                 if (len != sizeof(uint32_t) ||
1717                     tgt_len != sizeof(uint32_t)) {
1718                     goto unimplemented;
1719                 }
1720                 __put_user(*v, t_int);
1721                 break;
1722             }
1723             case IPV6_RECVERR:
1724             {
1725                 struct errhdr6_t {
1726                    struct sock_extended_err ee;
1727                    struct sockaddr_in6 offender;
1728                 };
1729                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1730                 struct errhdr6_t *target_errh =
1731                     (struct errhdr6_t *)target_data;
1732 
1733                 if (len != sizeof(struct errhdr6_t) ||
1734                     tgt_len != sizeof(struct errhdr6_t)) {
1735                     goto unimplemented;
1736                 }
1737                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1738                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1739                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1740                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1741                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1742                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1743                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1744                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1745                     (void *) &errh->offender, sizeof(errh->offender));
1746                 break;
1747             }
1748             default:
1749                 goto unimplemented;
1750             }
1751             break;
1752 
1753         default:
1754         unimplemented:
1755             gemu_log("Unsupported ancillary data: %d/%d\n",
1756                                         cmsg->cmsg_level, cmsg->cmsg_type);
1757             memcpy(target_data, data, MIN(len, tgt_len));
1758             if (tgt_len > len) {
1759                 memset(target_data + len, 0, tgt_len - len);
1760             }
1761         }
1762 
1763         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1764         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1765         if (msg_controllen < tgt_space) {
1766             tgt_space = msg_controllen;
1767         }
1768         msg_controllen -= tgt_space;
1769         space += tgt_space;
1770         cmsg = CMSG_NXTHDR(msgh, cmsg);
1771         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1772                                          target_cmsg_start);
1773     }
1774     unlock_user(target_cmsg, target_cmsg_addr, space);
1775  the_end:
1776     target_msgh->msg_controllen = tswapal(space);
1777     return 0;
1778 }
1779 
1780 /* do_setsockopt() Must return target values and target errnos. */
1781 static abi_long do_setsockopt(int sockfd, int level, int optname,
1782                               abi_ulong optval_addr, socklen_t optlen)
1783 {
1784     abi_long ret;
1785     int val;
1786     struct ip_mreqn *ip_mreq;
1787     struct ip_mreq_source *ip_mreq_source;
1788 
1789     switch(level) {
1790     case SOL_TCP:
1791         /* TCP options all take an 'int' value.  */
1792         if (optlen < sizeof(uint32_t))
1793             return -TARGET_EINVAL;
1794 
1795         if (get_user_u32(val, optval_addr))
1796             return -TARGET_EFAULT;
1797         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1798         break;
1799     case SOL_IP:
1800         switch(optname) {
1801         case IP_TOS:
1802         case IP_TTL:
1803         case IP_HDRINCL:
1804         case IP_ROUTER_ALERT:
1805         case IP_RECVOPTS:
1806         case IP_RETOPTS:
1807         case IP_PKTINFO:
1808         case IP_MTU_DISCOVER:
1809         case IP_RECVERR:
1810         case IP_RECVTTL:
1811         case IP_RECVTOS:
1812 #ifdef IP_FREEBIND
1813         case IP_FREEBIND:
1814 #endif
1815         case IP_MULTICAST_TTL:
1816         case IP_MULTICAST_LOOP:
1817             val = 0;
1818             if (optlen >= sizeof(uint32_t)) {
1819                 if (get_user_u32(val, optval_addr))
1820                     return -TARGET_EFAULT;
1821             } else if (optlen >= 1) {
1822                 if (get_user_u8(val, optval_addr))
1823                     return -TARGET_EFAULT;
1824             }
1825             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1826             break;
1827         case IP_ADD_MEMBERSHIP:
1828         case IP_DROP_MEMBERSHIP:
1829             if (optlen < sizeof (struct target_ip_mreq) ||
1830                 optlen > sizeof (struct target_ip_mreqn))
1831                 return -TARGET_EINVAL;
1832 
1833             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1834             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1835             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1836             break;
1837 
1838         case IP_BLOCK_SOURCE:
1839         case IP_UNBLOCK_SOURCE:
1840         case IP_ADD_SOURCE_MEMBERSHIP:
1841         case IP_DROP_SOURCE_MEMBERSHIP:
1842             if (optlen != sizeof (struct target_ip_mreq_source))
1843                 return -TARGET_EINVAL;
1844 
1845             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1846             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1847             unlock_user (ip_mreq_source, optval_addr, 0);
1848             break;
1849 
1850         default:
1851             goto unimplemented;
1852         }
1853         break;
1854     case SOL_IPV6:
1855         switch (optname) {
1856         case IPV6_MTU_DISCOVER:
1857         case IPV6_MTU:
1858         case IPV6_V6ONLY:
1859         case IPV6_RECVPKTINFO:
1860         case IPV6_UNICAST_HOPS:
1861         case IPV6_MULTICAST_HOPS:
1862         case IPV6_MULTICAST_LOOP:
1863         case IPV6_RECVERR:
1864         case IPV6_RECVHOPLIMIT:
1865         case IPV6_2292HOPLIMIT:
1866         case IPV6_CHECKSUM:
1867         case IPV6_ADDRFORM:
1868         case IPV6_2292PKTINFO:
1869         case IPV6_RECVTCLASS:
1870         case IPV6_RECVRTHDR:
1871         case IPV6_2292RTHDR:
1872         case IPV6_RECVHOPOPTS:
1873         case IPV6_2292HOPOPTS:
1874         case IPV6_RECVDSTOPTS:
1875         case IPV6_2292DSTOPTS:
1876         case IPV6_TCLASS:
1877 #ifdef IPV6_RECVPATHMTU
1878         case IPV6_RECVPATHMTU:
1879 #endif
1880 #ifdef IPV6_TRANSPARENT
1881         case IPV6_TRANSPARENT:
1882 #endif
1883 #ifdef IPV6_FREEBIND
1884         case IPV6_FREEBIND:
1885 #endif
1886 #ifdef IPV6_RECVORIGDSTADDR
1887         case IPV6_RECVORIGDSTADDR:
1888 #endif
1889             val = 0;
1890             if (optlen < sizeof(uint32_t)) {
1891                 return -TARGET_EINVAL;
1892             }
1893             if (get_user_u32(val, optval_addr)) {
1894                 return -TARGET_EFAULT;
1895             }
1896             ret = get_errno(setsockopt(sockfd, level, optname,
1897                                        &val, sizeof(val)));
1898             break;
1899         case IPV6_PKTINFO:
1900         {
1901             struct in6_pktinfo pki;
1902 
1903             if (optlen < sizeof(pki)) {
1904                 return -TARGET_EINVAL;
1905             }
1906 
1907             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1908                 return -TARGET_EFAULT;
1909             }
1910 
1911             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1912 
1913             ret = get_errno(setsockopt(sockfd, level, optname,
1914                                        &pki, sizeof(pki)));
1915             break;
1916         }
1917         default:
1918             goto unimplemented;
1919         }
1920         break;
1921     case SOL_ICMPV6:
1922         switch (optname) {
1923         case ICMPV6_FILTER:
1924         {
1925             struct icmp6_filter icmp6f;
1926 
1927             if (optlen > sizeof(icmp6f)) {
1928                 optlen = sizeof(icmp6f);
1929             }
1930 
1931             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1932                 return -TARGET_EFAULT;
1933             }
1934 
1935             for (val = 0; val < 8; val++) {
1936                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1937             }
1938 
1939             ret = get_errno(setsockopt(sockfd, level, optname,
1940                                        &icmp6f, optlen));
1941             break;
1942         }
1943         default:
1944             goto unimplemented;
1945         }
1946         break;
1947     case SOL_RAW:
1948         switch (optname) {
1949         case ICMP_FILTER:
1950         case IPV6_CHECKSUM:
1951             /* those take an u32 value */
1952             if (optlen < sizeof(uint32_t)) {
1953                 return -TARGET_EINVAL;
1954             }
1955 
1956             if (get_user_u32(val, optval_addr)) {
1957                 return -TARGET_EFAULT;
1958             }
1959             ret = get_errno(setsockopt(sockfd, level, optname,
1960                                        &val, sizeof(val)));
1961             break;
1962 
1963         default:
1964             goto unimplemented;
1965         }
1966         break;
1967     case TARGET_SOL_SOCKET:
1968         switch (optname) {
1969         case TARGET_SO_RCVTIMEO:
1970         {
1971                 struct timeval tv;
1972 
1973                 optname = SO_RCVTIMEO;
1974 
1975 set_timeout:
1976                 if (optlen != sizeof(struct target_timeval)) {
1977                     return -TARGET_EINVAL;
1978                 }
1979 
1980                 if (copy_from_user_timeval(&tv, optval_addr)) {
1981                     return -TARGET_EFAULT;
1982                 }
1983 
1984                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1985                                 &tv, sizeof(tv)));
1986                 return ret;
1987         }
1988         case TARGET_SO_SNDTIMEO:
1989                 optname = SO_SNDTIMEO;
1990                 goto set_timeout;
1991         case TARGET_SO_ATTACH_FILTER:
1992         {
1993                 struct target_sock_fprog *tfprog;
1994                 struct target_sock_filter *tfilter;
1995                 struct sock_fprog fprog;
1996                 struct sock_filter *filter;
1997                 int i;
1998 
1999                 if (optlen != sizeof(*tfprog)) {
2000                     return -TARGET_EINVAL;
2001                 }
2002                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2003                     return -TARGET_EFAULT;
2004                 }
2005                 if (!lock_user_struct(VERIFY_READ, tfilter,
2006                                       tswapal(tfprog->filter), 0)) {
2007                     unlock_user_struct(tfprog, optval_addr, 1);
2008                     return -TARGET_EFAULT;
2009                 }
2010 
2011                 fprog.len = tswap16(tfprog->len);
2012                 filter = g_try_new(struct sock_filter, fprog.len);
2013                 if (filter == NULL) {
2014                     unlock_user_struct(tfilter, tfprog->filter, 1);
2015                     unlock_user_struct(tfprog, optval_addr, 1);
2016                     return -TARGET_ENOMEM;
2017                 }
2018                 for (i = 0; i < fprog.len; i++) {
2019                     filter[i].code = tswap16(tfilter[i].code);
2020                     filter[i].jt = tfilter[i].jt;
2021                     filter[i].jf = tfilter[i].jf;
2022                     filter[i].k = tswap32(tfilter[i].k);
2023                 }
2024                 fprog.filter = filter;
2025 
2026                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2027                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2028                 g_free(filter);
2029 
2030                 unlock_user_struct(tfilter, tfprog->filter, 1);
2031                 unlock_user_struct(tfprog, optval_addr, 1);
2032                 return ret;
2033         }
2034 	case TARGET_SO_BINDTODEVICE:
2035 	{
2036 		char *dev_ifname, *addr_ifname;
2037 
2038 		if (optlen > IFNAMSIZ - 1) {
2039 		    optlen = IFNAMSIZ - 1;
2040 		}
2041 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2042 		if (!dev_ifname) {
2043 		    return -TARGET_EFAULT;
2044 		}
2045 		optname = SO_BINDTODEVICE;
2046 		addr_ifname = alloca(IFNAMSIZ);
2047 		memcpy(addr_ifname, dev_ifname, optlen);
2048 		addr_ifname[optlen] = 0;
2049 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2050                                            addr_ifname, optlen));
2051 		unlock_user (dev_ifname, optval_addr, 0);
2052 		return ret;
2053 	}
2054         case TARGET_SO_LINGER:
2055         {
2056                 struct linger lg;
2057                 struct target_linger *tlg;
2058 
2059                 if (optlen != sizeof(struct target_linger)) {
2060                     return -TARGET_EINVAL;
2061                 }
2062                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2063                     return -TARGET_EFAULT;
2064                 }
2065                 __get_user(lg.l_onoff, &tlg->l_onoff);
2066                 __get_user(lg.l_linger, &tlg->l_linger);
2067                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2068                                 &lg, sizeof(lg)));
2069                 unlock_user_struct(tlg, optval_addr, 0);
2070                 return ret;
2071         }
2072             /* Options with 'int' argument.  */
2073         case TARGET_SO_DEBUG:
2074 		optname = SO_DEBUG;
2075 		break;
2076         case TARGET_SO_REUSEADDR:
2077 		optname = SO_REUSEADDR;
2078 		break;
2079 #ifdef SO_REUSEPORT
2080         case TARGET_SO_REUSEPORT:
2081                 optname = SO_REUSEPORT;
2082                 break;
2083 #endif
2084         case TARGET_SO_TYPE:
2085 		optname = SO_TYPE;
2086 		break;
2087         case TARGET_SO_ERROR:
2088 		optname = SO_ERROR;
2089 		break;
2090         case TARGET_SO_DONTROUTE:
2091 		optname = SO_DONTROUTE;
2092 		break;
2093         case TARGET_SO_BROADCAST:
2094 		optname = SO_BROADCAST;
2095 		break;
2096         case TARGET_SO_SNDBUF:
2097 		optname = SO_SNDBUF;
2098 		break;
2099         case TARGET_SO_SNDBUFFORCE:
2100                 optname = SO_SNDBUFFORCE;
2101                 break;
2102         case TARGET_SO_RCVBUF:
2103 		optname = SO_RCVBUF;
2104 		break;
2105         case TARGET_SO_RCVBUFFORCE:
2106                 optname = SO_RCVBUFFORCE;
2107                 break;
2108         case TARGET_SO_KEEPALIVE:
2109 		optname = SO_KEEPALIVE;
2110 		break;
2111         case TARGET_SO_OOBINLINE:
2112 		optname = SO_OOBINLINE;
2113 		break;
2114         case TARGET_SO_NO_CHECK:
2115 		optname = SO_NO_CHECK;
2116 		break;
2117         case TARGET_SO_PRIORITY:
2118 		optname = SO_PRIORITY;
2119 		break;
2120 #ifdef SO_BSDCOMPAT
2121         case TARGET_SO_BSDCOMPAT:
2122 		optname = SO_BSDCOMPAT;
2123 		break;
2124 #endif
2125         case TARGET_SO_PASSCRED:
2126 		optname = SO_PASSCRED;
2127 		break;
2128         case TARGET_SO_PASSSEC:
2129                 optname = SO_PASSSEC;
2130                 break;
2131         case TARGET_SO_TIMESTAMP:
2132 		optname = SO_TIMESTAMP;
2133 		break;
2134         case TARGET_SO_RCVLOWAT:
2135 		optname = SO_RCVLOWAT;
2136 		break;
2137         default:
2138             goto unimplemented;
2139         }
2140 	if (optlen < sizeof(uint32_t))
2141             return -TARGET_EINVAL;
2142 
2143 	if (get_user_u32(val, optval_addr))
2144             return -TARGET_EFAULT;
2145 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2146         break;
2147     default:
2148     unimplemented:
2149         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2150         ret = -TARGET_ENOPROTOOPT;
2151     }
2152     return ret;
2153 }
2154 
2155 /* do_getsockopt() Must return target values and target errnos. */
2156 static abi_long do_getsockopt(int sockfd, int level, int optname,
2157                               abi_ulong optval_addr, abi_ulong optlen)
2158 {
2159     abi_long ret;
2160     int len, val;
2161     socklen_t lv;
2162 
2163     switch(level) {
2164     case TARGET_SOL_SOCKET:
2165         level = SOL_SOCKET;
2166         switch (optname) {
2167         /* These don't just return a single integer */
2168         case TARGET_SO_RCVTIMEO:
2169         case TARGET_SO_SNDTIMEO:
2170         case TARGET_SO_PEERNAME:
2171             goto unimplemented;
2172         case TARGET_SO_PEERCRED: {
2173             struct ucred cr;
2174             socklen_t crlen;
2175             struct target_ucred *tcr;
2176 
2177             if (get_user_u32(len, optlen)) {
2178                 return -TARGET_EFAULT;
2179             }
2180             if (len < 0) {
2181                 return -TARGET_EINVAL;
2182             }
2183 
2184             crlen = sizeof(cr);
2185             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2186                                        &cr, &crlen));
2187             if (ret < 0) {
2188                 return ret;
2189             }
2190             if (len > crlen) {
2191                 len = crlen;
2192             }
2193             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2194                 return -TARGET_EFAULT;
2195             }
2196             __put_user(cr.pid, &tcr->pid);
2197             __put_user(cr.uid, &tcr->uid);
2198             __put_user(cr.gid, &tcr->gid);
2199             unlock_user_struct(tcr, optval_addr, 1);
2200             if (put_user_u32(len, optlen)) {
2201                 return -TARGET_EFAULT;
2202             }
2203             break;
2204         }
2205         case TARGET_SO_LINGER:
2206         {
2207             struct linger lg;
2208             socklen_t lglen;
2209             struct target_linger *tlg;
2210 
2211             if (get_user_u32(len, optlen)) {
2212                 return -TARGET_EFAULT;
2213             }
2214             if (len < 0) {
2215                 return -TARGET_EINVAL;
2216             }
2217 
2218             lglen = sizeof(lg);
2219             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2220                                        &lg, &lglen));
2221             if (ret < 0) {
2222                 return ret;
2223             }
2224             if (len > lglen) {
2225                 len = lglen;
2226             }
2227             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2228                 return -TARGET_EFAULT;
2229             }
2230             __put_user(lg.l_onoff, &tlg->l_onoff);
2231             __put_user(lg.l_linger, &tlg->l_linger);
2232             unlock_user_struct(tlg, optval_addr, 1);
2233             if (put_user_u32(len, optlen)) {
2234                 return -TARGET_EFAULT;
2235             }
2236             break;
2237         }
2238         /* Options with 'int' argument.  */
2239         case TARGET_SO_DEBUG:
2240             optname = SO_DEBUG;
2241             goto int_case;
2242         case TARGET_SO_REUSEADDR:
2243             optname = SO_REUSEADDR;
2244             goto int_case;
2245 #ifdef SO_REUSEPORT
2246         case TARGET_SO_REUSEPORT:
2247             optname = SO_REUSEPORT;
2248             goto int_case;
2249 #endif
2250         case TARGET_SO_TYPE:
2251             optname = SO_TYPE;
2252             goto int_case;
2253         case TARGET_SO_ERROR:
2254             optname = SO_ERROR;
2255             goto int_case;
2256         case TARGET_SO_DONTROUTE:
2257             optname = SO_DONTROUTE;
2258             goto int_case;
2259         case TARGET_SO_BROADCAST:
2260             optname = SO_BROADCAST;
2261             goto int_case;
2262         case TARGET_SO_SNDBUF:
2263             optname = SO_SNDBUF;
2264             goto int_case;
2265         case TARGET_SO_RCVBUF:
2266             optname = SO_RCVBUF;
2267             goto int_case;
2268         case TARGET_SO_KEEPALIVE:
2269             optname = SO_KEEPALIVE;
2270             goto int_case;
2271         case TARGET_SO_OOBINLINE:
2272             optname = SO_OOBINLINE;
2273             goto int_case;
2274         case TARGET_SO_NO_CHECK:
2275             optname = SO_NO_CHECK;
2276             goto int_case;
2277         case TARGET_SO_PRIORITY:
2278             optname = SO_PRIORITY;
2279             goto int_case;
2280 #ifdef SO_BSDCOMPAT
2281         case TARGET_SO_BSDCOMPAT:
2282             optname = SO_BSDCOMPAT;
2283             goto int_case;
2284 #endif
2285         case TARGET_SO_PASSCRED:
2286             optname = SO_PASSCRED;
2287             goto int_case;
2288         case TARGET_SO_TIMESTAMP:
2289             optname = SO_TIMESTAMP;
2290             goto int_case;
2291         case TARGET_SO_RCVLOWAT:
2292             optname = SO_RCVLOWAT;
2293             goto int_case;
2294         case TARGET_SO_ACCEPTCONN:
2295             optname = SO_ACCEPTCONN;
2296             goto int_case;
2297         default:
2298             goto int_case;
2299         }
2300         break;
2301     case SOL_TCP:
2302         /* TCP options all take an 'int' value.  */
2303     int_case:
2304         if (get_user_u32(len, optlen))
2305             return -TARGET_EFAULT;
2306         if (len < 0)
2307             return -TARGET_EINVAL;
2308         lv = sizeof(lv);
2309         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2310         if (ret < 0)
2311             return ret;
2312         if (optname == SO_TYPE) {
2313             val = host_to_target_sock_type(val);
2314         }
2315         if (len > lv)
2316             len = lv;
2317         if (len == 4) {
2318             if (put_user_u32(val, optval_addr))
2319                 return -TARGET_EFAULT;
2320         } else {
2321             if (put_user_u8(val, optval_addr))
2322                 return -TARGET_EFAULT;
2323         }
2324         if (put_user_u32(len, optlen))
2325             return -TARGET_EFAULT;
2326         break;
2327     case SOL_IP:
2328         switch(optname) {
2329         case IP_TOS:
2330         case IP_TTL:
2331         case IP_HDRINCL:
2332         case IP_ROUTER_ALERT:
2333         case IP_RECVOPTS:
2334         case IP_RETOPTS:
2335         case IP_PKTINFO:
2336         case IP_MTU_DISCOVER:
2337         case IP_RECVERR:
2338         case IP_RECVTOS:
2339 #ifdef IP_FREEBIND
2340         case IP_FREEBIND:
2341 #endif
2342         case IP_MULTICAST_TTL:
2343         case IP_MULTICAST_LOOP:
2344             if (get_user_u32(len, optlen))
2345                 return -TARGET_EFAULT;
2346             if (len < 0)
2347                 return -TARGET_EINVAL;
2348             lv = sizeof(lv);
2349             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2350             if (ret < 0)
2351                 return ret;
2352             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2353                 len = 1;
2354                 if (put_user_u32(len, optlen)
2355                     || put_user_u8(val, optval_addr))
2356                     return -TARGET_EFAULT;
2357             } else {
2358                 if (len > sizeof(int))
2359                     len = sizeof(int);
2360                 if (put_user_u32(len, optlen)
2361                     || put_user_u32(val, optval_addr))
2362                     return -TARGET_EFAULT;
2363             }
2364             break;
2365         default:
2366             ret = -TARGET_ENOPROTOOPT;
2367             break;
2368         }
2369         break;
2370     case SOL_IPV6:
2371         switch (optname) {
2372         case IPV6_MTU_DISCOVER:
2373         case IPV6_MTU:
2374         case IPV6_V6ONLY:
2375         case IPV6_RECVPKTINFO:
2376         case IPV6_UNICAST_HOPS:
2377         case IPV6_MULTICAST_HOPS:
2378         case IPV6_MULTICAST_LOOP:
2379         case IPV6_RECVERR:
2380         case IPV6_RECVHOPLIMIT:
2381         case IPV6_2292HOPLIMIT:
2382         case IPV6_CHECKSUM:
2383         case IPV6_ADDRFORM:
2384         case IPV6_2292PKTINFO:
2385         case IPV6_RECVTCLASS:
2386         case IPV6_RECVRTHDR:
2387         case IPV6_2292RTHDR:
2388         case IPV6_RECVHOPOPTS:
2389         case IPV6_2292HOPOPTS:
2390         case IPV6_RECVDSTOPTS:
2391         case IPV6_2292DSTOPTS:
2392         case IPV6_TCLASS:
2393 #ifdef IPV6_RECVPATHMTU
2394         case IPV6_RECVPATHMTU:
2395 #endif
2396 #ifdef IPV6_TRANSPARENT
2397         case IPV6_TRANSPARENT:
2398 #endif
2399 #ifdef IPV6_FREEBIND
2400         case IPV6_FREEBIND:
2401 #endif
2402 #ifdef IPV6_RECVORIGDSTADDR
2403         case IPV6_RECVORIGDSTADDR:
2404 #endif
2405             if (get_user_u32(len, optlen))
2406                 return -TARGET_EFAULT;
2407             if (len < 0)
2408                 return -TARGET_EINVAL;
2409             lv = sizeof(lv);
2410             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2411             if (ret < 0)
2412                 return ret;
2413             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2414                 len = 1;
2415                 if (put_user_u32(len, optlen)
2416                     || put_user_u8(val, optval_addr))
2417                     return -TARGET_EFAULT;
2418             } else {
2419                 if (len > sizeof(int))
2420                     len = sizeof(int);
2421                 if (put_user_u32(len, optlen)
2422                     || put_user_u32(val, optval_addr))
2423                     return -TARGET_EFAULT;
2424             }
2425             break;
2426         default:
2427             ret = -TARGET_ENOPROTOOPT;
2428             break;
2429         }
2430         break;
2431     default:
2432     unimplemented:
2433         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2434                  level, optname);
2435         ret = -TARGET_EOPNOTSUPP;
2436         break;
2437     }
2438     return ret;
2439 }
2440 
2441 /* Convert target low/high pair representing file offset into the host
2442  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2443  * as the kernel doesn't handle them either.
2444  */
2445 static void target_to_host_low_high(abi_ulong tlow,
2446                                     abi_ulong thigh,
2447                                     unsigned long *hlow,
2448                                     unsigned long *hhigh)
2449 {
2450     uint64_t off = tlow |
2451         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2452         TARGET_LONG_BITS / 2;
2453 
2454     *hlow = off;
2455     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2456 }
2457 
2458 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2459                                 abi_ulong count, int copy)
2460 {
2461     struct target_iovec *target_vec;
2462     struct iovec *vec;
2463     abi_ulong total_len, max_len;
2464     int i;
2465     int err = 0;
2466     bool bad_address = false;
2467 
2468     if (count == 0) {
2469         errno = 0;
2470         return NULL;
2471     }
2472     if (count > IOV_MAX) {
2473         errno = EINVAL;
2474         return NULL;
2475     }
2476 
2477     vec = g_try_new0(struct iovec, count);
2478     if (vec == NULL) {
2479         errno = ENOMEM;
2480         return NULL;
2481     }
2482 
2483     target_vec = lock_user(VERIFY_READ, target_addr,
2484                            count * sizeof(struct target_iovec), 1);
2485     if (target_vec == NULL) {
2486         err = EFAULT;
2487         goto fail2;
2488     }
2489 
2490     /* ??? If host page size > target page size, this will result in a
2491        value larger than what we can actually support.  */
2492     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2493     total_len = 0;
2494 
2495     for (i = 0; i < count; i++) {
2496         abi_ulong base = tswapal(target_vec[i].iov_base);
2497         abi_long len = tswapal(target_vec[i].iov_len);
2498 
2499         if (len < 0) {
2500             err = EINVAL;
2501             goto fail;
2502         } else if (len == 0) {
2503             /* Zero length pointer is ignored.  */
2504             vec[i].iov_base = 0;
2505         } else {
2506             vec[i].iov_base = lock_user(type, base, len, copy);
2507             /* If the first buffer pointer is bad, this is a fault.  But
2508              * subsequent bad buffers will result in a partial write; this
2509              * is realized by filling the vector with null pointers and
2510              * zero lengths. */
2511             if (!vec[i].iov_base) {
2512                 if (i == 0) {
2513                     err = EFAULT;
2514                     goto fail;
2515                 } else {
2516                     bad_address = true;
2517                 }
2518             }
2519             if (bad_address) {
2520                 len = 0;
2521             }
2522             if (len > max_len - total_len) {
2523                 len = max_len - total_len;
2524             }
2525         }
2526         vec[i].iov_len = len;
2527         total_len += len;
2528     }
2529 
2530     unlock_user(target_vec, target_addr, 0);
2531     return vec;
2532 
2533  fail:
2534     while (--i >= 0) {
2535         if (tswapal(target_vec[i].iov_len) > 0) {
2536             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2537         }
2538     }
2539     unlock_user(target_vec, target_addr, 0);
2540  fail2:
2541     g_free(vec);
2542     errno = err;
2543     return NULL;
2544 }
2545 
2546 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2547                          abi_ulong count, int copy)
2548 {
2549     struct target_iovec *target_vec;
2550     int i;
2551 
2552     target_vec = lock_user(VERIFY_READ, target_addr,
2553                            count * sizeof(struct target_iovec), 1);
2554     if (target_vec) {
2555         for (i = 0; i < count; i++) {
2556             abi_ulong base = tswapal(target_vec[i].iov_base);
2557             abi_long len = tswapal(target_vec[i].iov_len);
2558             if (len < 0) {
2559                 break;
2560             }
2561             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2562         }
2563         unlock_user(target_vec, target_addr, 0);
2564     }
2565 
2566     g_free(vec);
2567 }
2568 
2569 static inline int target_to_host_sock_type(int *type)
2570 {
2571     int host_type = 0;
2572     int target_type = *type;
2573 
2574     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2575     case TARGET_SOCK_DGRAM:
2576         host_type = SOCK_DGRAM;
2577         break;
2578     case TARGET_SOCK_STREAM:
2579         host_type = SOCK_STREAM;
2580         break;
2581     default:
2582         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2583         break;
2584     }
2585     if (target_type & TARGET_SOCK_CLOEXEC) {
2586 #if defined(SOCK_CLOEXEC)
2587         host_type |= SOCK_CLOEXEC;
2588 #else
2589         return -TARGET_EINVAL;
2590 #endif
2591     }
2592     if (target_type & TARGET_SOCK_NONBLOCK) {
2593 #if defined(SOCK_NONBLOCK)
2594         host_type |= SOCK_NONBLOCK;
2595 #elif !defined(O_NONBLOCK)
2596         return -TARGET_EINVAL;
2597 #endif
2598     }
2599     *type = host_type;
2600     return 0;
2601 }
2602 
2603 /* Try to emulate socket type flags after socket creation.  */
2604 static int sock_flags_fixup(int fd, int target_type)
2605 {
2606 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2607     if (target_type & TARGET_SOCK_NONBLOCK) {
2608         int flags = fcntl(fd, F_GETFL);
2609         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2610             close(fd);
2611             return -TARGET_EINVAL;
2612         }
2613     }
2614 #endif
2615     return fd;
2616 }
2617 
2618 /* do_socket() Must return target values and target errnos. */
2619 static abi_long do_socket(int domain, int type, int protocol)
2620 {
2621     int target_type = type;
2622     int ret;
2623 
2624     ret = target_to_host_sock_type(&type);
2625     if (ret) {
2626         return ret;
2627     }
2628 
2629     if (domain == PF_NETLINK && !(
2630 #ifdef CONFIG_RTNETLINK
2631          protocol == NETLINK_ROUTE ||
2632 #endif
2633          protocol == NETLINK_KOBJECT_UEVENT ||
2634          protocol == NETLINK_AUDIT)) {
2635         return -EPFNOSUPPORT;
2636     }
2637 
2638     if (domain == AF_PACKET ||
2639         (domain == AF_INET && type == SOCK_PACKET)) {
2640         protocol = tswap16(protocol);
2641     }
2642 
2643     ret = get_errno(socket(domain, type, protocol));
2644     if (ret >= 0) {
2645         ret = sock_flags_fixup(ret, target_type);
2646         if (type == SOCK_PACKET) {
2647             /* Manage an obsolete case :
2648              * if socket type is SOCK_PACKET, bind by name
2649              */
2650             fd_trans_register(ret, &target_packet_trans);
2651         } else if (domain == PF_NETLINK) {
2652             switch (protocol) {
2653 #ifdef CONFIG_RTNETLINK
2654             case NETLINK_ROUTE:
2655                 fd_trans_register(ret, &target_netlink_route_trans);
2656                 break;
2657 #endif
2658             case NETLINK_KOBJECT_UEVENT:
2659                 /* nothing to do: messages are strings */
2660                 break;
2661             case NETLINK_AUDIT:
2662                 fd_trans_register(ret, &target_netlink_audit_trans);
2663                 break;
2664             default:
2665                 g_assert_not_reached();
2666             }
2667         }
2668     }
2669     return ret;
2670 }
2671 
2672 /* do_bind() Must return target values and target errnos. */
2673 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2674                         socklen_t addrlen)
2675 {
2676     void *addr;
2677     abi_long ret;
2678 
2679     if ((int)addrlen < 0) {
2680         return -TARGET_EINVAL;
2681     }
2682 
2683     addr = alloca(addrlen+1);
2684 
2685     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2686     if (ret)
2687         return ret;
2688 
2689     return get_errno(bind(sockfd, addr, addrlen));
2690 }
2691 
2692 /* do_connect() Must return target values and target errnos. */
2693 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2694                            socklen_t addrlen)
2695 {
2696     void *addr;
2697     abi_long ret;
2698 
2699     if ((int)addrlen < 0) {
2700         return -TARGET_EINVAL;
2701     }
2702 
2703     addr = alloca(addrlen+1);
2704 
2705     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2706     if (ret)
2707         return ret;
2708 
2709     return get_errno(safe_connect(sockfd, addr, addrlen));
2710 }
2711 
2712 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2713 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2714                                       int flags, int send)
2715 {
2716     abi_long ret, len;
2717     struct msghdr msg;
2718     abi_ulong count;
2719     struct iovec *vec;
2720     abi_ulong target_vec;
2721 
2722     if (msgp->msg_name) {
2723         msg.msg_namelen = tswap32(msgp->msg_namelen);
2724         msg.msg_name = alloca(msg.msg_namelen+1);
2725         ret = target_to_host_sockaddr(fd, msg.msg_name,
2726                                       tswapal(msgp->msg_name),
2727                                       msg.msg_namelen);
2728         if (ret == -TARGET_EFAULT) {
2729             /* For connected sockets msg_name and msg_namelen must
2730              * be ignored, so returning EFAULT immediately is wrong.
2731              * Instead, pass a bad msg_name to the host kernel, and
2732              * let it decide whether to return EFAULT or not.
2733              */
2734             msg.msg_name = (void *)-1;
2735         } else if (ret) {
2736             goto out2;
2737         }
2738     } else {
2739         msg.msg_name = NULL;
2740         msg.msg_namelen = 0;
2741     }
2742     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2743     msg.msg_control = alloca(msg.msg_controllen);
2744     memset(msg.msg_control, 0, msg.msg_controllen);
2745 
2746     msg.msg_flags = tswap32(msgp->msg_flags);
2747 
2748     count = tswapal(msgp->msg_iovlen);
2749     target_vec = tswapal(msgp->msg_iov);
2750 
2751     if (count > IOV_MAX) {
2752         /* sendrcvmsg returns a different errno for this condition than
2753          * readv/writev, so we must catch it here before lock_iovec() does.
2754          */
2755         ret = -TARGET_EMSGSIZE;
2756         goto out2;
2757     }
2758 
2759     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2760                      target_vec, count, send);
2761     if (vec == NULL) {
2762         ret = -host_to_target_errno(errno);
2763         goto out2;
2764     }
2765     msg.msg_iovlen = count;
2766     msg.msg_iov = vec;
2767 
2768     if (send) {
2769         if (fd_trans_target_to_host_data(fd)) {
2770             void *host_msg;
2771 
2772             host_msg = g_malloc(msg.msg_iov->iov_len);
2773             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2774             ret = fd_trans_target_to_host_data(fd)(host_msg,
2775                                                    msg.msg_iov->iov_len);
2776             if (ret >= 0) {
2777                 msg.msg_iov->iov_base = host_msg;
2778                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2779             }
2780             g_free(host_msg);
2781         } else {
2782             ret = target_to_host_cmsg(&msg, msgp);
2783             if (ret == 0) {
2784                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2785             }
2786         }
2787     } else {
2788         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2789         if (!is_error(ret)) {
2790             len = ret;
2791             if (fd_trans_host_to_target_data(fd)) {
2792                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2793                                                MIN(msg.msg_iov->iov_len, len));
2794             } else {
2795                 ret = host_to_target_cmsg(msgp, &msg);
2796             }
2797             if (!is_error(ret)) {
2798                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2799                 msgp->msg_flags = tswap32(msg.msg_flags);
2800                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2801                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2802                                     msg.msg_name, msg.msg_namelen);
2803                     if (ret) {
2804                         goto out;
2805                     }
2806                 }
2807 
2808                 ret = len;
2809             }
2810         }
2811     }
2812 
2813 out:
2814     unlock_iovec(vec, target_vec, count, !send);
2815 out2:
2816     return ret;
2817 }
2818 
2819 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2820                                int flags, int send)
2821 {
2822     abi_long ret;
2823     struct target_msghdr *msgp;
2824 
2825     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2826                           msgp,
2827                           target_msg,
2828                           send ? 1 : 0)) {
2829         return -TARGET_EFAULT;
2830     }
2831     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2832     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2833     return ret;
2834 }
2835 
2836 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2837  * so it might not have this *mmsg-specific flag either.
2838  */
2839 #ifndef MSG_WAITFORONE
2840 #define MSG_WAITFORONE 0x10000
2841 #endif
2842 
2843 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2844                                 unsigned int vlen, unsigned int flags,
2845                                 int send)
2846 {
2847     struct target_mmsghdr *mmsgp;
2848     abi_long ret = 0;
2849     int i;
2850 
2851     if (vlen > UIO_MAXIOV) {
2852         vlen = UIO_MAXIOV;
2853     }
2854 
2855     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2856     if (!mmsgp) {
2857         return -TARGET_EFAULT;
2858     }
2859 
2860     for (i = 0; i < vlen; i++) {
2861         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2862         if (is_error(ret)) {
2863             break;
2864         }
2865         mmsgp[i].msg_len = tswap32(ret);
2866         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2867         if (flags & MSG_WAITFORONE) {
2868             flags |= MSG_DONTWAIT;
2869         }
2870     }
2871 
2872     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2873 
2874     /* Return number of datagrams sent if we sent any at all;
2875      * otherwise return the error.
2876      */
2877     if (i) {
2878         return i;
2879     }
2880     return ret;
2881 }
2882 
2883 /* do_accept4() Must return target values and target errnos. */
2884 static abi_long do_accept4(int fd, abi_ulong target_addr,
2885                            abi_ulong target_addrlen_addr, int flags)
2886 {
2887     socklen_t addrlen, ret_addrlen;
2888     void *addr;
2889     abi_long ret;
2890     int host_flags;
2891 
2892     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2893 
2894     if (target_addr == 0) {
2895         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2896     }
2897 
2898     /* linux returns EINVAL if addrlen pointer is invalid */
2899     if (get_user_u32(addrlen, target_addrlen_addr))
2900         return -TARGET_EINVAL;
2901 
2902     if ((int)addrlen < 0) {
2903         return -TARGET_EINVAL;
2904     }
2905 
2906     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2907         return -TARGET_EINVAL;
2908 
2909     addr = alloca(addrlen);
2910 
2911     ret_addrlen = addrlen;
2912     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2913     if (!is_error(ret)) {
2914         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2915         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2916             ret = -TARGET_EFAULT;
2917         }
2918     }
2919     return ret;
2920 }
2921 
2922 /* do_getpeername() Must return target values and target errnos. */
2923 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2924                                abi_ulong target_addrlen_addr)
2925 {
2926     socklen_t addrlen, ret_addrlen;
2927     void *addr;
2928     abi_long ret;
2929 
2930     if (get_user_u32(addrlen, target_addrlen_addr))
2931         return -TARGET_EFAULT;
2932 
2933     if ((int)addrlen < 0) {
2934         return -TARGET_EINVAL;
2935     }
2936 
2937     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2938         return -TARGET_EFAULT;
2939 
2940     addr = alloca(addrlen);
2941 
2942     ret_addrlen = addrlen;
2943     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2944     if (!is_error(ret)) {
2945         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2946         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2947             ret = -TARGET_EFAULT;
2948         }
2949     }
2950     return ret;
2951 }
2952 
2953 /* do_getsockname() Must return target values and target errnos. */
2954 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2955                                abi_ulong target_addrlen_addr)
2956 {
2957     socklen_t addrlen, ret_addrlen;
2958     void *addr;
2959     abi_long ret;
2960 
2961     if (get_user_u32(addrlen, target_addrlen_addr))
2962         return -TARGET_EFAULT;
2963 
2964     if ((int)addrlen < 0) {
2965         return -TARGET_EINVAL;
2966     }
2967 
2968     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2969         return -TARGET_EFAULT;
2970 
2971     addr = alloca(addrlen);
2972 
2973     ret_addrlen = addrlen;
2974     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2975     if (!is_error(ret)) {
2976         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2977         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2978             ret = -TARGET_EFAULT;
2979         }
2980     }
2981     return ret;
2982 }
2983 
2984 /* do_socketpair() Must return target values and target errnos. */
2985 static abi_long do_socketpair(int domain, int type, int protocol,
2986                               abi_ulong target_tab_addr)
2987 {
2988     int tab[2];
2989     abi_long ret;
2990 
2991     target_to_host_sock_type(&type);
2992 
2993     ret = get_errno(socketpair(domain, type, protocol, tab));
2994     if (!is_error(ret)) {
2995         if (put_user_s32(tab[0], target_tab_addr)
2996             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2997             ret = -TARGET_EFAULT;
2998     }
2999     return ret;
3000 }
3001 
3002 /* do_sendto() Must return target values and target errnos. */
3003 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3004                           abi_ulong target_addr, socklen_t addrlen)
3005 {
3006     void *addr;
3007     void *host_msg;
3008     void *copy_msg = NULL;
3009     abi_long ret;
3010 
3011     if ((int)addrlen < 0) {
3012         return -TARGET_EINVAL;
3013     }
3014 
3015     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3016     if (!host_msg)
3017         return -TARGET_EFAULT;
3018     if (fd_trans_target_to_host_data(fd)) {
3019         copy_msg = host_msg;
3020         host_msg = g_malloc(len);
3021         memcpy(host_msg, copy_msg, len);
3022         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3023         if (ret < 0) {
3024             goto fail;
3025         }
3026     }
3027     if (target_addr) {
3028         addr = alloca(addrlen+1);
3029         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3030         if (ret) {
3031             goto fail;
3032         }
3033         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3034     } else {
3035         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3036     }
3037 fail:
3038     if (copy_msg) {
3039         g_free(host_msg);
3040         host_msg = copy_msg;
3041     }
3042     unlock_user(host_msg, msg, 0);
3043     return ret;
3044 }
3045 
3046 /* do_recvfrom() Must return target values and target errnos. */
3047 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3048                             abi_ulong target_addr,
3049                             abi_ulong target_addrlen)
3050 {
3051     socklen_t addrlen, ret_addrlen;
3052     void *addr;
3053     void *host_msg;
3054     abi_long ret;
3055 
3056     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3057     if (!host_msg)
3058         return -TARGET_EFAULT;
3059     if (target_addr) {
3060         if (get_user_u32(addrlen, target_addrlen)) {
3061             ret = -TARGET_EFAULT;
3062             goto fail;
3063         }
3064         if ((int)addrlen < 0) {
3065             ret = -TARGET_EINVAL;
3066             goto fail;
3067         }
3068         addr = alloca(addrlen);
3069         ret_addrlen = addrlen;
3070         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3071                                       addr, &ret_addrlen));
3072     } else {
3073         addr = NULL; /* To keep compiler quiet.  */
3074         addrlen = 0; /* To keep compiler quiet.  */
3075         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3076     }
3077     if (!is_error(ret)) {
3078         if (fd_trans_host_to_target_data(fd)) {
3079             abi_long trans;
3080             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3081             if (is_error(trans)) {
3082                 ret = trans;
3083                 goto fail;
3084             }
3085         }
3086         if (target_addr) {
3087             host_to_target_sockaddr(target_addr, addr,
3088                                     MIN(addrlen, ret_addrlen));
3089             if (put_user_u32(ret_addrlen, target_addrlen)) {
3090                 ret = -TARGET_EFAULT;
3091                 goto fail;
3092             }
3093         }
3094         unlock_user(host_msg, msg, len);
3095     } else {
3096 fail:
3097         unlock_user(host_msg, msg, 0);
3098     }
3099     return ret;
3100 }
3101 
3102 #ifdef TARGET_NR_socketcall
3103 /* do_socketcall() must return target values and target errnos. */
3104 static abi_long do_socketcall(int num, abi_ulong vptr)
3105 {
3106     static const unsigned nargs[] = { /* number of arguments per operation */
3107         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3108         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3109         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3110         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3111         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3112         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3113         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3114         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3115         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3116         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3117         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3118         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3119         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3120         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3121         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3122         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3123         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3124         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3125         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3126         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3127     };
3128     abi_long a[6]; /* max 6 args */
3129     unsigned i;
3130 
3131     /* check the range of the first argument num */
3132     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3133     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3134         return -TARGET_EINVAL;
3135     }
3136     /* ensure we have space for args */
3137     if (nargs[num] > ARRAY_SIZE(a)) {
3138         return -TARGET_EINVAL;
3139     }
3140     /* collect the arguments in a[] according to nargs[] */
3141     for (i = 0; i < nargs[num]; ++i) {
3142         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3143             return -TARGET_EFAULT;
3144         }
3145     }
3146     /* now when we have the args, invoke the appropriate underlying function */
3147     switch (num) {
3148     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3149         return do_socket(a[0], a[1], a[2]);
3150     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3151         return do_bind(a[0], a[1], a[2]);
3152     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3153         return do_connect(a[0], a[1], a[2]);
3154     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3155         return get_errno(listen(a[0], a[1]));
3156     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3157         return do_accept4(a[0], a[1], a[2], 0);
3158     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3159         return do_getsockname(a[0], a[1], a[2]);
3160     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3161         return do_getpeername(a[0], a[1], a[2]);
3162     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3163         return do_socketpair(a[0], a[1], a[2], a[3]);
3164     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3165         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3166     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3167         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3168     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3169         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3170     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3171         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3172     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3173         return get_errno(shutdown(a[0], a[1]));
3174     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3175         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3176     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3177         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3178     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3179         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3180     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3181         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3182     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3183         return do_accept4(a[0], a[1], a[2], a[3]);
3184     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3185         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3186     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3187         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3188     default:
3189         gemu_log("Unsupported socketcall: %d\n", num);
3190         return -TARGET_EINVAL;
3191     }
3192 }
3193 #endif
3194 
3195 #define N_SHM_REGIONS	32
3196 
3197 static struct shm_region {
3198     abi_ulong start;
3199     abi_ulong size;
3200     bool in_use;
3201 } shm_regions[N_SHM_REGIONS];
3202 
3203 #ifndef TARGET_SEMID64_DS
3204 /* asm-generic version of this struct */
3205 struct target_semid64_ds
3206 {
3207   struct target_ipc_perm sem_perm;
3208   abi_ulong sem_otime;
3209 #if TARGET_ABI_BITS == 32
3210   abi_ulong __unused1;
3211 #endif
3212   abi_ulong sem_ctime;
3213 #if TARGET_ABI_BITS == 32
3214   abi_ulong __unused2;
3215 #endif
3216   abi_ulong sem_nsems;
3217   abi_ulong __unused3;
3218   abi_ulong __unused4;
3219 };
3220 #endif
3221 
3222 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3223                                                abi_ulong target_addr)
3224 {
3225     struct target_ipc_perm *target_ip;
3226     struct target_semid64_ds *target_sd;
3227 
3228     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3229         return -TARGET_EFAULT;
3230     target_ip = &(target_sd->sem_perm);
3231     host_ip->__key = tswap32(target_ip->__key);
3232     host_ip->uid = tswap32(target_ip->uid);
3233     host_ip->gid = tswap32(target_ip->gid);
3234     host_ip->cuid = tswap32(target_ip->cuid);
3235     host_ip->cgid = tswap32(target_ip->cgid);
3236 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3237     host_ip->mode = tswap32(target_ip->mode);
3238 #else
3239     host_ip->mode = tswap16(target_ip->mode);
3240 #endif
3241 #if defined(TARGET_PPC)
3242     host_ip->__seq = tswap32(target_ip->__seq);
3243 #else
3244     host_ip->__seq = tswap16(target_ip->__seq);
3245 #endif
3246     unlock_user_struct(target_sd, target_addr, 0);
3247     return 0;
3248 }
3249 
3250 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3251                                                struct ipc_perm *host_ip)
3252 {
3253     struct target_ipc_perm *target_ip;
3254     struct target_semid64_ds *target_sd;
3255 
3256     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3257         return -TARGET_EFAULT;
3258     target_ip = &(target_sd->sem_perm);
3259     target_ip->__key = tswap32(host_ip->__key);
3260     target_ip->uid = tswap32(host_ip->uid);
3261     target_ip->gid = tswap32(host_ip->gid);
3262     target_ip->cuid = tswap32(host_ip->cuid);
3263     target_ip->cgid = tswap32(host_ip->cgid);
3264 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3265     target_ip->mode = tswap32(host_ip->mode);
3266 #else
3267     target_ip->mode = tswap16(host_ip->mode);
3268 #endif
3269 #if defined(TARGET_PPC)
3270     target_ip->__seq = tswap32(host_ip->__seq);
3271 #else
3272     target_ip->__seq = tswap16(host_ip->__seq);
3273 #endif
3274     unlock_user_struct(target_sd, target_addr, 1);
3275     return 0;
3276 }
3277 
3278 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3279                                                abi_ulong target_addr)
3280 {
3281     struct target_semid64_ds *target_sd;
3282 
3283     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3284         return -TARGET_EFAULT;
3285     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3286         return -TARGET_EFAULT;
3287     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3288     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3289     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3290     unlock_user_struct(target_sd, target_addr, 0);
3291     return 0;
3292 }
3293 
3294 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3295                                                struct semid_ds *host_sd)
3296 {
3297     struct target_semid64_ds *target_sd;
3298 
3299     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3300         return -TARGET_EFAULT;
3301     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3302         return -TARGET_EFAULT;
3303     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3304     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3305     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3306     unlock_user_struct(target_sd, target_addr, 1);
3307     return 0;
3308 }
3309 
3310 struct target_seminfo {
3311     int semmap;
3312     int semmni;
3313     int semmns;
3314     int semmnu;
3315     int semmsl;
3316     int semopm;
3317     int semume;
3318     int semusz;
3319     int semvmx;
3320     int semaem;
3321 };
3322 
3323 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3324                                               struct seminfo *host_seminfo)
3325 {
3326     struct target_seminfo *target_seminfo;
3327     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3328         return -TARGET_EFAULT;
3329     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3330     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3331     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3332     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3333     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3334     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3335     __put_user(host_seminfo->semume, &target_seminfo->semume);
3336     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3337     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3338     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3339     unlock_user_struct(target_seminfo, target_addr, 1);
3340     return 0;
3341 }
3342 
3343 union semun {
3344 	int val;
3345 	struct semid_ds *buf;
3346 	unsigned short *array;
3347 	struct seminfo *__buf;
3348 };
3349 
3350 union target_semun {
3351 	int val;
3352 	abi_ulong buf;
3353 	abi_ulong array;
3354 	abi_ulong __buf;
3355 };
3356 
3357 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3358                                                abi_ulong target_addr)
3359 {
3360     int nsems;
3361     unsigned short *array;
3362     union semun semun;
3363     struct semid_ds semid_ds;
3364     int i, ret;
3365 
3366     semun.buf = &semid_ds;
3367 
3368     ret = semctl(semid, 0, IPC_STAT, semun);
3369     if (ret == -1)
3370         return get_errno(ret);
3371 
3372     nsems = semid_ds.sem_nsems;
3373 
3374     *host_array = g_try_new(unsigned short, nsems);
3375     if (!*host_array) {
3376         return -TARGET_ENOMEM;
3377     }
3378     array = lock_user(VERIFY_READ, target_addr,
3379                       nsems*sizeof(unsigned short), 1);
3380     if (!array) {
3381         g_free(*host_array);
3382         return -TARGET_EFAULT;
3383     }
3384 
3385     for(i=0; i<nsems; i++) {
3386         __get_user((*host_array)[i], &array[i]);
3387     }
3388     unlock_user(array, target_addr, 0);
3389 
3390     return 0;
3391 }
3392 
3393 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3394                                                unsigned short **host_array)
3395 {
3396     int nsems;
3397     unsigned short *array;
3398     union semun semun;
3399     struct semid_ds semid_ds;
3400     int i, ret;
3401 
3402     semun.buf = &semid_ds;
3403 
3404     ret = semctl(semid, 0, IPC_STAT, semun);
3405     if (ret == -1)
3406         return get_errno(ret);
3407 
3408     nsems = semid_ds.sem_nsems;
3409 
3410     array = lock_user(VERIFY_WRITE, target_addr,
3411                       nsems*sizeof(unsigned short), 0);
3412     if (!array)
3413         return -TARGET_EFAULT;
3414 
3415     for(i=0; i<nsems; i++) {
3416         __put_user((*host_array)[i], &array[i]);
3417     }
3418     g_free(*host_array);
3419     unlock_user(array, target_addr, 1);
3420 
3421     return 0;
3422 }
3423 
3424 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3425                                  abi_ulong target_arg)
3426 {
3427     union target_semun target_su = { .buf = target_arg };
3428     union semun arg;
3429     struct semid_ds dsarg;
3430     unsigned short *array = NULL;
3431     struct seminfo seminfo;
3432     abi_long ret = -TARGET_EINVAL;
3433     abi_long err;
3434     cmd &= 0xff;
3435 
3436     switch( cmd ) {
3437 	case GETVAL:
3438 	case SETVAL:
3439             /* In 64 bit cross-endian situations, we will erroneously pick up
3440              * the wrong half of the union for the "val" element.  To rectify
3441              * this, the entire 8-byte structure is byteswapped, followed by
3442 	     * a swap of the 4 byte val field. In other cases, the data is
3443 	     * already in proper host byte order. */
3444 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3445 		target_su.buf = tswapal(target_su.buf);
3446 		arg.val = tswap32(target_su.val);
3447 	    } else {
3448 		arg.val = target_su.val;
3449 	    }
3450             ret = get_errno(semctl(semid, semnum, cmd, arg));
3451             break;
3452 	case GETALL:
3453 	case SETALL:
3454             err = target_to_host_semarray(semid, &array, target_su.array);
3455             if (err)
3456                 return err;
3457             arg.array = array;
3458             ret = get_errno(semctl(semid, semnum, cmd, arg));
3459             err = host_to_target_semarray(semid, target_su.array, &array);
3460             if (err)
3461                 return err;
3462             break;
3463 	case IPC_STAT:
3464 	case IPC_SET:
3465 	case SEM_STAT:
3466             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3467             if (err)
3468                 return err;
3469             arg.buf = &dsarg;
3470             ret = get_errno(semctl(semid, semnum, cmd, arg));
3471             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3472             if (err)
3473                 return err;
3474             break;
3475 	case IPC_INFO:
3476 	case SEM_INFO:
3477             arg.__buf = &seminfo;
3478             ret = get_errno(semctl(semid, semnum, cmd, arg));
3479             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3480             if (err)
3481                 return err;
3482             break;
3483 	case IPC_RMID:
3484 	case GETPID:
3485 	case GETNCNT:
3486 	case GETZCNT:
3487             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3488             break;
3489     }
3490 
3491     return ret;
3492 }
3493 
3494 struct target_sembuf {
3495     unsigned short sem_num;
3496     short sem_op;
3497     short sem_flg;
3498 };
3499 
3500 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3501                                              abi_ulong target_addr,
3502                                              unsigned nsops)
3503 {
3504     struct target_sembuf *target_sembuf;
3505     int i;
3506 
3507     target_sembuf = lock_user(VERIFY_READ, target_addr,
3508                               nsops*sizeof(struct target_sembuf), 1);
3509     if (!target_sembuf)
3510         return -TARGET_EFAULT;
3511 
3512     for(i=0; i<nsops; i++) {
3513         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3514         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3515         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3516     }
3517 
3518     unlock_user(target_sembuf, target_addr, 0);
3519 
3520     return 0;
3521 }
3522 
3523 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3524 {
3525     struct sembuf sops[nsops];
3526 
3527     if (target_to_host_sembuf(sops, ptr, nsops))
3528         return -TARGET_EFAULT;
3529 
3530     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3531 }
3532 
3533 struct target_msqid_ds
3534 {
3535     struct target_ipc_perm msg_perm;
3536     abi_ulong msg_stime;
3537 #if TARGET_ABI_BITS == 32
3538     abi_ulong __unused1;
3539 #endif
3540     abi_ulong msg_rtime;
3541 #if TARGET_ABI_BITS == 32
3542     abi_ulong __unused2;
3543 #endif
3544     abi_ulong msg_ctime;
3545 #if TARGET_ABI_BITS == 32
3546     abi_ulong __unused3;
3547 #endif
3548     abi_ulong __msg_cbytes;
3549     abi_ulong msg_qnum;
3550     abi_ulong msg_qbytes;
3551     abi_ulong msg_lspid;
3552     abi_ulong msg_lrpid;
3553     abi_ulong __unused4;
3554     abi_ulong __unused5;
3555 };
3556 
3557 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3558                                                abi_ulong target_addr)
3559 {
3560     struct target_msqid_ds *target_md;
3561 
3562     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3563         return -TARGET_EFAULT;
3564     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3565         return -TARGET_EFAULT;
3566     host_md->msg_stime = tswapal(target_md->msg_stime);
3567     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3568     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3569     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3570     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3571     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3572     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3573     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3574     unlock_user_struct(target_md, target_addr, 0);
3575     return 0;
3576 }
3577 
3578 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3579                                                struct msqid_ds *host_md)
3580 {
3581     struct target_msqid_ds *target_md;
3582 
3583     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3584         return -TARGET_EFAULT;
3585     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3586         return -TARGET_EFAULT;
3587     target_md->msg_stime = tswapal(host_md->msg_stime);
3588     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3589     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3590     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3591     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3592     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3593     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3594     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3595     unlock_user_struct(target_md, target_addr, 1);
3596     return 0;
3597 }
3598 
3599 struct target_msginfo {
3600     int msgpool;
3601     int msgmap;
3602     int msgmax;
3603     int msgmnb;
3604     int msgmni;
3605     int msgssz;
3606     int msgtql;
3607     unsigned short int msgseg;
3608 };
3609 
3610 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3611                                               struct msginfo *host_msginfo)
3612 {
3613     struct target_msginfo *target_msginfo;
3614     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3615         return -TARGET_EFAULT;
3616     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3617     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3618     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3619     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3620     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3621     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3622     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3623     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3624     unlock_user_struct(target_msginfo, target_addr, 1);
3625     return 0;
3626 }
3627 
3628 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3629 {
3630     struct msqid_ds dsarg;
3631     struct msginfo msginfo;
3632     abi_long ret = -TARGET_EINVAL;
3633 
3634     cmd &= 0xff;
3635 
3636     switch (cmd) {
3637     case IPC_STAT:
3638     case IPC_SET:
3639     case MSG_STAT:
3640         if (target_to_host_msqid_ds(&dsarg,ptr))
3641             return -TARGET_EFAULT;
3642         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3643         if (host_to_target_msqid_ds(ptr,&dsarg))
3644             return -TARGET_EFAULT;
3645         break;
3646     case IPC_RMID:
3647         ret = get_errno(msgctl(msgid, cmd, NULL));
3648         break;
3649     case IPC_INFO:
3650     case MSG_INFO:
3651         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3652         if (host_to_target_msginfo(ptr, &msginfo))
3653             return -TARGET_EFAULT;
3654         break;
3655     }
3656 
3657     return ret;
3658 }
3659 
3660 struct target_msgbuf {
3661     abi_long mtype;
3662     char	mtext[1];
3663 };
3664 
3665 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3666                                  ssize_t msgsz, int msgflg)
3667 {
3668     struct target_msgbuf *target_mb;
3669     struct msgbuf *host_mb;
3670     abi_long ret = 0;
3671 
3672     if (msgsz < 0) {
3673         return -TARGET_EINVAL;
3674     }
3675 
3676     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3677         return -TARGET_EFAULT;
3678     host_mb = g_try_malloc(msgsz + sizeof(long));
3679     if (!host_mb) {
3680         unlock_user_struct(target_mb, msgp, 0);
3681         return -TARGET_ENOMEM;
3682     }
3683     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3684     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3685     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3686     g_free(host_mb);
3687     unlock_user_struct(target_mb, msgp, 0);
3688 
3689     return ret;
3690 }
3691 
3692 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3693                                  ssize_t msgsz, abi_long msgtyp,
3694                                  int msgflg)
3695 {
3696     struct target_msgbuf *target_mb;
3697     char *target_mtext;
3698     struct msgbuf *host_mb;
3699     abi_long ret = 0;
3700 
3701     if (msgsz < 0) {
3702         return -TARGET_EINVAL;
3703     }
3704 
3705     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3706         return -TARGET_EFAULT;
3707 
3708     host_mb = g_try_malloc(msgsz + sizeof(long));
3709     if (!host_mb) {
3710         ret = -TARGET_ENOMEM;
3711         goto end;
3712     }
3713     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3714 
3715     if (ret > 0) {
3716         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3717         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3718         if (!target_mtext) {
3719             ret = -TARGET_EFAULT;
3720             goto end;
3721         }
3722         memcpy(target_mb->mtext, host_mb->mtext, ret);
3723         unlock_user(target_mtext, target_mtext_addr, ret);
3724     }
3725 
3726     target_mb->mtype = tswapal(host_mb->mtype);
3727 
3728 end:
3729     if (target_mb)
3730         unlock_user_struct(target_mb, msgp, 1);
3731     g_free(host_mb);
3732     return ret;
3733 }
3734 
3735 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3736                                                abi_ulong target_addr)
3737 {
3738     struct target_shmid_ds *target_sd;
3739 
3740     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3741         return -TARGET_EFAULT;
3742     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3743         return -TARGET_EFAULT;
3744     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3745     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3746     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3747     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3748     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3749     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3750     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3751     unlock_user_struct(target_sd, target_addr, 0);
3752     return 0;
3753 }
3754 
3755 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3756                                                struct shmid_ds *host_sd)
3757 {
3758     struct target_shmid_ds *target_sd;
3759 
3760     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3761         return -TARGET_EFAULT;
3762     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3763         return -TARGET_EFAULT;
3764     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3765     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3766     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3767     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3768     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3769     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3770     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3771     unlock_user_struct(target_sd, target_addr, 1);
3772     return 0;
3773 }
3774 
3775 struct  target_shminfo {
3776     abi_ulong shmmax;
3777     abi_ulong shmmin;
3778     abi_ulong shmmni;
3779     abi_ulong shmseg;
3780     abi_ulong shmall;
3781 };
3782 
3783 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3784                                               struct shminfo *host_shminfo)
3785 {
3786     struct target_shminfo *target_shminfo;
3787     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3788         return -TARGET_EFAULT;
3789     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3790     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3791     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3792     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3793     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3794     unlock_user_struct(target_shminfo, target_addr, 1);
3795     return 0;
3796 }
3797 
3798 struct target_shm_info {
3799     int used_ids;
3800     abi_ulong shm_tot;
3801     abi_ulong shm_rss;
3802     abi_ulong shm_swp;
3803     abi_ulong swap_attempts;
3804     abi_ulong swap_successes;
3805 };
3806 
3807 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3808                                                struct shm_info *host_shm_info)
3809 {
3810     struct target_shm_info *target_shm_info;
3811     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3812         return -TARGET_EFAULT;
3813     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3814     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3815     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3816     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3817     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3818     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3819     unlock_user_struct(target_shm_info, target_addr, 1);
3820     return 0;
3821 }
3822 
3823 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3824 {
3825     struct shmid_ds dsarg;
3826     struct shminfo shminfo;
3827     struct shm_info shm_info;
3828     abi_long ret = -TARGET_EINVAL;
3829 
3830     cmd &= 0xff;
3831 
3832     switch(cmd) {
3833     case IPC_STAT:
3834     case IPC_SET:
3835     case SHM_STAT:
3836         if (target_to_host_shmid_ds(&dsarg, buf))
3837             return -TARGET_EFAULT;
3838         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3839         if (host_to_target_shmid_ds(buf, &dsarg))
3840             return -TARGET_EFAULT;
3841         break;
3842     case IPC_INFO:
3843         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3844         if (host_to_target_shminfo(buf, &shminfo))
3845             return -TARGET_EFAULT;
3846         break;
3847     case SHM_INFO:
3848         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3849         if (host_to_target_shm_info(buf, &shm_info))
3850             return -TARGET_EFAULT;
3851         break;
3852     case IPC_RMID:
3853     case SHM_LOCK:
3854     case SHM_UNLOCK:
3855         ret = get_errno(shmctl(shmid, cmd, NULL));
3856         break;
3857     }
3858 
3859     return ret;
3860 }
3861 
3862 #ifndef TARGET_FORCE_SHMLBA
3863 /* For most architectures, SHMLBA is the same as the page size;
3864  * some architectures have larger values, in which case they should
3865  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3866  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3867  * and defining its own value for SHMLBA.
3868  *
3869  * The kernel also permits SHMLBA to be set by the architecture to a
3870  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3871  * this means that addresses are rounded to the large size if
3872  * SHM_RND is set but addresses not aligned to that size are not rejected
3873  * as long as they are at least page-aligned. Since the only architecture
3874  * which uses this is ia64 this code doesn't provide for that oddity.
3875  */
3876 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3877 {
3878     return TARGET_PAGE_SIZE;
3879 }
3880 #endif
3881 
3882 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3883                                  int shmid, abi_ulong shmaddr, int shmflg)
3884 {
3885     abi_long raddr;
3886     void *host_raddr;
3887     struct shmid_ds shm_info;
3888     int i,ret;
3889     abi_ulong shmlba;
3890 
3891     /* find out the length of the shared memory segment */
3892     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3893     if (is_error(ret)) {
3894         /* can't get length, bail out */
3895         return ret;
3896     }
3897 
3898     shmlba = target_shmlba(cpu_env);
3899 
3900     if (shmaddr & (shmlba - 1)) {
3901         if (shmflg & SHM_RND) {
3902             shmaddr &= ~(shmlba - 1);
3903         } else {
3904             return -TARGET_EINVAL;
3905         }
3906     }
3907     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3908         return -TARGET_EINVAL;
3909     }
3910 
3911     mmap_lock();
3912 
3913     if (shmaddr)
3914         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3915     else {
3916         abi_ulong mmap_start;
3917 
3918         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3919 
3920         if (mmap_start == -1) {
3921             errno = ENOMEM;
3922             host_raddr = (void *)-1;
3923         } else
3924             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3925     }
3926 
3927     if (host_raddr == (void *)-1) {
3928         mmap_unlock();
3929         return get_errno((long)host_raddr);
3930     }
3931     raddr=h2g((unsigned long)host_raddr);
3932 
3933     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3934                    PAGE_VALID | PAGE_READ |
3935                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3936 
3937     for (i = 0; i < N_SHM_REGIONS; i++) {
3938         if (!shm_regions[i].in_use) {
3939             shm_regions[i].in_use = true;
3940             shm_regions[i].start = raddr;
3941             shm_regions[i].size = shm_info.shm_segsz;
3942             break;
3943         }
3944     }
3945 
3946     mmap_unlock();
3947     return raddr;
3948 
3949 }
3950 
3951 static inline abi_long do_shmdt(abi_ulong shmaddr)
3952 {
3953     int i;
3954     abi_long rv;
3955 
3956     mmap_lock();
3957 
3958     for (i = 0; i < N_SHM_REGIONS; ++i) {
3959         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3960             shm_regions[i].in_use = false;
3961             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3962             break;
3963         }
3964     }
3965     rv = get_errno(shmdt(g2h(shmaddr)));
3966 
3967     mmap_unlock();
3968 
3969     return rv;
3970 }
3971 
3972 #ifdef TARGET_NR_ipc
3973 /* ??? This only works with linear mappings.  */
3974 /* do_ipc() must return target values and target errnos. */
3975 static abi_long do_ipc(CPUArchState *cpu_env,
3976                        unsigned int call, abi_long first,
3977                        abi_long second, abi_long third,
3978                        abi_long ptr, abi_long fifth)
3979 {
3980     int version;
3981     abi_long ret = 0;
3982 
3983     version = call >> 16;
3984     call &= 0xffff;
3985 
3986     switch (call) {
3987     case IPCOP_semop:
3988         ret = do_semop(first, ptr, second);
3989         break;
3990 
3991     case IPCOP_semget:
3992         ret = get_errno(semget(first, second, third));
3993         break;
3994 
3995     case IPCOP_semctl: {
3996         /* The semun argument to semctl is passed by value, so dereference the
3997          * ptr argument. */
3998         abi_ulong atptr;
3999         get_user_ual(atptr, ptr);
4000         ret = do_semctl(first, second, third, atptr);
4001         break;
4002     }
4003 
4004     case IPCOP_msgget:
4005         ret = get_errno(msgget(first, second));
4006         break;
4007 
4008     case IPCOP_msgsnd:
4009         ret = do_msgsnd(first, ptr, second, third);
4010         break;
4011 
4012     case IPCOP_msgctl:
4013         ret = do_msgctl(first, second, ptr);
4014         break;
4015 
4016     case IPCOP_msgrcv:
4017         switch (version) {
4018         case 0:
4019             {
4020                 struct target_ipc_kludge {
4021                     abi_long msgp;
4022                     abi_long msgtyp;
4023                 } *tmp;
4024 
4025                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4026                     ret = -TARGET_EFAULT;
4027                     break;
4028                 }
4029 
4030                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4031 
4032                 unlock_user_struct(tmp, ptr, 0);
4033                 break;
4034             }
4035         default:
4036             ret = do_msgrcv(first, ptr, second, fifth, third);
4037         }
4038         break;
4039 
4040     case IPCOP_shmat:
4041         switch (version) {
4042         default:
4043         {
4044             abi_ulong raddr;
4045             raddr = do_shmat(cpu_env, first, ptr, second);
4046             if (is_error(raddr))
4047                 return get_errno(raddr);
4048             if (put_user_ual(raddr, third))
4049                 return -TARGET_EFAULT;
4050             break;
4051         }
4052         case 1:
4053             ret = -TARGET_EINVAL;
4054             break;
4055         }
4056 	break;
4057     case IPCOP_shmdt:
4058         ret = do_shmdt(ptr);
4059 	break;
4060 
4061     case IPCOP_shmget:
4062 	/* IPC_* flag values are the same on all linux platforms */
4063 	ret = get_errno(shmget(first, second, third));
4064 	break;
4065 
4066 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4067     case IPCOP_shmctl:
4068         ret = do_shmctl(first, second, ptr);
4069         break;
4070     default:
4071 	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4072 	ret = -TARGET_ENOSYS;
4073 	break;
4074     }
4075     return ret;
4076 }
4077 #endif
4078 
4079 /* kernel structure types definitions */
4080 
4081 #define STRUCT(name, ...) STRUCT_ ## name,
4082 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4083 enum {
4084 #include "syscall_types.h"
4085 STRUCT_MAX
4086 };
4087 #undef STRUCT
4088 #undef STRUCT_SPECIAL
4089 
4090 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4091 #define STRUCT_SPECIAL(name)
4092 #include "syscall_types.h"
4093 #undef STRUCT
4094 #undef STRUCT_SPECIAL
4095 
4096 typedef struct IOCTLEntry IOCTLEntry;
4097 
4098 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4099                              int fd, int cmd, abi_long arg);
4100 
4101 struct IOCTLEntry {
4102     int target_cmd;
4103     unsigned int host_cmd;
4104     const char *name;
4105     int access;
4106     do_ioctl_fn *do_ioctl;
4107     const argtype arg_type[5];
4108 };
4109 
4110 #define IOC_R 0x0001
4111 #define IOC_W 0x0002
4112 #define IOC_RW (IOC_R | IOC_W)
4113 
4114 #define MAX_STRUCT_SIZE 4096
4115 
4116 #ifdef CONFIG_FIEMAP
4117 /* So fiemap access checks don't overflow on 32 bit systems.
4118  * This is very slightly smaller than the limit imposed by
4119  * the underlying kernel.
4120  */
4121 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4122                             / sizeof(struct fiemap_extent))
4123 
4124 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4125                                        int fd, int cmd, abi_long arg)
4126 {
4127     /* The parameter for this ioctl is a struct fiemap followed
4128      * by an array of struct fiemap_extent whose size is set
4129      * in fiemap->fm_extent_count. The array is filled in by the
4130      * ioctl.
4131      */
4132     int target_size_in, target_size_out;
4133     struct fiemap *fm;
4134     const argtype *arg_type = ie->arg_type;
4135     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4136     void *argptr, *p;
4137     abi_long ret;
4138     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4139     uint32_t outbufsz;
4140     int free_fm = 0;
4141 
4142     assert(arg_type[0] == TYPE_PTR);
4143     assert(ie->access == IOC_RW);
4144     arg_type++;
4145     target_size_in = thunk_type_size(arg_type, 0);
4146     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4147     if (!argptr) {
4148         return -TARGET_EFAULT;
4149     }
4150     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4151     unlock_user(argptr, arg, 0);
4152     fm = (struct fiemap *)buf_temp;
4153     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4154         return -TARGET_EINVAL;
4155     }
4156 
4157     outbufsz = sizeof (*fm) +
4158         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4159 
4160     if (outbufsz > MAX_STRUCT_SIZE) {
4161         /* We can't fit all the extents into the fixed size buffer.
4162          * Allocate one that is large enough and use it instead.
4163          */
4164         fm = g_try_malloc(outbufsz);
4165         if (!fm) {
4166             return -TARGET_ENOMEM;
4167         }
4168         memcpy(fm, buf_temp, sizeof(struct fiemap));
4169         free_fm = 1;
4170     }
4171     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4172     if (!is_error(ret)) {
4173         target_size_out = target_size_in;
4174         /* An extent_count of 0 means we were only counting the extents
4175          * so there are no structs to copy
4176          */
4177         if (fm->fm_extent_count != 0) {
4178             target_size_out += fm->fm_mapped_extents * extent_size;
4179         }
4180         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4181         if (!argptr) {
4182             ret = -TARGET_EFAULT;
4183         } else {
4184             /* Convert the struct fiemap */
4185             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4186             if (fm->fm_extent_count != 0) {
4187                 p = argptr + target_size_in;
4188                 /* ...and then all the struct fiemap_extents */
4189                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4190                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4191                                   THUNK_TARGET);
4192                     p += extent_size;
4193                 }
4194             }
4195             unlock_user(argptr, arg, target_size_out);
4196         }
4197     }
4198     if (free_fm) {
4199         g_free(fm);
4200     }
4201     return ret;
4202 }
4203 #endif
4204 
4205 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4206                                 int fd, int cmd, abi_long arg)
4207 {
4208     const argtype *arg_type = ie->arg_type;
4209     int target_size;
4210     void *argptr;
4211     int ret;
4212     struct ifconf *host_ifconf;
4213     uint32_t outbufsz;
4214     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4215     int target_ifreq_size;
4216     int nb_ifreq;
4217     int free_buf = 0;
4218     int i;
4219     int target_ifc_len;
4220     abi_long target_ifc_buf;
4221     int host_ifc_len;
4222     char *host_ifc_buf;
4223 
4224     assert(arg_type[0] == TYPE_PTR);
4225     assert(ie->access == IOC_RW);
4226 
4227     arg_type++;
4228     target_size = thunk_type_size(arg_type, 0);
4229 
4230     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4231     if (!argptr)
4232         return -TARGET_EFAULT;
4233     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4234     unlock_user(argptr, arg, 0);
4235 
4236     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4237     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4238     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4239 
4240     if (target_ifc_buf != 0) {
4241         target_ifc_len = host_ifconf->ifc_len;
4242         nb_ifreq = target_ifc_len / target_ifreq_size;
4243         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4244 
4245         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4246         if (outbufsz > MAX_STRUCT_SIZE) {
4247             /*
4248              * We can't fit all the extents into the fixed size buffer.
4249              * Allocate one that is large enough and use it instead.
4250              */
4251             host_ifconf = malloc(outbufsz);
4252             if (!host_ifconf) {
4253                 return -TARGET_ENOMEM;
4254             }
4255             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4256             free_buf = 1;
4257         }
4258         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4259 
4260         host_ifconf->ifc_len = host_ifc_len;
4261     } else {
4262       host_ifc_buf = NULL;
4263     }
4264     host_ifconf->ifc_buf = host_ifc_buf;
4265 
4266     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4267     if (!is_error(ret)) {
4268 	/* convert host ifc_len to target ifc_len */
4269 
4270         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4271         target_ifc_len = nb_ifreq * target_ifreq_size;
4272         host_ifconf->ifc_len = target_ifc_len;
4273 
4274 	/* restore target ifc_buf */
4275 
4276         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4277 
4278 	/* copy struct ifconf to target user */
4279 
4280         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4281         if (!argptr)
4282             return -TARGET_EFAULT;
4283         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4284         unlock_user(argptr, arg, target_size);
4285 
4286         if (target_ifc_buf != 0) {
4287             /* copy ifreq[] to target user */
4288             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4289             for (i = 0; i < nb_ifreq ; i++) {
4290                 thunk_convert(argptr + i * target_ifreq_size,
4291                               host_ifc_buf + i * sizeof(struct ifreq),
4292                               ifreq_arg_type, THUNK_TARGET);
4293             }
4294             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4295         }
4296     }
4297 
4298     if (free_buf) {
4299         free(host_ifconf);
4300     }
4301 
4302     return ret;
4303 }
4304 
4305 #if defined(CONFIG_USBFS)
4306 #if HOST_LONG_BITS > 64
4307 #error USBDEVFS thunks do not support >64 bit hosts yet.
4308 #endif
4309 struct live_urb {
4310     uint64_t target_urb_adr;
4311     uint64_t target_buf_adr;
4312     char *target_buf_ptr;
4313     struct usbdevfs_urb host_urb;
4314 };
4315 
4316 static GHashTable *usbdevfs_urb_hashtable(void)
4317 {
4318     static GHashTable *urb_hashtable;
4319 
4320     if (!urb_hashtable) {
4321         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4322     }
4323     return urb_hashtable;
4324 }
4325 
4326 static void urb_hashtable_insert(struct live_urb *urb)
4327 {
4328     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4329     g_hash_table_insert(urb_hashtable, urb, urb);
4330 }
4331 
4332 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4333 {
4334     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4335     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4336 }
4337 
4338 static void urb_hashtable_remove(struct live_urb *urb)
4339 {
4340     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4341     g_hash_table_remove(urb_hashtable, urb);
4342 }
4343 
4344 static abi_long
4345 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4346                           int fd, int cmd, abi_long arg)
4347 {
4348     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4349     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4350     struct live_urb *lurb;
4351     void *argptr;
4352     uint64_t hurb;
4353     int target_size;
4354     uintptr_t target_urb_adr;
4355     abi_long ret;
4356 
4357     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4358 
4359     memset(buf_temp, 0, sizeof(uint64_t));
4360     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4361     if (is_error(ret)) {
4362         return ret;
4363     }
4364 
4365     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4366     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4367     if (!lurb->target_urb_adr) {
4368         return -TARGET_EFAULT;
4369     }
4370     urb_hashtable_remove(lurb);
4371     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4372         lurb->host_urb.buffer_length);
4373     lurb->target_buf_ptr = NULL;
4374 
4375     /* restore the guest buffer pointer */
4376     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4377 
4378     /* update the guest urb struct */
4379     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4380     if (!argptr) {
4381         g_free(lurb);
4382         return -TARGET_EFAULT;
4383     }
4384     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4385     unlock_user(argptr, lurb->target_urb_adr, target_size);
4386 
4387     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4388     /* write back the urb handle */
4389     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4390     if (!argptr) {
4391         g_free(lurb);
4392         return -TARGET_EFAULT;
4393     }
4394 
4395     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4396     target_urb_adr = lurb->target_urb_adr;
4397     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4398     unlock_user(argptr, arg, target_size);
4399 
4400     g_free(lurb);
4401     return ret;
4402 }
4403 
4404 static abi_long
4405 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4406                              uint8_t *buf_temp __attribute__((unused)),
4407                              int fd, int cmd, abi_long arg)
4408 {
4409     struct live_urb *lurb;
4410 
4411     /* map target address back to host URB with metadata. */
4412     lurb = urb_hashtable_lookup(arg);
4413     if (!lurb) {
4414         return -TARGET_EFAULT;
4415     }
4416     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4417 }
4418 
4419 static abi_long
4420 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4421                             int fd, int cmd, abi_long arg)
4422 {
4423     const argtype *arg_type = ie->arg_type;
4424     int target_size;
4425     abi_long ret;
4426     void *argptr;
4427     int rw_dir;
4428     struct live_urb *lurb;
4429 
4430     /*
4431      * each submitted URB needs to map to a unique ID for the
4432      * kernel, and that unique ID needs to be a pointer to
4433      * host memory.  hence, we need to malloc for each URB.
4434      * isochronous transfers have a variable length struct.
4435      */
4436     arg_type++;
4437     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4438 
4439     /* construct host copy of urb and metadata */
4440     lurb = g_try_malloc0(sizeof(struct live_urb));
4441     if (!lurb) {
4442         return -TARGET_ENOMEM;
4443     }
4444 
4445     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4446     if (!argptr) {
4447         g_free(lurb);
4448         return -TARGET_EFAULT;
4449     }
4450     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4451     unlock_user(argptr, arg, 0);
4452 
4453     lurb->target_urb_adr = arg;
4454     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4455 
4456     /* buffer space used depends on endpoint type so lock the entire buffer */
4457     /* control type urbs should check the buffer contents for true direction */
4458     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4459     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4460         lurb->host_urb.buffer_length, 1);
4461     if (lurb->target_buf_ptr == NULL) {
4462         g_free(lurb);
4463         return -TARGET_EFAULT;
4464     }
4465 
4466     /* update buffer pointer in host copy */
4467     lurb->host_urb.buffer = lurb->target_buf_ptr;
4468 
4469     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4470     if (is_error(ret)) {
4471         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4472         g_free(lurb);
4473     } else {
4474         urb_hashtable_insert(lurb);
4475     }
4476 
4477     return ret;
4478 }
4479 #endif /* CONFIG_USBFS */
4480 
4481 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4482                             int cmd, abi_long arg)
4483 {
4484     void *argptr;
4485     struct dm_ioctl *host_dm;
4486     abi_long guest_data;
4487     uint32_t guest_data_size;
4488     int target_size;
4489     const argtype *arg_type = ie->arg_type;
4490     abi_long ret;
4491     void *big_buf = NULL;
4492     char *host_data;
4493 
4494     arg_type++;
4495     target_size = thunk_type_size(arg_type, 0);
4496     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4497     if (!argptr) {
4498         ret = -TARGET_EFAULT;
4499         goto out;
4500     }
4501     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4502     unlock_user(argptr, arg, 0);
4503 
4504     /* buf_temp is too small, so fetch things into a bigger buffer */
4505     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4506     memcpy(big_buf, buf_temp, target_size);
4507     buf_temp = big_buf;
4508     host_dm = big_buf;
4509 
4510     guest_data = arg + host_dm->data_start;
4511     if ((guest_data - arg) < 0) {
4512         ret = -TARGET_EINVAL;
4513         goto out;
4514     }
4515     guest_data_size = host_dm->data_size - host_dm->data_start;
4516     host_data = (char*)host_dm + host_dm->data_start;
4517 
4518     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4519     if (!argptr) {
4520         ret = -TARGET_EFAULT;
4521         goto out;
4522     }
4523 
4524     switch (ie->host_cmd) {
4525     case DM_REMOVE_ALL:
4526     case DM_LIST_DEVICES:
4527     case DM_DEV_CREATE:
4528     case DM_DEV_REMOVE:
4529     case DM_DEV_SUSPEND:
4530     case DM_DEV_STATUS:
4531     case DM_DEV_WAIT:
4532     case DM_TABLE_STATUS:
4533     case DM_TABLE_CLEAR:
4534     case DM_TABLE_DEPS:
4535     case DM_LIST_VERSIONS:
4536         /* no input data */
4537         break;
4538     case DM_DEV_RENAME:
4539     case DM_DEV_SET_GEOMETRY:
4540         /* data contains only strings */
4541         memcpy(host_data, argptr, guest_data_size);
4542         break;
4543     case DM_TARGET_MSG:
4544         memcpy(host_data, argptr, guest_data_size);
4545         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4546         break;
4547     case DM_TABLE_LOAD:
4548     {
4549         void *gspec = argptr;
4550         void *cur_data = host_data;
4551         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4552         int spec_size = thunk_type_size(arg_type, 0);
4553         int i;
4554 
4555         for (i = 0; i < host_dm->target_count; i++) {
4556             struct dm_target_spec *spec = cur_data;
4557             uint32_t next;
4558             int slen;
4559 
4560             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4561             slen = strlen((char*)gspec + spec_size) + 1;
4562             next = spec->next;
4563             spec->next = sizeof(*spec) + slen;
4564             strcpy((char*)&spec[1], gspec + spec_size);
4565             gspec += next;
4566             cur_data += spec->next;
4567         }
4568         break;
4569     }
4570     default:
4571         ret = -TARGET_EINVAL;
4572         unlock_user(argptr, guest_data, 0);
4573         goto out;
4574     }
4575     unlock_user(argptr, guest_data, 0);
4576 
4577     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4578     if (!is_error(ret)) {
4579         guest_data = arg + host_dm->data_start;
4580         guest_data_size = host_dm->data_size - host_dm->data_start;
4581         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4582         switch (ie->host_cmd) {
4583         case DM_REMOVE_ALL:
4584         case DM_DEV_CREATE:
4585         case DM_DEV_REMOVE:
4586         case DM_DEV_RENAME:
4587         case DM_DEV_SUSPEND:
4588         case DM_DEV_STATUS:
4589         case DM_TABLE_LOAD:
4590         case DM_TABLE_CLEAR:
4591         case DM_TARGET_MSG:
4592         case DM_DEV_SET_GEOMETRY:
4593             /* no return data */
4594             break;
4595         case DM_LIST_DEVICES:
4596         {
4597             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4598             uint32_t remaining_data = guest_data_size;
4599             void *cur_data = argptr;
4600             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4601             int nl_size = 12; /* can't use thunk_size due to alignment */
4602 
4603             while (1) {
4604                 uint32_t next = nl->next;
4605                 if (next) {
4606                     nl->next = nl_size + (strlen(nl->name) + 1);
4607                 }
4608                 if (remaining_data < nl->next) {
4609                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4610                     break;
4611                 }
4612                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4613                 strcpy(cur_data + nl_size, nl->name);
4614                 cur_data += nl->next;
4615                 remaining_data -= nl->next;
4616                 if (!next) {
4617                     break;
4618                 }
4619                 nl = (void*)nl + next;
4620             }
4621             break;
4622         }
4623         case DM_DEV_WAIT:
4624         case DM_TABLE_STATUS:
4625         {
4626             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4627             void *cur_data = argptr;
4628             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4629             int spec_size = thunk_type_size(arg_type, 0);
4630             int i;
4631 
4632             for (i = 0; i < host_dm->target_count; i++) {
4633                 uint32_t next = spec->next;
4634                 int slen = strlen((char*)&spec[1]) + 1;
4635                 spec->next = (cur_data - argptr) + spec_size + slen;
4636                 if (guest_data_size < spec->next) {
4637                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4638                     break;
4639                 }
4640                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4641                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4642                 cur_data = argptr + spec->next;
4643                 spec = (void*)host_dm + host_dm->data_start + next;
4644             }
4645             break;
4646         }
4647         case DM_TABLE_DEPS:
4648         {
4649             void *hdata = (void*)host_dm + host_dm->data_start;
4650             int count = *(uint32_t*)hdata;
4651             uint64_t *hdev = hdata + 8;
4652             uint64_t *gdev = argptr + 8;
4653             int i;
4654 
4655             *(uint32_t*)argptr = tswap32(count);
4656             for (i = 0; i < count; i++) {
4657                 *gdev = tswap64(*hdev);
4658                 gdev++;
4659                 hdev++;
4660             }
4661             break;
4662         }
4663         case DM_LIST_VERSIONS:
4664         {
4665             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4666             uint32_t remaining_data = guest_data_size;
4667             void *cur_data = argptr;
4668             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4669             int vers_size = thunk_type_size(arg_type, 0);
4670 
4671             while (1) {
4672                 uint32_t next = vers->next;
4673                 if (next) {
4674                     vers->next = vers_size + (strlen(vers->name) + 1);
4675                 }
4676                 if (remaining_data < vers->next) {
4677                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4678                     break;
4679                 }
4680                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4681                 strcpy(cur_data + vers_size, vers->name);
4682                 cur_data += vers->next;
4683                 remaining_data -= vers->next;
4684                 if (!next) {
4685                     break;
4686                 }
4687                 vers = (void*)vers + next;
4688             }
4689             break;
4690         }
4691         default:
4692             unlock_user(argptr, guest_data, 0);
4693             ret = -TARGET_EINVAL;
4694             goto out;
4695         }
4696         unlock_user(argptr, guest_data, guest_data_size);
4697 
4698         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4699         if (!argptr) {
4700             ret = -TARGET_EFAULT;
4701             goto out;
4702         }
4703         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4704         unlock_user(argptr, arg, target_size);
4705     }
4706 out:
4707     g_free(big_buf);
4708     return ret;
4709 }
4710 
4711 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4712                                int cmd, abi_long arg)
4713 {
4714     void *argptr;
4715     int target_size;
4716     const argtype *arg_type = ie->arg_type;
4717     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4718     abi_long ret;
4719 
4720     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4721     struct blkpg_partition host_part;
4722 
4723     /* Read and convert blkpg */
4724     arg_type++;
4725     target_size = thunk_type_size(arg_type, 0);
4726     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4727     if (!argptr) {
4728         ret = -TARGET_EFAULT;
4729         goto out;
4730     }
4731     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4732     unlock_user(argptr, arg, 0);
4733 
4734     switch (host_blkpg->op) {
4735     case BLKPG_ADD_PARTITION:
4736     case BLKPG_DEL_PARTITION:
4737         /* payload is struct blkpg_partition */
4738         break;
4739     default:
4740         /* Unknown opcode */
4741         ret = -TARGET_EINVAL;
4742         goto out;
4743     }
4744 
4745     /* Read and convert blkpg->data */
4746     arg = (abi_long)(uintptr_t)host_blkpg->data;
4747     target_size = thunk_type_size(part_arg_type, 0);
4748     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4749     if (!argptr) {
4750         ret = -TARGET_EFAULT;
4751         goto out;
4752     }
4753     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4754     unlock_user(argptr, arg, 0);
4755 
4756     /* Swizzle the data pointer to our local copy and call! */
4757     host_blkpg->data = &host_part;
4758     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4759 
4760 out:
4761     return ret;
4762 }
4763 
4764 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4765                                 int fd, int cmd, abi_long arg)
4766 {
4767     const argtype *arg_type = ie->arg_type;
4768     const StructEntry *se;
4769     const argtype *field_types;
4770     const int *dst_offsets, *src_offsets;
4771     int target_size;
4772     void *argptr;
4773     abi_ulong *target_rt_dev_ptr = NULL;
4774     unsigned long *host_rt_dev_ptr = NULL;
4775     abi_long ret;
4776     int i;
4777 
4778     assert(ie->access == IOC_W);
4779     assert(*arg_type == TYPE_PTR);
4780     arg_type++;
4781     assert(*arg_type == TYPE_STRUCT);
4782     target_size = thunk_type_size(arg_type, 0);
4783     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4784     if (!argptr) {
4785         return -TARGET_EFAULT;
4786     }
4787     arg_type++;
4788     assert(*arg_type == (int)STRUCT_rtentry);
4789     se = struct_entries + *arg_type++;
4790     assert(se->convert[0] == NULL);
4791     /* convert struct here to be able to catch rt_dev string */
4792     field_types = se->field_types;
4793     dst_offsets = se->field_offsets[THUNK_HOST];
4794     src_offsets = se->field_offsets[THUNK_TARGET];
4795     for (i = 0; i < se->nb_fields; i++) {
4796         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4797             assert(*field_types == TYPE_PTRVOID);
4798             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4799             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4800             if (*target_rt_dev_ptr != 0) {
4801                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4802                                                   tswapal(*target_rt_dev_ptr));
4803                 if (!*host_rt_dev_ptr) {
4804                     unlock_user(argptr, arg, 0);
4805                     return -TARGET_EFAULT;
4806                 }
4807             } else {
4808                 *host_rt_dev_ptr = 0;
4809             }
4810             field_types++;
4811             continue;
4812         }
4813         field_types = thunk_convert(buf_temp + dst_offsets[i],
4814                                     argptr + src_offsets[i],
4815                                     field_types, THUNK_HOST);
4816     }
4817     unlock_user(argptr, arg, 0);
4818 
4819     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4820 
4821     assert(host_rt_dev_ptr != NULL);
4822     assert(target_rt_dev_ptr != NULL);
4823     if (*host_rt_dev_ptr != 0) {
4824         unlock_user((void *)*host_rt_dev_ptr,
4825                     *target_rt_dev_ptr, 0);
4826     }
4827     return ret;
4828 }
4829 
4830 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4831                                      int fd, int cmd, abi_long arg)
4832 {
4833     int sig = target_to_host_signal(arg);
4834     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4835 }
4836 
4837 #ifdef TIOCGPTPEER
4838 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4839                                      int fd, int cmd, abi_long arg)
4840 {
4841     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4842     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4843 }
4844 #endif
4845 
4846 static IOCTLEntry ioctl_entries[] = {
4847 #define IOCTL(cmd, access, ...) \
4848     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4849 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4850     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4851 #define IOCTL_IGNORE(cmd) \
4852     { TARGET_ ## cmd, 0, #cmd },
4853 #include "ioctls.h"
4854     { 0, 0, },
4855 };
4856 
4857 /* ??? Implement proper locking for ioctls.  */
4858 /* do_ioctl() Must return target values and target errnos. */
4859 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4860 {
4861     const IOCTLEntry *ie;
4862     const argtype *arg_type;
4863     abi_long ret;
4864     uint8_t buf_temp[MAX_STRUCT_SIZE];
4865     int target_size;
4866     void *argptr;
4867 
4868     ie = ioctl_entries;
4869     for(;;) {
4870         if (ie->target_cmd == 0) {
4871             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4872             return -TARGET_ENOSYS;
4873         }
4874         if (ie->target_cmd == cmd)
4875             break;
4876         ie++;
4877     }
4878     arg_type = ie->arg_type;
4879     if (ie->do_ioctl) {
4880         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4881     } else if (!ie->host_cmd) {
4882         /* Some architectures define BSD ioctls in their headers
4883            that are not implemented in Linux.  */
4884         return -TARGET_ENOSYS;
4885     }
4886 
4887     switch(arg_type[0]) {
4888     case TYPE_NULL:
4889         /* no argument */
4890         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4891         break;
4892     case TYPE_PTRVOID:
4893     case TYPE_INT:
4894         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4895         break;
4896     case TYPE_PTR:
4897         arg_type++;
4898         target_size = thunk_type_size(arg_type, 0);
4899         switch(ie->access) {
4900         case IOC_R:
4901             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4902             if (!is_error(ret)) {
4903                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4904                 if (!argptr)
4905                     return -TARGET_EFAULT;
4906                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4907                 unlock_user(argptr, arg, target_size);
4908             }
4909             break;
4910         case IOC_W:
4911             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4912             if (!argptr)
4913                 return -TARGET_EFAULT;
4914             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4915             unlock_user(argptr, arg, 0);
4916             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4917             break;
4918         default:
4919         case IOC_RW:
4920             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4921             if (!argptr)
4922                 return -TARGET_EFAULT;
4923             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4924             unlock_user(argptr, arg, 0);
4925             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4926             if (!is_error(ret)) {
4927                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4928                 if (!argptr)
4929                     return -TARGET_EFAULT;
4930                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4931                 unlock_user(argptr, arg, target_size);
4932             }
4933             break;
4934         }
4935         break;
4936     default:
4937         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4938                  (long)cmd, arg_type[0]);
4939         ret = -TARGET_ENOSYS;
4940         break;
4941     }
4942     return ret;
4943 }
4944 
4945 static const bitmask_transtbl iflag_tbl[] = {
4946         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4947         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4948         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4949         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4950         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4951         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4952         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4953         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4954         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4955         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4956         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4957         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4958         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4959         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4960         { 0, 0, 0, 0 }
4961 };
4962 
4963 static const bitmask_transtbl oflag_tbl[] = {
4964 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4965 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4966 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4967 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4968 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4969 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4970 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4971 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4972 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4973 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4974 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4975 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4976 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4977 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4978 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4979 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4980 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4981 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4982 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4983 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4984 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4985 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4986 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4987 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4988 	{ 0, 0, 0, 0 }
4989 };
4990 
4991 static const bitmask_transtbl cflag_tbl[] = {
4992 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4993 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4994 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4995 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4996 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4997 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4998 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4999 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5000 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5001 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5002 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5003 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5004 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5005 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5006 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5007 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5008 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5009 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5010 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5011 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5012 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5013 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5014 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5015 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5016 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5017 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5018 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5019 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5020 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5021 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5022 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5023 	{ 0, 0, 0, 0 }
5024 };
5025 
5026 static const bitmask_transtbl lflag_tbl[] = {
5027 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5028 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5029 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5030 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5031 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5032 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5033 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5034 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5035 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5036 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5037 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5038 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5039 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5040 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5041 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5042 	{ 0, 0, 0, 0 }
5043 };
5044 
5045 static void target_to_host_termios (void *dst, const void *src)
5046 {
5047     struct host_termios *host = dst;
5048     const struct target_termios *target = src;
5049 
5050     host->c_iflag =
5051         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5052     host->c_oflag =
5053         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5054     host->c_cflag =
5055         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5056     host->c_lflag =
5057         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5058     host->c_line = target->c_line;
5059 
5060     memset(host->c_cc, 0, sizeof(host->c_cc));
5061     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5062     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5063     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5064     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5065     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5066     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5067     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5068     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5069     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5070     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5071     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5072     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5073     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5074     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5075     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5076     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5077     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5078 }
5079 
5080 static void host_to_target_termios (void *dst, const void *src)
5081 {
5082     struct target_termios *target = dst;
5083     const struct host_termios *host = src;
5084 
5085     target->c_iflag =
5086         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5087     target->c_oflag =
5088         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5089     target->c_cflag =
5090         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5091     target->c_lflag =
5092         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5093     target->c_line = host->c_line;
5094 
5095     memset(target->c_cc, 0, sizeof(target->c_cc));
5096     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5097     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5098     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5099     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5100     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5101     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5102     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5103     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5104     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5105     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5106     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5107     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5108     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5109     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5110     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5111     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5112     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5113 }
5114 
5115 static const StructEntry struct_termios_def = {
5116     .convert = { host_to_target_termios, target_to_host_termios },
5117     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5118     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5119 };
5120 
5121 static bitmask_transtbl mmap_flags_tbl[] = {
5122     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5123     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5124     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5125     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5126       MAP_ANONYMOUS, MAP_ANONYMOUS },
5127     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5128       MAP_GROWSDOWN, MAP_GROWSDOWN },
5129     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5130       MAP_DENYWRITE, MAP_DENYWRITE },
5131     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5132       MAP_EXECUTABLE, MAP_EXECUTABLE },
5133     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5134     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5135       MAP_NORESERVE, MAP_NORESERVE },
5136     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5137     /* MAP_STACK had been ignored by the kernel for quite some time.
5138        Recognize it for the target insofar as we do not want to pass
5139        it through to the host.  */
5140     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5141     { 0, 0, 0, 0 }
5142 };
5143 
5144 #if defined(TARGET_I386)
5145 
5146 /* NOTE: there is really one LDT for all the threads */
5147 static uint8_t *ldt_table;
5148 
5149 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5150 {
5151     int size;
5152     void *p;
5153 
5154     if (!ldt_table)
5155         return 0;
5156     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5157     if (size > bytecount)
5158         size = bytecount;
5159     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5160     if (!p)
5161         return -TARGET_EFAULT;
5162     /* ??? Should this by byteswapped?  */
5163     memcpy(p, ldt_table, size);
5164     unlock_user(p, ptr, size);
5165     return size;
5166 }
5167 
5168 /* XXX: add locking support */
5169 static abi_long write_ldt(CPUX86State *env,
5170                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5171 {
5172     struct target_modify_ldt_ldt_s ldt_info;
5173     struct target_modify_ldt_ldt_s *target_ldt_info;
5174     int seg_32bit, contents, read_exec_only, limit_in_pages;
5175     int seg_not_present, useable, lm;
5176     uint32_t *lp, entry_1, entry_2;
5177 
5178     if (bytecount != sizeof(ldt_info))
5179         return -TARGET_EINVAL;
5180     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5181         return -TARGET_EFAULT;
5182     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5183     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5184     ldt_info.limit = tswap32(target_ldt_info->limit);
5185     ldt_info.flags = tswap32(target_ldt_info->flags);
5186     unlock_user_struct(target_ldt_info, ptr, 0);
5187 
5188     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5189         return -TARGET_EINVAL;
5190     seg_32bit = ldt_info.flags & 1;
5191     contents = (ldt_info.flags >> 1) & 3;
5192     read_exec_only = (ldt_info.flags >> 3) & 1;
5193     limit_in_pages = (ldt_info.flags >> 4) & 1;
5194     seg_not_present = (ldt_info.flags >> 5) & 1;
5195     useable = (ldt_info.flags >> 6) & 1;
5196 #ifdef TARGET_ABI32
5197     lm = 0;
5198 #else
5199     lm = (ldt_info.flags >> 7) & 1;
5200 #endif
5201     if (contents == 3) {
5202         if (oldmode)
5203             return -TARGET_EINVAL;
5204         if (seg_not_present == 0)
5205             return -TARGET_EINVAL;
5206     }
5207     /* allocate the LDT */
5208     if (!ldt_table) {
5209         env->ldt.base = target_mmap(0,
5210                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5211                                     PROT_READ|PROT_WRITE,
5212                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5213         if (env->ldt.base == -1)
5214             return -TARGET_ENOMEM;
5215         memset(g2h(env->ldt.base), 0,
5216                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5217         env->ldt.limit = 0xffff;
5218         ldt_table = g2h(env->ldt.base);
5219     }
5220 
5221     /* NOTE: same code as Linux kernel */
5222     /* Allow LDTs to be cleared by the user. */
5223     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5224         if (oldmode ||
5225             (contents == 0		&&
5226              read_exec_only == 1	&&
5227              seg_32bit == 0		&&
5228              limit_in_pages == 0	&&
5229              seg_not_present == 1	&&
5230              useable == 0 )) {
5231             entry_1 = 0;
5232             entry_2 = 0;
5233             goto install;
5234         }
5235     }
5236 
5237     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5238         (ldt_info.limit & 0x0ffff);
5239     entry_2 = (ldt_info.base_addr & 0xff000000) |
5240         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5241         (ldt_info.limit & 0xf0000) |
5242         ((read_exec_only ^ 1) << 9) |
5243         (contents << 10) |
5244         ((seg_not_present ^ 1) << 15) |
5245         (seg_32bit << 22) |
5246         (limit_in_pages << 23) |
5247         (lm << 21) |
5248         0x7000;
5249     if (!oldmode)
5250         entry_2 |= (useable << 20);
5251 
5252     /* Install the new entry ...  */
5253 install:
5254     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5255     lp[0] = tswap32(entry_1);
5256     lp[1] = tswap32(entry_2);
5257     return 0;
5258 }
5259 
5260 /* specific and weird i386 syscalls */
5261 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5262                               unsigned long bytecount)
5263 {
5264     abi_long ret;
5265 
5266     switch (func) {
5267     case 0:
5268         ret = read_ldt(ptr, bytecount);
5269         break;
5270     case 1:
5271         ret = write_ldt(env, ptr, bytecount, 1);
5272         break;
5273     case 0x11:
5274         ret = write_ldt(env, ptr, bytecount, 0);
5275         break;
5276     default:
5277         ret = -TARGET_ENOSYS;
5278         break;
5279     }
5280     return ret;
5281 }
5282 
5283 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5284 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5285 {
5286     uint64_t *gdt_table = g2h(env->gdt.base);
5287     struct target_modify_ldt_ldt_s ldt_info;
5288     struct target_modify_ldt_ldt_s *target_ldt_info;
5289     int seg_32bit, contents, read_exec_only, limit_in_pages;
5290     int seg_not_present, useable, lm;
5291     uint32_t *lp, entry_1, entry_2;
5292     int i;
5293 
5294     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5295     if (!target_ldt_info)
5296         return -TARGET_EFAULT;
5297     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5298     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5299     ldt_info.limit = tswap32(target_ldt_info->limit);
5300     ldt_info.flags = tswap32(target_ldt_info->flags);
5301     if (ldt_info.entry_number == -1) {
5302         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5303             if (gdt_table[i] == 0) {
5304                 ldt_info.entry_number = i;
5305                 target_ldt_info->entry_number = tswap32(i);
5306                 break;
5307             }
5308         }
5309     }
5310     unlock_user_struct(target_ldt_info, ptr, 1);
5311 
5312     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5313         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5314            return -TARGET_EINVAL;
5315     seg_32bit = ldt_info.flags & 1;
5316     contents = (ldt_info.flags >> 1) & 3;
5317     read_exec_only = (ldt_info.flags >> 3) & 1;
5318     limit_in_pages = (ldt_info.flags >> 4) & 1;
5319     seg_not_present = (ldt_info.flags >> 5) & 1;
5320     useable = (ldt_info.flags >> 6) & 1;
5321 #ifdef TARGET_ABI32
5322     lm = 0;
5323 #else
5324     lm = (ldt_info.flags >> 7) & 1;
5325 #endif
5326 
5327     if (contents == 3) {
5328         if (seg_not_present == 0)
5329             return -TARGET_EINVAL;
5330     }
5331 
5332     /* NOTE: same code as Linux kernel */
5333     /* Allow LDTs to be cleared by the user. */
5334     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5335         if ((contents == 0             &&
5336              read_exec_only == 1       &&
5337              seg_32bit == 0            &&
5338              limit_in_pages == 0       &&
5339              seg_not_present == 1      &&
5340              useable == 0 )) {
5341             entry_1 = 0;
5342             entry_2 = 0;
5343             goto install;
5344         }
5345     }
5346 
5347     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5348         (ldt_info.limit & 0x0ffff);
5349     entry_2 = (ldt_info.base_addr & 0xff000000) |
5350         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5351         (ldt_info.limit & 0xf0000) |
5352         ((read_exec_only ^ 1) << 9) |
5353         (contents << 10) |
5354         ((seg_not_present ^ 1) << 15) |
5355         (seg_32bit << 22) |
5356         (limit_in_pages << 23) |
5357         (useable << 20) |
5358         (lm << 21) |
5359         0x7000;
5360 
5361     /* Install the new entry ...  */
5362 install:
5363     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5364     lp[0] = tswap32(entry_1);
5365     lp[1] = tswap32(entry_2);
5366     return 0;
5367 }
5368 
5369 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5370 {
5371     struct target_modify_ldt_ldt_s *target_ldt_info;
5372     uint64_t *gdt_table = g2h(env->gdt.base);
5373     uint32_t base_addr, limit, flags;
5374     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5375     int seg_not_present, useable, lm;
5376     uint32_t *lp, entry_1, entry_2;
5377 
5378     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5379     if (!target_ldt_info)
5380         return -TARGET_EFAULT;
5381     idx = tswap32(target_ldt_info->entry_number);
5382     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5383         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5384         unlock_user_struct(target_ldt_info, ptr, 1);
5385         return -TARGET_EINVAL;
5386     }
5387     lp = (uint32_t *)(gdt_table + idx);
5388     entry_1 = tswap32(lp[0]);
5389     entry_2 = tswap32(lp[1]);
5390 
5391     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5392     contents = (entry_2 >> 10) & 3;
5393     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5394     seg_32bit = (entry_2 >> 22) & 1;
5395     limit_in_pages = (entry_2 >> 23) & 1;
5396     useable = (entry_2 >> 20) & 1;
5397 #ifdef TARGET_ABI32
5398     lm = 0;
5399 #else
5400     lm = (entry_2 >> 21) & 1;
5401 #endif
5402     flags = (seg_32bit << 0) | (contents << 1) |
5403         (read_exec_only << 3) | (limit_in_pages << 4) |
5404         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5405     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5406     base_addr = (entry_1 >> 16) |
5407         (entry_2 & 0xff000000) |
5408         ((entry_2 & 0xff) << 16);
5409     target_ldt_info->base_addr = tswapal(base_addr);
5410     target_ldt_info->limit = tswap32(limit);
5411     target_ldt_info->flags = tswap32(flags);
5412     unlock_user_struct(target_ldt_info, ptr, 1);
5413     return 0;
5414 }
5415 #endif /* TARGET_I386 && TARGET_ABI32 */
5416 
5417 #ifndef TARGET_ABI32
5418 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5419 {
5420     abi_long ret = 0;
5421     abi_ulong val;
5422     int idx;
5423 
5424     switch(code) {
5425     case TARGET_ARCH_SET_GS:
5426     case TARGET_ARCH_SET_FS:
5427         if (code == TARGET_ARCH_SET_GS)
5428             idx = R_GS;
5429         else
5430             idx = R_FS;
5431         cpu_x86_load_seg(env, idx, 0);
5432         env->segs[idx].base = addr;
5433         break;
5434     case TARGET_ARCH_GET_GS:
5435     case TARGET_ARCH_GET_FS:
5436         if (code == TARGET_ARCH_GET_GS)
5437             idx = R_GS;
5438         else
5439             idx = R_FS;
5440         val = env->segs[idx].base;
5441         if (put_user(val, addr, abi_ulong))
5442             ret = -TARGET_EFAULT;
5443         break;
5444     default:
5445         ret = -TARGET_EINVAL;
5446         break;
5447     }
5448     return ret;
5449 }
5450 #endif
5451 
5452 #endif /* defined(TARGET_I386) */
5453 
5454 #define NEW_STACK_SIZE 0x40000
5455 
5456 
5457 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5458 typedef struct {
5459     CPUArchState *env;
5460     pthread_mutex_t mutex;
5461     pthread_cond_t cond;
5462     pthread_t thread;
5463     uint32_t tid;
5464     abi_ulong child_tidptr;
5465     abi_ulong parent_tidptr;
5466     sigset_t sigmask;
5467 } new_thread_info;
5468 
5469 static void *clone_func(void *arg)
5470 {
5471     new_thread_info *info = arg;
5472     CPUArchState *env;
5473     CPUState *cpu;
5474     TaskState *ts;
5475 
5476     rcu_register_thread();
5477     tcg_register_thread();
5478     env = info->env;
5479     cpu = ENV_GET_CPU(env);
5480     thread_cpu = cpu;
5481     ts = (TaskState *)cpu->opaque;
5482     info->tid = sys_gettid();
5483     task_settid(ts);
5484     if (info->child_tidptr)
5485         put_user_u32(info->tid, info->child_tidptr);
5486     if (info->parent_tidptr)
5487         put_user_u32(info->tid, info->parent_tidptr);
5488     /* Enable signals.  */
5489     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5490     /* Signal to the parent that we're ready.  */
5491     pthread_mutex_lock(&info->mutex);
5492     pthread_cond_broadcast(&info->cond);
5493     pthread_mutex_unlock(&info->mutex);
5494     /* Wait until the parent has finished initializing the tls state.  */
5495     pthread_mutex_lock(&clone_lock);
5496     pthread_mutex_unlock(&clone_lock);
5497     cpu_loop(env);
5498     /* never exits */
5499     return NULL;
5500 }
5501 
5502 /* do_fork() Must return host values and target errnos (unlike most
5503    do_*() functions). */
5504 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5505                    abi_ulong parent_tidptr, target_ulong newtls,
5506                    abi_ulong child_tidptr)
5507 {
5508     CPUState *cpu = ENV_GET_CPU(env);
5509     int ret;
5510     TaskState *ts;
5511     CPUState *new_cpu;
5512     CPUArchState *new_env;
5513     sigset_t sigmask;
5514 
5515     flags &= ~CLONE_IGNORED_FLAGS;
5516 
5517     /* Emulate vfork() with fork() */
5518     if (flags & CLONE_VFORK)
5519         flags &= ~(CLONE_VFORK | CLONE_VM);
5520 
5521     if (flags & CLONE_VM) {
5522         TaskState *parent_ts = (TaskState *)cpu->opaque;
5523         new_thread_info info;
5524         pthread_attr_t attr;
5525 
5526         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5527             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5528             return -TARGET_EINVAL;
5529         }
5530 
5531         ts = g_new0(TaskState, 1);
5532         init_task_state(ts);
5533 
5534         /* Grab a mutex so that thread setup appears atomic.  */
5535         pthread_mutex_lock(&clone_lock);
5536 
5537         /* we create a new CPU instance. */
5538         new_env = cpu_copy(env);
5539         /* Init regs that differ from the parent.  */
5540         cpu_clone_regs(new_env, newsp);
5541         new_cpu = ENV_GET_CPU(new_env);
5542         new_cpu->opaque = ts;
5543         ts->bprm = parent_ts->bprm;
5544         ts->info = parent_ts->info;
5545         ts->signal_mask = parent_ts->signal_mask;
5546 
5547         if (flags & CLONE_CHILD_CLEARTID) {
5548             ts->child_tidptr = child_tidptr;
5549         }
5550 
5551         if (flags & CLONE_SETTLS) {
5552             cpu_set_tls (new_env, newtls);
5553         }
5554 
5555         memset(&info, 0, sizeof(info));
5556         pthread_mutex_init(&info.mutex, NULL);
5557         pthread_mutex_lock(&info.mutex);
5558         pthread_cond_init(&info.cond, NULL);
5559         info.env = new_env;
5560         if (flags & CLONE_CHILD_SETTID) {
5561             info.child_tidptr = child_tidptr;
5562         }
5563         if (flags & CLONE_PARENT_SETTID) {
5564             info.parent_tidptr = parent_tidptr;
5565         }
5566 
5567         ret = pthread_attr_init(&attr);
5568         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5569         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5570         /* It is not safe to deliver signals until the child has finished
5571            initializing, so temporarily block all signals.  */
5572         sigfillset(&sigmask);
5573         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5574 
5575         /* If this is our first additional thread, we need to ensure we
5576          * generate code for parallel execution and flush old translations.
5577          */
5578         if (!parallel_cpus) {
5579             parallel_cpus = true;
5580             tb_flush(cpu);
5581         }
5582 
5583         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5584         /* TODO: Free new CPU state if thread creation failed.  */
5585 
5586         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5587         pthread_attr_destroy(&attr);
5588         if (ret == 0) {
5589             /* Wait for the child to initialize.  */
5590             pthread_cond_wait(&info.cond, &info.mutex);
5591             ret = info.tid;
5592         } else {
5593             ret = -1;
5594         }
5595         pthread_mutex_unlock(&info.mutex);
5596         pthread_cond_destroy(&info.cond);
5597         pthread_mutex_destroy(&info.mutex);
5598         pthread_mutex_unlock(&clone_lock);
5599     } else {
5600         /* if no CLONE_VM, we consider it is a fork */
5601         if (flags & CLONE_INVALID_FORK_FLAGS) {
5602             return -TARGET_EINVAL;
5603         }
5604 
5605         /* We can't support custom termination signals */
5606         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5607             return -TARGET_EINVAL;
5608         }
5609 
5610         if (block_signals()) {
5611             return -TARGET_ERESTARTSYS;
5612         }
5613 
5614         fork_start();
5615         ret = fork();
5616         if (ret == 0) {
5617             /* Child Process.  */
5618             cpu_clone_regs(env, newsp);
5619             fork_end(1);
5620             /* There is a race condition here.  The parent process could
5621                theoretically read the TID in the child process before the child
5622                tid is set.  This would require using either ptrace
5623                (not implemented) or having *_tidptr to point at a shared memory
5624                mapping.  We can't repeat the spinlock hack used above because
5625                the child process gets its own copy of the lock.  */
5626             if (flags & CLONE_CHILD_SETTID)
5627                 put_user_u32(sys_gettid(), child_tidptr);
5628             if (flags & CLONE_PARENT_SETTID)
5629                 put_user_u32(sys_gettid(), parent_tidptr);
5630             ts = (TaskState *)cpu->opaque;
5631             if (flags & CLONE_SETTLS)
5632                 cpu_set_tls (env, newtls);
5633             if (flags & CLONE_CHILD_CLEARTID)
5634                 ts->child_tidptr = child_tidptr;
5635         } else {
5636             fork_end(0);
5637         }
5638     }
5639     return ret;
5640 }
5641 
5642 /* warning : doesn't handle linux specific flags... */
5643 static int target_to_host_fcntl_cmd(int cmd)
5644 {
5645     int ret;
5646 
5647     switch(cmd) {
5648     case TARGET_F_DUPFD:
5649     case TARGET_F_GETFD:
5650     case TARGET_F_SETFD:
5651     case TARGET_F_GETFL:
5652     case TARGET_F_SETFL:
5653         ret = cmd;
5654         break;
5655     case TARGET_F_GETLK:
5656         ret = F_GETLK64;
5657         break;
5658     case TARGET_F_SETLK:
5659         ret = F_SETLK64;
5660         break;
5661     case TARGET_F_SETLKW:
5662         ret = F_SETLKW64;
5663         break;
5664     case TARGET_F_GETOWN:
5665         ret = F_GETOWN;
5666         break;
5667     case TARGET_F_SETOWN:
5668         ret = F_SETOWN;
5669         break;
5670     case TARGET_F_GETSIG:
5671         ret = F_GETSIG;
5672         break;
5673     case TARGET_F_SETSIG:
5674         ret = F_SETSIG;
5675         break;
5676 #if TARGET_ABI_BITS == 32
5677     case TARGET_F_GETLK64:
5678         ret = F_GETLK64;
5679         break;
5680     case TARGET_F_SETLK64:
5681         ret = F_SETLK64;
5682         break;
5683     case TARGET_F_SETLKW64:
5684         ret = F_SETLKW64;
5685         break;
5686 #endif
5687     case TARGET_F_SETLEASE:
5688         ret = F_SETLEASE;
5689         break;
5690     case TARGET_F_GETLEASE:
5691         ret = F_GETLEASE;
5692         break;
5693 #ifdef F_DUPFD_CLOEXEC
5694     case TARGET_F_DUPFD_CLOEXEC:
5695         ret = F_DUPFD_CLOEXEC;
5696         break;
5697 #endif
5698     case TARGET_F_NOTIFY:
5699         ret = F_NOTIFY;
5700         break;
5701 #ifdef F_GETOWN_EX
5702     case TARGET_F_GETOWN_EX:
5703         ret = F_GETOWN_EX;
5704         break;
5705 #endif
5706 #ifdef F_SETOWN_EX
5707     case TARGET_F_SETOWN_EX:
5708         ret = F_SETOWN_EX;
5709         break;
5710 #endif
5711 #ifdef F_SETPIPE_SZ
5712     case TARGET_F_SETPIPE_SZ:
5713         ret = F_SETPIPE_SZ;
5714         break;
5715     case TARGET_F_GETPIPE_SZ:
5716         ret = F_GETPIPE_SZ;
5717         break;
5718 #endif
5719     default:
5720         ret = -TARGET_EINVAL;
5721         break;
5722     }
5723 
5724 #if defined(__powerpc64__)
5725     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5726      * is not supported by kernel. The glibc fcntl call actually adjusts
5727      * them to 5, 6 and 7 before making the syscall(). Since we make the
5728      * syscall directly, adjust to what is supported by the kernel.
5729      */
5730     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5731         ret -= F_GETLK64 - 5;
5732     }
5733 #endif
5734 
5735     return ret;
5736 }
5737 
5738 #define FLOCK_TRANSTBL \
5739     switch (type) { \
5740     TRANSTBL_CONVERT(F_RDLCK); \
5741     TRANSTBL_CONVERT(F_WRLCK); \
5742     TRANSTBL_CONVERT(F_UNLCK); \
5743     TRANSTBL_CONVERT(F_EXLCK); \
5744     TRANSTBL_CONVERT(F_SHLCK); \
5745     }
5746 
5747 static int target_to_host_flock(int type)
5748 {
5749 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5750     FLOCK_TRANSTBL
5751 #undef  TRANSTBL_CONVERT
5752     return -TARGET_EINVAL;
5753 }
5754 
5755 static int host_to_target_flock(int type)
5756 {
5757 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5758     FLOCK_TRANSTBL
5759 #undef  TRANSTBL_CONVERT
5760     /* if we don't know how to convert the value coming
5761      * from the host we copy to the target field as-is
5762      */
5763     return type;
5764 }
5765 
5766 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5767                                             abi_ulong target_flock_addr)
5768 {
5769     struct target_flock *target_fl;
5770     int l_type;
5771 
5772     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5773         return -TARGET_EFAULT;
5774     }
5775 
5776     __get_user(l_type, &target_fl->l_type);
5777     l_type = target_to_host_flock(l_type);
5778     if (l_type < 0) {
5779         return l_type;
5780     }
5781     fl->l_type = l_type;
5782     __get_user(fl->l_whence, &target_fl->l_whence);
5783     __get_user(fl->l_start, &target_fl->l_start);
5784     __get_user(fl->l_len, &target_fl->l_len);
5785     __get_user(fl->l_pid, &target_fl->l_pid);
5786     unlock_user_struct(target_fl, target_flock_addr, 0);
5787     return 0;
5788 }
5789 
5790 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5791                                           const struct flock64 *fl)
5792 {
5793     struct target_flock *target_fl;
5794     short l_type;
5795 
5796     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5797         return -TARGET_EFAULT;
5798     }
5799 
5800     l_type = host_to_target_flock(fl->l_type);
5801     __put_user(l_type, &target_fl->l_type);
5802     __put_user(fl->l_whence, &target_fl->l_whence);
5803     __put_user(fl->l_start, &target_fl->l_start);
5804     __put_user(fl->l_len, &target_fl->l_len);
5805     __put_user(fl->l_pid, &target_fl->l_pid);
5806     unlock_user_struct(target_fl, target_flock_addr, 1);
5807     return 0;
5808 }
5809 
5810 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5811 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5812 
5813 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5814 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5815                                                    abi_ulong target_flock_addr)
5816 {
5817     struct target_oabi_flock64 *target_fl;
5818     int l_type;
5819 
5820     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5821         return -TARGET_EFAULT;
5822     }
5823 
5824     __get_user(l_type, &target_fl->l_type);
5825     l_type = target_to_host_flock(l_type);
5826     if (l_type < 0) {
5827         return l_type;
5828     }
5829     fl->l_type = l_type;
5830     __get_user(fl->l_whence, &target_fl->l_whence);
5831     __get_user(fl->l_start, &target_fl->l_start);
5832     __get_user(fl->l_len, &target_fl->l_len);
5833     __get_user(fl->l_pid, &target_fl->l_pid);
5834     unlock_user_struct(target_fl, target_flock_addr, 0);
5835     return 0;
5836 }
5837 
5838 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5839                                                  const struct flock64 *fl)
5840 {
5841     struct target_oabi_flock64 *target_fl;
5842     short l_type;
5843 
5844     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5845         return -TARGET_EFAULT;
5846     }
5847 
5848     l_type = host_to_target_flock(fl->l_type);
5849     __put_user(l_type, &target_fl->l_type);
5850     __put_user(fl->l_whence, &target_fl->l_whence);
5851     __put_user(fl->l_start, &target_fl->l_start);
5852     __put_user(fl->l_len, &target_fl->l_len);
5853     __put_user(fl->l_pid, &target_fl->l_pid);
5854     unlock_user_struct(target_fl, target_flock_addr, 1);
5855     return 0;
5856 }
5857 #endif
5858 
5859 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5860                                               abi_ulong target_flock_addr)
5861 {
5862     struct target_flock64 *target_fl;
5863     int l_type;
5864 
5865     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5866         return -TARGET_EFAULT;
5867     }
5868 
5869     __get_user(l_type, &target_fl->l_type);
5870     l_type = target_to_host_flock(l_type);
5871     if (l_type < 0) {
5872         return l_type;
5873     }
5874     fl->l_type = l_type;
5875     __get_user(fl->l_whence, &target_fl->l_whence);
5876     __get_user(fl->l_start, &target_fl->l_start);
5877     __get_user(fl->l_len, &target_fl->l_len);
5878     __get_user(fl->l_pid, &target_fl->l_pid);
5879     unlock_user_struct(target_fl, target_flock_addr, 0);
5880     return 0;
5881 }
5882 
5883 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5884                                             const struct flock64 *fl)
5885 {
5886     struct target_flock64 *target_fl;
5887     short l_type;
5888 
5889     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5890         return -TARGET_EFAULT;
5891     }
5892 
5893     l_type = host_to_target_flock(fl->l_type);
5894     __put_user(l_type, &target_fl->l_type);
5895     __put_user(fl->l_whence, &target_fl->l_whence);
5896     __put_user(fl->l_start, &target_fl->l_start);
5897     __put_user(fl->l_len, &target_fl->l_len);
5898     __put_user(fl->l_pid, &target_fl->l_pid);
5899     unlock_user_struct(target_fl, target_flock_addr, 1);
5900     return 0;
5901 }
5902 
5903 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5904 {
5905     struct flock64 fl64;
5906 #ifdef F_GETOWN_EX
5907     struct f_owner_ex fox;
5908     struct target_f_owner_ex *target_fox;
5909 #endif
5910     abi_long ret;
5911     int host_cmd = target_to_host_fcntl_cmd(cmd);
5912 
5913     if (host_cmd == -TARGET_EINVAL)
5914 	    return host_cmd;
5915 
5916     switch(cmd) {
5917     case TARGET_F_GETLK:
5918         ret = copy_from_user_flock(&fl64, arg);
5919         if (ret) {
5920             return ret;
5921         }
5922         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5923         if (ret == 0) {
5924             ret = copy_to_user_flock(arg, &fl64);
5925         }
5926         break;
5927 
5928     case TARGET_F_SETLK:
5929     case TARGET_F_SETLKW:
5930         ret = copy_from_user_flock(&fl64, arg);
5931         if (ret) {
5932             return ret;
5933         }
5934         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5935         break;
5936 
5937     case TARGET_F_GETLK64:
5938         ret = copy_from_user_flock64(&fl64, arg);
5939         if (ret) {
5940             return ret;
5941         }
5942         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5943         if (ret == 0) {
5944             ret = copy_to_user_flock64(arg, &fl64);
5945         }
5946         break;
5947     case TARGET_F_SETLK64:
5948     case TARGET_F_SETLKW64:
5949         ret = copy_from_user_flock64(&fl64, arg);
5950         if (ret) {
5951             return ret;
5952         }
5953         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5954         break;
5955 
5956     case TARGET_F_GETFL:
5957         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5958         if (ret >= 0) {
5959             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5960         }
5961         break;
5962 
5963     case TARGET_F_SETFL:
5964         ret = get_errno(safe_fcntl(fd, host_cmd,
5965                                    target_to_host_bitmask(arg,
5966                                                           fcntl_flags_tbl)));
5967         break;
5968 
5969 #ifdef F_GETOWN_EX
5970     case TARGET_F_GETOWN_EX:
5971         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5972         if (ret >= 0) {
5973             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5974                 return -TARGET_EFAULT;
5975             target_fox->type = tswap32(fox.type);
5976             target_fox->pid = tswap32(fox.pid);
5977             unlock_user_struct(target_fox, arg, 1);
5978         }
5979         break;
5980 #endif
5981 
5982 #ifdef F_SETOWN_EX
5983     case TARGET_F_SETOWN_EX:
5984         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5985             return -TARGET_EFAULT;
5986         fox.type = tswap32(target_fox->type);
5987         fox.pid = tswap32(target_fox->pid);
5988         unlock_user_struct(target_fox, arg, 0);
5989         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5990         break;
5991 #endif
5992 
5993     case TARGET_F_SETOWN:
5994     case TARGET_F_GETOWN:
5995     case TARGET_F_SETSIG:
5996     case TARGET_F_GETSIG:
5997     case TARGET_F_SETLEASE:
5998     case TARGET_F_GETLEASE:
5999     case TARGET_F_SETPIPE_SZ:
6000     case TARGET_F_GETPIPE_SZ:
6001         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6002         break;
6003 
6004     default:
6005         ret = get_errno(safe_fcntl(fd, cmd, arg));
6006         break;
6007     }
6008     return ret;
6009 }
6010 
6011 #ifdef USE_UID16
6012 
6013 static inline int high2lowuid(int uid)
6014 {
6015     if (uid > 65535)
6016         return 65534;
6017     else
6018         return uid;
6019 }
6020 
6021 static inline int high2lowgid(int gid)
6022 {
6023     if (gid > 65535)
6024         return 65534;
6025     else
6026         return gid;
6027 }
6028 
6029 static inline int low2highuid(int uid)
6030 {
6031     if ((int16_t)uid == -1)
6032         return -1;
6033     else
6034         return uid;
6035 }
6036 
6037 static inline int low2highgid(int gid)
6038 {
6039     if ((int16_t)gid == -1)
6040         return -1;
6041     else
6042         return gid;
6043 }
6044 static inline int tswapid(int id)
6045 {
6046     return tswap16(id);
6047 }
6048 
6049 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6050 
6051 #else /* !USE_UID16 */
6052 static inline int high2lowuid(int uid)
6053 {
6054     return uid;
6055 }
6056 static inline int high2lowgid(int gid)
6057 {
6058     return gid;
6059 }
6060 static inline int low2highuid(int uid)
6061 {
6062     return uid;
6063 }
6064 static inline int low2highgid(int gid)
6065 {
6066     return gid;
6067 }
6068 static inline int tswapid(int id)
6069 {
6070     return tswap32(id);
6071 }
6072 
6073 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6074 
6075 #endif /* USE_UID16 */
6076 
6077 /* We must do direct syscalls for setting UID/GID, because we want to
6078  * implement the Linux system call semantics of "change only for this thread",
6079  * not the libc/POSIX semantics of "change for all threads in process".
6080  * (See http://ewontfix.com/17/ for more details.)
6081  * We use the 32-bit version of the syscalls if present; if it is not
6082  * then either the host architecture supports 32-bit UIDs natively with
6083  * the standard syscall, or the 16-bit UID is the best we can do.
6084  */
6085 #ifdef __NR_setuid32
6086 #define __NR_sys_setuid __NR_setuid32
6087 #else
6088 #define __NR_sys_setuid __NR_setuid
6089 #endif
6090 #ifdef __NR_setgid32
6091 #define __NR_sys_setgid __NR_setgid32
6092 #else
6093 #define __NR_sys_setgid __NR_setgid
6094 #endif
6095 #ifdef __NR_setresuid32
6096 #define __NR_sys_setresuid __NR_setresuid32
6097 #else
6098 #define __NR_sys_setresuid __NR_setresuid
6099 #endif
6100 #ifdef __NR_setresgid32
6101 #define __NR_sys_setresgid __NR_setresgid32
6102 #else
6103 #define __NR_sys_setresgid __NR_setresgid
6104 #endif
6105 
6106 _syscall1(int, sys_setuid, uid_t, uid)
6107 _syscall1(int, sys_setgid, gid_t, gid)
6108 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6109 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6110 
6111 void syscall_init(void)
6112 {
6113     IOCTLEntry *ie;
6114     const argtype *arg_type;
6115     int size;
6116     int i;
6117 
6118     thunk_init(STRUCT_MAX);
6119 
6120 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6121 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6122 #include "syscall_types.h"
6123 #undef STRUCT
6124 #undef STRUCT_SPECIAL
6125 
6126     /* Build target_to_host_errno_table[] table from
6127      * host_to_target_errno_table[]. */
6128     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6129         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6130     }
6131 
6132     /* we patch the ioctl size if necessary. We rely on the fact that
6133        no ioctl has all the bits at '1' in the size field */
6134     ie = ioctl_entries;
6135     while (ie->target_cmd != 0) {
6136         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6137             TARGET_IOC_SIZEMASK) {
6138             arg_type = ie->arg_type;
6139             if (arg_type[0] != TYPE_PTR) {
6140                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6141                         ie->target_cmd);
6142                 exit(1);
6143             }
6144             arg_type++;
6145             size = thunk_type_size(arg_type, 0);
6146             ie->target_cmd = (ie->target_cmd &
6147                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6148                 (size << TARGET_IOC_SIZESHIFT);
6149         }
6150 
6151         /* automatic consistency check if same arch */
6152 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6153     (defined(__x86_64__) && defined(TARGET_X86_64))
6154         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6155             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6156                     ie->name, ie->target_cmd, ie->host_cmd);
6157         }
6158 #endif
6159         ie++;
6160     }
6161 }
6162 
6163 #if TARGET_ABI_BITS == 32
6164 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6165 {
6166 #ifdef TARGET_WORDS_BIGENDIAN
6167     return ((uint64_t)word0 << 32) | word1;
6168 #else
6169     return ((uint64_t)word1 << 32) | word0;
6170 #endif
6171 }
6172 #else /* TARGET_ABI_BITS == 32 */
6173 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6174 {
6175     return word0;
6176 }
6177 #endif /* TARGET_ABI_BITS != 32 */
6178 
6179 #ifdef TARGET_NR_truncate64
6180 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6181                                          abi_long arg2,
6182                                          abi_long arg3,
6183                                          abi_long arg4)
6184 {
6185     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6186         arg2 = arg3;
6187         arg3 = arg4;
6188     }
6189     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6190 }
6191 #endif
6192 
6193 #ifdef TARGET_NR_ftruncate64
6194 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6195                                           abi_long arg2,
6196                                           abi_long arg3,
6197                                           abi_long arg4)
6198 {
6199     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6200         arg2 = arg3;
6201         arg3 = arg4;
6202     }
6203     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6204 }
6205 #endif
6206 
6207 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6208                                                abi_ulong target_addr)
6209 {
6210     struct target_timespec *target_ts;
6211 
6212     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6213         return -TARGET_EFAULT;
6214     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6215     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6216     unlock_user_struct(target_ts, target_addr, 0);
6217     return 0;
6218 }
6219 
6220 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6221                                                struct timespec *host_ts)
6222 {
6223     struct target_timespec *target_ts;
6224 
6225     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6226         return -TARGET_EFAULT;
6227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6229     unlock_user_struct(target_ts, target_addr, 1);
6230     return 0;
6231 }
6232 
6233 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6234                                                  abi_ulong target_addr)
6235 {
6236     struct target_itimerspec *target_itspec;
6237 
6238     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6239         return -TARGET_EFAULT;
6240     }
6241 
6242     host_itspec->it_interval.tv_sec =
6243                             tswapal(target_itspec->it_interval.tv_sec);
6244     host_itspec->it_interval.tv_nsec =
6245                             tswapal(target_itspec->it_interval.tv_nsec);
6246     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6247     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6248 
6249     unlock_user_struct(target_itspec, target_addr, 1);
6250     return 0;
6251 }
6252 
6253 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6254                                                struct itimerspec *host_its)
6255 {
6256     struct target_itimerspec *target_itspec;
6257 
6258     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6259         return -TARGET_EFAULT;
6260     }
6261 
6262     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6263     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6264 
6265     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6266     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6267 
6268     unlock_user_struct(target_itspec, target_addr, 0);
6269     return 0;
6270 }
6271 
6272 static inline abi_long target_to_host_timex(struct timex *host_tx,
6273                                             abi_long target_addr)
6274 {
6275     struct target_timex *target_tx;
6276 
6277     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6278         return -TARGET_EFAULT;
6279     }
6280 
6281     __get_user(host_tx->modes, &target_tx->modes);
6282     __get_user(host_tx->offset, &target_tx->offset);
6283     __get_user(host_tx->freq, &target_tx->freq);
6284     __get_user(host_tx->maxerror, &target_tx->maxerror);
6285     __get_user(host_tx->esterror, &target_tx->esterror);
6286     __get_user(host_tx->status, &target_tx->status);
6287     __get_user(host_tx->constant, &target_tx->constant);
6288     __get_user(host_tx->precision, &target_tx->precision);
6289     __get_user(host_tx->tolerance, &target_tx->tolerance);
6290     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6291     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6292     __get_user(host_tx->tick, &target_tx->tick);
6293     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6294     __get_user(host_tx->jitter, &target_tx->jitter);
6295     __get_user(host_tx->shift, &target_tx->shift);
6296     __get_user(host_tx->stabil, &target_tx->stabil);
6297     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6298     __get_user(host_tx->calcnt, &target_tx->calcnt);
6299     __get_user(host_tx->errcnt, &target_tx->errcnt);
6300     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6301     __get_user(host_tx->tai, &target_tx->tai);
6302 
6303     unlock_user_struct(target_tx, target_addr, 0);
6304     return 0;
6305 }
6306 
6307 static inline abi_long host_to_target_timex(abi_long target_addr,
6308                                             struct timex *host_tx)
6309 {
6310     struct target_timex *target_tx;
6311 
6312     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6313         return -TARGET_EFAULT;
6314     }
6315 
6316     __put_user(host_tx->modes, &target_tx->modes);
6317     __put_user(host_tx->offset, &target_tx->offset);
6318     __put_user(host_tx->freq, &target_tx->freq);
6319     __put_user(host_tx->maxerror, &target_tx->maxerror);
6320     __put_user(host_tx->esterror, &target_tx->esterror);
6321     __put_user(host_tx->status, &target_tx->status);
6322     __put_user(host_tx->constant, &target_tx->constant);
6323     __put_user(host_tx->precision, &target_tx->precision);
6324     __put_user(host_tx->tolerance, &target_tx->tolerance);
6325     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6326     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6327     __put_user(host_tx->tick, &target_tx->tick);
6328     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6329     __put_user(host_tx->jitter, &target_tx->jitter);
6330     __put_user(host_tx->shift, &target_tx->shift);
6331     __put_user(host_tx->stabil, &target_tx->stabil);
6332     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6333     __put_user(host_tx->calcnt, &target_tx->calcnt);
6334     __put_user(host_tx->errcnt, &target_tx->errcnt);
6335     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6336     __put_user(host_tx->tai, &target_tx->tai);
6337 
6338     unlock_user_struct(target_tx, target_addr, 1);
6339     return 0;
6340 }
6341 
6342 
6343 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6344                                                abi_ulong target_addr)
6345 {
6346     struct target_sigevent *target_sevp;
6347 
6348     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6349         return -TARGET_EFAULT;
6350     }
6351 
6352     /* This union is awkward on 64 bit systems because it has a 32 bit
6353      * integer and a pointer in it; we follow the conversion approach
6354      * used for handling sigval types in signal.c so the guest should get
6355      * the correct value back even if we did a 64 bit byteswap and it's
6356      * using the 32 bit integer.
6357      */
6358     host_sevp->sigev_value.sival_ptr =
6359         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6360     host_sevp->sigev_signo =
6361         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6362     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6363     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6364 
6365     unlock_user_struct(target_sevp, target_addr, 1);
6366     return 0;
6367 }
6368 
6369 #if defined(TARGET_NR_mlockall)
6370 static inline int target_to_host_mlockall_arg(int arg)
6371 {
6372     int result = 0;
6373 
6374     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6375         result |= MCL_CURRENT;
6376     }
6377     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6378         result |= MCL_FUTURE;
6379     }
6380     return result;
6381 }
6382 #endif
6383 
6384 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6385      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6386      defined(TARGET_NR_newfstatat))
6387 static inline abi_long host_to_target_stat64(void *cpu_env,
6388                                              abi_ulong target_addr,
6389                                              struct stat *host_st)
6390 {
6391 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6392     if (((CPUARMState *)cpu_env)->eabi) {
6393         struct target_eabi_stat64 *target_st;
6394 
6395         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6396             return -TARGET_EFAULT;
6397         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6398         __put_user(host_st->st_dev, &target_st->st_dev);
6399         __put_user(host_st->st_ino, &target_st->st_ino);
6400 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6401         __put_user(host_st->st_ino, &target_st->__st_ino);
6402 #endif
6403         __put_user(host_st->st_mode, &target_st->st_mode);
6404         __put_user(host_st->st_nlink, &target_st->st_nlink);
6405         __put_user(host_st->st_uid, &target_st->st_uid);
6406         __put_user(host_st->st_gid, &target_st->st_gid);
6407         __put_user(host_st->st_rdev, &target_st->st_rdev);
6408         __put_user(host_st->st_size, &target_st->st_size);
6409         __put_user(host_st->st_blksize, &target_st->st_blksize);
6410         __put_user(host_st->st_blocks, &target_st->st_blocks);
6411         __put_user(host_st->st_atime, &target_st->target_st_atime);
6412         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6413         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6414         unlock_user_struct(target_st, target_addr, 1);
6415     } else
6416 #endif
6417     {
6418 #if defined(TARGET_HAS_STRUCT_STAT64)
6419         struct target_stat64 *target_st;
6420 #else
6421         struct target_stat *target_st;
6422 #endif
6423 
6424         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6425             return -TARGET_EFAULT;
6426         memset(target_st, 0, sizeof(*target_st));
6427         __put_user(host_st->st_dev, &target_st->st_dev);
6428         __put_user(host_st->st_ino, &target_st->st_ino);
6429 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6430         __put_user(host_st->st_ino, &target_st->__st_ino);
6431 #endif
6432         __put_user(host_st->st_mode, &target_st->st_mode);
6433         __put_user(host_st->st_nlink, &target_st->st_nlink);
6434         __put_user(host_st->st_uid, &target_st->st_uid);
6435         __put_user(host_st->st_gid, &target_st->st_gid);
6436         __put_user(host_st->st_rdev, &target_st->st_rdev);
6437         /* XXX: better use of kernel struct */
6438         __put_user(host_st->st_size, &target_st->st_size);
6439         __put_user(host_st->st_blksize, &target_st->st_blksize);
6440         __put_user(host_st->st_blocks, &target_st->st_blocks);
6441         __put_user(host_st->st_atime, &target_st->target_st_atime);
6442         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6443         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6444         unlock_user_struct(target_st, target_addr, 1);
6445     }
6446 
6447     return 0;
6448 }
6449 #endif
6450 
6451 /* ??? Using host futex calls even when target atomic operations
6452    are not really atomic probably breaks things.  However implementing
6453    futexes locally would make futexes shared between multiple processes
6454    tricky.  However they're probably useless because guest atomic
6455    operations won't work either.  */
6456 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6457                     target_ulong uaddr2, int val3)
6458 {
6459     struct timespec ts, *pts;
6460     int base_op;
6461 
6462     /* ??? We assume FUTEX_* constants are the same on both host
6463        and target.  */
6464 #ifdef FUTEX_CMD_MASK
6465     base_op = op & FUTEX_CMD_MASK;
6466 #else
6467     base_op = op;
6468 #endif
6469     switch (base_op) {
6470     case FUTEX_WAIT:
6471     case FUTEX_WAIT_BITSET:
6472         if (timeout) {
6473             pts = &ts;
6474             target_to_host_timespec(pts, timeout);
6475         } else {
6476             pts = NULL;
6477         }
6478         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6479                          pts, NULL, val3));
6480     case FUTEX_WAKE:
6481         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6482     case FUTEX_FD:
6483         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6484     case FUTEX_REQUEUE:
6485     case FUTEX_CMP_REQUEUE:
6486     case FUTEX_WAKE_OP:
6487         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6488            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6489            But the prototype takes a `struct timespec *'; insert casts
6490            to satisfy the compiler.  We do not need to tswap TIMEOUT
6491            since it's not compared to guest memory.  */
6492         pts = (struct timespec *)(uintptr_t) timeout;
6493         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6494                                     g2h(uaddr2),
6495                                     (base_op == FUTEX_CMP_REQUEUE
6496                                      ? tswap32(val3)
6497                                      : val3)));
6498     default:
6499         return -TARGET_ENOSYS;
6500     }
6501 }
6502 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6503 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6504                                      abi_long handle, abi_long mount_id,
6505                                      abi_long flags)
6506 {
6507     struct file_handle *target_fh;
6508     struct file_handle *fh;
6509     int mid = 0;
6510     abi_long ret;
6511     char *name;
6512     unsigned int size, total_size;
6513 
6514     if (get_user_s32(size, handle)) {
6515         return -TARGET_EFAULT;
6516     }
6517 
6518     name = lock_user_string(pathname);
6519     if (!name) {
6520         return -TARGET_EFAULT;
6521     }
6522 
6523     total_size = sizeof(struct file_handle) + size;
6524     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6525     if (!target_fh) {
6526         unlock_user(name, pathname, 0);
6527         return -TARGET_EFAULT;
6528     }
6529 
6530     fh = g_malloc0(total_size);
6531     fh->handle_bytes = size;
6532 
6533     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6534     unlock_user(name, pathname, 0);
6535 
6536     /* man name_to_handle_at(2):
6537      * Other than the use of the handle_bytes field, the caller should treat
6538      * the file_handle structure as an opaque data type
6539      */
6540 
6541     memcpy(target_fh, fh, total_size);
6542     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6543     target_fh->handle_type = tswap32(fh->handle_type);
6544     g_free(fh);
6545     unlock_user(target_fh, handle, total_size);
6546 
6547     if (put_user_s32(mid, mount_id)) {
6548         return -TARGET_EFAULT;
6549     }
6550 
6551     return ret;
6552 
6553 }
6554 #endif
6555 
6556 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6557 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6558                                      abi_long flags)
6559 {
6560     struct file_handle *target_fh;
6561     struct file_handle *fh;
6562     unsigned int size, total_size;
6563     abi_long ret;
6564 
6565     if (get_user_s32(size, handle)) {
6566         return -TARGET_EFAULT;
6567     }
6568 
6569     total_size = sizeof(struct file_handle) + size;
6570     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6571     if (!target_fh) {
6572         return -TARGET_EFAULT;
6573     }
6574 
6575     fh = g_memdup(target_fh, total_size);
6576     fh->handle_bytes = size;
6577     fh->handle_type = tswap32(target_fh->handle_type);
6578 
6579     ret = get_errno(open_by_handle_at(mount_fd, fh,
6580                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6581 
6582     g_free(fh);
6583 
6584     unlock_user(target_fh, handle, total_size);
6585 
6586     return ret;
6587 }
6588 #endif
6589 
6590 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6591 
6592 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6593 {
6594     int host_flags;
6595     target_sigset_t *target_mask;
6596     sigset_t host_mask;
6597     abi_long ret;
6598 
6599     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6600         return -TARGET_EINVAL;
6601     }
6602     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6603         return -TARGET_EFAULT;
6604     }
6605 
6606     target_to_host_sigset(&host_mask, target_mask);
6607 
6608     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6609 
6610     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6611     if (ret >= 0) {
6612         fd_trans_register(ret, &target_signalfd_trans);
6613     }
6614 
6615     unlock_user_struct(target_mask, mask, 0);
6616 
6617     return ret;
6618 }
6619 #endif
6620 
6621 /* Map host to target signal numbers for the wait family of syscalls.
6622    Assume all other status bits are the same.  */
6623 int host_to_target_waitstatus(int status)
6624 {
6625     if (WIFSIGNALED(status)) {
6626         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6627     }
6628     if (WIFSTOPPED(status)) {
6629         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6630                | (status & 0xff);
6631     }
6632     return status;
6633 }
6634 
6635 static int open_self_cmdline(void *cpu_env, int fd)
6636 {
6637     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6638     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6639     int i;
6640 
6641     for (i = 0; i < bprm->argc; i++) {
6642         size_t len = strlen(bprm->argv[i]) + 1;
6643 
6644         if (write(fd, bprm->argv[i], len) != len) {
6645             return -1;
6646         }
6647     }
6648 
6649     return 0;
6650 }
6651 
6652 static int open_self_maps(void *cpu_env, int fd)
6653 {
6654     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6655     TaskState *ts = cpu->opaque;
6656     FILE *fp;
6657     char *line = NULL;
6658     size_t len = 0;
6659     ssize_t read;
6660 
6661     fp = fopen("/proc/self/maps", "r");
6662     if (fp == NULL) {
6663         return -1;
6664     }
6665 
6666     while ((read = getline(&line, &len, fp)) != -1) {
6667         int fields, dev_maj, dev_min, inode;
6668         uint64_t min, max, offset;
6669         char flag_r, flag_w, flag_x, flag_p;
6670         char path[512] = "";
6671         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6672                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6673                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6674 
6675         if ((fields < 10) || (fields > 11)) {
6676             continue;
6677         }
6678         if (h2g_valid(min)) {
6679             int flags = page_get_flags(h2g(min));
6680             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6681             if (page_check_range(h2g(min), max - min, flags) == -1) {
6682                 continue;
6683             }
6684             if (h2g(min) == ts->info->stack_limit) {
6685                 pstrcpy(path, sizeof(path), "      [stack]");
6686             }
6687             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6688                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6689                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6690                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6691                     path[0] ? "         " : "", path);
6692         }
6693     }
6694 
6695     free(line);
6696     fclose(fp);
6697 
6698     return 0;
6699 }
6700 
6701 static int open_self_stat(void *cpu_env, int fd)
6702 {
6703     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6704     TaskState *ts = cpu->opaque;
6705     abi_ulong start_stack = ts->info->start_stack;
6706     int i;
6707 
6708     for (i = 0; i < 44; i++) {
6709       char buf[128];
6710       int len;
6711       uint64_t val = 0;
6712 
6713       if (i == 0) {
6714         /* pid */
6715         val = getpid();
6716         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6717       } else if (i == 1) {
6718         /* app name */
6719         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6720       } else if (i == 27) {
6721         /* stack bottom */
6722         val = start_stack;
6723         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6724       } else {
6725         /* for the rest, there is MasterCard */
6726         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6727       }
6728 
6729       len = strlen(buf);
6730       if (write(fd, buf, len) != len) {
6731           return -1;
6732       }
6733     }
6734 
6735     return 0;
6736 }
6737 
6738 static int open_self_auxv(void *cpu_env, int fd)
6739 {
6740     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6741     TaskState *ts = cpu->opaque;
6742     abi_ulong auxv = ts->info->saved_auxv;
6743     abi_ulong len = ts->info->auxv_len;
6744     char *ptr;
6745 
6746     /*
6747      * Auxiliary vector is stored in target process stack.
6748      * read in whole auxv vector and copy it to file
6749      */
6750     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6751     if (ptr != NULL) {
6752         while (len > 0) {
6753             ssize_t r;
6754             r = write(fd, ptr, len);
6755             if (r <= 0) {
6756                 break;
6757             }
6758             len -= r;
6759             ptr += r;
6760         }
6761         lseek(fd, 0, SEEK_SET);
6762         unlock_user(ptr, auxv, len);
6763     }
6764 
6765     return 0;
6766 }
6767 
6768 static int is_proc_myself(const char *filename, const char *entry)
6769 {
6770     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6771         filename += strlen("/proc/");
6772         if (!strncmp(filename, "self/", strlen("self/"))) {
6773             filename += strlen("self/");
6774         } else if (*filename >= '1' && *filename <= '9') {
6775             char myself[80];
6776             snprintf(myself, sizeof(myself), "%d/", getpid());
6777             if (!strncmp(filename, myself, strlen(myself))) {
6778                 filename += strlen(myself);
6779             } else {
6780                 return 0;
6781             }
6782         } else {
6783             return 0;
6784         }
6785         if (!strcmp(filename, entry)) {
6786             return 1;
6787         }
6788     }
6789     return 0;
6790 }
6791 
6792 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6793 static int is_proc(const char *filename, const char *entry)
6794 {
6795     return strcmp(filename, entry) == 0;
6796 }
6797 
6798 static int open_net_route(void *cpu_env, int fd)
6799 {
6800     FILE *fp;
6801     char *line = NULL;
6802     size_t len = 0;
6803     ssize_t read;
6804 
6805     fp = fopen("/proc/net/route", "r");
6806     if (fp == NULL) {
6807         return -1;
6808     }
6809 
6810     /* read header */
6811 
6812     read = getline(&line, &len, fp);
6813     dprintf(fd, "%s", line);
6814 
6815     /* read routes */
6816 
6817     while ((read = getline(&line, &len, fp)) != -1) {
6818         char iface[16];
6819         uint32_t dest, gw, mask;
6820         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6821         int fields;
6822 
6823         fields = sscanf(line,
6824                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6825                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6826                         &mask, &mtu, &window, &irtt);
6827         if (fields != 11) {
6828             continue;
6829         }
6830         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6831                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6832                 metric, tswap32(mask), mtu, window, irtt);
6833     }
6834 
6835     free(line);
6836     fclose(fp);
6837 
6838     return 0;
6839 }
6840 #endif
6841 
6842 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6843 {
6844     struct fake_open {
6845         const char *filename;
6846         int (*fill)(void *cpu_env, int fd);
6847         int (*cmp)(const char *s1, const char *s2);
6848     };
6849     const struct fake_open *fake_open;
6850     static const struct fake_open fakes[] = {
6851         { "maps", open_self_maps, is_proc_myself },
6852         { "stat", open_self_stat, is_proc_myself },
6853         { "auxv", open_self_auxv, is_proc_myself },
6854         { "cmdline", open_self_cmdline, is_proc_myself },
6855 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6856         { "/proc/net/route", open_net_route, is_proc },
6857 #endif
6858         { NULL, NULL, NULL }
6859     };
6860 
6861     if (is_proc_myself(pathname, "exe")) {
6862         int execfd = qemu_getauxval(AT_EXECFD);
6863         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6864     }
6865 
6866     for (fake_open = fakes; fake_open->filename; fake_open++) {
6867         if (fake_open->cmp(pathname, fake_open->filename)) {
6868             break;
6869         }
6870     }
6871 
6872     if (fake_open->filename) {
6873         const char *tmpdir;
6874         char filename[PATH_MAX];
6875         int fd, r;
6876 
6877         /* create temporary file to map stat to */
6878         tmpdir = getenv("TMPDIR");
6879         if (!tmpdir)
6880             tmpdir = "/tmp";
6881         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6882         fd = mkstemp(filename);
6883         if (fd < 0) {
6884             return fd;
6885         }
6886         unlink(filename);
6887 
6888         if ((r = fake_open->fill(cpu_env, fd))) {
6889             int e = errno;
6890             close(fd);
6891             errno = e;
6892             return r;
6893         }
6894         lseek(fd, 0, SEEK_SET);
6895 
6896         return fd;
6897     }
6898 
6899     return safe_openat(dirfd, path(pathname), flags, mode);
6900 }
6901 
6902 #define TIMER_MAGIC 0x0caf0000
6903 #define TIMER_MAGIC_MASK 0xffff0000
6904 
6905 /* Convert QEMU provided timer ID back to internal 16bit index format */
6906 static target_timer_t get_timer_id(abi_long arg)
6907 {
6908     target_timer_t timerid = arg;
6909 
6910     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6911         return -TARGET_EINVAL;
6912     }
6913 
6914     timerid &= 0xffff;
6915 
6916     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6917         return -TARGET_EINVAL;
6918     }
6919 
6920     return timerid;
6921 }
6922 
6923 static int target_to_host_cpu_mask(unsigned long *host_mask,
6924                                    size_t host_size,
6925                                    abi_ulong target_addr,
6926                                    size_t target_size)
6927 {
6928     unsigned target_bits = sizeof(abi_ulong) * 8;
6929     unsigned host_bits = sizeof(*host_mask) * 8;
6930     abi_ulong *target_mask;
6931     unsigned i, j;
6932 
6933     assert(host_size >= target_size);
6934 
6935     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6936     if (!target_mask) {
6937         return -TARGET_EFAULT;
6938     }
6939     memset(host_mask, 0, host_size);
6940 
6941     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6942         unsigned bit = i * target_bits;
6943         abi_ulong val;
6944 
6945         __get_user(val, &target_mask[i]);
6946         for (j = 0; j < target_bits; j++, bit++) {
6947             if (val & (1UL << j)) {
6948                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6949             }
6950         }
6951     }
6952 
6953     unlock_user(target_mask, target_addr, 0);
6954     return 0;
6955 }
6956 
6957 static int host_to_target_cpu_mask(const unsigned long *host_mask,
6958                                    size_t host_size,
6959                                    abi_ulong target_addr,
6960                                    size_t target_size)
6961 {
6962     unsigned target_bits = sizeof(abi_ulong) * 8;
6963     unsigned host_bits = sizeof(*host_mask) * 8;
6964     abi_ulong *target_mask;
6965     unsigned i, j;
6966 
6967     assert(host_size >= target_size);
6968 
6969     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6970     if (!target_mask) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6975         unsigned bit = i * target_bits;
6976         abi_ulong val = 0;
6977 
6978         for (j = 0; j < target_bits; j++, bit++) {
6979             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6980                 val |= 1UL << j;
6981             }
6982         }
6983         __put_user(val, &target_mask[i]);
6984     }
6985 
6986     unlock_user(target_mask, target_addr, target_size);
6987     return 0;
6988 }
6989 
6990 /* This is an internal helper for do_syscall so that it is easier
6991  * to have a single return point, so that actions, such as logging
6992  * of syscall results, can be performed.
6993  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6994  */
6995 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6996                             abi_long arg2, abi_long arg3, abi_long arg4,
6997                             abi_long arg5, abi_long arg6, abi_long arg7,
6998                             abi_long arg8)
6999 {
7000     CPUState *cpu = ENV_GET_CPU(cpu_env);
7001     abi_long ret;
7002 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7003     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7004     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7005     struct stat st;
7006 #endif
7007 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7008     || defined(TARGET_NR_fstatfs)
7009     struct statfs stfs;
7010 #endif
7011     void *p;
7012 
7013     switch(num) {
7014     case TARGET_NR_exit:
7015         /* In old applications this may be used to implement _exit(2).
7016            However in threaded applictions it is used for thread termination,
7017            and _exit_group is used for application termination.
7018            Do thread termination if we have more then one thread.  */
7019 
7020         if (block_signals()) {
7021             return -TARGET_ERESTARTSYS;
7022         }
7023 
7024         cpu_list_lock();
7025 
7026         if (CPU_NEXT(first_cpu)) {
7027             TaskState *ts;
7028 
7029             /* Remove the CPU from the list.  */
7030             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7031 
7032             cpu_list_unlock();
7033 
7034             ts = cpu->opaque;
7035             if (ts->child_tidptr) {
7036                 put_user_u32(0, ts->child_tidptr);
7037                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7038                           NULL, NULL, 0);
7039             }
7040             thread_cpu = NULL;
7041             object_unref(OBJECT(cpu));
7042             g_free(ts);
7043             rcu_unregister_thread();
7044             pthread_exit(NULL);
7045         }
7046 
7047         cpu_list_unlock();
7048         preexit_cleanup(cpu_env, arg1);
7049         _exit(arg1);
7050         return 0; /* avoid warning */
7051     case TARGET_NR_read:
7052         if (arg2 == 0 && arg3 == 0) {
7053             return get_errno(safe_read(arg1, 0, 0));
7054         } else {
7055             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7056                 return -TARGET_EFAULT;
7057             ret = get_errno(safe_read(arg1, p, arg3));
7058             if (ret >= 0 &&
7059                 fd_trans_host_to_target_data(arg1)) {
7060                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7061             }
7062             unlock_user(p, arg2, ret);
7063         }
7064         return ret;
7065     case TARGET_NR_write:
7066         if (arg2 == 0 && arg3 == 0) {
7067             return get_errno(safe_write(arg1, 0, 0));
7068         }
7069         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7070             return -TARGET_EFAULT;
7071         if (fd_trans_target_to_host_data(arg1)) {
7072             void *copy = g_malloc(arg3);
7073             memcpy(copy, p, arg3);
7074             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7075             if (ret >= 0) {
7076                 ret = get_errno(safe_write(arg1, copy, ret));
7077             }
7078             g_free(copy);
7079         } else {
7080             ret = get_errno(safe_write(arg1, p, arg3));
7081         }
7082         unlock_user(p, arg2, 0);
7083         return ret;
7084 
7085 #ifdef TARGET_NR_open
7086     case TARGET_NR_open:
7087         if (!(p = lock_user_string(arg1)))
7088             return -TARGET_EFAULT;
7089         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7090                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7091                                   arg3));
7092         fd_trans_unregister(ret);
7093         unlock_user(p, arg1, 0);
7094         return ret;
7095 #endif
7096     case TARGET_NR_openat:
7097         if (!(p = lock_user_string(arg2)))
7098             return -TARGET_EFAULT;
7099         ret = get_errno(do_openat(cpu_env, arg1, p,
7100                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7101                                   arg4));
7102         fd_trans_unregister(ret);
7103         unlock_user(p, arg2, 0);
7104         return ret;
7105 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7106     case TARGET_NR_name_to_handle_at:
7107         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7108         return ret;
7109 #endif
7110 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7111     case TARGET_NR_open_by_handle_at:
7112         ret = do_open_by_handle_at(arg1, arg2, arg3);
7113         fd_trans_unregister(ret);
7114         return ret;
7115 #endif
7116     case TARGET_NR_close:
7117         fd_trans_unregister(arg1);
7118         return get_errno(close(arg1));
7119 
7120     case TARGET_NR_brk:
7121         return do_brk(arg1);
7122 #ifdef TARGET_NR_fork
7123     case TARGET_NR_fork:
7124         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7125 #endif
7126 #ifdef TARGET_NR_waitpid
7127     case TARGET_NR_waitpid:
7128         {
7129             int status;
7130             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7131             if (!is_error(ret) && arg2 && ret
7132                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7133                 return -TARGET_EFAULT;
7134         }
7135         return ret;
7136 #endif
7137 #ifdef TARGET_NR_waitid
7138     case TARGET_NR_waitid:
7139         {
7140             siginfo_t info;
7141             info.si_pid = 0;
7142             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7143             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7144                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7145                     return -TARGET_EFAULT;
7146                 host_to_target_siginfo(p, &info);
7147                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7148             }
7149         }
7150         return ret;
7151 #endif
7152 #ifdef TARGET_NR_creat /* not on alpha */
7153     case TARGET_NR_creat:
7154         if (!(p = lock_user_string(arg1)))
7155             return -TARGET_EFAULT;
7156         ret = get_errno(creat(p, arg2));
7157         fd_trans_unregister(ret);
7158         unlock_user(p, arg1, 0);
7159         return ret;
7160 #endif
7161 #ifdef TARGET_NR_link
7162     case TARGET_NR_link:
7163         {
7164             void * p2;
7165             p = lock_user_string(arg1);
7166             p2 = lock_user_string(arg2);
7167             if (!p || !p2)
7168                 ret = -TARGET_EFAULT;
7169             else
7170                 ret = get_errno(link(p, p2));
7171             unlock_user(p2, arg2, 0);
7172             unlock_user(p, arg1, 0);
7173         }
7174         return ret;
7175 #endif
7176 #if defined(TARGET_NR_linkat)
7177     case TARGET_NR_linkat:
7178         {
7179             void * p2 = NULL;
7180             if (!arg2 || !arg4)
7181                 return -TARGET_EFAULT;
7182             p  = lock_user_string(arg2);
7183             p2 = lock_user_string(arg4);
7184             if (!p || !p2)
7185                 ret = -TARGET_EFAULT;
7186             else
7187                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7188             unlock_user(p, arg2, 0);
7189             unlock_user(p2, arg4, 0);
7190         }
7191         return ret;
7192 #endif
7193 #ifdef TARGET_NR_unlink
7194     case TARGET_NR_unlink:
7195         if (!(p = lock_user_string(arg1)))
7196             return -TARGET_EFAULT;
7197         ret = get_errno(unlink(p));
7198         unlock_user(p, arg1, 0);
7199         return ret;
7200 #endif
7201 #if defined(TARGET_NR_unlinkat)
7202     case TARGET_NR_unlinkat:
7203         if (!(p = lock_user_string(arg2)))
7204             return -TARGET_EFAULT;
7205         ret = get_errno(unlinkat(arg1, p, arg3));
7206         unlock_user(p, arg2, 0);
7207         return ret;
7208 #endif
7209     case TARGET_NR_execve:
7210         {
7211             char **argp, **envp;
7212             int argc, envc;
7213             abi_ulong gp;
7214             abi_ulong guest_argp;
7215             abi_ulong guest_envp;
7216             abi_ulong addr;
7217             char **q;
7218             int total_size = 0;
7219 
7220             argc = 0;
7221             guest_argp = arg2;
7222             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7223                 if (get_user_ual(addr, gp))
7224                     return -TARGET_EFAULT;
7225                 if (!addr)
7226                     break;
7227                 argc++;
7228             }
7229             envc = 0;
7230             guest_envp = arg3;
7231             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7232                 if (get_user_ual(addr, gp))
7233                     return -TARGET_EFAULT;
7234                 if (!addr)
7235                     break;
7236                 envc++;
7237             }
7238 
7239             argp = g_new0(char *, argc + 1);
7240             envp = g_new0(char *, envc + 1);
7241 
7242             for (gp = guest_argp, q = argp; gp;
7243                   gp += sizeof(abi_ulong), q++) {
7244                 if (get_user_ual(addr, gp))
7245                     goto execve_efault;
7246                 if (!addr)
7247                     break;
7248                 if (!(*q = lock_user_string(addr)))
7249                     goto execve_efault;
7250                 total_size += strlen(*q) + 1;
7251             }
7252             *q = NULL;
7253 
7254             for (gp = guest_envp, q = envp; gp;
7255                   gp += sizeof(abi_ulong), q++) {
7256                 if (get_user_ual(addr, gp))
7257                     goto execve_efault;
7258                 if (!addr)
7259                     break;
7260                 if (!(*q = lock_user_string(addr)))
7261                     goto execve_efault;
7262                 total_size += strlen(*q) + 1;
7263             }
7264             *q = NULL;
7265 
7266             if (!(p = lock_user_string(arg1)))
7267                 goto execve_efault;
7268             /* Although execve() is not an interruptible syscall it is
7269              * a special case where we must use the safe_syscall wrapper:
7270              * if we allow a signal to happen before we make the host
7271              * syscall then we will 'lose' it, because at the point of
7272              * execve the process leaves QEMU's control. So we use the
7273              * safe syscall wrapper to ensure that we either take the
7274              * signal as a guest signal, or else it does not happen
7275              * before the execve completes and makes it the other
7276              * program's problem.
7277              */
7278             ret = get_errno(safe_execve(p, argp, envp));
7279             unlock_user(p, arg1, 0);
7280 
7281             goto execve_end;
7282 
7283         execve_efault:
7284             ret = -TARGET_EFAULT;
7285 
7286         execve_end:
7287             for (gp = guest_argp, q = argp; *q;
7288                   gp += sizeof(abi_ulong), q++) {
7289                 if (get_user_ual(addr, gp)
7290                     || !addr)
7291                     break;
7292                 unlock_user(*q, addr, 0);
7293             }
7294             for (gp = guest_envp, q = envp; *q;
7295                   gp += sizeof(abi_ulong), q++) {
7296                 if (get_user_ual(addr, gp)
7297                     || !addr)
7298                     break;
7299                 unlock_user(*q, addr, 0);
7300             }
7301 
7302             g_free(argp);
7303             g_free(envp);
7304         }
7305         return ret;
7306     case TARGET_NR_chdir:
7307         if (!(p = lock_user_string(arg1)))
7308             return -TARGET_EFAULT;
7309         ret = get_errno(chdir(p));
7310         unlock_user(p, arg1, 0);
7311         return ret;
7312 #ifdef TARGET_NR_time
7313     case TARGET_NR_time:
7314         {
7315             time_t host_time;
7316             ret = get_errno(time(&host_time));
7317             if (!is_error(ret)
7318                 && arg1
7319                 && put_user_sal(host_time, arg1))
7320                 return -TARGET_EFAULT;
7321         }
7322         return ret;
7323 #endif
7324 #ifdef TARGET_NR_mknod
7325     case TARGET_NR_mknod:
7326         if (!(p = lock_user_string(arg1)))
7327             return -TARGET_EFAULT;
7328         ret = get_errno(mknod(p, arg2, arg3));
7329         unlock_user(p, arg1, 0);
7330         return ret;
7331 #endif
7332 #if defined(TARGET_NR_mknodat)
7333     case TARGET_NR_mknodat:
7334         if (!(p = lock_user_string(arg2)))
7335             return -TARGET_EFAULT;
7336         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7337         unlock_user(p, arg2, 0);
7338         return ret;
7339 #endif
7340 #ifdef TARGET_NR_chmod
7341     case TARGET_NR_chmod:
7342         if (!(p = lock_user_string(arg1)))
7343             return -TARGET_EFAULT;
7344         ret = get_errno(chmod(p, arg2));
7345         unlock_user(p, arg1, 0);
7346         return ret;
7347 #endif
7348 #ifdef TARGET_NR_lseek
7349     case TARGET_NR_lseek:
7350         return get_errno(lseek(arg1, arg2, arg3));
7351 #endif
7352 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7353     /* Alpha specific */
7354     case TARGET_NR_getxpid:
7355         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7356         return get_errno(getpid());
7357 #endif
7358 #ifdef TARGET_NR_getpid
7359     case TARGET_NR_getpid:
7360         return get_errno(getpid());
7361 #endif
7362     case TARGET_NR_mount:
7363         {
7364             /* need to look at the data field */
7365             void *p2, *p3;
7366 
7367             if (arg1) {
7368                 p = lock_user_string(arg1);
7369                 if (!p) {
7370                     return -TARGET_EFAULT;
7371                 }
7372             } else {
7373                 p = NULL;
7374             }
7375 
7376             p2 = lock_user_string(arg2);
7377             if (!p2) {
7378                 if (arg1) {
7379                     unlock_user(p, arg1, 0);
7380                 }
7381                 return -TARGET_EFAULT;
7382             }
7383 
7384             if (arg3) {
7385                 p3 = lock_user_string(arg3);
7386                 if (!p3) {
7387                     if (arg1) {
7388                         unlock_user(p, arg1, 0);
7389                     }
7390                     unlock_user(p2, arg2, 0);
7391                     return -TARGET_EFAULT;
7392                 }
7393             } else {
7394                 p3 = NULL;
7395             }
7396 
7397             /* FIXME - arg5 should be locked, but it isn't clear how to
7398              * do that since it's not guaranteed to be a NULL-terminated
7399              * string.
7400              */
7401             if (!arg5) {
7402                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7403             } else {
7404                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7405             }
7406             ret = get_errno(ret);
7407 
7408             if (arg1) {
7409                 unlock_user(p, arg1, 0);
7410             }
7411             unlock_user(p2, arg2, 0);
7412             if (arg3) {
7413                 unlock_user(p3, arg3, 0);
7414             }
7415         }
7416         return ret;
7417 #ifdef TARGET_NR_umount
7418     case TARGET_NR_umount:
7419         if (!(p = lock_user_string(arg1)))
7420             return -TARGET_EFAULT;
7421         ret = get_errno(umount(p));
7422         unlock_user(p, arg1, 0);
7423         return ret;
7424 #endif
7425 #ifdef TARGET_NR_stime /* not on alpha */
7426     case TARGET_NR_stime:
7427         {
7428             time_t host_time;
7429             if (get_user_sal(host_time, arg1))
7430                 return -TARGET_EFAULT;
7431             return get_errno(stime(&host_time));
7432         }
7433 #endif
7434 #ifdef TARGET_NR_alarm /* not on alpha */
7435     case TARGET_NR_alarm:
7436         return alarm(arg1);
7437 #endif
7438 #ifdef TARGET_NR_pause /* not on alpha */
7439     case TARGET_NR_pause:
7440         if (!block_signals()) {
7441             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7442         }
7443         return -TARGET_EINTR;
7444 #endif
7445 #ifdef TARGET_NR_utime
7446     case TARGET_NR_utime:
7447         {
7448             struct utimbuf tbuf, *host_tbuf;
7449             struct target_utimbuf *target_tbuf;
7450             if (arg2) {
7451                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7452                     return -TARGET_EFAULT;
7453                 tbuf.actime = tswapal(target_tbuf->actime);
7454                 tbuf.modtime = tswapal(target_tbuf->modtime);
7455                 unlock_user_struct(target_tbuf, arg2, 0);
7456                 host_tbuf = &tbuf;
7457             } else {
7458                 host_tbuf = NULL;
7459             }
7460             if (!(p = lock_user_string(arg1)))
7461                 return -TARGET_EFAULT;
7462             ret = get_errno(utime(p, host_tbuf));
7463             unlock_user(p, arg1, 0);
7464         }
7465         return ret;
7466 #endif
7467 #ifdef TARGET_NR_utimes
7468     case TARGET_NR_utimes:
7469         {
7470             struct timeval *tvp, tv[2];
7471             if (arg2) {
7472                 if (copy_from_user_timeval(&tv[0], arg2)
7473                     || copy_from_user_timeval(&tv[1],
7474                                               arg2 + sizeof(struct target_timeval)))
7475                     return -TARGET_EFAULT;
7476                 tvp = tv;
7477             } else {
7478                 tvp = NULL;
7479             }
7480             if (!(p = lock_user_string(arg1)))
7481                 return -TARGET_EFAULT;
7482             ret = get_errno(utimes(p, tvp));
7483             unlock_user(p, arg1, 0);
7484         }
7485         return ret;
7486 #endif
7487 #if defined(TARGET_NR_futimesat)
7488     case TARGET_NR_futimesat:
7489         {
7490             struct timeval *tvp, tv[2];
7491             if (arg3) {
7492                 if (copy_from_user_timeval(&tv[0], arg3)
7493                     || copy_from_user_timeval(&tv[1],
7494                                               arg3 + sizeof(struct target_timeval)))
7495                     return -TARGET_EFAULT;
7496                 tvp = tv;
7497             } else {
7498                 tvp = NULL;
7499             }
7500             if (!(p = lock_user_string(arg2))) {
7501                 return -TARGET_EFAULT;
7502             }
7503             ret = get_errno(futimesat(arg1, path(p), tvp));
7504             unlock_user(p, arg2, 0);
7505         }
7506         return ret;
7507 #endif
7508 #ifdef TARGET_NR_access
7509     case TARGET_NR_access:
7510         if (!(p = lock_user_string(arg1))) {
7511             return -TARGET_EFAULT;
7512         }
7513         ret = get_errno(access(path(p), arg2));
7514         unlock_user(p, arg1, 0);
7515         return ret;
7516 #endif
7517 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7518     case TARGET_NR_faccessat:
7519         if (!(p = lock_user_string(arg2))) {
7520             return -TARGET_EFAULT;
7521         }
7522         ret = get_errno(faccessat(arg1, p, arg3, 0));
7523         unlock_user(p, arg2, 0);
7524         return ret;
7525 #endif
7526 #ifdef TARGET_NR_nice /* not on alpha */
7527     case TARGET_NR_nice:
7528         return get_errno(nice(arg1));
7529 #endif
7530     case TARGET_NR_sync:
7531         sync();
7532         return 0;
7533 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7534     case TARGET_NR_syncfs:
7535         return get_errno(syncfs(arg1));
7536 #endif
7537     case TARGET_NR_kill:
7538         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7539 #ifdef TARGET_NR_rename
7540     case TARGET_NR_rename:
7541         {
7542             void *p2;
7543             p = lock_user_string(arg1);
7544             p2 = lock_user_string(arg2);
7545             if (!p || !p2)
7546                 ret = -TARGET_EFAULT;
7547             else
7548                 ret = get_errno(rename(p, p2));
7549             unlock_user(p2, arg2, 0);
7550             unlock_user(p, arg1, 0);
7551         }
7552         return ret;
7553 #endif
7554 #if defined(TARGET_NR_renameat)
7555     case TARGET_NR_renameat:
7556         {
7557             void *p2;
7558             p  = lock_user_string(arg2);
7559             p2 = lock_user_string(arg4);
7560             if (!p || !p2)
7561                 ret = -TARGET_EFAULT;
7562             else
7563                 ret = get_errno(renameat(arg1, p, arg3, p2));
7564             unlock_user(p2, arg4, 0);
7565             unlock_user(p, arg2, 0);
7566         }
7567         return ret;
7568 #endif
7569 #if defined(TARGET_NR_renameat2)
7570     case TARGET_NR_renameat2:
7571         {
7572             void *p2;
7573             p  = lock_user_string(arg2);
7574             p2 = lock_user_string(arg4);
7575             if (!p || !p2) {
7576                 ret = -TARGET_EFAULT;
7577             } else {
7578                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7579             }
7580             unlock_user(p2, arg4, 0);
7581             unlock_user(p, arg2, 0);
7582         }
7583         return ret;
7584 #endif
7585 #ifdef TARGET_NR_mkdir
7586     case TARGET_NR_mkdir:
7587         if (!(p = lock_user_string(arg1)))
7588             return -TARGET_EFAULT;
7589         ret = get_errno(mkdir(p, arg2));
7590         unlock_user(p, arg1, 0);
7591         return ret;
7592 #endif
7593 #if defined(TARGET_NR_mkdirat)
7594     case TARGET_NR_mkdirat:
7595         if (!(p = lock_user_string(arg2)))
7596             return -TARGET_EFAULT;
7597         ret = get_errno(mkdirat(arg1, p, arg3));
7598         unlock_user(p, arg2, 0);
7599         return ret;
7600 #endif
7601 #ifdef TARGET_NR_rmdir
7602     case TARGET_NR_rmdir:
7603         if (!(p = lock_user_string(arg1)))
7604             return -TARGET_EFAULT;
7605         ret = get_errno(rmdir(p));
7606         unlock_user(p, arg1, 0);
7607         return ret;
7608 #endif
7609     case TARGET_NR_dup:
7610         ret = get_errno(dup(arg1));
7611         if (ret >= 0) {
7612             fd_trans_dup(arg1, ret);
7613         }
7614         return ret;
7615 #ifdef TARGET_NR_pipe
7616     case TARGET_NR_pipe:
7617         return do_pipe(cpu_env, arg1, 0, 0);
7618 #endif
7619 #ifdef TARGET_NR_pipe2
7620     case TARGET_NR_pipe2:
7621         return do_pipe(cpu_env, arg1,
7622                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7623 #endif
7624     case TARGET_NR_times:
7625         {
7626             struct target_tms *tmsp;
7627             struct tms tms;
7628             ret = get_errno(times(&tms));
7629             if (arg1) {
7630                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7631                 if (!tmsp)
7632                     return -TARGET_EFAULT;
7633                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7634                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7635                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7636                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7637             }
7638             if (!is_error(ret))
7639                 ret = host_to_target_clock_t(ret);
7640         }
7641         return ret;
7642     case TARGET_NR_acct:
7643         if (arg1 == 0) {
7644             ret = get_errno(acct(NULL));
7645         } else {
7646             if (!(p = lock_user_string(arg1))) {
7647                 return -TARGET_EFAULT;
7648             }
7649             ret = get_errno(acct(path(p)));
7650             unlock_user(p, arg1, 0);
7651         }
7652         return ret;
7653 #ifdef TARGET_NR_umount2
7654     case TARGET_NR_umount2:
7655         if (!(p = lock_user_string(arg1)))
7656             return -TARGET_EFAULT;
7657         ret = get_errno(umount2(p, arg2));
7658         unlock_user(p, arg1, 0);
7659         return ret;
7660 #endif
7661     case TARGET_NR_ioctl:
7662         return do_ioctl(arg1, arg2, arg3);
7663 #ifdef TARGET_NR_fcntl
7664     case TARGET_NR_fcntl:
7665         return do_fcntl(arg1, arg2, arg3);
7666 #endif
7667     case TARGET_NR_setpgid:
7668         return get_errno(setpgid(arg1, arg2));
7669     case TARGET_NR_umask:
7670         return get_errno(umask(arg1));
7671     case TARGET_NR_chroot:
7672         if (!(p = lock_user_string(arg1)))
7673             return -TARGET_EFAULT;
7674         ret = get_errno(chroot(p));
7675         unlock_user(p, arg1, 0);
7676         return ret;
7677 #ifdef TARGET_NR_dup2
7678     case TARGET_NR_dup2:
7679         ret = get_errno(dup2(arg1, arg2));
7680         if (ret >= 0) {
7681             fd_trans_dup(arg1, arg2);
7682         }
7683         return ret;
7684 #endif
7685 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7686     case TARGET_NR_dup3:
7687     {
7688         int host_flags;
7689 
7690         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7691             return -EINVAL;
7692         }
7693         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7694         ret = get_errno(dup3(arg1, arg2, host_flags));
7695         if (ret >= 0) {
7696             fd_trans_dup(arg1, arg2);
7697         }
7698         return ret;
7699     }
7700 #endif
7701 #ifdef TARGET_NR_getppid /* not on alpha */
7702     case TARGET_NR_getppid:
7703         return get_errno(getppid());
7704 #endif
7705 #ifdef TARGET_NR_getpgrp
7706     case TARGET_NR_getpgrp:
7707         return get_errno(getpgrp());
7708 #endif
7709     case TARGET_NR_setsid:
7710         return get_errno(setsid());
7711 #ifdef TARGET_NR_sigaction
7712     case TARGET_NR_sigaction:
7713         {
7714 #if defined(TARGET_ALPHA)
7715             struct target_sigaction act, oact, *pact = 0;
7716             struct target_old_sigaction *old_act;
7717             if (arg2) {
7718                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7719                     return -TARGET_EFAULT;
7720                 act._sa_handler = old_act->_sa_handler;
7721                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7722                 act.sa_flags = old_act->sa_flags;
7723                 act.sa_restorer = 0;
7724                 unlock_user_struct(old_act, arg2, 0);
7725                 pact = &act;
7726             }
7727             ret = get_errno(do_sigaction(arg1, pact, &oact));
7728             if (!is_error(ret) && arg3) {
7729                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7730                     return -TARGET_EFAULT;
7731                 old_act->_sa_handler = oact._sa_handler;
7732                 old_act->sa_mask = oact.sa_mask.sig[0];
7733                 old_act->sa_flags = oact.sa_flags;
7734                 unlock_user_struct(old_act, arg3, 1);
7735             }
7736 #elif defined(TARGET_MIPS)
7737 	    struct target_sigaction act, oact, *pact, *old_act;
7738 
7739 	    if (arg2) {
7740                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7741                     return -TARGET_EFAULT;
7742 		act._sa_handler = old_act->_sa_handler;
7743 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7744 		act.sa_flags = old_act->sa_flags;
7745 		unlock_user_struct(old_act, arg2, 0);
7746 		pact = &act;
7747 	    } else {
7748 		pact = NULL;
7749 	    }
7750 
7751 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
7752 
7753 	    if (!is_error(ret) && arg3) {
7754                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7755                     return -TARGET_EFAULT;
7756 		old_act->_sa_handler = oact._sa_handler;
7757 		old_act->sa_flags = oact.sa_flags;
7758 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7759 		old_act->sa_mask.sig[1] = 0;
7760 		old_act->sa_mask.sig[2] = 0;
7761 		old_act->sa_mask.sig[3] = 0;
7762 		unlock_user_struct(old_act, arg3, 1);
7763 	    }
7764 #else
7765             struct target_old_sigaction *old_act;
7766             struct target_sigaction act, oact, *pact;
7767             if (arg2) {
7768                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7769                     return -TARGET_EFAULT;
7770                 act._sa_handler = old_act->_sa_handler;
7771                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7772                 act.sa_flags = old_act->sa_flags;
7773                 act.sa_restorer = old_act->sa_restorer;
7774 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7775                 act.ka_restorer = 0;
7776 #endif
7777                 unlock_user_struct(old_act, arg2, 0);
7778                 pact = &act;
7779             } else {
7780                 pact = NULL;
7781             }
7782             ret = get_errno(do_sigaction(arg1, pact, &oact));
7783             if (!is_error(ret) && arg3) {
7784                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7785                     return -TARGET_EFAULT;
7786                 old_act->_sa_handler = oact._sa_handler;
7787                 old_act->sa_mask = oact.sa_mask.sig[0];
7788                 old_act->sa_flags = oact.sa_flags;
7789                 old_act->sa_restorer = oact.sa_restorer;
7790                 unlock_user_struct(old_act, arg3, 1);
7791             }
7792 #endif
7793         }
7794         return ret;
7795 #endif
7796     case TARGET_NR_rt_sigaction:
7797         {
7798 #if defined(TARGET_ALPHA)
7799             /* For Alpha and SPARC this is a 5 argument syscall, with
7800              * a 'restorer' parameter which must be copied into the
7801              * sa_restorer field of the sigaction struct.
7802              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7803              * and arg5 is the sigsetsize.
7804              * Alpha also has a separate rt_sigaction struct that it uses
7805              * here; SPARC uses the usual sigaction struct.
7806              */
7807             struct target_rt_sigaction *rt_act;
7808             struct target_sigaction act, oact, *pact = 0;
7809 
7810             if (arg4 != sizeof(target_sigset_t)) {
7811                 return -TARGET_EINVAL;
7812             }
7813             if (arg2) {
7814                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7815                     return -TARGET_EFAULT;
7816                 act._sa_handler = rt_act->_sa_handler;
7817                 act.sa_mask = rt_act->sa_mask;
7818                 act.sa_flags = rt_act->sa_flags;
7819                 act.sa_restorer = arg5;
7820                 unlock_user_struct(rt_act, arg2, 0);
7821                 pact = &act;
7822             }
7823             ret = get_errno(do_sigaction(arg1, pact, &oact));
7824             if (!is_error(ret) && arg3) {
7825                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7826                     return -TARGET_EFAULT;
7827                 rt_act->_sa_handler = oact._sa_handler;
7828                 rt_act->sa_mask = oact.sa_mask;
7829                 rt_act->sa_flags = oact.sa_flags;
7830                 unlock_user_struct(rt_act, arg3, 1);
7831             }
7832 #else
7833 #ifdef TARGET_SPARC
7834             target_ulong restorer = arg4;
7835             target_ulong sigsetsize = arg5;
7836 #else
7837             target_ulong sigsetsize = arg4;
7838 #endif
7839             struct target_sigaction *act;
7840             struct target_sigaction *oact;
7841 
7842             if (sigsetsize != sizeof(target_sigset_t)) {
7843                 return -TARGET_EINVAL;
7844             }
7845             if (arg2) {
7846                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7847                     return -TARGET_EFAULT;
7848                 }
7849 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7850                 act->ka_restorer = restorer;
7851 #endif
7852             } else {
7853                 act = NULL;
7854             }
7855             if (arg3) {
7856                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7857                     ret = -TARGET_EFAULT;
7858                     goto rt_sigaction_fail;
7859                 }
7860             } else
7861                 oact = NULL;
7862             ret = get_errno(do_sigaction(arg1, act, oact));
7863 	rt_sigaction_fail:
7864             if (act)
7865                 unlock_user_struct(act, arg2, 0);
7866             if (oact)
7867                 unlock_user_struct(oact, arg3, 1);
7868 #endif
7869         }
7870         return ret;
7871 #ifdef TARGET_NR_sgetmask /* not on alpha */
7872     case TARGET_NR_sgetmask:
7873         {
7874             sigset_t cur_set;
7875             abi_ulong target_set;
7876             ret = do_sigprocmask(0, NULL, &cur_set);
7877             if (!ret) {
7878                 host_to_target_old_sigset(&target_set, &cur_set);
7879                 ret = target_set;
7880             }
7881         }
7882         return ret;
7883 #endif
7884 #ifdef TARGET_NR_ssetmask /* not on alpha */
7885     case TARGET_NR_ssetmask:
7886         {
7887             sigset_t set, oset;
7888             abi_ulong target_set = arg1;
7889             target_to_host_old_sigset(&set, &target_set);
7890             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7891             if (!ret) {
7892                 host_to_target_old_sigset(&target_set, &oset);
7893                 ret = target_set;
7894             }
7895         }
7896         return ret;
7897 #endif
7898 #ifdef TARGET_NR_sigprocmask
7899     case TARGET_NR_sigprocmask:
7900         {
7901 #if defined(TARGET_ALPHA)
7902             sigset_t set, oldset;
7903             abi_ulong mask;
7904             int how;
7905 
7906             switch (arg1) {
7907             case TARGET_SIG_BLOCK:
7908                 how = SIG_BLOCK;
7909                 break;
7910             case TARGET_SIG_UNBLOCK:
7911                 how = SIG_UNBLOCK;
7912                 break;
7913             case TARGET_SIG_SETMASK:
7914                 how = SIG_SETMASK;
7915                 break;
7916             default:
7917                 return -TARGET_EINVAL;
7918             }
7919             mask = arg2;
7920             target_to_host_old_sigset(&set, &mask);
7921 
7922             ret = do_sigprocmask(how, &set, &oldset);
7923             if (!is_error(ret)) {
7924                 host_to_target_old_sigset(&mask, &oldset);
7925                 ret = mask;
7926                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7927             }
7928 #else
7929             sigset_t set, oldset, *set_ptr;
7930             int how;
7931 
7932             if (arg2) {
7933                 switch (arg1) {
7934                 case TARGET_SIG_BLOCK:
7935                     how = SIG_BLOCK;
7936                     break;
7937                 case TARGET_SIG_UNBLOCK:
7938                     how = SIG_UNBLOCK;
7939                     break;
7940                 case TARGET_SIG_SETMASK:
7941                     how = SIG_SETMASK;
7942                     break;
7943                 default:
7944                     return -TARGET_EINVAL;
7945                 }
7946                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7947                     return -TARGET_EFAULT;
7948                 target_to_host_old_sigset(&set, p);
7949                 unlock_user(p, arg2, 0);
7950                 set_ptr = &set;
7951             } else {
7952                 how = 0;
7953                 set_ptr = NULL;
7954             }
7955             ret = do_sigprocmask(how, set_ptr, &oldset);
7956             if (!is_error(ret) && arg3) {
7957                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7958                     return -TARGET_EFAULT;
7959                 host_to_target_old_sigset(p, &oldset);
7960                 unlock_user(p, arg3, sizeof(target_sigset_t));
7961             }
7962 #endif
7963         }
7964         return ret;
7965 #endif
7966     case TARGET_NR_rt_sigprocmask:
7967         {
7968             int how = arg1;
7969             sigset_t set, oldset, *set_ptr;
7970 
7971             if (arg4 != sizeof(target_sigset_t)) {
7972                 return -TARGET_EINVAL;
7973             }
7974 
7975             if (arg2) {
7976                 switch(how) {
7977                 case TARGET_SIG_BLOCK:
7978                     how = SIG_BLOCK;
7979                     break;
7980                 case TARGET_SIG_UNBLOCK:
7981                     how = SIG_UNBLOCK;
7982                     break;
7983                 case TARGET_SIG_SETMASK:
7984                     how = SIG_SETMASK;
7985                     break;
7986                 default:
7987                     return -TARGET_EINVAL;
7988                 }
7989                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7990                     return -TARGET_EFAULT;
7991                 target_to_host_sigset(&set, p);
7992                 unlock_user(p, arg2, 0);
7993                 set_ptr = &set;
7994             } else {
7995                 how = 0;
7996                 set_ptr = NULL;
7997             }
7998             ret = do_sigprocmask(how, set_ptr, &oldset);
7999             if (!is_error(ret) && arg3) {
8000                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8001                     return -TARGET_EFAULT;
8002                 host_to_target_sigset(p, &oldset);
8003                 unlock_user(p, arg3, sizeof(target_sigset_t));
8004             }
8005         }
8006         return ret;
8007 #ifdef TARGET_NR_sigpending
8008     case TARGET_NR_sigpending:
8009         {
8010             sigset_t set;
8011             ret = get_errno(sigpending(&set));
8012             if (!is_error(ret)) {
8013                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8014                     return -TARGET_EFAULT;
8015                 host_to_target_old_sigset(p, &set);
8016                 unlock_user(p, arg1, sizeof(target_sigset_t));
8017             }
8018         }
8019         return ret;
8020 #endif
8021     case TARGET_NR_rt_sigpending:
8022         {
8023             sigset_t set;
8024 
8025             /* Yes, this check is >, not != like most. We follow the kernel's
8026              * logic and it does it like this because it implements
8027              * NR_sigpending through the same code path, and in that case
8028              * the old_sigset_t is smaller in size.
8029              */
8030             if (arg2 > sizeof(target_sigset_t)) {
8031                 return -TARGET_EINVAL;
8032             }
8033 
8034             ret = get_errno(sigpending(&set));
8035             if (!is_error(ret)) {
8036                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8037                     return -TARGET_EFAULT;
8038                 host_to_target_sigset(p, &set);
8039                 unlock_user(p, arg1, sizeof(target_sigset_t));
8040             }
8041         }
8042         return ret;
8043 #ifdef TARGET_NR_sigsuspend
8044     case TARGET_NR_sigsuspend:
8045         {
8046             TaskState *ts = cpu->opaque;
8047 #if defined(TARGET_ALPHA)
8048             abi_ulong mask = arg1;
8049             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8050 #else
8051             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8052                 return -TARGET_EFAULT;
8053             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8054             unlock_user(p, arg1, 0);
8055 #endif
8056             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8057                                                SIGSET_T_SIZE));
8058             if (ret != -TARGET_ERESTARTSYS) {
8059                 ts->in_sigsuspend = 1;
8060             }
8061         }
8062         return ret;
8063 #endif
8064     case TARGET_NR_rt_sigsuspend:
8065         {
8066             TaskState *ts = cpu->opaque;
8067 
8068             if (arg2 != sizeof(target_sigset_t)) {
8069                 return -TARGET_EINVAL;
8070             }
8071             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8072                 return -TARGET_EFAULT;
8073             target_to_host_sigset(&ts->sigsuspend_mask, p);
8074             unlock_user(p, arg1, 0);
8075             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8076                                                SIGSET_T_SIZE));
8077             if (ret != -TARGET_ERESTARTSYS) {
8078                 ts->in_sigsuspend = 1;
8079             }
8080         }
8081         return ret;
8082     case TARGET_NR_rt_sigtimedwait:
8083         {
8084             sigset_t set;
8085             struct timespec uts, *puts;
8086             siginfo_t uinfo;
8087 
8088             if (arg4 != sizeof(target_sigset_t)) {
8089                 return -TARGET_EINVAL;
8090             }
8091 
8092             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8093                 return -TARGET_EFAULT;
8094             target_to_host_sigset(&set, p);
8095             unlock_user(p, arg1, 0);
8096             if (arg3) {
8097                 puts = &uts;
8098                 target_to_host_timespec(puts, arg3);
8099             } else {
8100                 puts = NULL;
8101             }
8102             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8103                                                  SIGSET_T_SIZE));
8104             if (!is_error(ret)) {
8105                 if (arg2) {
8106                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8107                                   0);
8108                     if (!p) {
8109                         return -TARGET_EFAULT;
8110                     }
8111                     host_to_target_siginfo(p, &uinfo);
8112                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8113                 }
8114                 ret = host_to_target_signal(ret);
8115             }
8116         }
8117         return ret;
8118     case TARGET_NR_rt_sigqueueinfo:
8119         {
8120             siginfo_t uinfo;
8121 
8122             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8123             if (!p) {
8124                 return -TARGET_EFAULT;
8125             }
8126             target_to_host_siginfo(&uinfo, p);
8127             unlock_user(p, arg3, 0);
8128             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8129         }
8130         return ret;
8131     case TARGET_NR_rt_tgsigqueueinfo:
8132         {
8133             siginfo_t uinfo;
8134 
8135             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8136             if (!p) {
8137                 return -TARGET_EFAULT;
8138             }
8139             target_to_host_siginfo(&uinfo, p);
8140             unlock_user(p, arg4, 0);
8141             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8142         }
8143         return ret;
8144 #ifdef TARGET_NR_sigreturn
8145     case TARGET_NR_sigreturn:
8146         if (block_signals()) {
8147             return -TARGET_ERESTARTSYS;
8148         }
8149         return do_sigreturn(cpu_env);
8150 #endif
8151     case TARGET_NR_rt_sigreturn:
8152         if (block_signals()) {
8153             return -TARGET_ERESTARTSYS;
8154         }
8155         return do_rt_sigreturn(cpu_env);
8156     case TARGET_NR_sethostname:
8157         if (!(p = lock_user_string(arg1)))
8158             return -TARGET_EFAULT;
8159         ret = get_errno(sethostname(p, arg2));
8160         unlock_user(p, arg1, 0);
8161         return ret;
8162 #ifdef TARGET_NR_setrlimit
8163     case TARGET_NR_setrlimit:
8164         {
8165             int resource = target_to_host_resource(arg1);
8166             struct target_rlimit *target_rlim;
8167             struct rlimit rlim;
8168             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8169                 return -TARGET_EFAULT;
8170             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8171             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8172             unlock_user_struct(target_rlim, arg2, 0);
8173             /*
8174              * If we just passed through resource limit settings for memory then
8175              * they would also apply to QEMU's own allocations, and QEMU will
8176              * crash or hang or die if its allocations fail. Ideally we would
8177              * track the guest allocations in QEMU and apply the limits ourselves.
8178              * For now, just tell the guest the call succeeded but don't actually
8179              * limit anything.
8180              */
8181             if (resource != RLIMIT_AS &&
8182                 resource != RLIMIT_DATA &&
8183                 resource != RLIMIT_STACK) {
8184                 return get_errno(setrlimit(resource, &rlim));
8185             } else {
8186                 return 0;
8187             }
8188         }
8189 #endif
8190 #ifdef TARGET_NR_getrlimit
8191     case TARGET_NR_getrlimit:
8192         {
8193             int resource = target_to_host_resource(arg1);
8194             struct target_rlimit *target_rlim;
8195             struct rlimit rlim;
8196 
8197             ret = get_errno(getrlimit(resource, &rlim));
8198             if (!is_error(ret)) {
8199                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8200                     return -TARGET_EFAULT;
8201                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8202                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8203                 unlock_user_struct(target_rlim, arg2, 1);
8204             }
8205         }
8206         return ret;
8207 #endif
8208     case TARGET_NR_getrusage:
8209         {
8210             struct rusage rusage;
8211             ret = get_errno(getrusage(arg1, &rusage));
8212             if (!is_error(ret)) {
8213                 ret = host_to_target_rusage(arg2, &rusage);
8214             }
8215         }
8216         return ret;
8217     case TARGET_NR_gettimeofday:
8218         {
8219             struct timeval tv;
8220             ret = get_errno(gettimeofday(&tv, NULL));
8221             if (!is_error(ret)) {
8222                 if (copy_to_user_timeval(arg1, &tv))
8223                     return -TARGET_EFAULT;
8224             }
8225         }
8226         return ret;
8227     case TARGET_NR_settimeofday:
8228         {
8229             struct timeval tv, *ptv = NULL;
8230             struct timezone tz, *ptz = NULL;
8231 
8232             if (arg1) {
8233                 if (copy_from_user_timeval(&tv, arg1)) {
8234                     return -TARGET_EFAULT;
8235                 }
8236                 ptv = &tv;
8237             }
8238 
8239             if (arg2) {
8240                 if (copy_from_user_timezone(&tz, arg2)) {
8241                     return -TARGET_EFAULT;
8242                 }
8243                 ptz = &tz;
8244             }
8245 
8246             return get_errno(settimeofday(ptv, ptz));
8247         }
8248 #if defined(TARGET_NR_select)
8249     case TARGET_NR_select:
8250 #if defined(TARGET_WANT_NI_OLD_SELECT)
8251         /* some architectures used to have old_select here
8252          * but now ENOSYS it.
8253          */
8254         ret = -TARGET_ENOSYS;
8255 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8256         ret = do_old_select(arg1);
8257 #else
8258         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8259 #endif
8260         return ret;
8261 #endif
8262 #ifdef TARGET_NR_pselect6
8263     case TARGET_NR_pselect6:
8264         {
8265             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8266             fd_set rfds, wfds, efds;
8267             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8268             struct timespec ts, *ts_ptr;
8269 
8270             /*
8271              * The 6th arg is actually two args smashed together,
8272              * so we cannot use the C library.
8273              */
8274             sigset_t set;
8275             struct {
8276                 sigset_t *set;
8277                 size_t size;
8278             } sig, *sig_ptr;
8279 
8280             abi_ulong arg_sigset, arg_sigsize, *arg7;
8281             target_sigset_t *target_sigset;
8282 
8283             n = arg1;
8284             rfd_addr = arg2;
8285             wfd_addr = arg3;
8286             efd_addr = arg4;
8287             ts_addr = arg5;
8288 
8289             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8290             if (ret) {
8291                 return ret;
8292             }
8293             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8294             if (ret) {
8295                 return ret;
8296             }
8297             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8298             if (ret) {
8299                 return ret;
8300             }
8301 
8302             /*
8303              * This takes a timespec, and not a timeval, so we cannot
8304              * use the do_select() helper ...
8305              */
8306             if (ts_addr) {
8307                 if (target_to_host_timespec(&ts, ts_addr)) {
8308                     return -TARGET_EFAULT;
8309                 }
8310                 ts_ptr = &ts;
8311             } else {
8312                 ts_ptr = NULL;
8313             }
8314 
8315             /* Extract the two packed args for the sigset */
8316             if (arg6) {
8317                 sig_ptr = &sig;
8318                 sig.size = SIGSET_T_SIZE;
8319 
8320                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8321                 if (!arg7) {
8322                     return -TARGET_EFAULT;
8323                 }
8324                 arg_sigset = tswapal(arg7[0]);
8325                 arg_sigsize = tswapal(arg7[1]);
8326                 unlock_user(arg7, arg6, 0);
8327 
8328                 if (arg_sigset) {
8329                     sig.set = &set;
8330                     if (arg_sigsize != sizeof(*target_sigset)) {
8331                         /* Like the kernel, we enforce correct size sigsets */
8332                         return -TARGET_EINVAL;
8333                     }
8334                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8335                                               sizeof(*target_sigset), 1);
8336                     if (!target_sigset) {
8337                         return -TARGET_EFAULT;
8338                     }
8339                     target_to_host_sigset(&set, target_sigset);
8340                     unlock_user(target_sigset, arg_sigset, 0);
8341                 } else {
8342                     sig.set = NULL;
8343                 }
8344             } else {
8345                 sig_ptr = NULL;
8346             }
8347 
8348             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8349                                           ts_ptr, sig_ptr));
8350 
8351             if (!is_error(ret)) {
8352                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8353                     return -TARGET_EFAULT;
8354                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8355                     return -TARGET_EFAULT;
8356                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8357                     return -TARGET_EFAULT;
8358 
8359                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8360                     return -TARGET_EFAULT;
8361             }
8362         }
8363         return ret;
8364 #endif
8365 #ifdef TARGET_NR_symlink
8366     case TARGET_NR_symlink:
8367         {
8368             void *p2;
8369             p = lock_user_string(arg1);
8370             p2 = lock_user_string(arg2);
8371             if (!p || !p2)
8372                 ret = -TARGET_EFAULT;
8373             else
8374                 ret = get_errno(symlink(p, p2));
8375             unlock_user(p2, arg2, 0);
8376             unlock_user(p, arg1, 0);
8377         }
8378         return ret;
8379 #endif
8380 #if defined(TARGET_NR_symlinkat)
8381     case TARGET_NR_symlinkat:
8382         {
8383             void *p2;
8384             p  = lock_user_string(arg1);
8385             p2 = lock_user_string(arg3);
8386             if (!p || !p2)
8387                 ret = -TARGET_EFAULT;
8388             else
8389                 ret = get_errno(symlinkat(p, arg2, p2));
8390             unlock_user(p2, arg3, 0);
8391             unlock_user(p, arg1, 0);
8392         }
8393         return ret;
8394 #endif
8395 #ifdef TARGET_NR_readlink
8396     case TARGET_NR_readlink:
8397         {
8398             void *p2;
8399             p = lock_user_string(arg1);
8400             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8401             if (!p || !p2) {
8402                 ret = -TARGET_EFAULT;
8403             } else if (!arg3) {
8404                 /* Short circuit this for the magic exe check. */
8405                 ret = -TARGET_EINVAL;
8406             } else if (is_proc_myself((const char *)p, "exe")) {
8407                 char real[PATH_MAX], *temp;
8408                 temp = realpath(exec_path, real);
8409                 /* Return value is # of bytes that we wrote to the buffer. */
8410                 if (temp == NULL) {
8411                     ret = get_errno(-1);
8412                 } else {
8413                     /* Don't worry about sign mismatch as earlier mapping
8414                      * logic would have thrown a bad address error. */
8415                     ret = MIN(strlen(real), arg3);
8416                     /* We cannot NUL terminate the string. */
8417                     memcpy(p2, real, ret);
8418                 }
8419             } else {
8420                 ret = get_errno(readlink(path(p), p2, arg3));
8421             }
8422             unlock_user(p2, arg2, ret);
8423             unlock_user(p, arg1, 0);
8424         }
8425         return ret;
8426 #endif
8427 #if defined(TARGET_NR_readlinkat)
8428     case TARGET_NR_readlinkat:
8429         {
8430             void *p2;
8431             p  = lock_user_string(arg2);
8432             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8433             if (!p || !p2) {
8434                 ret = -TARGET_EFAULT;
8435             } else if (is_proc_myself((const char *)p, "exe")) {
8436                 char real[PATH_MAX], *temp;
8437                 temp = realpath(exec_path, real);
8438                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8439                 snprintf((char *)p2, arg4, "%s", real);
8440             } else {
8441                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8442             }
8443             unlock_user(p2, arg3, ret);
8444             unlock_user(p, arg2, 0);
8445         }
8446         return ret;
8447 #endif
8448 #ifdef TARGET_NR_swapon
8449     case TARGET_NR_swapon:
8450         if (!(p = lock_user_string(arg1)))
8451             return -TARGET_EFAULT;
8452         ret = get_errno(swapon(p, arg2));
8453         unlock_user(p, arg1, 0);
8454         return ret;
8455 #endif
8456     case TARGET_NR_reboot:
8457         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8458            /* arg4 must be ignored in all other cases */
8459            p = lock_user_string(arg4);
8460            if (!p) {
8461                return -TARGET_EFAULT;
8462            }
8463            ret = get_errno(reboot(arg1, arg2, arg3, p));
8464            unlock_user(p, arg4, 0);
8465         } else {
8466            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8467         }
8468         return ret;
8469 #ifdef TARGET_NR_mmap
8470     case TARGET_NR_mmap:
8471 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8472     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8473     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8474     || defined(TARGET_S390X)
8475         {
8476             abi_ulong *v;
8477             abi_ulong v1, v2, v3, v4, v5, v6;
8478             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8479                 return -TARGET_EFAULT;
8480             v1 = tswapal(v[0]);
8481             v2 = tswapal(v[1]);
8482             v3 = tswapal(v[2]);
8483             v4 = tswapal(v[3]);
8484             v5 = tswapal(v[4]);
8485             v6 = tswapal(v[5]);
8486             unlock_user(v, arg1, 0);
8487             ret = get_errno(target_mmap(v1, v2, v3,
8488                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8489                                         v5, v6));
8490         }
8491 #else
8492         ret = get_errno(target_mmap(arg1, arg2, arg3,
8493                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8494                                     arg5,
8495                                     arg6));
8496 #endif
8497         return ret;
8498 #endif
8499 #ifdef TARGET_NR_mmap2
8500     case TARGET_NR_mmap2:
8501 #ifndef MMAP_SHIFT
8502 #define MMAP_SHIFT 12
8503 #endif
8504         ret = target_mmap(arg1, arg2, arg3,
8505                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8506                           arg5, arg6 << MMAP_SHIFT);
8507         return get_errno(ret);
8508 #endif
8509     case TARGET_NR_munmap:
8510         return get_errno(target_munmap(arg1, arg2));
8511     case TARGET_NR_mprotect:
8512         {
8513             TaskState *ts = cpu->opaque;
8514             /* Special hack to detect libc making the stack executable.  */
8515             if ((arg3 & PROT_GROWSDOWN)
8516                 && arg1 >= ts->info->stack_limit
8517                 && arg1 <= ts->info->start_stack) {
8518                 arg3 &= ~PROT_GROWSDOWN;
8519                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8520                 arg1 = ts->info->stack_limit;
8521             }
8522         }
8523         return get_errno(target_mprotect(arg1, arg2, arg3));
8524 #ifdef TARGET_NR_mremap
8525     case TARGET_NR_mremap:
8526         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8527 #endif
8528         /* ??? msync/mlock/munlock are broken for softmmu.  */
8529 #ifdef TARGET_NR_msync
8530     case TARGET_NR_msync:
8531         return get_errno(msync(g2h(arg1), arg2, arg3));
8532 #endif
8533 #ifdef TARGET_NR_mlock
8534     case TARGET_NR_mlock:
8535         return get_errno(mlock(g2h(arg1), arg2));
8536 #endif
8537 #ifdef TARGET_NR_munlock
8538     case TARGET_NR_munlock:
8539         return get_errno(munlock(g2h(arg1), arg2));
8540 #endif
8541 #ifdef TARGET_NR_mlockall
8542     case TARGET_NR_mlockall:
8543         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8544 #endif
8545 #ifdef TARGET_NR_munlockall
8546     case TARGET_NR_munlockall:
8547         return get_errno(munlockall());
8548 #endif
8549 #ifdef TARGET_NR_truncate
8550     case TARGET_NR_truncate:
8551         if (!(p = lock_user_string(arg1)))
8552             return -TARGET_EFAULT;
8553         ret = get_errno(truncate(p, arg2));
8554         unlock_user(p, arg1, 0);
8555         return ret;
8556 #endif
8557 #ifdef TARGET_NR_ftruncate
8558     case TARGET_NR_ftruncate:
8559         return get_errno(ftruncate(arg1, arg2));
8560 #endif
8561     case TARGET_NR_fchmod:
8562         return get_errno(fchmod(arg1, arg2));
8563 #if defined(TARGET_NR_fchmodat)
8564     case TARGET_NR_fchmodat:
8565         if (!(p = lock_user_string(arg2)))
8566             return -TARGET_EFAULT;
8567         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8568         unlock_user(p, arg2, 0);
8569         return ret;
8570 #endif
8571     case TARGET_NR_getpriority:
8572         /* Note that negative values are valid for getpriority, so we must
8573            differentiate based on errno settings.  */
8574         errno = 0;
8575         ret = getpriority(arg1, arg2);
8576         if (ret == -1 && errno != 0) {
8577             return -host_to_target_errno(errno);
8578         }
8579 #ifdef TARGET_ALPHA
8580         /* Return value is the unbiased priority.  Signal no error.  */
8581         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8582 #else
8583         /* Return value is a biased priority to avoid negative numbers.  */
8584         ret = 20 - ret;
8585 #endif
8586         return ret;
8587     case TARGET_NR_setpriority:
8588         return get_errno(setpriority(arg1, arg2, arg3));
8589 #ifdef TARGET_NR_statfs
8590     case TARGET_NR_statfs:
8591         if (!(p = lock_user_string(arg1))) {
8592             return -TARGET_EFAULT;
8593         }
8594         ret = get_errno(statfs(path(p), &stfs));
8595         unlock_user(p, arg1, 0);
8596     convert_statfs:
8597         if (!is_error(ret)) {
8598             struct target_statfs *target_stfs;
8599 
8600             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8601                 return -TARGET_EFAULT;
8602             __put_user(stfs.f_type, &target_stfs->f_type);
8603             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8604             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8605             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8606             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8607             __put_user(stfs.f_files, &target_stfs->f_files);
8608             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8609             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8610             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8611             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8612             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8613 #ifdef _STATFS_F_FLAGS
8614             __put_user(stfs.f_flags, &target_stfs->f_flags);
8615 #else
8616             __put_user(0, &target_stfs->f_flags);
8617 #endif
8618             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8619             unlock_user_struct(target_stfs, arg2, 1);
8620         }
8621         return ret;
8622 #endif
8623 #ifdef TARGET_NR_fstatfs
8624     case TARGET_NR_fstatfs:
8625         ret = get_errno(fstatfs(arg1, &stfs));
8626         goto convert_statfs;
8627 #endif
8628 #ifdef TARGET_NR_statfs64
8629     case TARGET_NR_statfs64:
8630         if (!(p = lock_user_string(arg1))) {
8631             return -TARGET_EFAULT;
8632         }
8633         ret = get_errno(statfs(path(p), &stfs));
8634         unlock_user(p, arg1, 0);
8635     convert_statfs64:
8636         if (!is_error(ret)) {
8637             struct target_statfs64 *target_stfs;
8638 
8639             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8640                 return -TARGET_EFAULT;
8641             __put_user(stfs.f_type, &target_stfs->f_type);
8642             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8643             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8644             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8645             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8646             __put_user(stfs.f_files, &target_stfs->f_files);
8647             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8648             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8649             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8650             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8651             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8652             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8653             unlock_user_struct(target_stfs, arg3, 1);
8654         }
8655         return ret;
8656     case TARGET_NR_fstatfs64:
8657         ret = get_errno(fstatfs(arg1, &stfs));
8658         goto convert_statfs64;
8659 #endif
8660 #ifdef TARGET_NR_socketcall
8661     case TARGET_NR_socketcall:
8662         return do_socketcall(arg1, arg2);
8663 #endif
8664 #ifdef TARGET_NR_accept
8665     case TARGET_NR_accept:
8666         return do_accept4(arg1, arg2, arg3, 0);
8667 #endif
8668 #ifdef TARGET_NR_accept4
8669     case TARGET_NR_accept4:
8670         return do_accept4(arg1, arg2, arg3, arg4);
8671 #endif
8672 #ifdef TARGET_NR_bind
8673     case TARGET_NR_bind:
8674         return do_bind(arg1, arg2, arg3);
8675 #endif
8676 #ifdef TARGET_NR_connect
8677     case TARGET_NR_connect:
8678         return do_connect(arg1, arg2, arg3);
8679 #endif
8680 #ifdef TARGET_NR_getpeername
8681     case TARGET_NR_getpeername:
8682         return do_getpeername(arg1, arg2, arg3);
8683 #endif
8684 #ifdef TARGET_NR_getsockname
8685     case TARGET_NR_getsockname:
8686         return do_getsockname(arg1, arg2, arg3);
8687 #endif
8688 #ifdef TARGET_NR_getsockopt
8689     case TARGET_NR_getsockopt:
8690         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8691 #endif
8692 #ifdef TARGET_NR_listen
8693     case TARGET_NR_listen:
8694         return get_errno(listen(arg1, arg2));
8695 #endif
8696 #ifdef TARGET_NR_recv
8697     case TARGET_NR_recv:
8698         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8699 #endif
8700 #ifdef TARGET_NR_recvfrom
8701     case TARGET_NR_recvfrom:
8702         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8703 #endif
8704 #ifdef TARGET_NR_recvmsg
8705     case TARGET_NR_recvmsg:
8706         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8707 #endif
8708 #ifdef TARGET_NR_send
8709     case TARGET_NR_send:
8710         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8711 #endif
8712 #ifdef TARGET_NR_sendmsg
8713     case TARGET_NR_sendmsg:
8714         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8715 #endif
8716 #ifdef TARGET_NR_sendmmsg
8717     case TARGET_NR_sendmmsg:
8718         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8719     case TARGET_NR_recvmmsg:
8720         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8721 #endif
8722 #ifdef TARGET_NR_sendto
8723     case TARGET_NR_sendto:
8724         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8725 #endif
8726 #ifdef TARGET_NR_shutdown
8727     case TARGET_NR_shutdown:
8728         return get_errno(shutdown(arg1, arg2));
8729 #endif
8730 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8731     case TARGET_NR_getrandom:
8732         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8733         if (!p) {
8734             return -TARGET_EFAULT;
8735         }
8736         ret = get_errno(getrandom(p, arg2, arg3));
8737         unlock_user(p, arg1, ret);
8738         return ret;
8739 #endif
8740 #ifdef TARGET_NR_socket
8741     case TARGET_NR_socket:
8742         return do_socket(arg1, arg2, arg3);
8743 #endif
8744 #ifdef TARGET_NR_socketpair
8745     case TARGET_NR_socketpair:
8746         return do_socketpair(arg1, arg2, arg3, arg4);
8747 #endif
8748 #ifdef TARGET_NR_setsockopt
8749     case TARGET_NR_setsockopt:
8750         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8751 #endif
8752 #if defined(TARGET_NR_syslog)
8753     case TARGET_NR_syslog:
8754         {
8755             int len = arg2;
8756 
8757             switch (arg1) {
8758             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8759             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8760             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8761             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8762             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8763             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8764             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8765             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8766                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8767             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8768             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8769             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8770                 {
8771                     if (len < 0) {
8772                         return -TARGET_EINVAL;
8773                     }
8774                     if (len == 0) {
8775                         return 0;
8776                     }
8777                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8778                     if (!p) {
8779                         return -TARGET_EFAULT;
8780                     }
8781                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8782                     unlock_user(p, arg2, arg3);
8783                 }
8784                 return ret;
8785             default:
8786                 return -TARGET_EINVAL;
8787             }
8788         }
8789         break;
8790 #endif
8791     case TARGET_NR_setitimer:
8792         {
8793             struct itimerval value, ovalue, *pvalue;
8794 
8795             if (arg2) {
8796                 pvalue = &value;
8797                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8798                     || copy_from_user_timeval(&pvalue->it_value,
8799                                               arg2 + sizeof(struct target_timeval)))
8800                     return -TARGET_EFAULT;
8801             } else {
8802                 pvalue = NULL;
8803             }
8804             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8805             if (!is_error(ret) && arg3) {
8806                 if (copy_to_user_timeval(arg3,
8807                                          &ovalue.it_interval)
8808                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8809                                             &ovalue.it_value))
8810                     return -TARGET_EFAULT;
8811             }
8812         }
8813         return ret;
8814     case TARGET_NR_getitimer:
8815         {
8816             struct itimerval value;
8817 
8818             ret = get_errno(getitimer(arg1, &value));
8819             if (!is_error(ret) && arg2) {
8820                 if (copy_to_user_timeval(arg2,
8821                                          &value.it_interval)
8822                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8823                                             &value.it_value))
8824                     return -TARGET_EFAULT;
8825             }
8826         }
8827         return ret;
8828 #ifdef TARGET_NR_stat
8829     case TARGET_NR_stat:
8830         if (!(p = lock_user_string(arg1))) {
8831             return -TARGET_EFAULT;
8832         }
8833         ret = get_errno(stat(path(p), &st));
8834         unlock_user(p, arg1, 0);
8835         goto do_stat;
8836 #endif
8837 #ifdef TARGET_NR_lstat
8838     case TARGET_NR_lstat:
8839         if (!(p = lock_user_string(arg1))) {
8840             return -TARGET_EFAULT;
8841         }
8842         ret = get_errno(lstat(path(p), &st));
8843         unlock_user(p, arg1, 0);
8844         goto do_stat;
8845 #endif
8846 #ifdef TARGET_NR_fstat
8847     case TARGET_NR_fstat:
8848         {
8849             ret = get_errno(fstat(arg1, &st));
8850 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8851         do_stat:
8852 #endif
8853             if (!is_error(ret)) {
8854                 struct target_stat *target_st;
8855 
8856                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8857                     return -TARGET_EFAULT;
8858                 memset(target_st, 0, sizeof(*target_st));
8859                 __put_user(st.st_dev, &target_st->st_dev);
8860                 __put_user(st.st_ino, &target_st->st_ino);
8861                 __put_user(st.st_mode, &target_st->st_mode);
8862                 __put_user(st.st_uid, &target_st->st_uid);
8863                 __put_user(st.st_gid, &target_st->st_gid);
8864                 __put_user(st.st_nlink, &target_st->st_nlink);
8865                 __put_user(st.st_rdev, &target_st->st_rdev);
8866                 __put_user(st.st_size, &target_st->st_size);
8867                 __put_user(st.st_blksize, &target_st->st_blksize);
8868                 __put_user(st.st_blocks, &target_st->st_blocks);
8869                 __put_user(st.st_atime, &target_st->target_st_atime);
8870                 __put_user(st.st_mtime, &target_st->target_st_mtime);
8871                 __put_user(st.st_ctime, &target_st->target_st_ctime);
8872                 unlock_user_struct(target_st, arg2, 1);
8873             }
8874         }
8875         return ret;
8876 #endif
8877     case TARGET_NR_vhangup:
8878         return get_errno(vhangup());
8879 #ifdef TARGET_NR_syscall
8880     case TARGET_NR_syscall:
8881         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8882                           arg6, arg7, arg8, 0);
8883 #endif
8884     case TARGET_NR_wait4:
8885         {
8886             int status;
8887             abi_long status_ptr = arg2;
8888             struct rusage rusage, *rusage_ptr;
8889             abi_ulong target_rusage = arg4;
8890             abi_long rusage_err;
8891             if (target_rusage)
8892                 rusage_ptr = &rusage;
8893             else
8894                 rusage_ptr = NULL;
8895             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8896             if (!is_error(ret)) {
8897                 if (status_ptr && ret) {
8898                     status = host_to_target_waitstatus(status);
8899                     if (put_user_s32(status, status_ptr))
8900                         return -TARGET_EFAULT;
8901                 }
8902                 if (target_rusage) {
8903                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
8904                     if (rusage_err) {
8905                         ret = rusage_err;
8906                     }
8907                 }
8908             }
8909         }
8910         return ret;
8911 #ifdef TARGET_NR_swapoff
8912     case TARGET_NR_swapoff:
8913         if (!(p = lock_user_string(arg1)))
8914             return -TARGET_EFAULT;
8915         ret = get_errno(swapoff(p));
8916         unlock_user(p, arg1, 0);
8917         return ret;
8918 #endif
8919     case TARGET_NR_sysinfo:
8920         {
8921             struct target_sysinfo *target_value;
8922             struct sysinfo value;
8923             ret = get_errno(sysinfo(&value));
8924             if (!is_error(ret) && arg1)
8925             {
8926                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8927                     return -TARGET_EFAULT;
8928                 __put_user(value.uptime, &target_value->uptime);
8929                 __put_user(value.loads[0], &target_value->loads[0]);
8930                 __put_user(value.loads[1], &target_value->loads[1]);
8931                 __put_user(value.loads[2], &target_value->loads[2]);
8932                 __put_user(value.totalram, &target_value->totalram);
8933                 __put_user(value.freeram, &target_value->freeram);
8934                 __put_user(value.sharedram, &target_value->sharedram);
8935                 __put_user(value.bufferram, &target_value->bufferram);
8936                 __put_user(value.totalswap, &target_value->totalswap);
8937                 __put_user(value.freeswap, &target_value->freeswap);
8938                 __put_user(value.procs, &target_value->procs);
8939                 __put_user(value.totalhigh, &target_value->totalhigh);
8940                 __put_user(value.freehigh, &target_value->freehigh);
8941                 __put_user(value.mem_unit, &target_value->mem_unit);
8942                 unlock_user_struct(target_value, arg1, 1);
8943             }
8944         }
8945         return ret;
8946 #ifdef TARGET_NR_ipc
8947     case TARGET_NR_ipc:
8948         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8949 #endif
8950 #ifdef TARGET_NR_semget
8951     case TARGET_NR_semget:
8952         return get_errno(semget(arg1, arg2, arg3));
8953 #endif
8954 #ifdef TARGET_NR_semop
8955     case TARGET_NR_semop:
8956         return do_semop(arg1, arg2, arg3);
8957 #endif
8958 #ifdef TARGET_NR_semctl
8959     case TARGET_NR_semctl:
8960         return do_semctl(arg1, arg2, arg3, arg4);
8961 #endif
8962 #ifdef TARGET_NR_msgctl
8963     case TARGET_NR_msgctl:
8964         return do_msgctl(arg1, arg2, arg3);
8965 #endif
8966 #ifdef TARGET_NR_msgget
8967     case TARGET_NR_msgget:
8968         return get_errno(msgget(arg1, arg2));
8969 #endif
8970 #ifdef TARGET_NR_msgrcv
8971     case TARGET_NR_msgrcv:
8972         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8973 #endif
8974 #ifdef TARGET_NR_msgsnd
8975     case TARGET_NR_msgsnd:
8976         return do_msgsnd(arg1, arg2, arg3, arg4);
8977 #endif
8978 #ifdef TARGET_NR_shmget
8979     case TARGET_NR_shmget:
8980         return get_errno(shmget(arg1, arg2, arg3));
8981 #endif
8982 #ifdef TARGET_NR_shmctl
8983     case TARGET_NR_shmctl:
8984         return do_shmctl(arg1, arg2, arg3);
8985 #endif
8986 #ifdef TARGET_NR_shmat
8987     case TARGET_NR_shmat:
8988         return do_shmat(cpu_env, arg1, arg2, arg3);
8989 #endif
8990 #ifdef TARGET_NR_shmdt
8991     case TARGET_NR_shmdt:
8992         return do_shmdt(arg1);
8993 #endif
8994     case TARGET_NR_fsync:
8995         return get_errno(fsync(arg1));
8996     case TARGET_NR_clone:
8997         /* Linux manages to have three different orderings for its
8998          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8999          * match the kernel's CONFIG_CLONE_* settings.
9000          * Microblaze is further special in that it uses a sixth
9001          * implicit argument to clone for the TLS pointer.
9002          */
9003 #if defined(TARGET_MICROBLAZE)
9004         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9005 #elif defined(TARGET_CLONE_BACKWARDS)
9006         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9007 #elif defined(TARGET_CLONE_BACKWARDS2)
9008         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9009 #else
9010         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9011 #endif
9012         return ret;
9013 #ifdef __NR_exit_group
9014         /* new thread calls */
9015     case TARGET_NR_exit_group:
9016         preexit_cleanup(cpu_env, arg1);
9017         return get_errno(exit_group(arg1));
9018 #endif
9019     case TARGET_NR_setdomainname:
9020         if (!(p = lock_user_string(arg1)))
9021             return -TARGET_EFAULT;
9022         ret = get_errno(setdomainname(p, arg2));
9023         unlock_user(p, arg1, 0);
9024         return ret;
9025     case TARGET_NR_uname:
9026         /* no need to transcode because we use the linux syscall */
9027         {
9028             struct new_utsname * buf;
9029 
9030             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9031                 return -TARGET_EFAULT;
9032             ret = get_errno(sys_uname(buf));
9033             if (!is_error(ret)) {
9034                 /* Overwrite the native machine name with whatever is being
9035                    emulated. */
9036                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9037                           sizeof(buf->machine));
9038                 /* Allow the user to override the reported release.  */
9039                 if (qemu_uname_release && *qemu_uname_release) {
9040                     g_strlcpy(buf->release, qemu_uname_release,
9041                               sizeof(buf->release));
9042                 }
9043             }
9044             unlock_user_struct(buf, arg1, 1);
9045         }
9046         return ret;
9047 #ifdef TARGET_I386
9048     case TARGET_NR_modify_ldt:
9049         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9050 #if !defined(TARGET_X86_64)
9051     case TARGET_NR_vm86:
9052         return do_vm86(cpu_env, arg1, arg2);
9053 #endif
9054 #endif
9055     case TARGET_NR_adjtimex:
9056         {
9057             struct timex host_buf;
9058 
9059             if (target_to_host_timex(&host_buf, arg1) != 0) {
9060                 return -TARGET_EFAULT;
9061             }
9062             ret = get_errno(adjtimex(&host_buf));
9063             if (!is_error(ret)) {
9064                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9065                     return -TARGET_EFAULT;
9066                 }
9067             }
9068         }
9069         return ret;
9070 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9071     case TARGET_NR_clock_adjtime:
9072         {
9073             struct timex htx, *phtx = &htx;
9074 
9075             if (target_to_host_timex(phtx, arg2) != 0) {
9076                 return -TARGET_EFAULT;
9077             }
9078             ret = get_errno(clock_adjtime(arg1, phtx));
9079             if (!is_error(ret) && phtx) {
9080                 if (host_to_target_timex(arg2, phtx) != 0) {
9081                     return -TARGET_EFAULT;
9082                 }
9083             }
9084         }
9085         return ret;
9086 #endif
9087     case TARGET_NR_getpgid:
9088         return get_errno(getpgid(arg1));
9089     case TARGET_NR_fchdir:
9090         return get_errno(fchdir(arg1));
9091     case TARGET_NR_personality:
9092         return get_errno(personality(arg1));
9093 #ifdef TARGET_NR__llseek /* Not on alpha */
9094     case TARGET_NR__llseek:
9095         {
9096             int64_t res;
9097 #if !defined(__NR_llseek)
9098             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9099             if (res == -1) {
9100                 ret = get_errno(res);
9101             } else {
9102                 ret = 0;
9103             }
9104 #else
9105             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9106 #endif
9107             if ((ret == 0) && put_user_s64(res, arg4)) {
9108                 return -TARGET_EFAULT;
9109             }
9110         }
9111         return ret;
9112 #endif
9113 #ifdef TARGET_NR_getdents
9114     case TARGET_NR_getdents:
9115 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9116 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9117         {
9118             struct target_dirent *target_dirp;
9119             struct linux_dirent *dirp;
9120             abi_long count = arg3;
9121 
9122             dirp = g_try_malloc(count);
9123             if (!dirp) {
9124                 return -TARGET_ENOMEM;
9125             }
9126 
9127             ret = get_errno(sys_getdents(arg1, dirp, count));
9128             if (!is_error(ret)) {
9129                 struct linux_dirent *de;
9130 		struct target_dirent *tde;
9131                 int len = ret;
9132                 int reclen, treclen;
9133 		int count1, tnamelen;
9134 
9135 		count1 = 0;
9136                 de = dirp;
9137                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9138                     return -TARGET_EFAULT;
9139 		tde = target_dirp;
9140                 while (len > 0) {
9141                     reclen = de->d_reclen;
9142                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9143                     assert(tnamelen >= 0);
9144                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9145                     assert(count1 + treclen <= count);
9146                     tde->d_reclen = tswap16(treclen);
9147                     tde->d_ino = tswapal(de->d_ino);
9148                     tde->d_off = tswapal(de->d_off);
9149                     memcpy(tde->d_name, de->d_name, tnamelen);
9150                     de = (struct linux_dirent *)((char *)de + reclen);
9151                     len -= reclen;
9152                     tde = (struct target_dirent *)((char *)tde + treclen);
9153 		    count1 += treclen;
9154                 }
9155 		ret = count1;
9156                 unlock_user(target_dirp, arg2, ret);
9157             }
9158             g_free(dirp);
9159         }
9160 #else
9161         {
9162             struct linux_dirent *dirp;
9163             abi_long count = arg3;
9164 
9165             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9166                 return -TARGET_EFAULT;
9167             ret = get_errno(sys_getdents(arg1, dirp, count));
9168             if (!is_error(ret)) {
9169                 struct linux_dirent *de;
9170                 int len = ret;
9171                 int reclen;
9172                 de = dirp;
9173                 while (len > 0) {
9174                     reclen = de->d_reclen;
9175                     if (reclen > len)
9176                         break;
9177                     de->d_reclen = tswap16(reclen);
9178                     tswapls(&de->d_ino);
9179                     tswapls(&de->d_off);
9180                     de = (struct linux_dirent *)((char *)de + reclen);
9181                     len -= reclen;
9182                 }
9183             }
9184             unlock_user(dirp, arg2, ret);
9185         }
9186 #endif
9187 #else
9188         /* Implement getdents in terms of getdents64 */
9189         {
9190             struct linux_dirent64 *dirp;
9191             abi_long count = arg3;
9192 
9193             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9194             if (!dirp) {
9195                 return -TARGET_EFAULT;
9196             }
9197             ret = get_errno(sys_getdents64(arg1, dirp, count));
9198             if (!is_error(ret)) {
9199                 /* Convert the dirent64 structs to target dirent.  We do this
9200                  * in-place, since we can guarantee that a target_dirent is no
9201                  * larger than a dirent64; however this means we have to be
9202                  * careful to read everything before writing in the new format.
9203                  */
9204                 struct linux_dirent64 *de;
9205                 struct target_dirent *tde;
9206                 int len = ret;
9207                 int tlen = 0;
9208 
9209                 de = dirp;
9210                 tde = (struct target_dirent *)dirp;
9211                 while (len > 0) {
9212                     int namelen, treclen;
9213                     int reclen = de->d_reclen;
9214                     uint64_t ino = de->d_ino;
9215                     int64_t off = de->d_off;
9216                     uint8_t type = de->d_type;
9217 
9218                     namelen = strlen(de->d_name);
9219                     treclen = offsetof(struct target_dirent, d_name)
9220                         + namelen + 2;
9221                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9222 
9223                     memmove(tde->d_name, de->d_name, namelen + 1);
9224                     tde->d_ino = tswapal(ino);
9225                     tde->d_off = tswapal(off);
9226                     tde->d_reclen = tswap16(treclen);
9227                     /* The target_dirent type is in what was formerly a padding
9228                      * byte at the end of the structure:
9229                      */
9230                     *(((char *)tde) + treclen - 1) = type;
9231 
9232                     de = (struct linux_dirent64 *)((char *)de + reclen);
9233                     tde = (struct target_dirent *)((char *)tde + treclen);
9234                     len -= reclen;
9235                     tlen += treclen;
9236                 }
9237                 ret = tlen;
9238             }
9239             unlock_user(dirp, arg2, ret);
9240         }
9241 #endif
9242         return ret;
9243 #endif /* TARGET_NR_getdents */
9244 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9245     case TARGET_NR_getdents64:
9246         {
9247             struct linux_dirent64 *dirp;
9248             abi_long count = arg3;
9249             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9250                 return -TARGET_EFAULT;
9251             ret = get_errno(sys_getdents64(arg1, dirp, count));
9252             if (!is_error(ret)) {
9253                 struct linux_dirent64 *de;
9254                 int len = ret;
9255                 int reclen;
9256                 de = dirp;
9257                 while (len > 0) {
9258                     reclen = de->d_reclen;
9259                     if (reclen > len)
9260                         break;
9261                     de->d_reclen = tswap16(reclen);
9262                     tswap64s((uint64_t *)&de->d_ino);
9263                     tswap64s((uint64_t *)&de->d_off);
9264                     de = (struct linux_dirent64 *)((char *)de + reclen);
9265                     len -= reclen;
9266                 }
9267             }
9268             unlock_user(dirp, arg2, ret);
9269         }
9270         return ret;
9271 #endif /* TARGET_NR_getdents64 */
9272 #if defined(TARGET_NR__newselect)
9273     case TARGET_NR__newselect:
9274         return do_select(arg1, arg2, arg3, arg4, arg5);
9275 #endif
9276 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9277 # ifdef TARGET_NR_poll
9278     case TARGET_NR_poll:
9279 # endif
9280 # ifdef TARGET_NR_ppoll
9281     case TARGET_NR_ppoll:
9282 # endif
9283         {
9284             struct target_pollfd *target_pfd;
9285             unsigned int nfds = arg2;
9286             struct pollfd *pfd;
9287             unsigned int i;
9288 
9289             pfd = NULL;
9290             target_pfd = NULL;
9291             if (nfds) {
9292                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9293                     return -TARGET_EINVAL;
9294                 }
9295 
9296                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9297                                        sizeof(struct target_pollfd) * nfds, 1);
9298                 if (!target_pfd) {
9299                     return -TARGET_EFAULT;
9300                 }
9301 
9302                 pfd = alloca(sizeof(struct pollfd) * nfds);
9303                 for (i = 0; i < nfds; i++) {
9304                     pfd[i].fd = tswap32(target_pfd[i].fd);
9305                     pfd[i].events = tswap16(target_pfd[i].events);
9306                 }
9307             }
9308 
9309             switch (num) {
9310 # ifdef TARGET_NR_ppoll
9311             case TARGET_NR_ppoll:
9312             {
9313                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9314                 target_sigset_t *target_set;
9315                 sigset_t _set, *set = &_set;
9316 
9317                 if (arg3) {
9318                     if (target_to_host_timespec(timeout_ts, arg3)) {
9319                         unlock_user(target_pfd, arg1, 0);
9320                         return -TARGET_EFAULT;
9321                     }
9322                 } else {
9323                     timeout_ts = NULL;
9324                 }
9325 
9326                 if (arg4) {
9327                     if (arg5 != sizeof(target_sigset_t)) {
9328                         unlock_user(target_pfd, arg1, 0);
9329                         return -TARGET_EINVAL;
9330                     }
9331 
9332                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9333                     if (!target_set) {
9334                         unlock_user(target_pfd, arg1, 0);
9335                         return -TARGET_EFAULT;
9336                     }
9337                     target_to_host_sigset(set, target_set);
9338                 } else {
9339                     set = NULL;
9340                 }
9341 
9342                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9343                                            set, SIGSET_T_SIZE));
9344 
9345                 if (!is_error(ret) && arg3) {
9346                     host_to_target_timespec(arg3, timeout_ts);
9347                 }
9348                 if (arg4) {
9349                     unlock_user(target_set, arg4, 0);
9350                 }
9351                 break;
9352             }
9353 # endif
9354 # ifdef TARGET_NR_poll
9355             case TARGET_NR_poll:
9356             {
9357                 struct timespec ts, *pts;
9358 
9359                 if (arg3 >= 0) {
9360                     /* Convert ms to secs, ns */
9361                     ts.tv_sec = arg3 / 1000;
9362                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9363                     pts = &ts;
9364                 } else {
9365                     /* -ve poll() timeout means "infinite" */
9366                     pts = NULL;
9367                 }
9368                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9369                 break;
9370             }
9371 # endif
9372             default:
9373                 g_assert_not_reached();
9374             }
9375 
9376             if (!is_error(ret)) {
9377                 for(i = 0; i < nfds; i++) {
9378                     target_pfd[i].revents = tswap16(pfd[i].revents);
9379                 }
9380             }
9381             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9382         }
9383         return ret;
9384 #endif
9385     case TARGET_NR_flock:
9386         /* NOTE: the flock constant seems to be the same for every
9387            Linux platform */
9388         return get_errno(safe_flock(arg1, arg2));
9389     case TARGET_NR_readv:
9390         {
9391             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9392             if (vec != NULL) {
9393                 ret = get_errno(safe_readv(arg1, vec, arg3));
9394                 unlock_iovec(vec, arg2, arg3, 1);
9395             } else {
9396                 ret = -host_to_target_errno(errno);
9397             }
9398         }
9399         return ret;
9400     case TARGET_NR_writev:
9401         {
9402             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9403             if (vec != NULL) {
9404                 ret = get_errno(safe_writev(arg1, vec, arg3));
9405                 unlock_iovec(vec, arg2, arg3, 0);
9406             } else {
9407                 ret = -host_to_target_errno(errno);
9408             }
9409         }
9410         return ret;
9411 #if defined(TARGET_NR_preadv)
9412     case TARGET_NR_preadv:
9413         {
9414             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9415             if (vec != NULL) {
9416                 unsigned long low, high;
9417 
9418                 target_to_host_low_high(arg4, arg5, &low, &high);
9419                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9420                 unlock_iovec(vec, arg2, arg3, 1);
9421             } else {
9422                 ret = -host_to_target_errno(errno);
9423            }
9424         }
9425         return ret;
9426 #endif
9427 #if defined(TARGET_NR_pwritev)
9428     case TARGET_NR_pwritev:
9429         {
9430             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9431             if (vec != NULL) {
9432                 unsigned long low, high;
9433 
9434                 target_to_host_low_high(arg4, arg5, &low, &high);
9435                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9436                 unlock_iovec(vec, arg2, arg3, 0);
9437             } else {
9438                 ret = -host_to_target_errno(errno);
9439            }
9440         }
9441         return ret;
9442 #endif
9443     case TARGET_NR_getsid:
9444         return get_errno(getsid(arg1));
9445 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9446     case TARGET_NR_fdatasync:
9447         return get_errno(fdatasync(arg1));
9448 #endif
9449 #ifdef TARGET_NR__sysctl
9450     case TARGET_NR__sysctl:
9451         /* We don't implement this, but ENOTDIR is always a safe
9452            return value. */
9453         return -TARGET_ENOTDIR;
9454 #endif
9455     case TARGET_NR_sched_getaffinity:
9456         {
9457             unsigned int mask_size;
9458             unsigned long *mask;
9459 
9460             /*
9461              * sched_getaffinity needs multiples of ulong, so need to take
9462              * care of mismatches between target ulong and host ulong sizes.
9463              */
9464             if (arg2 & (sizeof(abi_ulong) - 1)) {
9465                 return -TARGET_EINVAL;
9466             }
9467             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9468 
9469             mask = alloca(mask_size);
9470             memset(mask, 0, mask_size);
9471             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9472 
9473             if (!is_error(ret)) {
9474                 if (ret > arg2) {
9475                     /* More data returned than the caller's buffer will fit.
9476                      * This only happens if sizeof(abi_long) < sizeof(long)
9477                      * and the caller passed us a buffer holding an odd number
9478                      * of abi_longs. If the host kernel is actually using the
9479                      * extra 4 bytes then fail EINVAL; otherwise we can just
9480                      * ignore them and only copy the interesting part.
9481                      */
9482                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9483                     if (numcpus > arg2 * 8) {
9484                         return -TARGET_EINVAL;
9485                     }
9486                     ret = arg2;
9487                 }
9488 
9489                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9490                     return -TARGET_EFAULT;
9491                 }
9492             }
9493         }
9494         return ret;
9495     case TARGET_NR_sched_setaffinity:
9496         {
9497             unsigned int mask_size;
9498             unsigned long *mask;
9499 
9500             /*
9501              * sched_setaffinity needs multiples of ulong, so need to take
9502              * care of mismatches between target ulong and host ulong sizes.
9503              */
9504             if (arg2 & (sizeof(abi_ulong) - 1)) {
9505                 return -TARGET_EINVAL;
9506             }
9507             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9508             mask = alloca(mask_size);
9509 
9510             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9511             if (ret) {
9512                 return ret;
9513             }
9514 
9515             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9516         }
9517     case TARGET_NR_getcpu:
9518         {
9519             unsigned cpu, node;
9520             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9521                                        arg2 ? &node : NULL,
9522                                        NULL));
9523             if (is_error(ret)) {
9524                 return ret;
9525             }
9526             if (arg1 && put_user_u32(cpu, arg1)) {
9527                 return -TARGET_EFAULT;
9528             }
9529             if (arg2 && put_user_u32(node, arg2)) {
9530                 return -TARGET_EFAULT;
9531             }
9532         }
9533         return ret;
9534     case TARGET_NR_sched_setparam:
9535         {
9536             struct sched_param *target_schp;
9537             struct sched_param schp;
9538 
9539             if (arg2 == 0) {
9540                 return -TARGET_EINVAL;
9541             }
9542             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9543                 return -TARGET_EFAULT;
9544             schp.sched_priority = tswap32(target_schp->sched_priority);
9545             unlock_user_struct(target_schp, arg2, 0);
9546             return get_errno(sched_setparam(arg1, &schp));
9547         }
9548     case TARGET_NR_sched_getparam:
9549         {
9550             struct sched_param *target_schp;
9551             struct sched_param schp;
9552 
9553             if (arg2 == 0) {
9554                 return -TARGET_EINVAL;
9555             }
9556             ret = get_errno(sched_getparam(arg1, &schp));
9557             if (!is_error(ret)) {
9558                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9559                     return -TARGET_EFAULT;
9560                 target_schp->sched_priority = tswap32(schp.sched_priority);
9561                 unlock_user_struct(target_schp, arg2, 1);
9562             }
9563         }
9564         return ret;
9565     case TARGET_NR_sched_setscheduler:
9566         {
9567             struct sched_param *target_schp;
9568             struct sched_param schp;
9569             if (arg3 == 0) {
9570                 return -TARGET_EINVAL;
9571             }
9572             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9573                 return -TARGET_EFAULT;
9574             schp.sched_priority = tswap32(target_schp->sched_priority);
9575             unlock_user_struct(target_schp, arg3, 0);
9576             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9577         }
9578     case TARGET_NR_sched_getscheduler:
9579         return get_errno(sched_getscheduler(arg1));
9580     case TARGET_NR_sched_yield:
9581         return get_errno(sched_yield());
9582     case TARGET_NR_sched_get_priority_max:
9583         return get_errno(sched_get_priority_max(arg1));
9584     case TARGET_NR_sched_get_priority_min:
9585         return get_errno(sched_get_priority_min(arg1));
9586     case TARGET_NR_sched_rr_get_interval:
9587         {
9588             struct timespec ts;
9589             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9590             if (!is_error(ret)) {
9591                 ret = host_to_target_timespec(arg2, &ts);
9592             }
9593         }
9594         return ret;
9595     case TARGET_NR_nanosleep:
9596         {
9597             struct timespec req, rem;
9598             target_to_host_timespec(&req, arg1);
9599             ret = get_errno(safe_nanosleep(&req, &rem));
9600             if (is_error(ret) && arg2) {
9601                 host_to_target_timespec(arg2, &rem);
9602             }
9603         }
9604         return ret;
9605     case TARGET_NR_prctl:
9606         switch (arg1) {
9607         case PR_GET_PDEATHSIG:
9608         {
9609             int deathsig;
9610             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9611             if (!is_error(ret) && arg2
9612                 && put_user_ual(deathsig, arg2)) {
9613                 return -TARGET_EFAULT;
9614             }
9615             return ret;
9616         }
9617 #ifdef PR_GET_NAME
9618         case PR_GET_NAME:
9619         {
9620             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9621             if (!name) {
9622                 return -TARGET_EFAULT;
9623             }
9624             ret = get_errno(prctl(arg1, (unsigned long)name,
9625                                   arg3, arg4, arg5));
9626             unlock_user(name, arg2, 16);
9627             return ret;
9628         }
9629         case PR_SET_NAME:
9630         {
9631             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9632             if (!name) {
9633                 return -TARGET_EFAULT;
9634             }
9635             ret = get_errno(prctl(arg1, (unsigned long)name,
9636                                   arg3, arg4, arg5));
9637             unlock_user(name, arg2, 0);
9638             return ret;
9639         }
9640 #endif
9641 #ifdef TARGET_MIPS
9642         case TARGET_PR_GET_FP_MODE:
9643         {
9644             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9645             ret = 0;
9646             if (env->CP0_Status & (1 << CP0St_FR)) {
9647                 ret |= TARGET_PR_FP_MODE_FR;
9648             }
9649             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9650                 ret |= TARGET_PR_FP_MODE_FRE;
9651             }
9652             return ret;
9653         }
9654         case TARGET_PR_SET_FP_MODE:
9655         {
9656             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9657             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9658             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9659             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9660             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9661 
9662             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9663                                             TARGET_PR_FP_MODE_FRE;
9664 
9665             /* If nothing to change, return right away, successfully.  */
9666             if (old_fr == new_fr && old_fre == new_fre) {
9667                 return 0;
9668             }
9669             /* Check the value is valid */
9670             if (arg2 & ~known_bits) {
9671                 return -TARGET_EOPNOTSUPP;
9672             }
9673             /* Setting FRE without FR is not supported.  */
9674             if (new_fre && !new_fr) {
9675                 return -TARGET_EOPNOTSUPP;
9676             }
9677             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9678                 /* FR1 is not supported */
9679                 return -TARGET_EOPNOTSUPP;
9680             }
9681             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9682                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9683                 /* cannot set FR=0 */
9684                 return -TARGET_EOPNOTSUPP;
9685             }
9686             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9687                 /* Cannot set FRE=1 */
9688                 return -TARGET_EOPNOTSUPP;
9689             }
9690 
9691             int i;
9692             fpr_t *fpr = env->active_fpu.fpr;
9693             for (i = 0; i < 32 ; i += 2) {
9694                 if (!old_fr && new_fr) {
9695                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9696                 } else if (old_fr && !new_fr) {
9697                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9698                 }
9699             }
9700 
9701             if (new_fr) {
9702                 env->CP0_Status |= (1 << CP0St_FR);
9703                 env->hflags |= MIPS_HFLAG_F64;
9704             } else {
9705                 env->CP0_Status &= ~(1 << CP0St_FR);
9706                 env->hflags &= ~MIPS_HFLAG_F64;
9707             }
9708             if (new_fre) {
9709                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9710                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9711                     env->hflags |= MIPS_HFLAG_FRE;
9712                 }
9713             } else {
9714                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9715                 env->hflags &= ~MIPS_HFLAG_FRE;
9716             }
9717 
9718             return 0;
9719         }
9720 #endif /* MIPS */
9721 #ifdef TARGET_AARCH64
9722         case TARGET_PR_SVE_SET_VL:
9723             /*
9724              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9725              * PR_SVE_VL_INHERIT.  Note the kernel definition
9726              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9727              * even though the current architectural maximum is VQ=16.
9728              */
9729             ret = -TARGET_EINVAL;
9730             if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9731                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9732                 CPUARMState *env = cpu_env;
9733                 ARMCPU *cpu = arm_env_get_cpu(env);
9734                 uint32_t vq, old_vq;
9735 
9736                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9737                 vq = MAX(arg2 / 16, 1);
9738                 vq = MIN(vq, cpu->sve_max_vq);
9739 
9740                 if (vq < old_vq) {
9741                     aarch64_sve_narrow_vq(env, vq);
9742                 }
9743                 env->vfp.zcr_el[1] = vq - 1;
9744                 ret = vq * 16;
9745             }
9746             return ret;
9747         case TARGET_PR_SVE_GET_VL:
9748             ret = -TARGET_EINVAL;
9749             {
9750                 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9751                 if (cpu_isar_feature(aa64_sve, cpu)) {
9752                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9753                 }
9754             }
9755             return ret;
9756         case TARGET_PR_PAC_RESET_KEYS:
9757             {
9758                 CPUARMState *env = cpu_env;
9759                 ARMCPU *cpu = arm_env_get_cpu(env);
9760 
9761                 if (arg3 || arg4 || arg5) {
9762                     return -TARGET_EINVAL;
9763                 }
9764                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9765                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9766                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9767                                TARGET_PR_PAC_APGAKEY);
9768                     if (arg2 == 0) {
9769                         arg2 = all;
9770                     } else if (arg2 & ~all) {
9771                         return -TARGET_EINVAL;
9772                     }
9773                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9774                         arm_init_pauth_key(&env->apia_key);
9775                     }
9776                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9777                         arm_init_pauth_key(&env->apib_key);
9778                     }
9779                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9780                         arm_init_pauth_key(&env->apda_key);
9781                     }
9782                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9783                         arm_init_pauth_key(&env->apdb_key);
9784                     }
9785                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9786                         arm_init_pauth_key(&env->apga_key);
9787                     }
9788                     return 0;
9789                 }
9790             }
9791             return -TARGET_EINVAL;
9792 #endif /* AARCH64 */
9793         case PR_GET_SECCOMP:
9794         case PR_SET_SECCOMP:
9795             /* Disable seccomp to prevent the target disabling syscalls we
9796              * need. */
9797             return -TARGET_EINVAL;
9798         default:
9799             /* Most prctl options have no pointer arguments */
9800             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9801         }
9802         break;
9803 #ifdef TARGET_NR_arch_prctl
9804     case TARGET_NR_arch_prctl:
9805 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9806         return do_arch_prctl(cpu_env, arg1, arg2);
9807 #else
9808 #error unreachable
9809 #endif
9810 #endif
9811 #ifdef TARGET_NR_pread64
9812     case TARGET_NR_pread64:
9813         if (regpairs_aligned(cpu_env, num)) {
9814             arg4 = arg5;
9815             arg5 = arg6;
9816         }
9817         if (arg2 == 0 && arg3 == 0) {
9818             /* Special-case NULL buffer and zero length, which should succeed */
9819             p = 0;
9820         } else {
9821             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9822             if (!p) {
9823                 return -TARGET_EFAULT;
9824             }
9825         }
9826         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9827         unlock_user(p, arg2, ret);
9828         return ret;
9829     case TARGET_NR_pwrite64:
9830         if (regpairs_aligned(cpu_env, num)) {
9831             arg4 = arg5;
9832             arg5 = arg6;
9833         }
9834         if (arg2 == 0 && arg3 == 0) {
9835             /* Special-case NULL buffer and zero length, which should succeed */
9836             p = 0;
9837         } else {
9838             p = lock_user(VERIFY_READ, arg2, arg3, 1);
9839             if (!p) {
9840                 return -TARGET_EFAULT;
9841             }
9842         }
9843         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9844         unlock_user(p, arg2, 0);
9845         return ret;
9846 #endif
9847     case TARGET_NR_getcwd:
9848         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9849             return -TARGET_EFAULT;
9850         ret = get_errno(sys_getcwd1(p, arg2));
9851         unlock_user(p, arg1, ret);
9852         return ret;
9853     case TARGET_NR_capget:
9854     case TARGET_NR_capset:
9855     {
9856         struct target_user_cap_header *target_header;
9857         struct target_user_cap_data *target_data = NULL;
9858         struct __user_cap_header_struct header;
9859         struct __user_cap_data_struct data[2];
9860         struct __user_cap_data_struct *dataptr = NULL;
9861         int i, target_datalen;
9862         int data_items = 1;
9863 
9864         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9865             return -TARGET_EFAULT;
9866         }
9867         header.version = tswap32(target_header->version);
9868         header.pid = tswap32(target_header->pid);
9869 
9870         if (header.version != _LINUX_CAPABILITY_VERSION) {
9871             /* Version 2 and up takes pointer to two user_data structs */
9872             data_items = 2;
9873         }
9874 
9875         target_datalen = sizeof(*target_data) * data_items;
9876 
9877         if (arg2) {
9878             if (num == TARGET_NR_capget) {
9879                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9880             } else {
9881                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9882             }
9883             if (!target_data) {
9884                 unlock_user_struct(target_header, arg1, 0);
9885                 return -TARGET_EFAULT;
9886             }
9887 
9888             if (num == TARGET_NR_capset) {
9889                 for (i = 0; i < data_items; i++) {
9890                     data[i].effective = tswap32(target_data[i].effective);
9891                     data[i].permitted = tswap32(target_data[i].permitted);
9892                     data[i].inheritable = tswap32(target_data[i].inheritable);
9893                 }
9894             }
9895 
9896             dataptr = data;
9897         }
9898 
9899         if (num == TARGET_NR_capget) {
9900             ret = get_errno(capget(&header, dataptr));
9901         } else {
9902             ret = get_errno(capset(&header, dataptr));
9903         }
9904 
9905         /* The kernel always updates version for both capget and capset */
9906         target_header->version = tswap32(header.version);
9907         unlock_user_struct(target_header, arg1, 1);
9908 
9909         if (arg2) {
9910             if (num == TARGET_NR_capget) {
9911                 for (i = 0; i < data_items; i++) {
9912                     target_data[i].effective = tswap32(data[i].effective);
9913                     target_data[i].permitted = tswap32(data[i].permitted);
9914                     target_data[i].inheritable = tswap32(data[i].inheritable);
9915                 }
9916                 unlock_user(target_data, arg2, target_datalen);
9917             } else {
9918                 unlock_user(target_data, arg2, 0);
9919             }
9920         }
9921         return ret;
9922     }
9923     case TARGET_NR_sigaltstack:
9924         return do_sigaltstack(arg1, arg2,
9925                               get_sp_from_cpustate((CPUArchState *)cpu_env));
9926 
9927 #ifdef CONFIG_SENDFILE
9928 #ifdef TARGET_NR_sendfile
9929     case TARGET_NR_sendfile:
9930     {
9931         off_t *offp = NULL;
9932         off_t off;
9933         if (arg3) {
9934             ret = get_user_sal(off, arg3);
9935             if (is_error(ret)) {
9936                 return ret;
9937             }
9938             offp = &off;
9939         }
9940         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9941         if (!is_error(ret) && arg3) {
9942             abi_long ret2 = put_user_sal(off, arg3);
9943             if (is_error(ret2)) {
9944                 ret = ret2;
9945             }
9946         }
9947         return ret;
9948     }
9949 #endif
9950 #ifdef TARGET_NR_sendfile64
9951     case TARGET_NR_sendfile64:
9952     {
9953         off_t *offp = NULL;
9954         off_t off;
9955         if (arg3) {
9956             ret = get_user_s64(off, arg3);
9957             if (is_error(ret)) {
9958                 return ret;
9959             }
9960             offp = &off;
9961         }
9962         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9963         if (!is_error(ret) && arg3) {
9964             abi_long ret2 = put_user_s64(off, arg3);
9965             if (is_error(ret2)) {
9966                 ret = ret2;
9967             }
9968         }
9969         return ret;
9970     }
9971 #endif
9972 #endif
9973 #ifdef TARGET_NR_vfork
9974     case TARGET_NR_vfork:
9975         return get_errno(do_fork(cpu_env,
9976                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9977                          0, 0, 0, 0));
9978 #endif
9979 #ifdef TARGET_NR_ugetrlimit
9980     case TARGET_NR_ugetrlimit:
9981     {
9982 	struct rlimit rlim;
9983 	int resource = target_to_host_resource(arg1);
9984 	ret = get_errno(getrlimit(resource, &rlim));
9985 	if (!is_error(ret)) {
9986 	    struct target_rlimit *target_rlim;
9987             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9988                 return -TARGET_EFAULT;
9989 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9990 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9991             unlock_user_struct(target_rlim, arg2, 1);
9992 	}
9993         return ret;
9994     }
9995 #endif
9996 #ifdef TARGET_NR_truncate64
9997     case TARGET_NR_truncate64:
9998         if (!(p = lock_user_string(arg1)))
9999             return -TARGET_EFAULT;
10000 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10001         unlock_user(p, arg1, 0);
10002         return ret;
10003 #endif
10004 #ifdef TARGET_NR_ftruncate64
10005     case TARGET_NR_ftruncate64:
10006         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10007 #endif
10008 #ifdef TARGET_NR_stat64
10009     case TARGET_NR_stat64:
10010         if (!(p = lock_user_string(arg1))) {
10011             return -TARGET_EFAULT;
10012         }
10013         ret = get_errno(stat(path(p), &st));
10014         unlock_user(p, arg1, 0);
10015         if (!is_error(ret))
10016             ret = host_to_target_stat64(cpu_env, arg2, &st);
10017         return ret;
10018 #endif
10019 #ifdef TARGET_NR_lstat64
10020     case TARGET_NR_lstat64:
10021         if (!(p = lock_user_string(arg1))) {
10022             return -TARGET_EFAULT;
10023         }
10024         ret = get_errno(lstat(path(p), &st));
10025         unlock_user(p, arg1, 0);
10026         if (!is_error(ret))
10027             ret = host_to_target_stat64(cpu_env, arg2, &st);
10028         return ret;
10029 #endif
10030 #ifdef TARGET_NR_fstat64
10031     case TARGET_NR_fstat64:
10032         ret = get_errno(fstat(arg1, &st));
10033         if (!is_error(ret))
10034             ret = host_to_target_stat64(cpu_env, arg2, &st);
10035         return ret;
10036 #endif
10037 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10038 #ifdef TARGET_NR_fstatat64
10039     case TARGET_NR_fstatat64:
10040 #endif
10041 #ifdef TARGET_NR_newfstatat
10042     case TARGET_NR_newfstatat:
10043 #endif
10044         if (!(p = lock_user_string(arg2))) {
10045             return -TARGET_EFAULT;
10046         }
10047         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10048         unlock_user(p, arg2, 0);
10049         if (!is_error(ret))
10050             ret = host_to_target_stat64(cpu_env, arg3, &st);
10051         return ret;
10052 #endif
10053 #ifdef TARGET_NR_lchown
10054     case TARGET_NR_lchown:
10055         if (!(p = lock_user_string(arg1)))
10056             return -TARGET_EFAULT;
10057         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10058         unlock_user(p, arg1, 0);
10059         return ret;
10060 #endif
10061 #ifdef TARGET_NR_getuid
10062     case TARGET_NR_getuid:
10063         return get_errno(high2lowuid(getuid()));
10064 #endif
10065 #ifdef TARGET_NR_getgid
10066     case TARGET_NR_getgid:
10067         return get_errno(high2lowgid(getgid()));
10068 #endif
10069 #ifdef TARGET_NR_geteuid
10070     case TARGET_NR_geteuid:
10071         return get_errno(high2lowuid(geteuid()));
10072 #endif
10073 #ifdef TARGET_NR_getegid
10074     case TARGET_NR_getegid:
10075         return get_errno(high2lowgid(getegid()));
10076 #endif
10077     case TARGET_NR_setreuid:
10078         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10079     case TARGET_NR_setregid:
10080         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10081     case TARGET_NR_getgroups:
10082         {
10083             int gidsetsize = arg1;
10084             target_id *target_grouplist;
10085             gid_t *grouplist;
10086             int i;
10087 
10088             grouplist = alloca(gidsetsize * sizeof(gid_t));
10089             ret = get_errno(getgroups(gidsetsize, grouplist));
10090             if (gidsetsize == 0)
10091                 return ret;
10092             if (!is_error(ret)) {
10093                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10094                 if (!target_grouplist)
10095                     return -TARGET_EFAULT;
10096                 for(i = 0;i < ret; i++)
10097                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10098                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10099             }
10100         }
10101         return ret;
10102     case TARGET_NR_setgroups:
10103         {
10104             int gidsetsize = arg1;
10105             target_id *target_grouplist;
10106             gid_t *grouplist = NULL;
10107             int i;
10108             if (gidsetsize) {
10109                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10110                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10111                 if (!target_grouplist) {
10112                     return -TARGET_EFAULT;
10113                 }
10114                 for (i = 0; i < gidsetsize; i++) {
10115                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10116                 }
10117                 unlock_user(target_grouplist, arg2, 0);
10118             }
10119             return get_errno(setgroups(gidsetsize, grouplist));
10120         }
10121     case TARGET_NR_fchown:
10122         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10123 #if defined(TARGET_NR_fchownat)
10124     case TARGET_NR_fchownat:
10125         if (!(p = lock_user_string(arg2)))
10126             return -TARGET_EFAULT;
10127         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10128                                  low2highgid(arg4), arg5));
10129         unlock_user(p, arg2, 0);
10130         return ret;
10131 #endif
10132 #ifdef TARGET_NR_setresuid
10133     case TARGET_NR_setresuid:
10134         return get_errno(sys_setresuid(low2highuid(arg1),
10135                                        low2highuid(arg2),
10136                                        low2highuid(arg3)));
10137 #endif
10138 #ifdef TARGET_NR_getresuid
10139     case TARGET_NR_getresuid:
10140         {
10141             uid_t ruid, euid, suid;
10142             ret = get_errno(getresuid(&ruid, &euid, &suid));
10143             if (!is_error(ret)) {
10144                 if (put_user_id(high2lowuid(ruid), arg1)
10145                     || put_user_id(high2lowuid(euid), arg2)
10146                     || put_user_id(high2lowuid(suid), arg3))
10147                     return -TARGET_EFAULT;
10148             }
10149         }
10150         return ret;
10151 #endif
10152 #ifdef TARGET_NR_getresgid
10153     case TARGET_NR_setresgid:
10154         return get_errno(sys_setresgid(low2highgid(arg1),
10155                                        low2highgid(arg2),
10156                                        low2highgid(arg3)));
10157 #endif
10158 #ifdef TARGET_NR_getresgid
10159     case TARGET_NR_getresgid:
10160         {
10161             gid_t rgid, egid, sgid;
10162             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10163             if (!is_error(ret)) {
10164                 if (put_user_id(high2lowgid(rgid), arg1)
10165                     || put_user_id(high2lowgid(egid), arg2)
10166                     || put_user_id(high2lowgid(sgid), arg3))
10167                     return -TARGET_EFAULT;
10168             }
10169         }
10170         return ret;
10171 #endif
10172 #ifdef TARGET_NR_chown
10173     case TARGET_NR_chown:
10174         if (!(p = lock_user_string(arg1)))
10175             return -TARGET_EFAULT;
10176         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10177         unlock_user(p, arg1, 0);
10178         return ret;
10179 #endif
10180     case TARGET_NR_setuid:
10181         return get_errno(sys_setuid(low2highuid(arg1)));
10182     case TARGET_NR_setgid:
10183         return get_errno(sys_setgid(low2highgid(arg1)));
10184     case TARGET_NR_setfsuid:
10185         return get_errno(setfsuid(arg1));
10186     case TARGET_NR_setfsgid:
10187         return get_errno(setfsgid(arg1));
10188 
10189 #ifdef TARGET_NR_lchown32
10190     case TARGET_NR_lchown32:
10191         if (!(p = lock_user_string(arg1)))
10192             return -TARGET_EFAULT;
10193         ret = get_errno(lchown(p, arg2, arg3));
10194         unlock_user(p, arg1, 0);
10195         return ret;
10196 #endif
10197 #ifdef TARGET_NR_getuid32
10198     case TARGET_NR_getuid32:
10199         return get_errno(getuid());
10200 #endif
10201 
10202 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10203    /* Alpha specific */
10204     case TARGET_NR_getxuid:
10205          {
10206             uid_t euid;
10207             euid=geteuid();
10208             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10209          }
10210         return get_errno(getuid());
10211 #endif
10212 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10213    /* Alpha specific */
10214     case TARGET_NR_getxgid:
10215          {
10216             uid_t egid;
10217             egid=getegid();
10218             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10219          }
10220         return get_errno(getgid());
10221 #endif
10222 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10223     /* Alpha specific */
10224     case TARGET_NR_osf_getsysinfo:
10225         ret = -TARGET_EOPNOTSUPP;
10226         switch (arg1) {
10227           case TARGET_GSI_IEEE_FP_CONTROL:
10228             {
10229                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10230 
10231                 /* Copied from linux ieee_fpcr_to_swcr.  */
10232                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10233                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10234                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10235                                         | SWCR_TRAP_ENABLE_DZE
10236                                         | SWCR_TRAP_ENABLE_OVF);
10237                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10238                                         | SWCR_TRAP_ENABLE_INE);
10239                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10240                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10241 
10242                 if (put_user_u64 (swcr, arg2))
10243                         return -TARGET_EFAULT;
10244                 ret = 0;
10245             }
10246             break;
10247 
10248           /* case GSI_IEEE_STATE_AT_SIGNAL:
10249              -- Not implemented in linux kernel.
10250              case GSI_UACPROC:
10251              -- Retrieves current unaligned access state; not much used.
10252              case GSI_PROC_TYPE:
10253              -- Retrieves implver information; surely not used.
10254              case GSI_GET_HWRPB:
10255              -- Grabs a copy of the HWRPB; surely not used.
10256           */
10257         }
10258         return ret;
10259 #endif
10260 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10261     /* Alpha specific */
10262     case TARGET_NR_osf_setsysinfo:
10263         ret = -TARGET_EOPNOTSUPP;
10264         switch (arg1) {
10265           case TARGET_SSI_IEEE_FP_CONTROL:
10266             {
10267                 uint64_t swcr, fpcr, orig_fpcr;
10268 
10269                 if (get_user_u64 (swcr, arg2)) {
10270                     return -TARGET_EFAULT;
10271                 }
10272                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10273                 fpcr = orig_fpcr & FPCR_DYN_MASK;
10274 
10275                 /* Copied from linux ieee_swcr_to_fpcr.  */
10276                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10277                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10278                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10279                                   | SWCR_TRAP_ENABLE_DZE
10280                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
10281                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10282                                   | SWCR_TRAP_ENABLE_INE)) << 57;
10283                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10284                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10285 
10286                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10287                 ret = 0;
10288             }
10289             break;
10290 
10291           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10292             {
10293                 uint64_t exc, fpcr, orig_fpcr;
10294                 int si_code;
10295 
10296                 if (get_user_u64(exc, arg2)) {
10297                     return -TARGET_EFAULT;
10298                 }
10299 
10300                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10301 
10302                 /* We only add to the exception status here.  */
10303                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10304 
10305                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10306                 ret = 0;
10307 
10308                 /* Old exceptions are not signaled.  */
10309                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10310 
10311                 /* If any exceptions set by this call,
10312                    and are unmasked, send a signal.  */
10313                 si_code = 0;
10314                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10315                     si_code = TARGET_FPE_FLTRES;
10316                 }
10317                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10318                     si_code = TARGET_FPE_FLTUND;
10319                 }
10320                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10321                     si_code = TARGET_FPE_FLTOVF;
10322                 }
10323                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10324                     si_code = TARGET_FPE_FLTDIV;
10325                 }
10326                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10327                     si_code = TARGET_FPE_FLTINV;
10328                 }
10329                 if (si_code != 0) {
10330                     target_siginfo_t info;
10331                     info.si_signo = SIGFPE;
10332                     info.si_errno = 0;
10333                     info.si_code = si_code;
10334                     info._sifields._sigfault._addr
10335                         = ((CPUArchState *)cpu_env)->pc;
10336                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10337                                  QEMU_SI_FAULT, &info);
10338                 }
10339             }
10340             break;
10341 
10342           /* case SSI_NVPAIRS:
10343              -- Used with SSIN_UACPROC to enable unaligned accesses.
10344              case SSI_IEEE_STATE_AT_SIGNAL:
10345              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10346              -- Not implemented in linux kernel
10347           */
10348         }
10349         return ret;
10350 #endif
10351 #ifdef TARGET_NR_osf_sigprocmask
10352     /* Alpha specific.  */
10353     case TARGET_NR_osf_sigprocmask:
10354         {
10355             abi_ulong mask;
10356             int how;
10357             sigset_t set, oldset;
10358 
10359             switch(arg1) {
10360             case TARGET_SIG_BLOCK:
10361                 how = SIG_BLOCK;
10362                 break;
10363             case TARGET_SIG_UNBLOCK:
10364                 how = SIG_UNBLOCK;
10365                 break;
10366             case TARGET_SIG_SETMASK:
10367                 how = SIG_SETMASK;
10368                 break;
10369             default:
10370                 return -TARGET_EINVAL;
10371             }
10372             mask = arg2;
10373             target_to_host_old_sigset(&set, &mask);
10374             ret = do_sigprocmask(how, &set, &oldset);
10375             if (!ret) {
10376                 host_to_target_old_sigset(&mask, &oldset);
10377                 ret = mask;
10378             }
10379         }
10380         return ret;
10381 #endif
10382 
10383 #ifdef TARGET_NR_getgid32
10384     case TARGET_NR_getgid32:
10385         return get_errno(getgid());
10386 #endif
10387 #ifdef TARGET_NR_geteuid32
10388     case TARGET_NR_geteuid32:
10389         return get_errno(geteuid());
10390 #endif
10391 #ifdef TARGET_NR_getegid32
10392     case TARGET_NR_getegid32:
10393         return get_errno(getegid());
10394 #endif
10395 #ifdef TARGET_NR_setreuid32
10396     case TARGET_NR_setreuid32:
10397         return get_errno(setreuid(arg1, arg2));
10398 #endif
10399 #ifdef TARGET_NR_setregid32
10400     case TARGET_NR_setregid32:
10401         return get_errno(setregid(arg1, arg2));
10402 #endif
10403 #ifdef TARGET_NR_getgroups32
10404     case TARGET_NR_getgroups32:
10405         {
10406             int gidsetsize = arg1;
10407             uint32_t *target_grouplist;
10408             gid_t *grouplist;
10409             int i;
10410 
10411             grouplist = alloca(gidsetsize * sizeof(gid_t));
10412             ret = get_errno(getgroups(gidsetsize, grouplist));
10413             if (gidsetsize == 0)
10414                 return ret;
10415             if (!is_error(ret)) {
10416                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10417                 if (!target_grouplist) {
10418                     return -TARGET_EFAULT;
10419                 }
10420                 for(i = 0;i < ret; i++)
10421                     target_grouplist[i] = tswap32(grouplist[i]);
10422                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10423             }
10424         }
10425         return ret;
10426 #endif
10427 #ifdef TARGET_NR_setgroups32
10428     case TARGET_NR_setgroups32:
10429         {
10430             int gidsetsize = arg1;
10431             uint32_t *target_grouplist;
10432             gid_t *grouplist;
10433             int i;
10434 
10435             grouplist = alloca(gidsetsize * sizeof(gid_t));
10436             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10437             if (!target_grouplist) {
10438                 return -TARGET_EFAULT;
10439             }
10440             for(i = 0;i < gidsetsize; i++)
10441                 grouplist[i] = tswap32(target_grouplist[i]);
10442             unlock_user(target_grouplist, arg2, 0);
10443             return get_errno(setgroups(gidsetsize, grouplist));
10444         }
10445 #endif
10446 #ifdef TARGET_NR_fchown32
10447     case TARGET_NR_fchown32:
10448         return get_errno(fchown(arg1, arg2, arg3));
10449 #endif
10450 #ifdef TARGET_NR_setresuid32
10451     case TARGET_NR_setresuid32:
10452         return get_errno(sys_setresuid(arg1, arg2, arg3));
10453 #endif
10454 #ifdef TARGET_NR_getresuid32
10455     case TARGET_NR_getresuid32:
10456         {
10457             uid_t ruid, euid, suid;
10458             ret = get_errno(getresuid(&ruid, &euid, &suid));
10459             if (!is_error(ret)) {
10460                 if (put_user_u32(ruid, arg1)
10461                     || put_user_u32(euid, arg2)
10462                     || put_user_u32(suid, arg3))
10463                     return -TARGET_EFAULT;
10464             }
10465         }
10466         return ret;
10467 #endif
10468 #ifdef TARGET_NR_setresgid32
10469     case TARGET_NR_setresgid32:
10470         return get_errno(sys_setresgid(arg1, arg2, arg3));
10471 #endif
10472 #ifdef TARGET_NR_getresgid32
10473     case TARGET_NR_getresgid32:
10474         {
10475             gid_t rgid, egid, sgid;
10476             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10477             if (!is_error(ret)) {
10478                 if (put_user_u32(rgid, arg1)
10479                     || put_user_u32(egid, arg2)
10480                     || put_user_u32(sgid, arg3))
10481                     return -TARGET_EFAULT;
10482             }
10483         }
10484         return ret;
10485 #endif
10486 #ifdef TARGET_NR_chown32
10487     case TARGET_NR_chown32:
10488         if (!(p = lock_user_string(arg1)))
10489             return -TARGET_EFAULT;
10490         ret = get_errno(chown(p, arg2, arg3));
10491         unlock_user(p, arg1, 0);
10492         return ret;
10493 #endif
10494 #ifdef TARGET_NR_setuid32
10495     case TARGET_NR_setuid32:
10496         return get_errno(sys_setuid(arg1));
10497 #endif
10498 #ifdef TARGET_NR_setgid32
10499     case TARGET_NR_setgid32:
10500         return get_errno(sys_setgid(arg1));
10501 #endif
10502 #ifdef TARGET_NR_setfsuid32
10503     case TARGET_NR_setfsuid32:
10504         return get_errno(setfsuid(arg1));
10505 #endif
10506 #ifdef TARGET_NR_setfsgid32
10507     case TARGET_NR_setfsgid32:
10508         return get_errno(setfsgid(arg1));
10509 #endif
10510 #ifdef TARGET_NR_mincore
10511     case TARGET_NR_mincore:
10512         {
10513             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10514             if (!a) {
10515                 return -TARGET_ENOMEM;
10516             }
10517             p = lock_user_string(arg3);
10518             if (!p) {
10519                 ret = -TARGET_EFAULT;
10520             } else {
10521                 ret = get_errno(mincore(a, arg2, p));
10522                 unlock_user(p, arg3, ret);
10523             }
10524             unlock_user(a, arg1, 0);
10525         }
10526         return ret;
10527 #endif
10528 #ifdef TARGET_NR_arm_fadvise64_64
10529     case TARGET_NR_arm_fadvise64_64:
10530         /* arm_fadvise64_64 looks like fadvise64_64 but
10531          * with different argument order: fd, advice, offset, len
10532          * rather than the usual fd, offset, len, advice.
10533          * Note that offset and len are both 64-bit so appear as
10534          * pairs of 32-bit registers.
10535          */
10536         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10537                             target_offset64(arg5, arg6), arg2);
10538         return -host_to_target_errno(ret);
10539 #endif
10540 
10541 #if TARGET_ABI_BITS == 32
10542 
10543 #ifdef TARGET_NR_fadvise64_64
10544     case TARGET_NR_fadvise64_64:
10545 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10546         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10547         ret = arg2;
10548         arg2 = arg3;
10549         arg3 = arg4;
10550         arg4 = arg5;
10551         arg5 = arg6;
10552         arg6 = ret;
10553 #else
10554         /* 6 args: fd, offset (high, low), len (high, low), advice */
10555         if (regpairs_aligned(cpu_env, num)) {
10556             /* offset is in (3,4), len in (5,6) and advice in 7 */
10557             arg2 = arg3;
10558             arg3 = arg4;
10559             arg4 = arg5;
10560             arg5 = arg6;
10561             arg6 = arg7;
10562         }
10563 #endif
10564         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10565                             target_offset64(arg4, arg5), arg6);
10566         return -host_to_target_errno(ret);
10567 #endif
10568 
10569 #ifdef TARGET_NR_fadvise64
10570     case TARGET_NR_fadvise64:
10571         /* 5 args: fd, offset (high, low), len, advice */
10572         if (regpairs_aligned(cpu_env, num)) {
10573             /* offset is in (3,4), len in 5 and advice in 6 */
10574             arg2 = arg3;
10575             arg3 = arg4;
10576             arg4 = arg5;
10577             arg5 = arg6;
10578         }
10579         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10580         return -host_to_target_errno(ret);
10581 #endif
10582 
10583 #else /* not a 32-bit ABI */
10584 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10585 #ifdef TARGET_NR_fadvise64_64
10586     case TARGET_NR_fadvise64_64:
10587 #endif
10588 #ifdef TARGET_NR_fadvise64
10589     case TARGET_NR_fadvise64:
10590 #endif
10591 #ifdef TARGET_S390X
10592         switch (arg4) {
10593         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10594         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10595         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10596         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10597         default: break;
10598         }
10599 #endif
10600         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10601 #endif
10602 #endif /* end of 64-bit ABI fadvise handling */
10603 
10604 #ifdef TARGET_NR_madvise
10605     case TARGET_NR_madvise:
10606         /* A straight passthrough may not be safe because qemu sometimes
10607            turns private file-backed mappings into anonymous mappings.
10608            This will break MADV_DONTNEED.
10609            This is a hint, so ignoring and returning success is ok.  */
10610         return 0;
10611 #endif
10612 #if TARGET_ABI_BITS == 32
10613     case TARGET_NR_fcntl64:
10614     {
10615 	int cmd;
10616 	struct flock64 fl;
10617         from_flock64_fn *copyfrom = copy_from_user_flock64;
10618         to_flock64_fn *copyto = copy_to_user_flock64;
10619 
10620 #ifdef TARGET_ARM
10621         if (!((CPUARMState *)cpu_env)->eabi) {
10622             copyfrom = copy_from_user_oabi_flock64;
10623             copyto = copy_to_user_oabi_flock64;
10624         }
10625 #endif
10626 
10627 	cmd = target_to_host_fcntl_cmd(arg2);
10628         if (cmd == -TARGET_EINVAL) {
10629             return cmd;
10630         }
10631 
10632         switch(arg2) {
10633         case TARGET_F_GETLK64:
10634             ret = copyfrom(&fl, arg3);
10635             if (ret) {
10636                 break;
10637             }
10638             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10639             if (ret == 0) {
10640                 ret = copyto(arg3, &fl);
10641             }
10642 	    break;
10643 
10644         case TARGET_F_SETLK64:
10645         case TARGET_F_SETLKW64:
10646             ret = copyfrom(&fl, arg3);
10647             if (ret) {
10648                 break;
10649             }
10650             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10651 	    break;
10652         default:
10653             ret = do_fcntl(arg1, arg2, arg3);
10654             break;
10655         }
10656         return ret;
10657     }
10658 #endif
10659 #ifdef TARGET_NR_cacheflush
10660     case TARGET_NR_cacheflush:
10661         /* self-modifying code is handled automatically, so nothing needed */
10662         return 0;
10663 #endif
10664 #ifdef TARGET_NR_getpagesize
10665     case TARGET_NR_getpagesize:
10666         return TARGET_PAGE_SIZE;
10667 #endif
10668     case TARGET_NR_gettid:
10669         return get_errno(sys_gettid());
10670 #ifdef TARGET_NR_readahead
10671     case TARGET_NR_readahead:
10672 #if TARGET_ABI_BITS == 32
10673         if (regpairs_aligned(cpu_env, num)) {
10674             arg2 = arg3;
10675             arg3 = arg4;
10676             arg4 = arg5;
10677         }
10678         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10679 #else
10680         ret = get_errno(readahead(arg1, arg2, arg3));
10681 #endif
10682         return ret;
10683 #endif
10684 #ifdef CONFIG_ATTR
10685 #ifdef TARGET_NR_setxattr
10686     case TARGET_NR_listxattr:
10687     case TARGET_NR_llistxattr:
10688     {
10689         void *p, *b = 0;
10690         if (arg2) {
10691             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10692             if (!b) {
10693                 return -TARGET_EFAULT;
10694             }
10695         }
10696         p = lock_user_string(arg1);
10697         if (p) {
10698             if (num == TARGET_NR_listxattr) {
10699                 ret = get_errno(listxattr(p, b, arg3));
10700             } else {
10701                 ret = get_errno(llistxattr(p, b, arg3));
10702             }
10703         } else {
10704             ret = -TARGET_EFAULT;
10705         }
10706         unlock_user(p, arg1, 0);
10707         unlock_user(b, arg2, arg3);
10708         return ret;
10709     }
10710     case TARGET_NR_flistxattr:
10711     {
10712         void *b = 0;
10713         if (arg2) {
10714             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10715             if (!b) {
10716                 return -TARGET_EFAULT;
10717             }
10718         }
10719         ret = get_errno(flistxattr(arg1, b, arg3));
10720         unlock_user(b, arg2, arg3);
10721         return ret;
10722     }
10723     case TARGET_NR_setxattr:
10724     case TARGET_NR_lsetxattr:
10725         {
10726             void *p, *n, *v = 0;
10727             if (arg3) {
10728                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10729                 if (!v) {
10730                     return -TARGET_EFAULT;
10731                 }
10732             }
10733             p = lock_user_string(arg1);
10734             n = lock_user_string(arg2);
10735             if (p && n) {
10736                 if (num == TARGET_NR_setxattr) {
10737                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10738                 } else {
10739                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10740                 }
10741             } else {
10742                 ret = -TARGET_EFAULT;
10743             }
10744             unlock_user(p, arg1, 0);
10745             unlock_user(n, arg2, 0);
10746             unlock_user(v, arg3, 0);
10747         }
10748         return ret;
10749     case TARGET_NR_fsetxattr:
10750         {
10751             void *n, *v = 0;
10752             if (arg3) {
10753                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10754                 if (!v) {
10755                     return -TARGET_EFAULT;
10756                 }
10757             }
10758             n = lock_user_string(arg2);
10759             if (n) {
10760                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10761             } else {
10762                 ret = -TARGET_EFAULT;
10763             }
10764             unlock_user(n, arg2, 0);
10765             unlock_user(v, arg3, 0);
10766         }
10767         return ret;
10768     case TARGET_NR_getxattr:
10769     case TARGET_NR_lgetxattr:
10770         {
10771             void *p, *n, *v = 0;
10772             if (arg3) {
10773                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10774                 if (!v) {
10775                     return -TARGET_EFAULT;
10776                 }
10777             }
10778             p = lock_user_string(arg1);
10779             n = lock_user_string(arg2);
10780             if (p && n) {
10781                 if (num == TARGET_NR_getxattr) {
10782                     ret = get_errno(getxattr(p, n, v, arg4));
10783                 } else {
10784                     ret = get_errno(lgetxattr(p, n, v, arg4));
10785                 }
10786             } else {
10787                 ret = -TARGET_EFAULT;
10788             }
10789             unlock_user(p, arg1, 0);
10790             unlock_user(n, arg2, 0);
10791             unlock_user(v, arg3, arg4);
10792         }
10793         return ret;
10794     case TARGET_NR_fgetxattr:
10795         {
10796             void *n, *v = 0;
10797             if (arg3) {
10798                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10799                 if (!v) {
10800                     return -TARGET_EFAULT;
10801                 }
10802             }
10803             n = lock_user_string(arg2);
10804             if (n) {
10805                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10806             } else {
10807                 ret = -TARGET_EFAULT;
10808             }
10809             unlock_user(n, arg2, 0);
10810             unlock_user(v, arg3, arg4);
10811         }
10812         return ret;
10813     case TARGET_NR_removexattr:
10814     case TARGET_NR_lremovexattr:
10815         {
10816             void *p, *n;
10817             p = lock_user_string(arg1);
10818             n = lock_user_string(arg2);
10819             if (p && n) {
10820                 if (num == TARGET_NR_removexattr) {
10821                     ret = get_errno(removexattr(p, n));
10822                 } else {
10823                     ret = get_errno(lremovexattr(p, n));
10824                 }
10825             } else {
10826                 ret = -TARGET_EFAULT;
10827             }
10828             unlock_user(p, arg1, 0);
10829             unlock_user(n, arg2, 0);
10830         }
10831         return ret;
10832     case TARGET_NR_fremovexattr:
10833         {
10834             void *n;
10835             n = lock_user_string(arg2);
10836             if (n) {
10837                 ret = get_errno(fremovexattr(arg1, n));
10838             } else {
10839                 ret = -TARGET_EFAULT;
10840             }
10841             unlock_user(n, arg2, 0);
10842         }
10843         return ret;
10844 #endif
10845 #endif /* CONFIG_ATTR */
10846 #ifdef TARGET_NR_set_thread_area
10847     case TARGET_NR_set_thread_area:
10848 #if defined(TARGET_MIPS)
10849       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10850       return 0;
10851 #elif defined(TARGET_CRIS)
10852       if (arg1 & 0xff)
10853           ret = -TARGET_EINVAL;
10854       else {
10855           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10856           ret = 0;
10857       }
10858       return ret;
10859 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10860       return do_set_thread_area(cpu_env, arg1);
10861 #elif defined(TARGET_M68K)
10862       {
10863           TaskState *ts = cpu->opaque;
10864           ts->tp_value = arg1;
10865           return 0;
10866       }
10867 #else
10868       return -TARGET_ENOSYS;
10869 #endif
10870 #endif
10871 #ifdef TARGET_NR_get_thread_area
10872     case TARGET_NR_get_thread_area:
10873 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10874         return do_get_thread_area(cpu_env, arg1);
10875 #elif defined(TARGET_M68K)
10876         {
10877             TaskState *ts = cpu->opaque;
10878             return ts->tp_value;
10879         }
10880 #else
10881         return -TARGET_ENOSYS;
10882 #endif
10883 #endif
10884 #ifdef TARGET_NR_getdomainname
10885     case TARGET_NR_getdomainname:
10886         return -TARGET_ENOSYS;
10887 #endif
10888 
10889 #ifdef TARGET_NR_clock_settime
10890     case TARGET_NR_clock_settime:
10891     {
10892         struct timespec ts;
10893 
10894         ret = target_to_host_timespec(&ts, arg2);
10895         if (!is_error(ret)) {
10896             ret = get_errno(clock_settime(arg1, &ts));
10897         }
10898         return ret;
10899     }
10900 #endif
10901 #ifdef TARGET_NR_clock_gettime
10902     case TARGET_NR_clock_gettime:
10903     {
10904         struct timespec ts;
10905         ret = get_errno(clock_gettime(arg1, &ts));
10906         if (!is_error(ret)) {
10907             ret = host_to_target_timespec(arg2, &ts);
10908         }
10909         return ret;
10910     }
10911 #endif
10912 #ifdef TARGET_NR_clock_getres
10913     case TARGET_NR_clock_getres:
10914     {
10915         struct timespec ts;
10916         ret = get_errno(clock_getres(arg1, &ts));
10917         if (!is_error(ret)) {
10918             host_to_target_timespec(arg2, &ts);
10919         }
10920         return ret;
10921     }
10922 #endif
10923 #ifdef TARGET_NR_clock_nanosleep
10924     case TARGET_NR_clock_nanosleep:
10925     {
10926         struct timespec ts;
10927         target_to_host_timespec(&ts, arg3);
10928         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10929                                              &ts, arg4 ? &ts : NULL));
10930         if (arg4)
10931             host_to_target_timespec(arg4, &ts);
10932 
10933 #if defined(TARGET_PPC)
10934         /* clock_nanosleep is odd in that it returns positive errno values.
10935          * On PPC, CR0 bit 3 should be set in such a situation. */
10936         if (ret && ret != -TARGET_ERESTARTSYS) {
10937             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10938         }
10939 #endif
10940         return ret;
10941     }
10942 #endif
10943 
10944 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10945     case TARGET_NR_set_tid_address:
10946         return get_errno(set_tid_address((int *)g2h(arg1)));
10947 #endif
10948 
10949     case TARGET_NR_tkill:
10950         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10951 
10952     case TARGET_NR_tgkill:
10953         return get_errno(safe_tgkill((int)arg1, (int)arg2,
10954                          target_to_host_signal(arg3)));
10955 
10956 #ifdef TARGET_NR_set_robust_list
10957     case TARGET_NR_set_robust_list:
10958     case TARGET_NR_get_robust_list:
10959         /* The ABI for supporting robust futexes has userspace pass
10960          * the kernel a pointer to a linked list which is updated by
10961          * userspace after the syscall; the list is walked by the kernel
10962          * when the thread exits. Since the linked list in QEMU guest
10963          * memory isn't a valid linked list for the host and we have
10964          * no way to reliably intercept the thread-death event, we can't
10965          * support these. Silently return ENOSYS so that guest userspace
10966          * falls back to a non-robust futex implementation (which should
10967          * be OK except in the corner case of the guest crashing while
10968          * holding a mutex that is shared with another process via
10969          * shared memory).
10970          */
10971         return -TARGET_ENOSYS;
10972 #endif
10973 
10974 #if defined(TARGET_NR_utimensat)
10975     case TARGET_NR_utimensat:
10976         {
10977             struct timespec *tsp, ts[2];
10978             if (!arg3) {
10979                 tsp = NULL;
10980             } else {
10981                 target_to_host_timespec(ts, arg3);
10982                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10983                 tsp = ts;
10984             }
10985             if (!arg2)
10986                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10987             else {
10988                 if (!(p = lock_user_string(arg2))) {
10989                     return -TARGET_EFAULT;
10990                 }
10991                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10992                 unlock_user(p, arg2, 0);
10993             }
10994         }
10995         return ret;
10996 #endif
10997     case TARGET_NR_futex:
10998         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10999 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11000     case TARGET_NR_inotify_init:
11001         ret = get_errno(sys_inotify_init());
11002         if (ret >= 0) {
11003             fd_trans_register(ret, &target_inotify_trans);
11004         }
11005         return ret;
11006 #endif
11007 #ifdef CONFIG_INOTIFY1
11008 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11009     case TARGET_NR_inotify_init1:
11010         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11011                                           fcntl_flags_tbl)));
11012         if (ret >= 0) {
11013             fd_trans_register(ret, &target_inotify_trans);
11014         }
11015         return ret;
11016 #endif
11017 #endif
11018 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11019     case TARGET_NR_inotify_add_watch:
11020         p = lock_user_string(arg2);
11021         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11022         unlock_user(p, arg2, 0);
11023         return ret;
11024 #endif
11025 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11026     case TARGET_NR_inotify_rm_watch:
11027         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11028 #endif
11029 
11030 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11031     case TARGET_NR_mq_open:
11032         {
11033             struct mq_attr posix_mq_attr;
11034             struct mq_attr *pposix_mq_attr;
11035             int host_flags;
11036 
11037             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11038             pposix_mq_attr = NULL;
11039             if (arg4) {
11040                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11041                     return -TARGET_EFAULT;
11042                 }
11043                 pposix_mq_attr = &posix_mq_attr;
11044             }
11045             p = lock_user_string(arg1 - 1);
11046             if (!p) {
11047                 return -TARGET_EFAULT;
11048             }
11049             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11050             unlock_user (p, arg1, 0);
11051         }
11052         return ret;
11053 
11054     case TARGET_NR_mq_unlink:
11055         p = lock_user_string(arg1 - 1);
11056         if (!p) {
11057             return -TARGET_EFAULT;
11058         }
11059         ret = get_errno(mq_unlink(p));
11060         unlock_user (p, arg1, 0);
11061         return ret;
11062 
11063     case TARGET_NR_mq_timedsend:
11064         {
11065             struct timespec ts;
11066 
11067             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11068             if (arg5 != 0) {
11069                 target_to_host_timespec(&ts, arg5);
11070                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11071                 host_to_target_timespec(arg5, &ts);
11072             } else {
11073                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11074             }
11075             unlock_user (p, arg2, arg3);
11076         }
11077         return ret;
11078 
11079     case TARGET_NR_mq_timedreceive:
11080         {
11081             struct timespec ts;
11082             unsigned int prio;
11083 
11084             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11085             if (arg5 != 0) {
11086                 target_to_host_timespec(&ts, arg5);
11087                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11088                                                      &prio, &ts));
11089                 host_to_target_timespec(arg5, &ts);
11090             } else {
11091                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11092                                                      &prio, NULL));
11093             }
11094             unlock_user (p, arg2, arg3);
11095             if (arg4 != 0)
11096                 put_user_u32(prio, arg4);
11097         }
11098         return ret;
11099 
11100     /* Not implemented for now... */
11101 /*     case TARGET_NR_mq_notify: */
11102 /*         break; */
11103 
11104     case TARGET_NR_mq_getsetattr:
11105         {
11106             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11107             ret = 0;
11108             if (arg2 != 0) {
11109                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11110                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11111                                            &posix_mq_attr_out));
11112             } else if (arg3 != 0) {
11113                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11114             }
11115             if (ret == 0 && arg3 != 0) {
11116                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11117             }
11118         }
11119         return ret;
11120 #endif
11121 
11122 #ifdef CONFIG_SPLICE
11123 #ifdef TARGET_NR_tee
11124     case TARGET_NR_tee:
11125         {
11126             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11127         }
11128         return ret;
11129 #endif
11130 #ifdef TARGET_NR_splice
11131     case TARGET_NR_splice:
11132         {
11133             loff_t loff_in, loff_out;
11134             loff_t *ploff_in = NULL, *ploff_out = NULL;
11135             if (arg2) {
11136                 if (get_user_u64(loff_in, arg2)) {
11137                     return -TARGET_EFAULT;
11138                 }
11139                 ploff_in = &loff_in;
11140             }
11141             if (arg4) {
11142                 if (get_user_u64(loff_out, arg4)) {
11143                     return -TARGET_EFAULT;
11144                 }
11145                 ploff_out = &loff_out;
11146             }
11147             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11148             if (arg2) {
11149                 if (put_user_u64(loff_in, arg2)) {
11150                     return -TARGET_EFAULT;
11151                 }
11152             }
11153             if (arg4) {
11154                 if (put_user_u64(loff_out, arg4)) {
11155                     return -TARGET_EFAULT;
11156                 }
11157             }
11158         }
11159         return ret;
11160 #endif
11161 #ifdef TARGET_NR_vmsplice
11162 	case TARGET_NR_vmsplice:
11163         {
11164             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11165             if (vec != NULL) {
11166                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11167                 unlock_iovec(vec, arg2, arg3, 0);
11168             } else {
11169                 ret = -host_to_target_errno(errno);
11170             }
11171         }
11172         return ret;
11173 #endif
11174 #endif /* CONFIG_SPLICE */
11175 #ifdef CONFIG_EVENTFD
11176 #if defined(TARGET_NR_eventfd)
11177     case TARGET_NR_eventfd:
11178         ret = get_errno(eventfd(arg1, 0));
11179         if (ret >= 0) {
11180             fd_trans_register(ret, &target_eventfd_trans);
11181         }
11182         return ret;
11183 #endif
11184 #if defined(TARGET_NR_eventfd2)
11185     case TARGET_NR_eventfd2:
11186     {
11187         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11188         if (arg2 & TARGET_O_NONBLOCK) {
11189             host_flags |= O_NONBLOCK;
11190         }
11191         if (arg2 & TARGET_O_CLOEXEC) {
11192             host_flags |= O_CLOEXEC;
11193         }
11194         ret = get_errno(eventfd(arg1, host_flags));
11195         if (ret >= 0) {
11196             fd_trans_register(ret, &target_eventfd_trans);
11197         }
11198         return ret;
11199     }
11200 #endif
11201 #endif /* CONFIG_EVENTFD  */
11202 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11203     case TARGET_NR_fallocate:
11204 #if TARGET_ABI_BITS == 32
11205         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11206                                   target_offset64(arg5, arg6)));
11207 #else
11208         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11209 #endif
11210         return ret;
11211 #endif
11212 #if defined(CONFIG_SYNC_FILE_RANGE)
11213 #if defined(TARGET_NR_sync_file_range)
11214     case TARGET_NR_sync_file_range:
11215 #if TARGET_ABI_BITS == 32
11216 #if defined(TARGET_MIPS)
11217         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11218                                         target_offset64(arg5, arg6), arg7));
11219 #else
11220         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11221                                         target_offset64(arg4, arg5), arg6));
11222 #endif /* !TARGET_MIPS */
11223 #else
11224         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11225 #endif
11226         return ret;
11227 #endif
11228 #if defined(TARGET_NR_sync_file_range2)
11229     case TARGET_NR_sync_file_range2:
11230         /* This is like sync_file_range but the arguments are reordered */
11231 #if TARGET_ABI_BITS == 32
11232         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11233                                         target_offset64(arg5, arg6), arg2));
11234 #else
11235         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11236 #endif
11237         return ret;
11238 #endif
11239 #endif
11240 #if defined(TARGET_NR_signalfd4)
11241     case TARGET_NR_signalfd4:
11242         return do_signalfd4(arg1, arg2, arg4);
11243 #endif
11244 #if defined(TARGET_NR_signalfd)
11245     case TARGET_NR_signalfd:
11246         return do_signalfd4(arg1, arg2, 0);
11247 #endif
11248 #if defined(CONFIG_EPOLL)
11249 #if defined(TARGET_NR_epoll_create)
11250     case TARGET_NR_epoll_create:
11251         return get_errno(epoll_create(arg1));
11252 #endif
11253 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11254     case TARGET_NR_epoll_create1:
11255         return get_errno(epoll_create1(arg1));
11256 #endif
11257 #if defined(TARGET_NR_epoll_ctl)
11258     case TARGET_NR_epoll_ctl:
11259     {
11260         struct epoll_event ep;
11261         struct epoll_event *epp = 0;
11262         if (arg4) {
11263             struct target_epoll_event *target_ep;
11264             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11265                 return -TARGET_EFAULT;
11266             }
11267             ep.events = tswap32(target_ep->events);
11268             /* The epoll_data_t union is just opaque data to the kernel,
11269              * so we transfer all 64 bits across and need not worry what
11270              * actual data type it is.
11271              */
11272             ep.data.u64 = tswap64(target_ep->data.u64);
11273             unlock_user_struct(target_ep, arg4, 0);
11274             epp = &ep;
11275         }
11276         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11277     }
11278 #endif
11279 
11280 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11281 #if defined(TARGET_NR_epoll_wait)
11282     case TARGET_NR_epoll_wait:
11283 #endif
11284 #if defined(TARGET_NR_epoll_pwait)
11285     case TARGET_NR_epoll_pwait:
11286 #endif
11287     {
11288         struct target_epoll_event *target_ep;
11289         struct epoll_event *ep;
11290         int epfd = arg1;
11291         int maxevents = arg3;
11292         int timeout = arg4;
11293 
11294         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11295             return -TARGET_EINVAL;
11296         }
11297 
11298         target_ep = lock_user(VERIFY_WRITE, arg2,
11299                               maxevents * sizeof(struct target_epoll_event), 1);
11300         if (!target_ep) {
11301             return -TARGET_EFAULT;
11302         }
11303 
11304         ep = g_try_new(struct epoll_event, maxevents);
11305         if (!ep) {
11306             unlock_user(target_ep, arg2, 0);
11307             return -TARGET_ENOMEM;
11308         }
11309 
11310         switch (num) {
11311 #if defined(TARGET_NR_epoll_pwait)
11312         case TARGET_NR_epoll_pwait:
11313         {
11314             target_sigset_t *target_set;
11315             sigset_t _set, *set = &_set;
11316 
11317             if (arg5) {
11318                 if (arg6 != sizeof(target_sigset_t)) {
11319                     ret = -TARGET_EINVAL;
11320                     break;
11321                 }
11322 
11323                 target_set = lock_user(VERIFY_READ, arg5,
11324                                        sizeof(target_sigset_t), 1);
11325                 if (!target_set) {
11326                     ret = -TARGET_EFAULT;
11327                     break;
11328                 }
11329                 target_to_host_sigset(set, target_set);
11330                 unlock_user(target_set, arg5, 0);
11331             } else {
11332                 set = NULL;
11333             }
11334 
11335             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11336                                              set, SIGSET_T_SIZE));
11337             break;
11338         }
11339 #endif
11340 #if defined(TARGET_NR_epoll_wait)
11341         case TARGET_NR_epoll_wait:
11342             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11343                                              NULL, 0));
11344             break;
11345 #endif
11346         default:
11347             ret = -TARGET_ENOSYS;
11348         }
11349         if (!is_error(ret)) {
11350             int i;
11351             for (i = 0; i < ret; i++) {
11352                 target_ep[i].events = tswap32(ep[i].events);
11353                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11354             }
11355             unlock_user(target_ep, arg2,
11356                         ret * sizeof(struct target_epoll_event));
11357         } else {
11358             unlock_user(target_ep, arg2, 0);
11359         }
11360         g_free(ep);
11361         return ret;
11362     }
11363 #endif
11364 #endif
11365 #ifdef TARGET_NR_prlimit64
11366     case TARGET_NR_prlimit64:
11367     {
11368         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11369         struct target_rlimit64 *target_rnew, *target_rold;
11370         struct host_rlimit64 rnew, rold, *rnewp = 0;
11371         int resource = target_to_host_resource(arg2);
11372         if (arg3) {
11373             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11374                 return -TARGET_EFAULT;
11375             }
11376             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11377             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11378             unlock_user_struct(target_rnew, arg3, 0);
11379             rnewp = &rnew;
11380         }
11381 
11382         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11383         if (!is_error(ret) && arg4) {
11384             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11385                 return -TARGET_EFAULT;
11386             }
11387             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11388             target_rold->rlim_max = tswap64(rold.rlim_max);
11389             unlock_user_struct(target_rold, arg4, 1);
11390         }
11391         return ret;
11392     }
11393 #endif
11394 #ifdef TARGET_NR_gethostname
11395     case TARGET_NR_gethostname:
11396     {
11397         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11398         if (name) {
11399             ret = get_errno(gethostname(name, arg2));
11400             unlock_user(name, arg1, arg2);
11401         } else {
11402             ret = -TARGET_EFAULT;
11403         }
11404         return ret;
11405     }
11406 #endif
11407 #ifdef TARGET_NR_atomic_cmpxchg_32
11408     case TARGET_NR_atomic_cmpxchg_32:
11409     {
11410         /* should use start_exclusive from main.c */
11411         abi_ulong mem_value;
11412         if (get_user_u32(mem_value, arg6)) {
11413             target_siginfo_t info;
11414             info.si_signo = SIGSEGV;
11415             info.si_errno = 0;
11416             info.si_code = TARGET_SEGV_MAPERR;
11417             info._sifields._sigfault._addr = arg6;
11418             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11419                          QEMU_SI_FAULT, &info);
11420             ret = 0xdeadbeef;
11421 
11422         }
11423         if (mem_value == arg2)
11424             put_user_u32(arg1, arg6);
11425         return mem_value;
11426     }
11427 #endif
11428 #ifdef TARGET_NR_atomic_barrier
11429     case TARGET_NR_atomic_barrier:
11430         /* Like the kernel implementation and the
11431            qemu arm barrier, no-op this? */
11432         return 0;
11433 #endif
11434 
11435 #ifdef TARGET_NR_timer_create
11436     case TARGET_NR_timer_create:
11437     {
11438         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11439 
11440         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11441 
11442         int clkid = arg1;
11443         int timer_index = next_free_host_timer();
11444 
11445         if (timer_index < 0) {
11446             ret = -TARGET_EAGAIN;
11447         } else {
11448             timer_t *phtimer = g_posix_timers  + timer_index;
11449 
11450             if (arg2) {
11451                 phost_sevp = &host_sevp;
11452                 ret = target_to_host_sigevent(phost_sevp, arg2);
11453                 if (ret != 0) {
11454                     return ret;
11455                 }
11456             }
11457 
11458             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11459             if (ret) {
11460                 phtimer = NULL;
11461             } else {
11462                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11463                     return -TARGET_EFAULT;
11464                 }
11465             }
11466         }
11467         return ret;
11468     }
11469 #endif
11470 
11471 #ifdef TARGET_NR_timer_settime
11472     case TARGET_NR_timer_settime:
11473     {
11474         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11475          * struct itimerspec * old_value */
11476         target_timer_t timerid = get_timer_id(arg1);
11477 
11478         if (timerid < 0) {
11479             ret = timerid;
11480         } else if (arg3 == 0) {
11481             ret = -TARGET_EINVAL;
11482         } else {
11483             timer_t htimer = g_posix_timers[timerid];
11484             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11485 
11486             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11487                 return -TARGET_EFAULT;
11488             }
11489             ret = get_errno(
11490                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11491             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11492                 return -TARGET_EFAULT;
11493             }
11494         }
11495         return ret;
11496     }
11497 #endif
11498 
11499 #ifdef TARGET_NR_timer_gettime
11500     case TARGET_NR_timer_gettime:
11501     {
11502         /* args: timer_t timerid, struct itimerspec *curr_value */
11503         target_timer_t timerid = get_timer_id(arg1);
11504 
11505         if (timerid < 0) {
11506             ret = timerid;
11507         } else if (!arg2) {
11508             ret = -TARGET_EFAULT;
11509         } else {
11510             timer_t htimer = g_posix_timers[timerid];
11511             struct itimerspec hspec;
11512             ret = get_errno(timer_gettime(htimer, &hspec));
11513 
11514             if (host_to_target_itimerspec(arg2, &hspec)) {
11515                 ret = -TARGET_EFAULT;
11516             }
11517         }
11518         return ret;
11519     }
11520 #endif
11521 
11522 #ifdef TARGET_NR_timer_getoverrun
11523     case TARGET_NR_timer_getoverrun:
11524     {
11525         /* args: timer_t timerid */
11526         target_timer_t timerid = get_timer_id(arg1);
11527 
11528         if (timerid < 0) {
11529             ret = timerid;
11530         } else {
11531             timer_t htimer = g_posix_timers[timerid];
11532             ret = get_errno(timer_getoverrun(htimer));
11533         }
11534         fd_trans_unregister(ret);
11535         return ret;
11536     }
11537 #endif
11538 
11539 #ifdef TARGET_NR_timer_delete
11540     case TARGET_NR_timer_delete:
11541     {
11542         /* args: timer_t timerid */
11543         target_timer_t timerid = get_timer_id(arg1);
11544 
11545         if (timerid < 0) {
11546             ret = timerid;
11547         } else {
11548             timer_t htimer = g_posix_timers[timerid];
11549             ret = get_errno(timer_delete(htimer));
11550             g_posix_timers[timerid] = 0;
11551         }
11552         return ret;
11553     }
11554 #endif
11555 
11556 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11557     case TARGET_NR_timerfd_create:
11558         return get_errno(timerfd_create(arg1,
11559                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11560 #endif
11561 
11562 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11563     case TARGET_NR_timerfd_gettime:
11564         {
11565             struct itimerspec its_curr;
11566 
11567             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11568 
11569             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11570                 return -TARGET_EFAULT;
11571             }
11572         }
11573         return ret;
11574 #endif
11575 
11576 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11577     case TARGET_NR_timerfd_settime:
11578         {
11579             struct itimerspec its_new, its_old, *p_new;
11580 
11581             if (arg3) {
11582                 if (target_to_host_itimerspec(&its_new, arg3)) {
11583                     return -TARGET_EFAULT;
11584                 }
11585                 p_new = &its_new;
11586             } else {
11587                 p_new = NULL;
11588             }
11589 
11590             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11591 
11592             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11593                 return -TARGET_EFAULT;
11594             }
11595         }
11596         return ret;
11597 #endif
11598 
11599 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11600     case TARGET_NR_ioprio_get:
11601         return get_errno(ioprio_get(arg1, arg2));
11602 #endif
11603 
11604 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11605     case TARGET_NR_ioprio_set:
11606         return get_errno(ioprio_set(arg1, arg2, arg3));
11607 #endif
11608 
11609 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11610     case TARGET_NR_setns:
11611         return get_errno(setns(arg1, arg2));
11612 #endif
11613 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11614     case TARGET_NR_unshare:
11615         return get_errno(unshare(arg1));
11616 #endif
11617 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11618     case TARGET_NR_kcmp:
11619         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11620 #endif
11621 #ifdef TARGET_NR_swapcontext
11622     case TARGET_NR_swapcontext:
11623         /* PowerPC specific.  */
11624         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11625 #endif
11626 
11627     default:
11628         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11629         return -TARGET_ENOSYS;
11630     }
11631     return ret;
11632 }
11633 
11634 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11635                     abi_long arg2, abi_long arg3, abi_long arg4,
11636                     abi_long arg5, abi_long arg6, abi_long arg7,
11637                     abi_long arg8)
11638 {
11639     CPUState *cpu = ENV_GET_CPU(cpu_env);
11640     abi_long ret;
11641 
11642 #ifdef DEBUG_ERESTARTSYS
11643     /* Debug-only code for exercising the syscall-restart code paths
11644      * in the per-architecture cpu main loops: restart every syscall
11645      * the guest makes once before letting it through.
11646      */
11647     {
11648         static bool flag;
11649         flag = !flag;
11650         if (flag) {
11651             return -TARGET_ERESTARTSYS;
11652         }
11653     }
11654 #endif
11655 
11656     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11657                              arg5, arg6, arg7, arg8);
11658 
11659     if (unlikely(do_strace)) {
11660         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11661         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11662                           arg5, arg6, arg7, arg8);
11663         print_syscall_ret(num, ret);
11664     } else {
11665         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11666                           arg5, arg6, arg7, arg8);
11667     }
11668 
11669     trace_guest_user_syscall_ret(cpu, num, ret);
11670     return ret;
11671 }
11672