xref: /openbmc/qemu/linux-user/syscall.c (revision 234e2565)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #ifdef CONFIG_TIMERFD
59 #include <sys/timerfd.h>
60 #endif
61 #ifdef CONFIG_EVENTFD
62 #include <sys/eventfd.h>
63 #endif
64 #ifdef CONFIG_EPOLL
65 #include <sys/epoll.h>
66 #endif
67 #ifdef CONFIG_ATTR
68 #include "qemu/xattr.h"
69 #endif
70 #ifdef CONFIG_SENDFILE
71 #include <sys/sendfile.h>
72 #endif
73 
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
80 
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/cdrom.h>
84 #include <linux/hdreg.h>
85 #include <linux/soundcard.h>
86 #include <linux/kd.h>
87 #include <linux/mtio.h>
88 #include <linux/fs.h>
89 #if defined(CONFIG_FIEMAP)
90 #include <linux/fiemap.h>
91 #endif
92 #include <linux/fb.h>
93 #if defined(CONFIG_USBFS)
94 #include <linux/usbdevice_fs.h>
95 #include <linux/usb/ch9.h>
96 #endif
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #include <linux/if_alg.h>
106 #include "linux_loop.h"
107 #include "uname.h"
108 
109 #include "qemu.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
113 
114 #ifndef CLONE_IO
115 #define CLONE_IO                0x80000000      /* Clone io context */
116 #endif
117 
118 /* We can't directly call the host clone syscall, because this will
119  * badly confuse libc (breaking mutexes, for example). So we must
120  * divide clone flags into:
121  *  * flag combinations that look like pthread_create()
122  *  * flag combinations that look like fork()
123  *  * flags we can implement within QEMU itself
124  *  * flags we can't support and will return an error for
125  */
126 /* For thread creation, all these flags must be present; for
127  * fork, none must be present.
128  */
129 #define CLONE_THREAD_FLAGS                              \
130     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
131      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
132 
133 /* These flags are ignored:
134  * CLONE_DETACHED is now ignored by the kernel;
135  * CLONE_IO is just an optimisation hint to the I/O scheduler
136  */
137 #define CLONE_IGNORED_FLAGS                     \
138     (CLONE_DETACHED | CLONE_IO)
139 
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS               \
142     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
143      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
144 
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
149 
150 #define CLONE_INVALID_FORK_FLAGS                                        \
151     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
152 
153 #define CLONE_INVALID_THREAD_FLAGS                                      \
154     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
155        CLONE_IGNORED_FLAGS))
156 
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158  * have almost all been allocated. We cannot support any of
159  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161  * The checks against the invalid thread masks above will catch these.
162  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
163  */
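/* Illustrative example (not exhaustive): a glibc pthread_create() typically
 * passes something like
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and no bits from
 * CLONE_INVALID_THREAD_FLAGS, so it is handled as thread creation; a
 * fork()-style clone passes essentially just SIGCHLD (within CSIGNAL),
 * possibly plus the child-tid flags in CLONE_OPTIONAL_FORK_FLAGS, and is
 * handled as a fork.
 */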
164 
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166  * once. This exercises the codepaths for restart.
167  */
168 //#define DEBUG_ERESTARTSYS
169 
170 //#include <linux/msdos_fs.h>
171 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
172 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
173 
174 #undef _syscall0
175 #undef _syscall1
176 #undef _syscall2
177 #undef _syscall3
178 #undef _syscall4
179 #undef _syscall5
180 #undef _syscall6
181 
182 #define _syscall0(type,name)		\
183 static type name (void)			\
184 {					\
185 	return syscall(__NR_##name);	\
186 }
187 
188 #define _syscall1(type,name,type1,arg1)		\
189 static type name (type1 arg1)			\
190 {						\
191 	return syscall(__NR_##name, arg1);	\
192 }
193 
194 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
195 static type name (type1 arg1,type2 arg2)		\
196 {							\
197 	return syscall(__NR_##name, arg1, arg2);	\
198 }
199 
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
201 static type name (type1 arg1,type2 arg2,type3 arg3)		\
202 {								\
203 	return syscall(__NR_##name, arg1, arg2, arg3);		\
204 }
205 
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
208 {										\
209 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
210 }
211 
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
213 		  type5,arg5)							\
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
215 {										\
216 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
217 }
218 
219 
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
221 		  type5,arg5,type6,arg6)					\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
223                   type6 arg6)							\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
226 }
227 
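/* For illustration, _syscall2(int, sys_getpriority, int, which, int, who)
 * expands to:
 *
 *   static int sys_getpriority(int which, int who)
 *   {
 *       return syscall(__NR_sys_getpriority, which, who);
 *   }
 *
 * i.e. a thin wrapper around the raw host syscall(), bypassing any glibc
 * wrapper; on failure it returns -1 with the host errno set.
 */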
228 
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
241 #define __NR_sys_statx __NR_statx
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #define __NR_sys_gettid __NR_gettid
253 _syscall0(int, sys_gettid)
254 
255 /* For the 64-bit guest on 32-bit host case we must emulate
256  * getdents using getdents64, because otherwise the host
257  * might hand us back more dirent records than we can fit
258  * into the guest buffer after structure format conversion.
259  * Otherwise we emulate the guest getdents with the host getdents, if available.
260  */
261 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
262 #define EMULATE_GETDENTS_WITH_GETDENTS
263 #endif
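/* For example, a 64-bit guest running on a 32-bit host has
 * TARGET_ABI_BITS (64) > HOST_LONG_BITS (32), so
 * EMULATE_GETDENTS_WITH_GETDENTS stays undefined and the guest's getdents
 * is serviced via sys_getdents64() below.
 */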
264 
265 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
266 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
267 #endif
268 #if (defined(TARGET_NR_getdents) && \
269       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
270     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
272 #endif
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
275           loff_t *, res, uint, wh);
276 #endif
277 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
278 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
279           siginfo_t *, uinfo)
280 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
281 #ifdef __NR_exit_group
282 _syscall1(int,exit_group,int,error_code)
283 #endif
284 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
285 _syscall1(int,set_tid_address,int *,tidptr)
286 #endif
287 #if defined(TARGET_NR_futex) && defined(__NR_futex)
288 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
289           const struct timespec *,timeout,int *,uaddr2,int,val3)
290 #endif
291 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
292 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
293           unsigned long *, user_mask_ptr);
294 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
295 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
296           unsigned long *, user_mask_ptr);
297 #define __NR_sys_getcpu __NR_getcpu
298 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
299 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
300           void *, arg);
301 _syscall2(int, capget, struct __user_cap_header_struct *, header,
302           struct __user_cap_data_struct *, data);
303 _syscall2(int, capset, struct __user_cap_header_struct *, header,
304           struct __user_cap_data_struct *, data);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get, int, which, int, who)
307 #endif
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
310 #endif
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
313 #endif
314 
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
317           unsigned long, idx1, unsigned long, idx2)
318 #endif
319 
320 /*
321  * It is assumed that struct statx is architecture independent.
322  */
323 #if defined(TARGET_NR_statx) && defined(__NR_statx)
324 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
325           unsigned int, mask, struct target_statx *, statxbuf)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
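/* Usage sketch (assuming the usual QEMU bitmask helpers): each row is
 * { target_mask, target_bits, host_mask, host_bits }, terminated by the
 * all-zero row. Helpers such as target_to_host_bitmask() and
 * host_to_target_bitmask() walk this table to rewrite open()/fcntl() flag
 * words, e.g.
 *
 *   int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 */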
363 
364 static int sys_getcwd1(char *buf, size_t size)
365 {
366   if (getcwd(buf, size) == NULL) {
367       /* getcwd() sets errno */
368       return (-1);
369   }
370   return strlen(buf)+1;
371 }
372 
373 #ifdef TARGET_NR_utimensat
374 #if defined(__NR_utimensat)
375 #define __NR_sys_utimensat __NR_utimensat
376 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
377           const struct timespec *,tsp,int,flags)
378 #else
379 static int sys_utimensat(int dirfd, const char *pathname,
380                          const struct timespec times[2], int flags)
381 {
382     errno = ENOSYS;
383     return -1;
384 }
385 #endif
386 #endif /* TARGET_NR_utimensat */
387 
388 #ifdef TARGET_NR_renameat2
389 #if defined(__NR_renameat2)
390 #define __NR_sys_renameat2 __NR_renameat2
391 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
392           const char *, new, unsigned int, flags)
393 #else
394 static int sys_renameat2(int oldfd, const char *old,
395                          int newfd, const char *new, int flags)
396 {
397     if (flags == 0) {
398         return renameat(oldfd, old, newfd, new);
399     }
400     errno = ENOSYS;
401     return -1;
402 }
403 #endif
404 #endif /* TARGET_NR_renameat2 */
405 
406 #ifdef CONFIG_INOTIFY
407 #include <sys/inotify.h>
408 
409 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
410 static int sys_inotify_init(void)
411 {
412   return (inotify_init());
413 }
414 #endif
415 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
416 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
417 {
418   return (inotify_add_watch(fd, pathname, mask));
419 }
420 #endif
421 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
422 static int sys_inotify_rm_watch(int fd, int32_t wd)
423 {
424   return (inotify_rm_watch(fd, wd));
425 }
426 #endif
427 #ifdef CONFIG_INOTIFY1
428 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
429 static int sys_inotify_init1(int flags)
430 {
431   return (inotify_init1(flags));
432 }
433 #endif
434 #endif
435 #else
436 /* Userspace can usually survive runtime without inotify */
437 #undef TARGET_NR_inotify_init
438 #undef TARGET_NR_inotify_init1
439 #undef TARGET_NR_inotify_add_watch
440 #undef TARGET_NR_inotify_rm_watch
441 #endif /* CONFIG_INOTIFY  */
442 
443 #if defined(TARGET_NR_prlimit64)
444 #ifndef __NR_prlimit64
445 # define __NR_prlimit64 -1
446 #endif
447 #define __NR_sys_prlimit64 __NR_prlimit64
448 /* The glibc rlimit structure may not be the one used by the underlying syscall */
449 struct host_rlimit64 {
450     uint64_t rlim_cur;
451     uint64_t rlim_max;
452 };
453 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
454           const struct host_rlimit64 *, new_limit,
455           struct host_rlimit64 *, old_limit)
456 #endif
457 
458 
459 #if defined(TARGET_NR_timer_create)
460 /* Maximum of 32 active POSIX timers allowed at any one time. */
461 static timer_t g_posix_timers[32] = { 0, } ;
462 
463 static inline int next_free_host_timer(void)
464 {
465     int k ;
466     /* FIXME: Does finding the next free slot require a lock? */
467     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
468         if (g_posix_timers[k] == 0) {
469             g_posix_timers[k] = (timer_t) 1;
470             return k;
471         }
472     }
473     return -1;
474 }
475 #endif
476 
477 /* ARM EABI and MIPS expect 64-bit types to be aligned on even register pairs */
478 #ifdef TARGET_ARM
479 static inline int regpairs_aligned(void *cpu_env, int num)
480 {
481     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
482 }
483 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
484 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
485 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
486 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
487  * of registers, which translates to the same as ARM/MIPS, because we start with
488  * r3 as arg1 */
489 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
490 #elif defined(TARGET_SH4)
491 /* SH4 doesn't align register pairs, except for p{read,write}64 */
492 static inline int regpairs_aligned(void *cpu_env, int num)
493 {
494     switch (num) {
495     case TARGET_NR_pread64:
496     case TARGET_NR_pwrite64:
497         return 1;
498 
499     default:
500         return 0;
501     }
502 }
503 #elif defined(TARGET_XTENSA)
504 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
505 #else
506 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
507 #endif
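/* Concrete example: on ARM EABI, pread64(fd, buf, count, offset) passes the
 * 64-bit offset in an even/odd register pair (r4/r5), leaving r3 unused, so
 * when regpairs_aligned() returns 1 the syscall decoder has to skip one
 * argument slot before reassembling the 64-bit value from two registers.
 */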
508 
509 #define ERRNO_TABLE_SIZE 1200
510 
511 /* target_to_host_errno_table[] is initialized from
512  * host_to_target_errno_table[] in syscall_init(). */
513 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
514 };
515 
516 /*
517  * This list is the union of errno values overridden in asm-<arch>/errno.h
518  * minus the errnos that are not actually generic to all archs.
519  */
520 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
521     [EAGAIN]		= TARGET_EAGAIN,
522     [EIDRM]		= TARGET_EIDRM,
523     [ECHRNG]		= TARGET_ECHRNG,
524     [EL2NSYNC]		= TARGET_EL2NSYNC,
525     [EL3HLT]		= TARGET_EL3HLT,
526     [EL3RST]		= TARGET_EL3RST,
527     [ELNRNG]		= TARGET_ELNRNG,
528     [EUNATCH]		= TARGET_EUNATCH,
529     [ENOCSI]		= TARGET_ENOCSI,
530     [EL2HLT]		= TARGET_EL2HLT,
531     [EDEADLK]		= TARGET_EDEADLK,
532     [ENOLCK]		= TARGET_ENOLCK,
533     [EBADE]		= TARGET_EBADE,
534     [EBADR]		= TARGET_EBADR,
535     [EXFULL]		= TARGET_EXFULL,
536     [ENOANO]		= TARGET_ENOANO,
537     [EBADRQC]		= TARGET_EBADRQC,
538     [EBADSLT]		= TARGET_EBADSLT,
539     [EBFONT]		= TARGET_EBFONT,
540     [ENOSTR]		= TARGET_ENOSTR,
541     [ENODATA]		= TARGET_ENODATA,
542     [ETIME]		= TARGET_ETIME,
543     [ENOSR]		= TARGET_ENOSR,
544     [ENONET]		= TARGET_ENONET,
545     [ENOPKG]		= TARGET_ENOPKG,
546     [EREMOTE]		= TARGET_EREMOTE,
547     [ENOLINK]		= TARGET_ENOLINK,
548     [EADV]		= TARGET_EADV,
549     [ESRMNT]		= TARGET_ESRMNT,
550     [ECOMM]		= TARGET_ECOMM,
551     [EPROTO]		= TARGET_EPROTO,
552     [EDOTDOT]		= TARGET_EDOTDOT,
553     [EMULTIHOP]		= TARGET_EMULTIHOP,
554     [EBADMSG]		= TARGET_EBADMSG,
555     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
556     [EOVERFLOW]		= TARGET_EOVERFLOW,
557     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
558     [EBADFD]		= TARGET_EBADFD,
559     [EREMCHG]		= TARGET_EREMCHG,
560     [ELIBACC]		= TARGET_ELIBACC,
561     [ELIBBAD]		= TARGET_ELIBBAD,
562     [ELIBSCN]		= TARGET_ELIBSCN,
563     [ELIBMAX]		= TARGET_ELIBMAX,
564     [ELIBEXEC]		= TARGET_ELIBEXEC,
565     [EILSEQ]		= TARGET_EILSEQ,
566     [ENOSYS]		= TARGET_ENOSYS,
567     [ELOOP]		= TARGET_ELOOP,
568     [ERESTART]		= TARGET_ERESTART,
569     [ESTRPIPE]		= TARGET_ESTRPIPE,
570     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
571     [EUSERS]		= TARGET_EUSERS,
572     [ENOTSOCK]		= TARGET_ENOTSOCK,
573     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
574     [EMSGSIZE]		= TARGET_EMSGSIZE,
575     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
576     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
577     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
578     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
579     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
580     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
581     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
582     [EADDRINUSE]	= TARGET_EADDRINUSE,
583     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
584     [ENETDOWN]		= TARGET_ENETDOWN,
585     [ENETUNREACH]	= TARGET_ENETUNREACH,
586     [ENETRESET]		= TARGET_ENETRESET,
587     [ECONNABORTED]	= TARGET_ECONNABORTED,
588     [ECONNRESET]	= TARGET_ECONNRESET,
589     [ENOBUFS]		= TARGET_ENOBUFS,
590     [EISCONN]		= TARGET_EISCONN,
591     [ENOTCONN]		= TARGET_ENOTCONN,
592     [EUCLEAN]		= TARGET_EUCLEAN,
593     [ENOTNAM]		= TARGET_ENOTNAM,
594     [ENAVAIL]		= TARGET_ENAVAIL,
595     [EISNAM]		= TARGET_EISNAM,
596     [EREMOTEIO]		= TARGET_EREMOTEIO,
597     [EDQUOT]            = TARGET_EDQUOT,
598     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
599     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
600     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
601     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
602     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
603     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
604     [EALREADY]		= TARGET_EALREADY,
605     [EINPROGRESS]	= TARGET_EINPROGRESS,
606     [ESTALE]		= TARGET_ESTALE,
607     [ECANCELED]		= TARGET_ECANCELED,
608     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
609     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
610 #ifdef ENOKEY
611     [ENOKEY]		= TARGET_ENOKEY,
612 #endif
613 #ifdef EKEYEXPIRED
614     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
615 #endif
616 #ifdef EKEYREVOKED
617     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
618 #endif
619 #ifdef EKEYREJECTED
620     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
621 #endif
622 #ifdef EOWNERDEAD
623     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
624 #endif
625 #ifdef ENOTRECOVERABLE
626     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
627 #endif
628 #ifdef ENOMSG
629     [ENOMSG]            = TARGET_ENOMSG,
630 #endif
631 #ifdef ERFKILL
632     [ERFKILL]           = TARGET_ERFKILL,
633 #endif
634 #ifdef EHWPOISON
635     [EHWPOISON]         = TARGET_EHWPOISON,
636 #endif
637 };
638 
639 static inline int host_to_target_errno(int err)
640 {
641     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
642         host_to_target_errno_table[err]) {
643         return host_to_target_errno_table[err];
644     }
645     return err;
646 }
647 
648 static inline int target_to_host_errno(int err)
649 {
650     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
651         target_to_host_errno_table[err]) {
652         return target_to_host_errno_table[err];
653     }
654     return err;
655 }
656 
657 static inline abi_long get_errno(abi_long ret)
658 {
659     if (ret == -1)
660         return -host_to_target_errno(errno);
661     else
662         return ret;
663 }
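/* Typical calling pattern (illustration): host syscall results are funnelled
 * through get_errno() so the guest sees target errno values, e.g.
 *
 *   ret = get_errno(safe_read(fd, p, count));
 *
 * which yields either the byte count or a negative -TARGET_Exxx value.
 */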
664 
665 const char *target_strerror(int err)
666 {
667     if (err == TARGET_ERESTARTSYS) {
668         return "To be restarted";
669     }
670     if (err == TARGET_QEMU_ESIGRETURN) {
671         return "Successful exit from sigreturn";
672     }
673 
674     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
675         return NULL;
676     }
677     return strerror(target_to_host_errno(err));
678 }
679 
680 #define safe_syscall0(type, name) \
681 static type safe_##name(void) \
682 { \
683     return safe_syscall(__NR_##name); \
684 }
685 
686 #define safe_syscall1(type, name, type1, arg1) \
687 static type safe_##name(type1 arg1) \
688 { \
689     return safe_syscall(__NR_##name, arg1); \
690 }
691 
692 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
693 static type safe_##name(type1 arg1, type2 arg2) \
694 { \
695     return safe_syscall(__NR_##name, arg1, arg2); \
696 }
697 
698 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
699 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
700 { \
701     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
702 }
703 
704 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
705     type4, arg4) \
706 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
707 { \
708     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
709 }
710 
711 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
712     type4, arg4, type5, arg5) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714     type5 arg5) \
715 { \
716     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
717 }
718 
719 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
720     type4, arg4, type5, arg5, type6, arg6) \
721 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
722     type5 arg5, type6 arg6) \
723 { \
724     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
725 }
726 
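/* The safe_* wrappers below are generated the same way as the _syscall*
 * wrappers above, but go through safe_syscall() (implemented elsewhere in
 * QEMU), which cooperates with guest signal handling: if a guest signal is
 * pending when the host call would block, the call is reported back as
 * TARGET_ERESTARTSYS so the main loop can deliver the signal and restart
 * the syscall instead of blocking with the signal undelivered.
 */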
727 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
728 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
729 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
730               int, flags, mode_t, mode)
731 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
732               struct rusage *, rusage)
733 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
734               int, options, struct rusage *, rusage)
735 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
736 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
737               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
738 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
739               struct timespec *, tsp, const sigset_t *, sigmask,
740               size_t, sigsetsize)
741 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
742               int, maxevents, int, timeout, const sigset_t *, sigmask,
743               size_t, sigsetsize)
744 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
745               const struct timespec *,timeout,int *,uaddr2,int,val3)
746 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
747 safe_syscall2(int, kill, pid_t, pid, int, sig)
748 safe_syscall2(int, tkill, int, tid, int, sig)
749 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
750 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
752 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
753               unsigned long, pos_l, unsigned long, pos_h)
754 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
757               socklen_t, addrlen)
758 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
759               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
760 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
761               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
762 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
763 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
764 safe_syscall2(int, flock, int, fd, int, operation)
765 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
766               const struct timespec *, uts, size_t, sigsetsize)
767 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
768               int, flags)
769 safe_syscall2(int, nanosleep, const struct timespec *, req,
770               struct timespec *, rem)
771 #ifdef TARGET_NR_clock_nanosleep
772 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
773               const struct timespec *, req, struct timespec *, rem)
774 #endif
775 #ifdef __NR_ipc
776 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
777               void *, ptr, long, fifth)
778 #endif
779 #ifdef __NR_msgsnd
780 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
781               int, flags)
782 #endif
783 #ifdef __NR_msgrcv
784 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
785               long, msgtype, int, flags)
786 #endif
787 #ifdef __NR_semtimedop
788 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
789               unsigned, nsops, const struct timespec *, timeout)
790 #endif
791 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
792 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
793               size_t, len, unsigned, prio, const struct timespec *, timeout)
794 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
795               size_t, len, unsigned *, prio, const struct timespec *, timeout)
796 #endif
797 /* We do ioctl like this rather than via safe_syscall3 to preserve the
798  * "third argument might be integer or pointer or not present" behaviour of
799  * the libc function.
800  */
801 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
802 /* Similarly for fcntl. Note that callers must always:
803  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
804  *  use the flock64 struct rather than unsuffixed flock
805  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
806  */
807 #ifdef __NR_fcntl64
808 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
809 #else
810 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
811 #endif
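/* e.g. a locking request is expected to look like (sketch):
 *
 *   struct flock64 fl64;
 *   ...
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so the same code path uses 64-bit file offsets on both host sizes.
 */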
812 
813 static inline int host_to_target_sock_type(int host_type)
814 {
815     int target_type;
816 
817     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
818     case SOCK_DGRAM:
819         target_type = TARGET_SOCK_DGRAM;
820         break;
821     case SOCK_STREAM:
822         target_type = TARGET_SOCK_STREAM;
823         break;
824     default:
825         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
826         break;
827     }
828 
829 #if defined(SOCK_CLOEXEC)
830     if (host_type & SOCK_CLOEXEC) {
831         target_type |= TARGET_SOCK_CLOEXEC;
832     }
833 #endif
834 
835 #if defined(SOCK_NONBLOCK)
836     if (host_type & SOCK_NONBLOCK) {
837         target_type |= TARGET_SOCK_NONBLOCK;
838     }
839 #endif
840 
841     return target_type;
842 }
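/* e.g. a host value of (SOCK_STREAM | SOCK_NONBLOCK) comes back as
 * (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK); unknown base types in the
 * low nibble are passed through unchanged.
 */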
843 
844 static abi_ulong target_brk;
845 static abi_ulong target_original_brk;
846 static abi_ulong brk_page;
847 
848 void target_set_brk(abi_ulong new_brk)
849 {
850     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
851     brk_page = HOST_PAGE_ALIGN(target_brk);
852 }
853 
854 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
855 #define DEBUGF_BRK(message, args...)
856 
857 /* do_brk() must return target values and target errnos. */
858 abi_long do_brk(abi_ulong new_brk)
859 {
860     abi_long mapped_addr;
861     abi_ulong new_alloc_size;
862 
863     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
864 
865     if (!new_brk) {
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
867         return target_brk;
868     }
869     if (new_brk < target_original_brk) {
870         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
871                    target_brk);
872         return target_brk;
873     }
874 
875     /* If the new brk is less than the highest page reserved for the
876      * target heap allocation, set it and we're almost done...  */
877     if (new_brk <= brk_page) {
878         /* Heap contents are initialized to zero, as for anonymous
879          * mapped pages.  */
880         if (new_brk > target_brk) {
881             memset(g2h(target_brk), 0, new_brk - target_brk);
882         }
883 	target_brk = new_brk;
884         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
885 	return target_brk;
886     }
887 
888     /* We need to allocate more memory after the brk... Note that
889      * we don't use MAP_FIXED because that will map over the top of
890      * any existing mapping (like the one with the host libc or qemu
891      * itself); instead we treat "mapped but at wrong address" as
892      * a failure and unmap again.
893      */
894     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
895     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
896                                         PROT_READ|PROT_WRITE,
897                                         MAP_ANON|MAP_PRIVATE, 0, 0));
898 
899     if (mapped_addr == brk_page) {
900         /* Heap contents are initialized to zero, as for anonymous
901          * mapped pages.  Technically the new pages are already
902          * initialized to zero since they *are* anonymous mapped
903          * pages, however we have to take care with the contents that
904          * come from the remaining part of the previous page: it may
905          * contain garbage data from previous heap usage (grown
906          * then shrunk).  */
907         memset(g2h(target_brk), 0, brk_page - target_brk);
908 
909         target_brk = new_brk;
910         brk_page = HOST_PAGE_ALIGN(target_brk);
911         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
912             target_brk);
913         return target_brk;
914     } else if (mapped_addr != -1) {
915         /* Mapped but at wrong address, meaning there wasn't actually
916          * enough space for this brk.
917          */
918         target_munmap(mapped_addr, new_alloc_size);
919         mapped_addr = -1;
920         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
921     }
922     else {
923         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
924     }
925 
926 #if defined(TARGET_ALPHA)
927     /* We (partially) emulate OSF/1 on Alpha, which requires we
928        return a proper errno, not an unchanged brk value.  */
929     return -TARGET_ENOMEM;
930 #endif
931     /* For everything else, return the previous break. */
932     return target_brk;
933 }
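/* Worked example (hypothetical numbers): with target_brk = 0x1000800 and
 * brk_page = 0x1001000, a guest brk(0x1003000) needs new_alloc_size =
 * HOST_PAGE_ALIGN(0x2000) more bytes mapped at brk_page; if target_mmap()
 * returns exactly brk_page the break moves to 0x1003000, otherwise any
 * stray mapping is unmapped again and the old break (or -TARGET_ENOMEM on
 * Alpha) is returned.
 */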
934 
935 static inline abi_long copy_from_user_fdset(fd_set *fds,
936                                             abi_ulong target_fds_addr,
937                                             int n)
938 {
939     int i, nw, j, k;
940     abi_ulong b, *target_fds;
941 
942     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
943     if (!(target_fds = lock_user(VERIFY_READ,
944                                  target_fds_addr,
945                                  sizeof(abi_ulong) * nw,
946                                  1)))
947         return -TARGET_EFAULT;
948 
949     FD_ZERO(fds);
950     k = 0;
951     for (i = 0; i < nw; i++) {
952         /* grab the abi_ulong */
953         __get_user(b, &target_fds[i]);
954         for (j = 0; j < TARGET_ABI_BITS; j++) {
955             /* check the bit inside the abi_ulong */
956             if ((b >> j) & 1)
957                 FD_SET(k, fds);
958             k++;
959         }
960     }
961 
962     unlock_user(target_fds, target_fds_addr, 0);
963 
964     return 0;
965 }
966 
967 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
968                                                  abi_ulong target_fds_addr,
969                                                  int n)
970 {
971     if (target_fds_addr) {
972         if (copy_from_user_fdset(fds, target_fds_addr, n))
973             return -TARGET_EFAULT;
974         *fds_ptr = fds;
975     } else {
976         *fds_ptr = NULL;
977     }
978     return 0;
979 }
980 
981 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
982                                           const fd_set *fds,
983                                           int n)
984 {
985     int i, nw, j, k;
986     abi_long v;
987     abi_ulong *target_fds;
988 
989     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
990     if (!(target_fds = lock_user(VERIFY_WRITE,
991                                  target_fds_addr,
992                                  sizeof(abi_ulong) * nw,
993                                  0)))
994         return -TARGET_EFAULT;
995 
996     k = 0;
997     for (i = 0; i < nw; i++) {
998         v = 0;
999         for (j = 0; j < TARGET_ABI_BITS; j++) {
1000             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1001             k++;
1002         }
1003         __put_user(v, &target_fds[i]);
1004     }
1005 
1006     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1007 
1008     return 0;
1009 }
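/* Layout note: guest fd_sets are packed as an array of abi_ulong words, so
 * for a 32-bit target and n = 70 descriptors nw = DIV_ROUND_UP(70, 32) = 3
 * words are copied, and fd 33 lives at bit (33 % 32) = 1 of word
 * (33 / 32) = 1; both helpers above walk the bits in that order.
 */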
1010 
1011 #if defined(__alpha__)
1012 #define HOST_HZ 1024
1013 #else
1014 #define HOST_HZ 100
1015 #endif
1016 
1017 static inline abi_long host_to_target_clock_t(long ticks)
1018 {
1019 #if HOST_HZ == TARGET_HZ
1020     return ticks;
1021 #else
1022     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1023 #endif
1024 }
1025 
1026 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1027                                              const struct rusage *rusage)
1028 {
1029     struct target_rusage *target_rusage;
1030 
1031     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1032         return -TARGET_EFAULT;
1033     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1034     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1035     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1036     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1037     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1038     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1039     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1040     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1041     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1042     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1043     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1044     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1045     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1046     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1047     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1048     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1049     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1050     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1051     unlock_user_struct(target_rusage, target_addr, 1);
1052 
1053     return 0;
1054 }
1055 
1056 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1057 {
1058     abi_ulong target_rlim_swap;
1059     rlim_t result;
1060 
1061     target_rlim_swap = tswapal(target_rlim);
1062     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1063         return RLIM_INFINITY;
1064 
1065     result = target_rlim_swap;
1066     if (target_rlim_swap != (rlim_t)result)
1067         return RLIM_INFINITY;
1068 
1069     return result;
1070 }
1071 
1072 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1073 {
1074     abi_ulong target_rlim_swap;
1075     abi_ulong result;
1076 
1077     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1078         target_rlim_swap = TARGET_RLIM_INFINITY;
1079     else
1080         target_rlim_swap = rlim;
1081     result = tswapal(target_rlim_swap);
1082 
1083     return result;
1084 }
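/* e.g. for a 32-bit target, a host limit of 0x100000000 does not fit in an
 * abi_ulong, so host_to_target_rlim() reports it as TARGET_RLIM_INFINITY
 * rather than silently truncating; target_to_host_rlim() performs the
 * corresponding fits-in-rlim_t check in the other direction.
 */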
1085 
1086 static inline int target_to_host_resource(int code)
1087 {
1088     switch (code) {
1089     case TARGET_RLIMIT_AS:
1090         return RLIMIT_AS;
1091     case TARGET_RLIMIT_CORE:
1092         return RLIMIT_CORE;
1093     case TARGET_RLIMIT_CPU:
1094         return RLIMIT_CPU;
1095     case TARGET_RLIMIT_DATA:
1096         return RLIMIT_DATA;
1097     case TARGET_RLIMIT_FSIZE:
1098         return RLIMIT_FSIZE;
1099     case TARGET_RLIMIT_LOCKS:
1100         return RLIMIT_LOCKS;
1101     case TARGET_RLIMIT_MEMLOCK:
1102         return RLIMIT_MEMLOCK;
1103     case TARGET_RLIMIT_MSGQUEUE:
1104         return RLIMIT_MSGQUEUE;
1105     case TARGET_RLIMIT_NICE:
1106         return RLIMIT_NICE;
1107     case TARGET_RLIMIT_NOFILE:
1108         return RLIMIT_NOFILE;
1109     case TARGET_RLIMIT_NPROC:
1110         return RLIMIT_NPROC;
1111     case TARGET_RLIMIT_RSS:
1112         return RLIMIT_RSS;
1113     case TARGET_RLIMIT_RTPRIO:
1114         return RLIMIT_RTPRIO;
1115     case TARGET_RLIMIT_SIGPENDING:
1116         return RLIMIT_SIGPENDING;
1117     case TARGET_RLIMIT_STACK:
1118         return RLIMIT_STACK;
1119     default:
1120         return code;
1121     }
1122 }
1123 
1124 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1125                                               abi_ulong target_tv_addr)
1126 {
1127     struct target_timeval *target_tv;
1128 
1129     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1130         return -TARGET_EFAULT;
1131 
1132     __get_user(tv->tv_sec, &target_tv->tv_sec);
1133     __get_user(tv->tv_usec, &target_tv->tv_usec);
1134 
1135     unlock_user_struct(target_tv, target_tv_addr, 0);
1136 
1137     return 0;
1138 }
1139 
1140 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1141                                             const struct timeval *tv)
1142 {
1143     struct target_timeval *target_tv;
1144 
1145     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1146         return -TARGET_EFAULT;
1147 
1148     __put_user(tv->tv_sec, &target_tv->tv_sec);
1149     __put_user(tv->tv_usec, &target_tv->tv_usec);
1150 
1151     unlock_user_struct(target_tv, target_tv_addr, 1);
1152 
1153     return 0;
1154 }
1155 
1156 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1157                                                abi_ulong target_tz_addr)
1158 {
1159     struct target_timezone *target_tz;
1160 
1161     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1162         return -TARGET_EFAULT;
1163     }
1164 
1165     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1166     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1167 
1168     unlock_user_struct(target_tz, target_tz_addr, 0);
1169 
1170     return 0;
1171 }
1172 
1173 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1174 #include <mqueue.h>
1175 
1176 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1177                                               abi_ulong target_mq_attr_addr)
1178 {
1179     struct target_mq_attr *target_mq_attr;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1182                           target_mq_attr_addr, 1))
1183         return -TARGET_EFAULT;
1184 
1185     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1186     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1187     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1188     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1189 
1190     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1191 
1192     return 0;
1193 }
1194 
1195 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1196                                             const struct mq_attr *attr)
1197 {
1198     struct target_mq_attr *target_mq_attr;
1199 
1200     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1201                           target_mq_attr_addr, 0))
1202         return -TARGET_EFAULT;
1203 
1204     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1205     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1206     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1207     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1208 
1209     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1210 
1211     return 0;
1212 }
1213 #endif
1214 
1215 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1216 /* do_select() must return target values and target errnos. */
1217 static abi_long do_select(int n,
1218                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1219                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1220 {
1221     fd_set rfds, wfds, efds;
1222     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1223     struct timeval tv;
1224     struct timespec ts, *ts_ptr;
1225     abi_long ret;
1226 
1227     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1228     if (ret) {
1229         return ret;
1230     }
1231     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1232     if (ret) {
1233         return ret;
1234     }
1235     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1236     if (ret) {
1237         return ret;
1238     }
1239 
1240     if (target_tv_addr) {
1241         if (copy_from_user_timeval(&tv, target_tv_addr))
1242             return -TARGET_EFAULT;
1243         ts.tv_sec = tv.tv_sec;
1244         ts.tv_nsec = tv.tv_usec * 1000;
1245         ts_ptr = &ts;
1246     } else {
1247         ts_ptr = NULL;
1248     }
1249 
1250     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1251                                   ts_ptr, NULL));
1252 
1253     if (!is_error(ret)) {
1254         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1255             return -TARGET_EFAULT;
1256         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1257             return -TARGET_EFAULT;
1258         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1259             return -TARGET_EFAULT;
1260 
1261         if (target_tv_addr) {
1262             tv.tv_sec = ts.tv_sec;
1263             tv.tv_usec = ts.tv_nsec / 1000;
1264             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1265                 return -TARGET_EFAULT;
1266             }
1267         }
1268     }
1269 
1270     return ret;
1271 }
1272 
1273 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1274 static abi_long do_old_select(abi_ulong arg1)
1275 {
1276     struct target_sel_arg_struct *sel;
1277     abi_ulong inp, outp, exp, tvp;
1278     long nsel;
1279 
1280     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1281         return -TARGET_EFAULT;
1282     }
1283 
1284     nsel = tswapal(sel->n);
1285     inp = tswapal(sel->inp);
1286     outp = tswapal(sel->outp);
1287     exp = tswapal(sel->exp);
1288     tvp = tswapal(sel->tvp);
1289 
1290     unlock_user_struct(sel, arg1, 0);
1291 
1292     return do_select(nsel, inp, outp, exp, tvp);
1293 }
1294 #endif
1295 #endif
1296 
1297 static abi_long do_pipe2(int host_pipe[], int flags)
1298 {
1299 #ifdef CONFIG_PIPE2
1300     return pipe2(host_pipe, flags);
1301 #else
1302     return -ENOSYS;
1303 #endif
1304 }
1305 
1306 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1307                         int flags, int is_pipe2)
1308 {
1309     int host_pipe[2];
1310     abi_long ret;
1311     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1312 
1313     if (is_error(ret))
1314         return get_errno(ret);
1315 
1316     /* Several targets have special calling conventions for the original
1317        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1318     if (!is_pipe2) {
1319 #if defined(TARGET_ALPHA)
1320         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1321         return host_pipe[0];
1322 #elif defined(TARGET_MIPS)
1323         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1324         return host_pipe[0];
1325 #elif defined(TARGET_SH4)
1326         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1327         return host_pipe[0];
1328 #elif defined(TARGET_SPARC)
1329         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1330         return host_pipe[0];
1331 #endif
1332     }
1333 
1334     if (put_user_s32(host_pipe[0], pipedes)
1335         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1336         return -TARGET_EFAULT;
1337     return get_errno(ret);
1338 }
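/* Background (e.g. for MIPS): the historical pipe() ABI returns the two
 * descriptors in two result registers (v0/v1 on MIPS, hence gpr[3] above)
 * instead of writing through the user pointer, which is why the register
 * stores above bypass the usual put_user path for plain pipe().
 */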
1339 
1340 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1341                                               abi_ulong target_addr,
1342                                               socklen_t len)
1343 {
1344     struct target_ip_mreqn *target_smreqn;
1345 
1346     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1347     if (!target_smreqn)
1348         return -TARGET_EFAULT;
1349     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1350     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1351     if (len == sizeof(struct target_ip_mreqn))
1352         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1353     unlock_user(target_smreqn, target_addr, 0);
1354 
1355     return 0;
1356 }
1357 
1358 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1359                                                abi_ulong target_addr,
1360                                                socklen_t len)
1361 {
1362     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1363     sa_family_t sa_family;
1364     struct target_sockaddr *target_saddr;
1365 
1366     if (fd_trans_target_to_host_addr(fd)) {
1367         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1368     }
1369 
1370     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1371     if (!target_saddr)
1372         return -TARGET_EFAULT;
1373 
1374     sa_family = tswap16(target_saddr->sa_family);
1375 
1376     /* Oops. The caller might send an incomplete sun_path; sun_path
1377      * must be terminated by \0 (see the manual page), but
1378      * unfortunately it is quite common to specify sockaddr_un
1379      * length as "strlen(x->sun_path)" while it should be
1380      * "strlen(...) + 1". We'll fix that here if needed.
1381      * The Linux kernel applies a similar fixup.
1382      */
1383 
1384     if (sa_family == AF_UNIX) {
1385         if (len < unix_maxlen && len > 0) {
1386             char *cp = (char*)target_saddr;
1387 
1388             if ( cp[len-1] && !cp[len] )
1389                 len++;
1390         }
1391         if (len > unix_maxlen)
1392             len = unix_maxlen;
1393     }
1394 
1395     memcpy(addr, target_saddr, len);
1396     addr->sa_family = sa_family;
1397     if (sa_family == AF_NETLINK) {
1398         struct sockaddr_nl *nladdr;
1399 
1400         nladdr = (struct sockaddr_nl *)addr;
1401         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1402         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1403     } else if (sa_family == AF_PACKET) {
1404 	struct target_sockaddr_ll *lladdr;
1405 
1406 	lladdr = (struct target_sockaddr_ll *)addr;
1407 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1408 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1409     }
1410     unlock_user(target_saddr, target_addr, 0);
1411 
1412     return 0;
1413 }
1414 
1415 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1416                                                struct sockaddr *addr,
1417                                                socklen_t len)
1418 {
1419     struct target_sockaddr *target_saddr;
1420 
1421     if (len == 0) {
1422         return 0;
1423     }
1424     assert(addr);
1425 
1426     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1427     if (!target_saddr)
1428         return -TARGET_EFAULT;
1429     memcpy(target_saddr, addr, len);
1430     if (len >= offsetof(struct target_sockaddr, sa_family) +
1431         sizeof(target_saddr->sa_family)) {
1432         target_saddr->sa_family = tswap16(addr->sa_family);
1433     }
1434     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1435         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1436         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1437         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1438     } else if (addr->sa_family == AF_PACKET) {
1439         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1440         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1441         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1442     } else if (addr->sa_family == AF_INET6 &&
1443                len >= sizeof(struct target_sockaddr_in6)) {
1444         struct target_sockaddr_in6 *target_in6 =
1445                (struct target_sockaddr_in6 *)target_saddr;
1446         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1447     }
1448     unlock_user(target_saddr, target_addr, len);
1449 
1450     return 0;
1451 }
1452 
1453 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1454                                            struct target_msghdr *target_msgh)
1455 {
1456     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1457     abi_long msg_controllen;
1458     abi_ulong target_cmsg_addr;
1459     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1460     socklen_t space = 0;
1461 
1462     msg_controllen = tswapal(target_msgh->msg_controllen);
1463     if (msg_controllen < sizeof (struct target_cmsghdr))
1464         goto the_end;
1465     target_cmsg_addr = tswapal(target_msgh->msg_control);
1466     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1467     target_cmsg_start = target_cmsg;
1468     if (!target_cmsg)
1469         return -TARGET_EFAULT;
1470 
1471     while (cmsg && target_cmsg) {
1472         void *data = CMSG_DATA(cmsg);
1473         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1474 
1475         int len = tswapal(target_cmsg->cmsg_len)
1476             - sizeof(struct target_cmsghdr);
1477 
1478         space += CMSG_SPACE(len);
1479         if (space > msgh->msg_controllen) {
1480             space -= CMSG_SPACE(len);
1481             /* This is a QEMU bug, since we allocated the payload
1482              * area ourselves (unlike overflow in host-to-target
1483              * conversion, which is just the guest giving us a buffer
1484              * that's too small). It can't happen for the payload types
1485              * we currently support; if it becomes an issue in future
1486              * we would need to improve our allocation strategy to
1487              * something more intelligent than "twice the size of the
1488              * target buffer we're reading from".
1489              */
1490             gemu_log("Host cmsg overflow\n");
1491             break;
1492         }
1493 
1494         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1495             cmsg->cmsg_level = SOL_SOCKET;
1496         } else {
1497             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1498         }
1499         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1500         cmsg->cmsg_len = CMSG_LEN(len);
1501 
1502         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1503             int *fd = (int *)data;
1504             int *target_fd = (int *)target_data;
1505             int i, numfds = len / sizeof(int);
1506 
1507             for (i = 0; i < numfds; i++) {
1508                 __get_user(fd[i], target_fd + i);
1509             }
1510         } else if (cmsg->cmsg_level == SOL_SOCKET
1511                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1512             struct ucred *cred = (struct ucred *)data;
1513             struct target_ucred *target_cred =
1514                 (struct target_ucred *)target_data;
1515 
1516             __get_user(cred->pid, &target_cred->pid);
1517             __get_user(cred->uid, &target_cred->uid);
1518             __get_user(cred->gid, &target_cred->gid);
1519         } else {
1520             gemu_log("Unsupported ancillary data: %d/%d\n",
1521                                         cmsg->cmsg_level, cmsg->cmsg_type);
1522             memcpy(data, target_data, len);
1523         }
1524 
1525         cmsg = CMSG_NXTHDR(msgh, cmsg);
1526         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1527                                          target_cmsg_start);
1528     }
1529     unlock_user(target_cmsg, target_cmsg_addr, 0);
1530  the_end:
1531     msgh->msg_controllen = space;
1532     return 0;
1533 }
1534 
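     /*
      * The reverse direction: after a host recvmsg(), copy the ancillary data
      * back into the guest's control buffer, converting levels/types and any
      * payloads whose size differs between host and target (e.g. the
      * SO_TIMESTAMP timeval).  Truncation on this path is guest-visible and
      * is reported via MSG_CTRUNC.
      */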
1535 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1536                                            struct msghdr *msgh)
1537 {
1538     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1539     abi_long msg_controllen;
1540     abi_ulong target_cmsg_addr;
1541     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1542     socklen_t space = 0;
1543 
1544     msg_controllen = tswapal(target_msgh->msg_controllen);
1545     if (msg_controllen < sizeof (struct target_cmsghdr))
1546         goto the_end;
1547     target_cmsg_addr = tswapal(target_msgh->msg_control);
1548     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1549     target_cmsg_start = target_cmsg;
1550     if (!target_cmsg)
1551         return -TARGET_EFAULT;
1552 
1553     while (cmsg && target_cmsg) {
1554         void *data = CMSG_DATA(cmsg);
1555         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1556 
1557         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1558         int tgt_len, tgt_space;
1559 
1560         /* We never copy a half-header but may copy half-data;
1561          * this is Linux's behaviour in put_cmsg(). Note that
1562          * truncation here is a guest problem (which we report
1563          * to the guest via the CTRUNC bit), unlike truncation
1564          * in target_to_host_cmsg, which is a QEMU bug.
1565          */
1566         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1567             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1568             break;
1569         }
1570 
1571         if (cmsg->cmsg_level == SOL_SOCKET) {
1572             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1573         } else {
1574             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1575         }
1576         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1577 
1578         /* Payload types which need a different size of payload on
1579          * the target must adjust tgt_len here.
1580          */
1581         tgt_len = len;
1582         switch (cmsg->cmsg_level) {
1583         case SOL_SOCKET:
1584             switch (cmsg->cmsg_type) {
1585             case SO_TIMESTAMP:
1586                 tgt_len = sizeof(struct target_timeval);
1587                 break;
1588             default:
1589                 break;
1590             }
1591             break;
1592         default:
1593             break;
1594         }
1595 
1596         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1597             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1598             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1599         }
1600 
1601         /* We must now copy-and-convert len bytes of payload
1602          * into tgt_len bytes of destination space. Bear in mind
1603          * that in both source and destination we may be dealing
1604          * with a truncated value!
1605          */
1606         switch (cmsg->cmsg_level) {
1607         case SOL_SOCKET:
1608             switch (cmsg->cmsg_type) {
1609             case SCM_RIGHTS:
1610             {
1611                 int *fd = (int *)data;
1612                 int *target_fd = (int *)target_data;
1613                 int i, numfds = tgt_len / sizeof(int);
1614 
1615                 for (i = 0; i < numfds; i++) {
1616                     __put_user(fd[i], target_fd + i);
1617                 }
1618                 break;
1619             }
1620             case SO_TIMESTAMP:
1621             {
1622                 struct timeval *tv = (struct timeval *)data;
1623                 struct target_timeval *target_tv =
1624                     (struct target_timeval *)target_data;
1625 
1626                 if (len != sizeof(struct timeval) ||
1627                     tgt_len != sizeof(struct target_timeval)) {
1628                     goto unimplemented;
1629                 }
1630 
1631                 /* copy struct timeval to target */
1632                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1633                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1634                 break;
1635             }
1636             case SCM_CREDENTIALS:
1637             {
1638                 struct ucred *cred = (struct ucred *)data;
1639                 struct target_ucred *target_cred =
1640                     (struct target_ucred *)target_data;
1641 
1642                 __put_user(cred->pid, &target_cred->pid);
1643                 __put_user(cred->uid, &target_cred->uid);
1644                 __put_user(cred->gid, &target_cred->gid);
1645                 break;
1646             }
1647             default:
1648                 goto unimplemented;
1649             }
1650             break;
1651 
1652         case SOL_IP:
1653             switch (cmsg->cmsg_type) {
1654             case IP_TTL:
1655             {
1656                 uint32_t *v = (uint32_t *)data;
1657                 uint32_t *t_int = (uint32_t *)target_data;
1658 
1659                 if (len != sizeof(uint32_t) ||
1660                     tgt_len != sizeof(uint32_t)) {
1661                     goto unimplemented;
1662                 }
1663                 __put_user(*v, t_int);
1664                 break;
1665             }
1666             case IP_RECVERR:
1667             {
1668                 struct errhdr_t {
1669                    struct sock_extended_err ee;
1670                    struct sockaddr_in offender;
1671                 };
1672                 struct errhdr_t *errh = (struct errhdr_t *)data;
1673                 struct errhdr_t *target_errh =
1674                     (struct errhdr_t *)target_data;
1675 
1676                 if (len != sizeof(struct errhdr_t) ||
1677                     tgt_len != sizeof(struct errhdr_t)) {
1678                     goto unimplemented;
1679                 }
1680                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1681                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1682                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1683                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1684                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1685                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1686                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1687                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1688                     (void *) &errh->offender, sizeof(errh->offender));
1689                 break;
1690             }
1691             default:
1692                 goto unimplemented;
1693             }
1694             break;
1695 
1696         case SOL_IPV6:
1697             switch (cmsg->cmsg_type) {
1698             case IPV6_HOPLIMIT:
1699             {
1700                 uint32_t *v = (uint32_t *)data;
1701                 uint32_t *t_int = (uint32_t *)target_data;
1702 
1703                 if (len != sizeof(uint32_t) ||
1704                     tgt_len != sizeof(uint32_t)) {
1705                     goto unimplemented;
1706                 }
1707                 __put_user(*v, t_int);
1708                 break;
1709             }
1710             case IPV6_RECVERR:
1711             {
1712                 struct errhdr6_t {
1713                    struct sock_extended_err ee;
1714                    struct sockaddr_in6 offender;
1715                 };
1716                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1717                 struct errhdr6_t *target_errh =
1718                     (struct errhdr6_t *)target_data;
1719 
1720                 if (len != sizeof(struct errhdr6_t) ||
1721                     tgt_len != sizeof(struct errhdr6_t)) {
1722                     goto unimplemented;
1723                 }
1724                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1725                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1726                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1727                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1728                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1729                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1730                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1731                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1732                     (void *) &errh->offender, sizeof(errh->offender));
1733                 break;
1734             }
1735             default:
1736                 goto unimplemented;
1737             }
1738             break;
1739 
1740         default:
1741         unimplemented:
1742             gemu_log("Unsupported ancillary data: %d/%d\n",
1743                                         cmsg->cmsg_level, cmsg->cmsg_type);
1744             memcpy(target_data, data, MIN(len, tgt_len));
1745             if (tgt_len > len) {
1746                 memset(target_data + len, 0, tgt_len - len);
1747             }
1748         }
1749 
1750         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1751         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1752         if (msg_controllen < tgt_space) {
1753             tgt_space = msg_controllen;
1754         }
1755         msg_controllen -= tgt_space;
1756         space += tgt_space;
1757         cmsg = CMSG_NXTHDR(msgh, cmsg);
1758         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1759                                          target_cmsg_start);
1760     }
1761     unlock_user(target_cmsg, target_cmsg_addr, space);
1762  the_end:
1763     target_msgh->msg_controllen = tswapal(space);
1764     return 0;
1765 }
1766 
1767 /* do_setsockopt() Must return target values and target errnos. */
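     /*
      * The pattern below: translate the (level, optname) pair and any
      * structured optval from target to host representation, then call the
      * host setsockopt().  Plain integer options only need a byte-swapped
      * read of the value; structured options (timeouts, socket filters,
      * linger, multicast requests) are converted field by field.
      */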
1768 static abi_long do_setsockopt(int sockfd, int level, int optname,
1769                               abi_ulong optval_addr, socklen_t optlen)
1770 {
1771     abi_long ret;
1772     int val;
1773     struct ip_mreqn *ip_mreq;
1774     struct ip_mreq_source *ip_mreq_source;
1775 
1776     switch(level) {
1777     case SOL_TCP:
1778         /* TCP options all take an 'int' value.  */
1779         if (optlen < sizeof(uint32_t))
1780             return -TARGET_EINVAL;
1781 
1782         if (get_user_u32(val, optval_addr))
1783             return -TARGET_EFAULT;
1784         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1785         break;
1786     case SOL_IP:
1787         switch(optname) {
1788         case IP_TOS:
1789         case IP_TTL:
1790         case IP_HDRINCL:
1791         case IP_ROUTER_ALERT:
1792         case IP_RECVOPTS:
1793         case IP_RETOPTS:
1794         case IP_PKTINFO:
1795         case IP_MTU_DISCOVER:
1796         case IP_RECVERR:
1797         case IP_RECVTTL:
1798         case IP_RECVTOS:
1799 #ifdef IP_FREEBIND
1800         case IP_FREEBIND:
1801 #endif
1802         case IP_MULTICAST_TTL:
1803         case IP_MULTICAST_LOOP:
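                 /*
                  * These IP options accept either a full int or a single byte
                  * from userspace, so read whichever size the guest actually
                  * supplied.
                  */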
1804             val = 0;
1805             if (optlen >= sizeof(uint32_t)) {
1806                 if (get_user_u32(val, optval_addr))
1807                     return -TARGET_EFAULT;
1808             } else if (optlen >= 1) {
1809                 if (get_user_u8(val, optval_addr))
1810                     return -TARGET_EFAULT;
1811             }
1812             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1813             break;
1814         case IP_ADD_MEMBERSHIP:
1815         case IP_DROP_MEMBERSHIP:
1816             if (optlen < sizeof (struct target_ip_mreq) ||
1817                 optlen > sizeof (struct target_ip_mreqn))
1818                 return -TARGET_EINVAL;
1819 
1820             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1821             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1822             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1823             break;
1824 
1825         case IP_BLOCK_SOURCE:
1826         case IP_UNBLOCK_SOURCE:
1827         case IP_ADD_SOURCE_MEMBERSHIP:
1828         case IP_DROP_SOURCE_MEMBERSHIP:
1829             if (optlen != sizeof (struct target_ip_mreq_source))
1830                 return -TARGET_EINVAL;
1831 
1832             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1833             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1834             unlock_user (ip_mreq_source, optval_addr, 0);
1835             break;
1836 
1837         default:
1838             goto unimplemented;
1839         }
1840         break;
1841     case SOL_IPV6:
1842         switch (optname) {
1843         case IPV6_MTU_DISCOVER:
1844         case IPV6_MTU:
1845         case IPV6_V6ONLY:
1846         case IPV6_RECVPKTINFO:
1847         case IPV6_UNICAST_HOPS:
1848         case IPV6_MULTICAST_HOPS:
1849         case IPV6_MULTICAST_LOOP:
1850         case IPV6_RECVERR:
1851         case IPV6_RECVHOPLIMIT:
1852         case IPV6_2292HOPLIMIT:
1853         case IPV6_CHECKSUM:
1854         case IPV6_ADDRFORM:
1855         case IPV6_2292PKTINFO:
1856         case IPV6_RECVTCLASS:
1857         case IPV6_RECVRTHDR:
1858         case IPV6_2292RTHDR:
1859         case IPV6_RECVHOPOPTS:
1860         case IPV6_2292HOPOPTS:
1861         case IPV6_RECVDSTOPTS:
1862         case IPV6_2292DSTOPTS:
1863         case IPV6_TCLASS:
1864 #ifdef IPV6_RECVPATHMTU
1865         case IPV6_RECVPATHMTU:
1866 #endif
1867 #ifdef IPV6_TRANSPARENT
1868         case IPV6_TRANSPARENT:
1869 #endif
1870 #ifdef IPV6_FREEBIND
1871         case IPV6_FREEBIND:
1872 #endif
1873 #ifdef IPV6_RECVORIGDSTADDR
1874         case IPV6_RECVORIGDSTADDR:
1875 #endif
1876             val = 0;
1877             if (optlen < sizeof(uint32_t)) {
1878                 return -TARGET_EINVAL;
1879             }
1880             if (get_user_u32(val, optval_addr)) {
1881                 return -TARGET_EFAULT;
1882             }
1883             ret = get_errno(setsockopt(sockfd, level, optname,
1884                                        &val, sizeof(val)));
1885             break;
1886         case IPV6_PKTINFO:
1887         {
1888             struct in6_pktinfo pki;
1889 
1890             if (optlen < sizeof(pki)) {
1891                 return -TARGET_EINVAL;
1892             }
1893 
1894             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1895                 return -TARGET_EFAULT;
1896             }
1897 
1898             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1899 
1900             ret = get_errno(setsockopt(sockfd, level, optname,
1901                                        &pki, sizeof(pki)));
1902             break;
1903         }
1904         case IPV6_ADD_MEMBERSHIP:
1905         case IPV6_DROP_MEMBERSHIP:
1906         {
1907             struct ipv6_mreq ipv6mreq;
1908 
1909             if (optlen < sizeof(ipv6mreq)) {
1910                 return -TARGET_EINVAL;
1911             }
1912 
1913             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1914                 return -TARGET_EFAULT;
1915             }
1916 
1917             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1918 
1919             ret = get_errno(setsockopt(sockfd, level, optname,
1920                                        &ipv6mreq, sizeof(ipv6mreq)));
1921             break;
1922         }
1923         default:
1924             goto unimplemented;
1925         }
1926         break;
1927     case SOL_ICMPV6:
1928         switch (optname) {
1929         case ICMPV6_FILTER:
1930         {
1931             struct icmp6_filter icmp6f;
1932 
1933             if (optlen > sizeof(icmp6f)) {
1934                 optlen = sizeof(icmp6f);
1935             }
1936 
1937             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1938                 return -TARGET_EFAULT;
1939             }
1940 
1941             for (val = 0; val < 8; val++) {
1942                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1943             }
1944 
1945             ret = get_errno(setsockopt(sockfd, level, optname,
1946                                        &icmp6f, optlen));
1947             break;
1948         }
1949         default:
1950             goto unimplemented;
1951         }
1952         break;
1953     case SOL_RAW:
1954         switch (optname) {
1955         case ICMP_FILTER:
1956         case IPV6_CHECKSUM:
1957             /* these take a u32 value */
1958             if (optlen < sizeof(uint32_t)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (get_user_u32(val, optval_addr)) {
1963                 return -TARGET_EFAULT;
1964             }
1965             ret = get_errno(setsockopt(sockfd, level, optname,
1966                                        &val, sizeof(val)));
1967             break;
1968 
1969         default:
1970             goto unimplemented;
1971         }
1972         break;
1973 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
1974     case SOL_ALG:
1975         switch (optname) {
1976         case ALG_SET_KEY:
1977         {
1978             char *alg_key = g_malloc(optlen);
1979 
1980             if (!alg_key) {
1981                 return -TARGET_ENOMEM;
1982             }
1983             if (copy_from_user(alg_key, optval_addr, optlen)) {
1984                 g_free(alg_key);
1985                 return -TARGET_EFAULT;
1986             }
1987             ret = get_errno(setsockopt(sockfd, level, optname,
1988                                        alg_key, optlen));
1989             g_free(alg_key);
1990             break;
1991         }
1992         case ALG_SET_AEAD_AUTHSIZE:
1993         {
1994             ret = get_errno(setsockopt(sockfd, level, optname,
1995                                        NULL, optlen));
1996             break;
1997         }
1998         default:
1999             goto unimplemented;
2000         }
2001         break;
2002 #endif
2003     case TARGET_SOL_SOCKET:
2004         switch (optname) {
2005         case TARGET_SO_RCVTIMEO:
2006         {
2007                 struct timeval tv;
2008 
2009                 optname = SO_RCVTIMEO;
2010 
2011 set_timeout:
2012                 if (optlen != sizeof(struct target_timeval)) {
2013                     return -TARGET_EINVAL;
2014                 }
2015 
2016                 if (copy_from_user_timeval(&tv, optval_addr)) {
2017                     return -TARGET_EFAULT;
2018                 }
2019 
2020                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2021                                 &tv, sizeof(tv)));
2022                 return ret;
2023         }
2024         case TARGET_SO_SNDTIMEO:
2025                 optname = SO_SNDTIMEO;
2026                 goto set_timeout;
2027         case TARGET_SO_ATTACH_FILTER:
2028         {
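                     /*
                      * SO_ATTACH_FILTER: the guest passes a target_sock_fprog
                      * describing a classic BPF program; each sock_filter
                      * instruction is byte-swapped field by field into a host
                      * array before being attached via the host setsockopt().
                      */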
2029                 struct target_sock_fprog *tfprog;
2030                 struct target_sock_filter *tfilter;
2031                 struct sock_fprog fprog;
2032                 struct sock_filter *filter;
2033                 int i;
2034 
2035                 if (optlen != sizeof(*tfprog)) {
2036                     return -TARGET_EINVAL;
2037                 }
2038                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2039                     return -TARGET_EFAULT;
2040                 }
2041                 if (!lock_user_struct(VERIFY_READ, tfilter,
2042                                       tswapal(tfprog->filter), 0)) {
2043                     unlock_user_struct(tfprog, optval_addr, 1);
2044                     return -TARGET_EFAULT;
2045                 }
2046 
2047                 fprog.len = tswap16(tfprog->len);
2048                 filter = g_try_new(struct sock_filter, fprog.len);
2049                 if (filter == NULL) {
2050                     unlock_user_struct(tfilter, tfprog->filter, 1);
2051                     unlock_user_struct(tfprog, optval_addr, 1);
2052                     return -TARGET_ENOMEM;
2053                 }
2054                 for (i = 0; i < fprog.len; i++) {
2055                     filter[i].code = tswap16(tfilter[i].code);
2056                     filter[i].jt = tfilter[i].jt;
2057                     filter[i].jf = tfilter[i].jf;
2058                     filter[i].k = tswap32(tfilter[i].k);
2059                 }
2060                 fprog.filter = filter;
2061 
2062                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2063                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2064                 g_free(filter);
2065 
2066                 unlock_user_struct(tfilter, tfprog->filter, 1);
2067                 unlock_user_struct(tfprog, optval_addr, 1);
2068                 return ret;
2069         }
2070 	case TARGET_SO_BINDTODEVICE:
2071 	{
2072 		char *dev_ifname, *addr_ifname;
2073 
2074 		if (optlen > IFNAMSIZ - 1) {
2075 		    optlen = IFNAMSIZ - 1;
2076 		}
2077 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2078 		if (!dev_ifname) {
2079 		    return -TARGET_EFAULT;
2080 		}
2081 		optname = SO_BINDTODEVICE;
2082 		addr_ifname = alloca(IFNAMSIZ);
2083 		memcpy(addr_ifname, dev_ifname, optlen);
2084 		addr_ifname[optlen] = 0;
2085 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2086                                            addr_ifname, optlen));
2087 		unlock_user (dev_ifname, optval_addr, 0);
2088 		return ret;
2089 	}
2090         case TARGET_SO_LINGER:
2091         {
2092                 struct linger lg;
2093                 struct target_linger *tlg;
2094 
2095                 if (optlen != sizeof(struct target_linger)) {
2096                     return -TARGET_EINVAL;
2097                 }
2098                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2099                     return -TARGET_EFAULT;
2100                 }
2101                 __get_user(lg.l_onoff, &tlg->l_onoff);
2102                 __get_user(lg.l_linger, &tlg->l_linger);
2103                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2104                                 &lg, sizeof(lg)));
2105                 unlock_user_struct(tlg, optval_addr, 0);
2106                 return ret;
2107         }
2108             /* Options with 'int' argument.  */
2109         case TARGET_SO_DEBUG:
2110 		optname = SO_DEBUG;
2111 		break;
2112         case TARGET_SO_REUSEADDR:
2113 		optname = SO_REUSEADDR;
2114 		break;
2115 #ifdef SO_REUSEPORT
2116         case TARGET_SO_REUSEPORT:
2117                 optname = SO_REUSEPORT;
2118                 break;
2119 #endif
2120         case TARGET_SO_TYPE:
2121 		optname = SO_TYPE;
2122 		break;
2123         case TARGET_SO_ERROR:
2124 		optname = SO_ERROR;
2125 		break;
2126         case TARGET_SO_DONTROUTE:
2127 		optname = SO_DONTROUTE;
2128 		break;
2129         case TARGET_SO_BROADCAST:
2130 		optname = SO_BROADCAST;
2131 		break;
2132         case TARGET_SO_SNDBUF:
2133 		optname = SO_SNDBUF;
2134 		break;
2135         case TARGET_SO_SNDBUFFORCE:
2136                 optname = SO_SNDBUFFORCE;
2137                 break;
2138         case TARGET_SO_RCVBUF:
2139 		optname = SO_RCVBUF;
2140 		break;
2141         case TARGET_SO_RCVBUFFORCE:
2142                 optname = SO_RCVBUFFORCE;
2143                 break;
2144         case TARGET_SO_KEEPALIVE:
2145 		optname = SO_KEEPALIVE;
2146 		break;
2147         case TARGET_SO_OOBINLINE:
2148 		optname = SO_OOBINLINE;
2149 		break;
2150         case TARGET_SO_NO_CHECK:
2151 		optname = SO_NO_CHECK;
2152 		break;
2153         case TARGET_SO_PRIORITY:
2154 		optname = SO_PRIORITY;
2155 		break;
2156 #ifdef SO_BSDCOMPAT
2157         case TARGET_SO_BSDCOMPAT:
2158 		optname = SO_BSDCOMPAT;
2159 		break;
2160 #endif
2161         case TARGET_SO_PASSCRED:
2162 		optname = SO_PASSCRED;
2163 		break;
2164         case TARGET_SO_PASSSEC:
2165                 optname = SO_PASSSEC;
2166                 break;
2167         case TARGET_SO_TIMESTAMP:
2168 		optname = SO_TIMESTAMP;
2169 		break;
2170         case TARGET_SO_RCVLOWAT:
2171 		optname = SO_RCVLOWAT;
2172 		break;
2173         default:
2174             goto unimplemented;
2175         }
2176 	if (optlen < sizeof(uint32_t))
2177             return -TARGET_EINVAL;
2178 
2179 	if (get_user_u32(val, optval_addr))
2180             return -TARGET_EFAULT;
2181 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2182         break;
2183     default:
2184     unimplemented:
2185         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2186         ret = -TARGET_ENOPROTOOPT;
2187     }
2188     return ret;
2189 }
2190 
2191 /* do_getsockopt() Must return target values and target errnos. */
2192 static abi_long do_getsockopt(int sockfd, int level, int optname,
2193                               abi_ulong optval_addr, abi_ulong optlen)
2194 {
2195     abi_long ret;
2196     int len, val;
2197     socklen_t lv;
2198 
2199     switch(level) {
2200     case TARGET_SOL_SOCKET:
2201         level = SOL_SOCKET;
2202         switch (optname) {
2203         /* These don't just return a single integer */
2204         case TARGET_SO_RCVTIMEO:
2205         case TARGET_SO_SNDTIMEO:
2206         case TARGET_SO_PEERNAME:
2207             goto unimplemented;
2208         case TARGET_SO_PEERCRED: {
2209             struct ucred cr;
2210             socklen_t crlen;
2211             struct target_ucred *tcr;
2212 
2213             if (get_user_u32(len, optlen)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             if (len < 0) {
2217                 return -TARGET_EINVAL;
2218             }
2219 
2220             crlen = sizeof(cr);
2221             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2222                                        &cr, &crlen));
2223             if (ret < 0) {
2224                 return ret;
2225             }
2226             if (len > crlen) {
2227                 len = crlen;
2228             }
2229             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2230                 return -TARGET_EFAULT;
2231             }
2232             __put_user(cr.pid, &tcr->pid);
2233             __put_user(cr.uid, &tcr->uid);
2234             __put_user(cr.gid, &tcr->gid);
2235             unlock_user_struct(tcr, optval_addr, 1);
2236             if (put_user_u32(len, optlen)) {
2237                 return -TARGET_EFAULT;
2238             }
2239             break;
2240         }
2241         case TARGET_SO_LINGER:
2242         {
2243             struct linger lg;
2244             socklen_t lglen;
2245             struct target_linger *tlg;
2246 
2247             if (get_user_u32(len, optlen)) {
2248                 return -TARGET_EFAULT;
2249             }
2250             if (len < 0) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             lglen = sizeof(lg);
2255             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2256                                        &lg, &lglen));
2257             if (ret < 0) {
2258                 return ret;
2259             }
2260             if (len > lglen) {
2261                 len = lglen;
2262             }
2263             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2264                 return -TARGET_EFAULT;
2265             }
2266             __put_user(lg.l_onoff, &tlg->l_onoff);
2267             __put_user(lg.l_linger, &tlg->l_linger);
2268             unlock_user_struct(tlg, optval_addr, 1);
2269             if (put_user_u32(len, optlen)) {
2270                 return -TARGET_EFAULT;
2271             }
2272             break;
2273         }
2274         /* Options with 'int' argument.  */
2275         case TARGET_SO_DEBUG:
2276             optname = SO_DEBUG;
2277             goto int_case;
2278         case TARGET_SO_REUSEADDR:
2279             optname = SO_REUSEADDR;
2280             goto int_case;
2281 #ifdef SO_REUSEPORT
2282         case TARGET_SO_REUSEPORT:
2283             optname = SO_REUSEPORT;
2284             goto int_case;
2285 #endif
2286         case TARGET_SO_TYPE:
2287             optname = SO_TYPE;
2288             goto int_case;
2289         case TARGET_SO_ERROR:
2290             optname = SO_ERROR;
2291             goto int_case;
2292         case TARGET_SO_DONTROUTE:
2293             optname = SO_DONTROUTE;
2294             goto int_case;
2295         case TARGET_SO_BROADCAST:
2296             optname = SO_BROADCAST;
2297             goto int_case;
2298         case TARGET_SO_SNDBUF:
2299             optname = SO_SNDBUF;
2300             goto int_case;
2301         case TARGET_SO_RCVBUF:
2302             optname = SO_RCVBUF;
2303             goto int_case;
2304         case TARGET_SO_KEEPALIVE:
2305             optname = SO_KEEPALIVE;
2306             goto int_case;
2307         case TARGET_SO_OOBINLINE:
2308             optname = SO_OOBINLINE;
2309             goto int_case;
2310         case TARGET_SO_NO_CHECK:
2311             optname = SO_NO_CHECK;
2312             goto int_case;
2313         case TARGET_SO_PRIORITY:
2314             optname = SO_PRIORITY;
2315             goto int_case;
2316 #ifdef SO_BSDCOMPAT
2317         case TARGET_SO_BSDCOMPAT:
2318             optname = SO_BSDCOMPAT;
2319             goto int_case;
2320 #endif
2321         case TARGET_SO_PASSCRED:
2322             optname = SO_PASSCRED;
2323             goto int_case;
2324         case TARGET_SO_TIMESTAMP:
2325             optname = SO_TIMESTAMP;
2326             goto int_case;
2327         case TARGET_SO_RCVLOWAT:
2328             optname = SO_RCVLOWAT;
2329             goto int_case;
2330         case TARGET_SO_ACCEPTCONN:
2331             optname = SO_ACCEPTCONN;
2332             goto int_case;
2333         default:
2334             goto int_case;
2335         }
2336         break;
2337     case SOL_TCP:
2338         /* TCP options all take an 'int' value.  */
2339     int_case:
2340         if (get_user_u32(len, optlen))
2341             return -TARGET_EFAULT;
2342         if (len < 0)
2343             return -TARGET_EINVAL;
2344         lv = sizeof(lv);
2345         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2346         if (ret < 0)
2347             return ret;
2348         if (optname == SO_TYPE) {
2349             val = host_to_target_sock_type(val);
2350         }
2351         if (len > lv)
2352             len = lv;
2353         if (len == 4) {
2354             if (put_user_u32(val, optval_addr))
2355                 return -TARGET_EFAULT;
2356         } else {
2357             if (put_user_u8(val, optval_addr))
2358                 return -TARGET_EFAULT;
2359         }
2360         if (put_user_u32(len, optlen))
2361             return -TARGET_EFAULT;
2362         break;
2363     case SOL_IP:
2364         switch(optname) {
2365         case IP_TOS:
2366         case IP_TTL:
2367         case IP_HDRINCL:
2368         case IP_ROUTER_ALERT:
2369         case IP_RECVOPTS:
2370         case IP_RETOPTS:
2371         case IP_PKTINFO:
2372         case IP_MTU_DISCOVER:
2373         case IP_RECVERR:
2374         case IP_RECVTOS:
2375 #ifdef IP_FREEBIND
2376         case IP_FREEBIND:
2377 #endif
2378         case IP_MULTICAST_TTL:
2379         case IP_MULTICAST_LOOP:
2380             if (get_user_u32(len, optlen))
2381                 return -TARGET_EFAULT;
2382             if (len < 0)
2383                 return -TARGET_EINVAL;
2384             lv = sizeof(lv);
2385             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2386             if (ret < 0)
2387                 return ret;
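                 /*
                  * Write the result back in the size the guest asked for: if
                  * the guest buffer is smaller than an int and the value fits
                  * in a byte, store a single byte and set *optlen to 1,
                  * otherwise store a (length-clamped) int.
                  */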
2388             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2389                 len = 1;
2390                 if (put_user_u32(len, optlen)
2391                     || put_user_u8(val, optval_addr))
2392                     return -TARGET_EFAULT;
2393             } else {
2394                 if (len > sizeof(int))
2395                     len = sizeof(int);
2396                 if (put_user_u32(len, optlen)
2397                     || put_user_u32(val, optval_addr))
2398                     return -TARGET_EFAULT;
2399             }
2400             break;
2401         default:
2402             ret = -TARGET_ENOPROTOOPT;
2403             break;
2404         }
2405         break;
2406     case SOL_IPV6:
2407         switch (optname) {
2408         case IPV6_MTU_DISCOVER:
2409         case IPV6_MTU:
2410         case IPV6_V6ONLY:
2411         case IPV6_RECVPKTINFO:
2412         case IPV6_UNICAST_HOPS:
2413         case IPV6_MULTICAST_HOPS:
2414         case IPV6_MULTICAST_LOOP:
2415         case IPV6_RECVERR:
2416         case IPV6_RECVHOPLIMIT:
2417         case IPV6_2292HOPLIMIT:
2418         case IPV6_CHECKSUM:
2419         case IPV6_ADDRFORM:
2420         case IPV6_2292PKTINFO:
2421         case IPV6_RECVTCLASS:
2422         case IPV6_RECVRTHDR:
2423         case IPV6_2292RTHDR:
2424         case IPV6_RECVHOPOPTS:
2425         case IPV6_2292HOPOPTS:
2426         case IPV6_RECVDSTOPTS:
2427         case IPV6_2292DSTOPTS:
2428         case IPV6_TCLASS:
2429 #ifdef IPV6_RECVPATHMTU
2430         case IPV6_RECVPATHMTU:
2431 #endif
2432 #ifdef IPV6_TRANSPARENT
2433         case IPV6_TRANSPARENT:
2434 #endif
2435 #ifdef IPV6_FREEBIND
2436         case IPV6_FREEBIND:
2437 #endif
2438 #ifdef IPV6_RECVORIGDSTADDR
2439         case IPV6_RECVORIGDSTADDR:
2440 #endif
2441             if (get_user_u32(len, optlen))
2442                 return -TARGET_EFAULT;
2443             if (len < 0)
2444                 return -TARGET_EINVAL;
2445             lv = sizeof(lv);
2446             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2447             if (ret < 0)
2448                 return ret;
2449             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2450                 len = 1;
2451                 if (put_user_u32(len, optlen)
2452                     || put_user_u8(val, optval_addr))
2453                     return -TARGET_EFAULT;
2454             } else {
2455                 if (len > sizeof(int))
2456                     len = sizeof(int);
2457                 if (put_user_u32(len, optlen)
2458                     || put_user_u32(val, optval_addr))
2459                     return -TARGET_EFAULT;
2460             }
2461             break;
2462         default:
2463             ret = -TARGET_ENOPROTOOPT;
2464             break;
2465         }
2466         break;
2467     default:
2468     unimplemented:
2469         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2470                  level, optname);
2471         ret = -TARGET_EOPNOTSUPP;
2472         break;
2473     }
2474     return ret;
2475 }
2476 
2477 /* Convert target low/high pair representing file offset into the host
2478  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2479  * as the kernel doesn't handle them either.
2480  */
2481 static void target_to_host_low_high(abi_ulong tlow,
2482                                     abi_ulong thigh,
2483                                     unsigned long *hlow,
2484                                     unsigned long *hhigh)
2485 {
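         /*
          * The shifts are done in two steps of TARGET_LONG_BITS / 2 (and
          * likewise HOST_LONG_BITS / 2 below): when the long is 64 bits, a
          * single shift by the full width of the 64-bit value would be
          * undefined behaviour in C.
          */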
2486     uint64_t off = tlow |
2487         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2488         TARGET_LONG_BITS / 2;
2489 
2490     *hlow = off;
2491     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2492 }
2493 
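     /*
      * Build a host iovec array from a guest target_iovec array, locking each
      * guest buffer into host memory.  A bad address in the first entry is a
      * hard EFAULT; later bad entries become zero-length slots so the syscall
      * performs a partial transfer, and the per-entry lengths are clamped so
      * the running total never exceeds max_len.
      */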
2494 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2495                                 abi_ulong count, int copy)
2496 {
2497     struct target_iovec *target_vec;
2498     struct iovec *vec;
2499     abi_ulong total_len, max_len;
2500     int i;
2501     int err = 0;
2502     bool bad_address = false;
2503 
2504     if (count == 0) {
2505         errno = 0;
2506         return NULL;
2507     }
2508     if (count > IOV_MAX) {
2509         errno = EINVAL;
2510         return NULL;
2511     }
2512 
2513     vec = g_try_new0(struct iovec, count);
2514     if (vec == NULL) {
2515         errno = ENOMEM;
2516         return NULL;
2517     }
2518 
2519     target_vec = lock_user(VERIFY_READ, target_addr,
2520                            count * sizeof(struct target_iovec), 1);
2521     if (target_vec == NULL) {
2522         err = EFAULT;
2523         goto fail2;
2524     }
2525 
2526     /* ??? If host page size > target page size, this will result in a
2527        value larger than what we can actually support.  */
2528     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2529     total_len = 0;
2530 
2531     for (i = 0; i < count; i++) {
2532         abi_ulong base = tswapal(target_vec[i].iov_base);
2533         abi_long len = tswapal(target_vec[i].iov_len);
2534 
2535         if (len < 0) {
2536             err = EINVAL;
2537             goto fail;
2538         } else if (len == 0) {
2539             /* A zero-length entry is ignored; the pointer is not checked. */
2540             vec[i].iov_base = 0;
2541         } else {
2542             vec[i].iov_base = lock_user(type, base, len, copy);
2543             /* If the first buffer pointer is bad, this is a fault.  But
2544              * subsequent bad buffers will result in a partial write; this
2545              * is realized by filling the vector with null pointers and
2546              * zero lengths. */
2547             if (!vec[i].iov_base) {
2548                 if (i == 0) {
2549                     err = EFAULT;
2550                     goto fail;
2551                 } else {
2552                     bad_address = true;
2553                 }
2554             }
2555             if (bad_address) {
2556                 len = 0;
2557             }
2558             if (len > max_len - total_len) {
2559                 len = max_len - total_len;
2560             }
2561         }
2562         vec[i].iov_len = len;
2563         total_len += len;
2564     }
2565 
2566     unlock_user(target_vec, target_addr, 0);
2567     return vec;
2568 
2569  fail:
2570     while (--i >= 0) {
2571         if (tswapal(target_vec[i].iov_len) > 0) {
2572             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2573         }
2574     }
2575     unlock_user(target_vec, target_addr, 0);
2576  fail2:
2577     g_free(vec);
2578     errno = err;
2579     return NULL;
2580 }
2581 
2582 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2583                          abi_ulong count, int copy)
2584 {
2585     struct target_iovec *target_vec;
2586     int i;
2587 
2588     target_vec = lock_user(VERIFY_READ, target_addr,
2589                            count * sizeof(struct target_iovec), 1);
2590     if (target_vec) {
2591         for (i = 0; i < count; i++) {
2592             abi_ulong base = tswapal(target_vec[i].iov_base);
2593             abi_long len = tswapal(target_vec[i].iov_len);
2594             if (len < 0) {
2595                 break;
2596             }
2597             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2598         }
2599         unlock_user(target_vec, target_addr, 0);
2600     }
2601 
2602     g_free(vec);
2603 }
2604 
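     /*
      * Translate the guest's socket type argument: map the base SOCK_* type
      * and the SOCK_CLOEXEC/SOCK_NONBLOCK flag bits to host values, failing
      * with EINVAL when the host has no way to express a requested flag.
      */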
2605 static inline int target_to_host_sock_type(int *type)
2606 {
2607     int host_type = 0;
2608     int target_type = *type;
2609 
2610     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2611     case TARGET_SOCK_DGRAM:
2612         host_type = SOCK_DGRAM;
2613         break;
2614     case TARGET_SOCK_STREAM:
2615         host_type = SOCK_STREAM;
2616         break;
2617     default:
2618         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2619         break;
2620     }
2621     if (target_type & TARGET_SOCK_CLOEXEC) {
2622 #if defined(SOCK_CLOEXEC)
2623         host_type |= SOCK_CLOEXEC;
2624 #else
2625         return -TARGET_EINVAL;
2626 #endif
2627     }
2628     if (target_type & TARGET_SOCK_NONBLOCK) {
2629 #if defined(SOCK_NONBLOCK)
2630         host_type |= SOCK_NONBLOCK;
2631 #elif !defined(O_NONBLOCK)
2632         return -TARGET_EINVAL;
2633 #endif
2634     }
2635     *type = host_type;
2636     return 0;
2637 }
2638 
2639 /* Try to emulate socket type flags after socket creation.  */
2640 static int sock_flags_fixup(int fd, int target_type)
2641 {
2642 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2643     if (target_type & TARGET_SOCK_NONBLOCK) {
2644         int flags = fcntl(fd, F_GETFL);
2645         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2646             close(fd);
2647             return -TARGET_EINVAL;
2648         }
2649     }
2650 #endif
2651     return fd;
2652 }
2653 
2654 /* do_socket() Must return target values and target errnos. */
2655 static abi_long do_socket(int domain, int type, int protocol)
2656 {
2657     int target_type = type;
2658     int ret;
2659 
2660     ret = target_to_host_sock_type(&type);
2661     if (ret) {
2662         return ret;
2663     }
2664 
2665     if (domain == PF_NETLINK && !(
2666 #ifdef CONFIG_RTNETLINK
2667          protocol == NETLINK_ROUTE ||
2668 #endif
2669          protocol == NETLINK_KOBJECT_UEVENT ||
2670          protocol == NETLINK_AUDIT)) {
2671         return -EPFNOSUPPORT;
2672     }
2673 
2674     if (domain == AF_PACKET ||
2675         (domain == AF_INET && type == SOCK_PACKET)) {
2676         protocol = tswap16(protocol);
2677     }
2678 
2679     ret = get_errno(socket(domain, type, protocol));
2680     if (ret >= 0) {
2681         ret = sock_flags_fixup(ret, target_type);
2682         if (type == SOCK_PACKET) {
2683             /* Handle an obsolete case:
2684              * if the socket type is SOCK_PACKET, bind by name.
2685              */
2686             fd_trans_register(ret, &target_packet_trans);
2687         } else if (domain == PF_NETLINK) {
2688             switch (protocol) {
2689 #ifdef CONFIG_RTNETLINK
2690             case NETLINK_ROUTE:
2691                 fd_trans_register(ret, &target_netlink_route_trans);
2692                 break;
2693 #endif
2694             case NETLINK_KOBJECT_UEVENT:
2695                 /* nothing to do: messages are strings */
2696                 break;
2697             case NETLINK_AUDIT:
2698                 fd_trans_register(ret, &target_netlink_audit_trans);
2699                 break;
2700             default:
2701                 g_assert_not_reached();
2702             }
2703         }
2704     }
2705     return ret;
2706 }
2707 
2708 /* do_bind() Must return target values and target errnos. */
2709 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2710                         socklen_t addrlen)
2711 {
2712     void *addr;
2713     abi_long ret;
2714 
2715     if ((int)addrlen < 0) {
2716         return -TARGET_EINVAL;
2717     }
2718 
2719     addr = alloca(addrlen+1);
2720 
2721     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2722     if (ret)
2723         return ret;
2724 
2725     return get_errno(bind(sockfd, addr, addrlen));
2726 }
2727 
2728 /* do_connect() Must return target values and target errnos. */
2729 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2730                            socklen_t addrlen)
2731 {
2732     void *addr;
2733     abi_long ret;
2734 
2735     if ((int)addrlen < 0) {
2736         return -TARGET_EINVAL;
2737     }
2738 
2739     addr = alloca(addrlen+1);
2740 
2741     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2742     if (ret)
2743         return ret;
2744 
2745     return get_errno(safe_connect(sockfd, addr, addrlen));
2746 }
2747 
2748 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2749 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2750                                       int flags, int send)
2751 {
2752     abi_long ret, len;
2753     struct msghdr msg;
2754     abi_ulong count;
2755     struct iovec *vec;
2756     abi_ulong target_vec;
2757 
2758     if (msgp->msg_name) {
2759         msg.msg_namelen = tswap32(msgp->msg_namelen);
2760         msg.msg_name = alloca(msg.msg_namelen+1);
2761         ret = target_to_host_sockaddr(fd, msg.msg_name,
2762                                       tswapal(msgp->msg_name),
2763                                       msg.msg_namelen);
2764         if (ret == -TARGET_EFAULT) {
2765             /* For connected sockets msg_name and msg_namelen must
2766              * be ignored, so returning EFAULT immediately is wrong.
2767              * Instead, pass a bad msg_name to the host kernel, and
2768              * let it decide whether to return EFAULT or not.
2769              */
2770             msg.msg_name = (void *)-1;
2771         } else if (ret) {
2772             goto out2;
2773         }
2774     } else {
2775         msg.msg_name = NULL;
2776         msg.msg_namelen = 0;
2777     }
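         /*
          * Give the host twice the control-buffer space the guest supplied:
          * host cmsg headers and alignment can be larger than the target's
          * (see the "twice the size" note in target_to_host_cmsg()).
          */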
2778     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2779     msg.msg_control = alloca(msg.msg_controllen);
2780     memset(msg.msg_control, 0, msg.msg_controllen);
2781 
2782     msg.msg_flags = tswap32(msgp->msg_flags);
2783 
2784     count = tswapal(msgp->msg_iovlen);
2785     target_vec = tswapal(msgp->msg_iov);
2786 
2787     if (count > IOV_MAX) {
2788         /* sendmsg/recvmsg return a different errno for this condition than
2789          * readv/writev, so we must catch it here before lock_iovec() does.
2790          */
2791         ret = -TARGET_EMSGSIZE;
2792         goto out2;
2793     }
2794 
2795     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2796                      target_vec, count, send);
2797     if (vec == NULL) {
2798         ret = -host_to_target_errno(errno);
2799         goto out2;
2800     }
2801     msg.msg_iovlen = count;
2802     msg.msg_iov = vec;
2803 
2804     if (send) {
2805         if (fd_trans_target_to_host_data(fd)) {
2806             void *host_msg;
2807 
2808             host_msg = g_malloc(msg.msg_iov->iov_len);
2809             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2810             ret = fd_trans_target_to_host_data(fd)(host_msg,
2811                                                    msg.msg_iov->iov_len);
2812             if (ret >= 0) {
2813                 msg.msg_iov->iov_base = host_msg;
2814                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2815             }
2816             g_free(host_msg);
2817         } else {
2818             ret = target_to_host_cmsg(&msg, msgp);
2819             if (ret == 0) {
2820                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2821             }
2822         }
2823     } else {
2824         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2825         if (!is_error(ret)) {
2826             len = ret;
2827             if (fd_trans_host_to_target_data(fd)) {
2828                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2829                                                MIN(msg.msg_iov->iov_len, len));
2830             } else {
2831                 ret = host_to_target_cmsg(msgp, &msg);
2832             }
2833             if (!is_error(ret)) {
2834                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2835                 msgp->msg_flags = tswap32(msg.msg_flags);
2836                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2837                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2838                                     msg.msg_name, msg.msg_namelen);
2839                     if (ret) {
2840                         goto out;
2841                     }
2842                 }
2843 
2844                 ret = len;
2845             }
2846         }
2847     }
2848 
2849 out:
2850     unlock_iovec(vec, target_vec, count, !send);
2851 out2:
2852     return ret;
2853 }
2854 
2855 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2856                                int flags, int send)
2857 {
2858     abi_long ret;
2859     struct target_msghdr *msgp;
2860 
2861     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2862                           msgp,
2863                           target_msg,
2864                           send ? 1 : 0)) {
2865         return -TARGET_EFAULT;
2866     }
2867     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2868     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2869     return ret;
2870 }
2871 
2872 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2873  * so it might not have this *mmsg-specific flag either.
2874  */
2875 #ifndef MSG_WAITFORONE
2876 #define MSG_WAITFORONE 0x10000
2877 #endif
2878 
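     /*
      * sendmmsg/recvmmsg are emulated as a loop over do_sendrecvmsg_locked(),
      * one message at a time, writing each result length back into the
      * guest's mmsghdr array.
      */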
2879 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2880                                 unsigned int vlen, unsigned int flags,
2881                                 int send)
2882 {
2883     struct target_mmsghdr *mmsgp;
2884     abi_long ret = 0;
2885     int i;
2886 
2887     if (vlen > UIO_MAXIOV) {
2888         vlen = UIO_MAXIOV;
2889     }
2890 
2891     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2892     if (!mmsgp) {
2893         return -TARGET_EFAULT;
2894     }
2895 
2896     for (i = 0; i < vlen; i++) {
2897         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2898         if (is_error(ret)) {
2899             break;
2900         }
2901         mmsgp[i].msg_len = tswap32(ret);
2902         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2903         if (flags & MSG_WAITFORONE) {
2904             flags |= MSG_DONTWAIT;
2905         }
2906     }
2907 
2908     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2909 
2910     /* Return number of datagrams sent if we sent any at all;
2911      * otherwise return the error.
2912      */
2913     if (i) {
2914         return i;
2915     }
2916     return ret;
2917 }
2918 
2919 /* do_accept4() Must return target values and target errnos. */
2920 static abi_long do_accept4(int fd, abi_ulong target_addr,
2921                            abi_ulong target_addrlen_addr, int flags)
2922 {
2923     socklen_t addrlen, ret_addrlen;
2924     void *addr;
2925     abi_long ret;
2926     int host_flags;
2927 
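         /*
          * Note (assumption): the accept4() flag bits SOCK_CLOEXEC and
          * SOCK_NONBLOCK are defined in terms of the corresponding O_* flags,
          * which is why the fcntl flag table can be reused to translate them.
          */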
2928     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2929 
2930     if (target_addr == 0) {
2931         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2932     }
2933 
2934     /* Linux returns EINVAL if the addrlen pointer is invalid */
2935     if (get_user_u32(addrlen, target_addrlen_addr))
2936         return -TARGET_EINVAL;
2937 
2938     if ((int)addrlen < 0) {
2939         return -TARGET_EINVAL;
2940     }
2941 
2942     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2943         return -TARGET_EINVAL;
2944 
2945     addr = alloca(addrlen);
2946 
2947     ret_addrlen = addrlen;
2948     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2949     if (!is_error(ret)) {
2950         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2951         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2952             ret = -TARGET_EFAULT;
2953         }
2954     }
2955     return ret;
2956 }
2957 
2958 /* do_getpeername() Must return target values and target errnos. */
2959 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2960                                abi_ulong target_addrlen_addr)
2961 {
2962     socklen_t addrlen, ret_addrlen;
2963     void *addr;
2964     abi_long ret;
2965 
2966     if (get_user_u32(addrlen, target_addrlen_addr))
2967         return -TARGET_EFAULT;
2968 
2969     if ((int)addrlen < 0) {
2970         return -TARGET_EINVAL;
2971     }
2972 
2973     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2974         return -TARGET_EFAULT;
2975 
2976     addr = alloca(addrlen);
2977 
2978     ret_addrlen = addrlen;
2979     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2980     if (!is_error(ret)) {
2981         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2982         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2983             ret = -TARGET_EFAULT;
2984         }
2985     }
2986     return ret;
2987 }
2988 
2989 /* do_getsockname() Must return target values and target errnos. */
2990 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2991                                abi_ulong target_addrlen_addr)
2992 {
2993     socklen_t addrlen, ret_addrlen;
2994     void *addr;
2995     abi_long ret;
2996 
2997     if (get_user_u32(addrlen, target_addrlen_addr))
2998         return -TARGET_EFAULT;
2999 
3000     if ((int)addrlen < 0) {
3001         return -TARGET_EINVAL;
3002     }
3003 
3004     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3005         return -TARGET_EFAULT;
3006 
3007     addr = alloca(addrlen);
3008 
3009     ret_addrlen = addrlen;
3010     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3011     if (!is_error(ret)) {
3012         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3013         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3014             ret = -TARGET_EFAULT;
3015         }
3016     }
3017     return ret;
3018 }
3019 
3020 /* do_socketpair() Must return target values and target errnos. */
3021 static abi_long do_socketpair(int domain, int type, int protocol,
3022                               abi_ulong target_tab_addr)
3023 {
3024     int tab[2];
3025     abi_long ret;
3026 
3027     target_to_host_sock_type(&type);
3028 
3029     ret = get_errno(socketpair(domain, type, protocol, tab));
3030     if (!is_error(ret)) {
3031         if (put_user_s32(tab[0], target_tab_addr)
3032             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3033             ret = -TARGET_EFAULT;
3034     }
3035     return ret;
3036 }
3037 
3038 /* do_sendto() Must return target values and target errnos. */
3039 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3040                           abi_ulong target_addr, socklen_t addrlen)
3041 {
3042     void *addr;
3043     void *host_msg;
3044     void *copy_msg = NULL;
3045     abi_long ret;
3046 
3047     if ((int)addrlen < 0) {
3048         return -TARGET_EINVAL;
3049     }
3050 
3051     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3052     if (!host_msg)
3053         return -TARGET_EFAULT;
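         /*
          * Some fd types (e.g. the netlink and packet sockets registered in
          * do_socket()) need their payload converted before it reaches the
          * host; if a translator is registered for this fd, run it on a
          * private copy of the data.
          */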
3054     if (fd_trans_target_to_host_data(fd)) {
3055         copy_msg = host_msg;
3056         host_msg = g_malloc(len);
3057         memcpy(host_msg, copy_msg, len);
3058         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3059         if (ret < 0) {
3060             goto fail;
3061         }
3062     }
3063     if (target_addr) {
3064         addr = alloca(addrlen+1);
3065         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3066         if (ret) {
3067             goto fail;
3068         }
3069         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3070     } else {
3071         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3072     }
3073 fail:
3074     if (copy_msg) {
3075         g_free(host_msg);
3076         host_msg = copy_msg;
3077     }
3078     unlock_user(host_msg, msg, 0);
3079     return ret;
3080 }
3081 
3082 /* do_recvfrom() Must return target values and target errnos. */
3083 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3084                             abi_ulong target_addr,
3085                             abi_ulong target_addrlen)
3086 {
3087     socklen_t addrlen, ret_addrlen;
3088     void *addr;
3089     void *host_msg;
3090     abi_long ret;
3091 
3092     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3093     if (!host_msg)
3094         return -TARGET_EFAULT;
3095     if (target_addr) {
3096         if (get_user_u32(addrlen, target_addrlen)) {
3097             ret = -TARGET_EFAULT;
3098             goto fail;
3099         }
3100         if ((int)addrlen < 0) {
3101             ret = -TARGET_EINVAL;
3102             goto fail;
3103         }
3104         addr = alloca(addrlen);
3105         ret_addrlen = addrlen;
3106         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3107                                       addr, &ret_addrlen));
3108     } else {
3109         addr = NULL; /* To keep compiler quiet.  */
3110         addrlen = 0; /* To keep compiler quiet.  */
3111         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3112     }
3113     if (!is_error(ret)) {
3114         if (fd_trans_host_to_target_data(fd)) {
3115             abi_long trans;
3116             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3117             if (is_error(trans)) {
3118                 ret = trans;
3119                 goto fail;
3120             }
3121         }
3122         if (target_addr) {
3123             host_to_target_sockaddr(target_addr, addr,
3124                                     MIN(addrlen, ret_addrlen));
3125             if (put_user_u32(ret_addrlen, target_addrlen)) {
3126                 ret = -TARGET_EFAULT;
3127                 goto fail;
3128             }
3129         }
3130         unlock_user(host_msg, msg, len);
3131     } else {
3132 fail:
3133         unlock_user(host_msg, msg, 0);
3134     }
3135     return ret;
3136 }
3137 
3138 #ifdef TARGET_NR_socketcall
3139 /* do_socketcall() must return target values and target errnos. */
3140 static abi_long do_socketcall(int num, abi_ulong vptr)
3141 {
3142     static const unsigned nargs[] = { /* number of arguments per operation */
3143         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3144         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3145         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3146         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3147         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3148         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3149         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3150         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3151         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3152         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3153         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3154         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3155         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3156         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3157         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3158         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3159         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3160         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3161         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3162         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3163     };
3164     abi_long a[6]; /* max 6 args */
3165     unsigned i;
3166 
3167     /* check the range of the first argument num */
3168     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3169     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3170         return -TARGET_EINVAL;
3171     }
3172     /* ensure we have space for args */
3173     if (nargs[num] > ARRAY_SIZE(a)) {
3174         return -TARGET_EINVAL;
3175     }
3176     /* collect the arguments in a[] according to nargs[] */
3177     for (i = 0; i < nargs[num]; ++i) {
3178         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3179             return -TARGET_EFAULT;
3180         }
3181     }
3182     /* now when we have the args, invoke the appropriate underlying function */
3183     switch (num) {
3184     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3185         return do_socket(a[0], a[1], a[2]);
3186     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3187         return do_bind(a[0], a[1], a[2]);
3188     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3189         return do_connect(a[0], a[1], a[2]);
3190     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3191         return get_errno(listen(a[0], a[1]));
3192     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3193         return do_accept4(a[0], a[1], a[2], 0);
3194     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3195         return do_getsockname(a[0], a[1], a[2]);
3196     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3197         return do_getpeername(a[0], a[1], a[2]);
3198     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3199         return do_socketpair(a[0], a[1], a[2], a[3]);
3200     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3201         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3202     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3203         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3204     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3205         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3206     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3207         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3208     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3209         return get_errno(shutdown(a[0], a[1]));
3210     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3211         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3212     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3213         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3214     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3215         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3216     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3217         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3218     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3219         return do_accept4(a[0], a[1], a[2], a[3]);
3220     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3221         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3222     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3223         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3224     default:
3225         gemu_log("Unsupported socketcall: %d\n", num);
3226         return -TARGET_EINVAL;
3227     }
3228 }
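
/*
 * For reference, a guest libc that reaches do_socketcall() (for example on
 * 32-bit targets such as i386 which only provide the multiplexed entry
 * point) packs the arguments into an array of register-sized slots and
 * passes just the operation number plus a guest pointer to that array.
 * Illustrative guest-side sketch, not part of this file:
 *
 *     unsigned long args[3];
 *     args[0] = sockfd;
 *     args[1] = (unsigned long)addr;   (guest pointer to the sockaddr)
 *     args[2] = addrlen;
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * which is why the loop above copies nargs[num] abi_long slots starting at
 * vptr before dispatching.
 */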
3229 #endif
3230 
3231 #define N_SHM_REGIONS	32
3232 
3233 static struct shm_region {
3234     abi_ulong start;
3235     abi_ulong size;
3236     bool in_use;
3237 } shm_regions[N_SHM_REGIONS];
3238 
3239 #ifndef TARGET_SEMID64_DS
3240 /* asm-generic version of this struct */
3241 struct target_semid64_ds
3242 {
3243   struct target_ipc_perm sem_perm;
3244   abi_ulong sem_otime;
3245 #if TARGET_ABI_BITS == 32
3246   abi_ulong __unused1;
3247 #endif
3248   abi_ulong sem_ctime;
3249 #if TARGET_ABI_BITS == 32
3250   abi_ulong __unused2;
3251 #endif
3252   abi_ulong sem_nsems;
3253   abi_ulong __unused3;
3254   abi_ulong __unused4;
3255 };
3256 #endif
3257 
3258 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3259                                                abi_ulong target_addr)
3260 {
3261     struct target_ipc_perm *target_ip;
3262     struct target_semid64_ds *target_sd;
3263 
3264     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3265         return -TARGET_EFAULT;
3266     target_ip = &(target_sd->sem_perm);
3267     host_ip->__key = tswap32(target_ip->__key);
3268     host_ip->uid = tswap32(target_ip->uid);
3269     host_ip->gid = tswap32(target_ip->gid);
3270     host_ip->cuid = tswap32(target_ip->cuid);
3271     host_ip->cgid = tswap32(target_ip->cgid);
3272 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3273     host_ip->mode = tswap32(target_ip->mode);
3274 #else
3275     host_ip->mode = tswap16(target_ip->mode);
3276 #endif
3277 #if defined(TARGET_PPC)
3278     host_ip->__seq = tswap32(target_ip->__seq);
3279 #else
3280     host_ip->__seq = tswap16(target_ip->__seq);
3281 #endif
3282     unlock_user_struct(target_sd, target_addr, 0);
3283     return 0;
3284 }
3285 
3286 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3287                                                struct ipc_perm *host_ip)
3288 {
3289     struct target_ipc_perm *target_ip;
3290     struct target_semid64_ds *target_sd;
3291 
3292     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3293         return -TARGET_EFAULT;
3294     target_ip = &(target_sd->sem_perm);
3295     target_ip->__key = tswap32(host_ip->__key);
3296     target_ip->uid = tswap32(host_ip->uid);
3297     target_ip->gid = tswap32(host_ip->gid);
3298     target_ip->cuid = tswap32(host_ip->cuid);
3299     target_ip->cgid = tswap32(host_ip->cgid);
3300 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3301     target_ip->mode = tswap32(host_ip->mode);
3302 #else
3303     target_ip->mode = tswap16(host_ip->mode);
3304 #endif
3305 #if defined(TARGET_PPC)
3306     target_ip->__seq = tswap32(host_ip->__seq);
3307 #else
3308     target_ip->__seq = tswap16(host_ip->__seq);
3309 #endif
3310     unlock_user_struct(target_sd, target_addr, 1);
3311     return 0;
3312 }
3313 
3314 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3315                                                abi_ulong target_addr)
3316 {
3317     struct target_semid64_ds *target_sd;
3318 
3319     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3320         return -TARGET_EFAULT;
3321     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3322         return -TARGET_EFAULT;
3323     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3324     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3325     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3326     unlock_user_struct(target_sd, target_addr, 0);
3327     return 0;
3328 }
3329 
3330 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3331                                                struct semid_ds *host_sd)
3332 {
3333     struct target_semid64_ds *target_sd;
3334 
3335     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3336         return -TARGET_EFAULT;
3337     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3338         return -TARGET_EFAULT;
3339     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3340     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3341     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3342     unlock_user_struct(target_sd, target_addr, 1);
3343     return 0;
3344 }
3345 
3346 struct target_seminfo {
3347     int semmap;
3348     int semmni;
3349     int semmns;
3350     int semmnu;
3351     int semmsl;
3352     int semopm;
3353     int semume;
3354     int semusz;
3355     int semvmx;
3356     int semaem;
3357 };
3358 
3359 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3360                                               struct seminfo *host_seminfo)
3361 {
3362     struct target_seminfo *target_seminfo;
3363     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3364         return -TARGET_EFAULT;
3365     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3366     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3367     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3368     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3369     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3370     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3371     __put_user(host_seminfo->semume, &target_seminfo->semume);
3372     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3373     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3374     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3375     unlock_user_struct(target_seminfo, target_addr, 1);
3376     return 0;
3377 }
3378 
3379 union semun {
3380     int val;
3381     struct semid_ds *buf;
3382     unsigned short *array;
3383     struct seminfo *__buf;
3384 };
3385 
3386 union target_semun {
3387     int val;
3388     abi_ulong buf;
3389     abi_ulong array;
3390     abi_ulong __buf;
3391 };
3392 
3393 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3394                                                abi_ulong target_addr)
3395 {
3396     int nsems;
3397     unsigned short *array;
3398     union semun semun;
3399     struct semid_ds semid_ds;
3400     int i, ret;
3401 
3402     semun.buf = &semid_ds;
3403 
3404     ret = semctl(semid, 0, IPC_STAT, semun);
3405     if (ret == -1)
3406         return get_errno(ret);
3407 
3408     nsems = semid_ds.sem_nsems;
3409 
3410     *host_array = g_try_new(unsigned short, nsems);
3411     if (!*host_array) {
3412         return -TARGET_ENOMEM;
3413     }
3414     array = lock_user(VERIFY_READ, target_addr,
3415                       nsems*sizeof(unsigned short), 1);
3416     if (!array) {
3417         g_free(*host_array);
3418         return -TARGET_EFAULT;
3419     }
3420 
3421     for (i = 0; i < nsems; i++) {
3422         __get_user((*host_array)[i], &array[i]);
3423     }
3424     unlock_user(array, target_addr, 0);
3425 
3426     return 0;
3427 }
3428 
3429 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3430                                                unsigned short **host_array)
3431 {
3432     int nsems;
3433     unsigned short *array;
3434     union semun semun;
3435     struct semid_ds semid_ds;
3436     int i, ret;
3437 
3438     semun.buf = &semid_ds;
3439 
3440     ret = semctl(semid, 0, IPC_STAT, semun);
3441     if (ret == -1)
3442         return get_errno(ret);
3443 
3444     nsems = semid_ds.sem_nsems;
3445 
3446     array = lock_user(VERIFY_WRITE, target_addr,
3447                       nsems*sizeof(unsigned short), 0);
3448     if (!array)
3449         return -TARGET_EFAULT;
3450 
3451     for (i = 0; i < nsems; i++) {
3452         __put_user((*host_array)[i], &array[i]);
3453     }
3454     g_free(*host_array);
3455     unlock_user(array, target_addr, 1);
3456 
3457     return 0;
3458 }
3459 
3460 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3461                                  abi_ulong target_arg)
3462 {
3463     union target_semun target_su = { .buf = target_arg };
3464     union semun arg;
3465     struct semid_ds dsarg;
3466     unsigned short *array = NULL;
3467     struct seminfo seminfo;
3468     abi_long ret = -TARGET_EINVAL;
3469     abi_long err;
3470     cmd &= 0xff;
3471 
3472     switch (cmd) {
3473     case GETVAL:
3474     case SETVAL:
3475         /* In 64-bit cross-endian situations, we will erroneously pick up
3476          * the wrong half of the union for the "val" element.  To rectify
3477          * this, the entire 8-byte structure is byteswapped, followed by a
3478          * swap of the 4-byte val field; otherwise the data is already in
3479          * proper host byte order.  (See the usage example after this function.) */
3480         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3481             target_su.buf = tswapal(target_su.buf);
3482             arg.val = tswap32(target_su.val);
3483         } else {
3484             arg.val = target_su.val;
3485         }
3486         ret = get_errno(semctl(semid, semnum, cmd, arg));
3487         break;
3488     case GETALL:
3489     case SETALL:
3490         err = target_to_host_semarray(semid, &array, target_su.array);
3491         if (err)
3492             return err;
3493         arg.array = array;
3494         ret = get_errno(semctl(semid, semnum, cmd, arg));
3495         err = host_to_target_semarray(semid, target_su.array, &array);
3496         if (err)
3497             return err;
3498         break;
3499     case IPC_STAT:
3500     case IPC_SET:
3501     case SEM_STAT:
3502         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3503         if (err)
3504             return err;
3505         arg.buf = &dsarg;
3506         ret = get_errno(semctl(semid, semnum, cmd, arg));
3507         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3508         if (err)
3509             return err;
3510         break;
3511     case IPC_INFO:
3512     case SEM_INFO:
3513         arg.__buf = &seminfo;
3514         ret = get_errno(semctl(semid, semnum, cmd, arg));
3515         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3516         if (err)
3517             return err;
3518         break;
3519     case IPC_RMID:
3520     case GETPID:
3521     case GETNCNT:
3522     case GETZCNT:
3523         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3524         break;
3525     }
3526 
3527     return ret;
3528 }
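
/*
 * Usage example referenced in the GETVAL/SETVAL comment above: a guest
 * program hitting that path uses the standard SysV interface with the
 * union passed by value.  Illustrative guest-side sketch, not part of
 * this file:
 *
 *     union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *     union semun arg;
 *     arg.val = 1;
 *     semctl(semid, 0, SETVAL, arg);
 *
 * Only the 4-byte "val" member is meaningful for SETVAL, which is why the
 * 64-bit cross-endian case has to pick the correct half of the 8-byte
 * union slot before calling the host semctl().
 */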
3529 
3530 struct target_sembuf {
3531     unsigned short sem_num;
3532     short sem_op;
3533     short sem_flg;
3534 };
3535 
3536 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3537                                              abi_ulong target_addr,
3538                                              unsigned nsops)
3539 {
3540     struct target_sembuf *target_sembuf;
3541     int i;
3542 
3543     target_sembuf = lock_user(VERIFY_READ, target_addr,
3544                               nsops*sizeof(struct target_sembuf), 1);
3545     if (!target_sembuf)
3546         return -TARGET_EFAULT;
3547 
3548     for (i = 0; i < nsops; i++) {
3549         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3550         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3551         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3552     }
3553 
3554     unlock_user(target_sembuf, target_addr, 0);
3555 
3556     return 0;
3557 }
3558 
3559 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3560 {
3561     struct sembuf sops[nsops];
3562     abi_long ret;
3563 
3564     if (target_to_host_sembuf(sops, ptr, nsops))
3565         return -TARGET_EFAULT;
3566 
3567     ret = -TARGET_ENOSYS;
3568 #ifdef __NR_semtimedop
3569     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3570 #endif
3571 #ifdef __NR_ipc
3572     if (ret == -TARGET_ENOSYS) {
3573         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3574     }
3575 #endif
3576     return ret;
3577 }
3578 
3579 struct target_msqid_ds
3580 {
3581     struct target_ipc_perm msg_perm;
3582     abi_ulong msg_stime;
3583 #if TARGET_ABI_BITS == 32
3584     abi_ulong __unused1;
3585 #endif
3586     abi_ulong msg_rtime;
3587 #if TARGET_ABI_BITS == 32
3588     abi_ulong __unused2;
3589 #endif
3590     abi_ulong msg_ctime;
3591 #if TARGET_ABI_BITS == 32
3592     abi_ulong __unused3;
3593 #endif
3594     abi_ulong __msg_cbytes;
3595     abi_ulong msg_qnum;
3596     abi_ulong msg_qbytes;
3597     abi_ulong msg_lspid;
3598     abi_ulong msg_lrpid;
3599     abi_ulong __unused4;
3600     abi_ulong __unused5;
3601 };
3602 
3603 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3604                                                abi_ulong target_addr)
3605 {
3606     struct target_msqid_ds *target_md;
3607 
3608     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3609         return -TARGET_EFAULT;
3610     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3611         return -TARGET_EFAULT;
3612     host_md->msg_stime = tswapal(target_md->msg_stime);
3613     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3614     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3615     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3616     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3617     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3618     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3619     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3620     unlock_user_struct(target_md, target_addr, 0);
3621     return 0;
3622 }
3623 
3624 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3625                                                struct msqid_ds *host_md)
3626 {
3627     struct target_msqid_ds *target_md;
3628 
3629     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3630         return -TARGET_EFAULT;
3631     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3632         return -TARGET_EFAULT;
3633     target_md->msg_stime = tswapal(host_md->msg_stime);
3634     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3635     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3636     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3637     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3638     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3639     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3640     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3641     unlock_user_struct(target_md, target_addr, 1);
3642     return 0;
3643 }
3644 
3645 struct target_msginfo {
3646     int msgpool;
3647     int msgmap;
3648     int msgmax;
3649     int msgmnb;
3650     int msgmni;
3651     int msgssz;
3652     int msgtql;
3653     unsigned short int msgseg;
3654 };
3655 
3656 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3657                                               struct msginfo *host_msginfo)
3658 {
3659     struct target_msginfo *target_msginfo;
3660     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3661         return -TARGET_EFAULT;
3662     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3663     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3664     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3665     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3666     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3667     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3668     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3669     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3670     unlock_user_struct(target_msginfo, target_addr, 1);
3671     return 0;
3672 }
3673 
3674 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3675 {
3676     struct msqid_ds dsarg;
3677     struct msginfo msginfo;
3678     abi_long ret = -TARGET_EINVAL;
3679 
3680     cmd &= 0xff;
3681 
3682     switch (cmd) {
3683     case IPC_STAT:
3684     case IPC_SET:
3685     case MSG_STAT:
3686         if (target_to_host_msqid_ds(&dsarg,ptr))
3687             return -TARGET_EFAULT;
3688         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3689         if (host_to_target_msqid_ds(ptr,&dsarg))
3690             return -TARGET_EFAULT;
3691         break;
3692     case IPC_RMID:
3693         ret = get_errno(msgctl(msgid, cmd, NULL));
3694         break;
3695     case IPC_INFO:
3696     case MSG_INFO:
3697         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3698         if (host_to_target_msginfo(ptr, &msginfo))
3699             return -TARGET_EFAULT;
3700         break;
3701     }
3702 
3703     return ret;
3704 }
3705 
3706 struct target_msgbuf {
3707     abi_long mtype;
3708     char	mtext[1];
3709 };
3710 
3711 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3712                                  ssize_t msgsz, int msgflg)
3713 {
3714     struct target_msgbuf *target_mb;
3715     struct msgbuf *host_mb;
3716     abi_long ret = 0;
3717 
3718     if (msgsz < 0) {
3719         return -TARGET_EINVAL;
3720     }
3721 
3722     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3723         return -TARGET_EFAULT;
3724     host_mb = g_try_malloc(msgsz + sizeof(long));
3725     if (!host_mb) {
3726         unlock_user_struct(target_mb, msgp, 0);
3727         return -TARGET_ENOMEM;
3728     }
3729     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3730     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3731     ret = -TARGET_ENOSYS;
3732 #ifdef __NR_msgsnd
3733     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3734 #endif
3735 #ifdef __NR_ipc
3736     if (ret == -TARGET_ENOSYS) {
3737         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3738                                  host_mb, 0));
3739     }
3740 #endif
3741     g_free(host_mb);
3742     unlock_user_struct(target_mb, msgp, 0);
3743 
3744     return ret;
3745 }
3746 
3747 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3748                                  ssize_t msgsz, abi_long msgtyp,
3749                                  int msgflg)
3750 {
3751     struct target_msgbuf *target_mb;
3752     char *target_mtext;
3753     struct msgbuf *host_mb;
3754     abi_long ret = 0;
3755 
3756     if (msgsz < 0) {
3757         return -TARGET_EINVAL;
3758     }
3759 
3760     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3761         return -TARGET_EFAULT;
3762 
3763     host_mb = g_try_malloc(msgsz + sizeof(long));
3764     if (!host_mb) {
3765         ret = -TARGET_ENOMEM;
3766         goto end;
3767     }
3768     ret = -TARGET_ENOSYS;
3769 #ifdef __NR_msgrcv
3770     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3771 #endif
3772 #ifdef __NR_ipc
3773     if (ret == -TARGET_ENOSYS) {
3774         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3775                         msgflg, host_mb, msgtyp));
3776     }
3777 #endif
3778 
3779     if (ret > 0) {
3780         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3781         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3782         if (!target_mtext) {
3783             ret = -TARGET_EFAULT;
3784             goto end;
3785         }
3786         memcpy(target_mb->mtext, host_mb->mtext, ret);
3787         unlock_user(target_mtext, target_mtext_addr, ret);
3788     }
3789 
3790     target_mb->mtype = tswapal(host_mb->mtype);
3791 
3792 end:
3793     if (target_mb)
3794         unlock_user_struct(target_mb, msgp, 1);
3795     g_free(host_mb);
3796     return ret;
3797 }
3798 
3799 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3800                                                abi_ulong target_addr)
3801 {
3802     struct target_shmid_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3805         return -TARGET_EFAULT;
3806     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3807         return -TARGET_EFAULT;
3808     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3809     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3810     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3811     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3812     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3813     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3814     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3815     unlock_user_struct(target_sd, target_addr, 0);
3816     return 0;
3817 }
3818 
3819 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3820                                                struct shmid_ds *host_sd)
3821 {
3822     struct target_shmid_ds *target_sd;
3823 
3824     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3825         return -TARGET_EFAULT;
3826     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3827         return -TARGET_EFAULT;
3828     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3829     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3830     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3831     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3832     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3833     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3834     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3835     unlock_user_struct(target_sd, target_addr, 1);
3836     return 0;
3837 }
3838 
3839 struct  target_shminfo {
3840     abi_ulong shmmax;
3841     abi_ulong shmmin;
3842     abi_ulong shmmni;
3843     abi_ulong shmseg;
3844     abi_ulong shmall;
3845 };
3846 
3847 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3848                                               struct shminfo *host_shminfo)
3849 {
3850     struct target_shminfo *target_shminfo;
3851     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3852         return -TARGET_EFAULT;
3853     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3854     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3855     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3856     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3857     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3858     unlock_user_struct(target_shminfo, target_addr, 1);
3859     return 0;
3860 }
3861 
3862 struct target_shm_info {
3863     int used_ids;
3864     abi_ulong shm_tot;
3865     abi_ulong shm_rss;
3866     abi_ulong shm_swp;
3867     abi_ulong swap_attempts;
3868     abi_ulong swap_successes;
3869 };
3870 
3871 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3872                                                struct shm_info *host_shm_info)
3873 {
3874     struct target_shm_info *target_shm_info;
3875     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3878     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3879     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3880     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3881     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3882     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3883     unlock_user_struct(target_shm_info, target_addr, 1);
3884     return 0;
3885 }
3886 
3887 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3888 {
3889     struct shmid_ds dsarg;
3890     struct shminfo shminfo;
3891     struct shm_info shm_info;
3892     abi_long ret = -TARGET_EINVAL;
3893 
3894     cmd &= 0xff;
3895 
3896     switch(cmd) {
3897     case IPC_STAT:
3898     case IPC_SET:
3899     case SHM_STAT:
3900         if (target_to_host_shmid_ds(&dsarg, buf))
3901             return -TARGET_EFAULT;
3902         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3903         if (host_to_target_shmid_ds(buf, &dsarg))
3904             return -TARGET_EFAULT;
3905         break;
3906     case IPC_INFO:
3907         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3908         if (host_to_target_shminfo(buf, &shminfo))
3909             return -TARGET_EFAULT;
3910         break;
3911     case SHM_INFO:
3912         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3913         if (host_to_target_shm_info(buf, &shm_info))
3914             return -TARGET_EFAULT;
3915         break;
3916     case IPC_RMID:
3917     case SHM_LOCK:
3918     case SHM_UNLOCK:
3919         ret = get_errno(shmctl(shmid, cmd, NULL));
3920         break;
3921     }
3922 
3923     return ret;
3924 }
3925 
3926 #ifndef TARGET_FORCE_SHMLBA
3927 /* For most architectures, SHMLBA is the same as the page size;
3928  * some architectures have larger values, in which case they should
3929  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3930  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3931  * and defining its own value for SHMLBA.
3932  *
3933  * The kernel also permits SHMLBA to be set by the architecture to a
3934  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3935  * this means that addresses are rounded to the large size if
3936  * SHM_RND is set but addresses not aligned to that size are not rejected
3937  * as long as they are at least page-aligned. Since the only architecture
3938  * which uses this is ia64 this code doesn't provide for that oddity.
3939  */
3940 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3941 {
3942     return TARGET_PAGE_SIZE;
3943 }
3944 #endif
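
/*
 * A target with a larger alignment requirement would instead define
 * TARGET_FORCE_SHMLBA in its target headers and supply its own helper.
 * Sketch of what such a definition looks like, modelled on the Arm port
 * (the Arm kernel sets SHMLBA to 4 * PAGE_SIZE); the exact CPU state type
 * depends on the target:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *
 *     static inline abi_ulong target_shmlba(CPUARMState *env)
 *     {
 *         return 4 * 4096;
 *     }
 */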
3945 
3946 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3947                                  int shmid, abi_ulong shmaddr, int shmflg)
3948 {
3949     abi_long raddr;
3950     void *host_raddr;
3951     struct shmid_ds shm_info;
3952     int i, ret;
3953     abi_ulong shmlba;
3954 
3955     /* find out the length of the shared memory segment */
3956     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3957     if (is_error(ret)) {
3958         /* can't get length, bail out */
3959         return ret;
3960     }
3961 
3962     shmlba = target_shmlba(cpu_env);
3963 
3964     if (shmaddr & (shmlba - 1)) {
3965         if (shmflg & SHM_RND) {
3966             shmaddr &= ~(shmlba - 1);
3967         } else {
3968             return -TARGET_EINVAL;
3969         }
3970     }
3971     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3972         return -TARGET_EINVAL;
3973     }
3974 
3975     mmap_lock();
3976 
3977     if (shmaddr)
3978         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3979     else {
3980         abi_ulong mmap_start;
3981 
3982         /* In order to use the host shmat, we need to honor host SHMLBA.  */
3983         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
3984 
3985         if (mmap_start == -1) {
3986             errno = ENOMEM;
3987             host_raddr = (void *)-1;
3988         } else
3989             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3990     }
3991 
3992     if (host_raddr == (void *)-1) {
3993         mmap_unlock();
3994         return get_errno((long)host_raddr);
3995     }
3996     raddr = h2g((unsigned long)host_raddr);
3997 
3998     page_set_flags(raddr, raddr + shm_info.shm_segsz,
3999                    PAGE_VALID | PAGE_READ |
4000                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4001 
4002     for (i = 0; i < N_SHM_REGIONS; i++) {
4003         if (!shm_regions[i].in_use) {
4004             shm_regions[i].in_use = true;
4005             shm_regions[i].start = raddr;
4006             shm_regions[i].size = shm_info.shm_segsz;
4007             break;
4008         }
4009     }
4010 
4011     mmap_unlock();
4012     return raddr;
4013 
4014 }
4015 
4016 static inline abi_long do_shmdt(abi_ulong shmaddr)
4017 {
4018     int i;
4019     abi_long rv;
4020 
4021     mmap_lock();
4022 
4023     for (i = 0; i < N_SHM_REGIONS; ++i) {
4024         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4025             shm_regions[i].in_use = false;
4026             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4027             break;
4028         }
4029     }
4030     rv = get_errno(shmdt(g2h(shmaddr)));
4031 
4032     mmap_unlock();
4033 
4034     return rv;
4035 }
4036 
4037 #ifdef TARGET_NR_ipc
4038 /* ??? This only works with linear mappings.  */
4039 /* do_ipc() must return target values and target errnos. */
4040 static abi_long do_ipc(CPUArchState *cpu_env,
4041                        unsigned int call, abi_long first,
4042                        abi_long second, abi_long third,
4043                        abi_long ptr, abi_long fifth)
4044 {
4045     int version;
4046     abi_long ret = 0;
4047 
4048     version = call >> 16;
4049     call &= 0xffff;
4050 
4051     switch (call) {
4052     case IPCOP_semop:
4053         ret = do_semop(first, ptr, second);
4054         break;
4055 
4056     case IPCOP_semget:
4057         ret = get_errno(semget(first, second, third));
4058         break;
4059 
4060     case IPCOP_semctl: {
4061         /* The semun argument to semctl is passed by value, so dereference the
4062          * ptr argument. */
4063         abi_ulong atptr;
4064         get_user_ual(atptr, ptr);
4065         ret = do_semctl(first, second, third, atptr);
4066         break;
4067     }
4068 
4069     case IPCOP_msgget:
4070         ret = get_errno(msgget(first, second));
4071         break;
4072 
4073     case IPCOP_msgsnd:
4074         ret = do_msgsnd(first, ptr, second, third);
4075         break;
4076 
4077     case IPCOP_msgctl:
4078         ret = do_msgctl(first, second, ptr);
4079         break;
4080 
4081     case IPCOP_msgrcv:
4082         switch (version) {
4083         case 0:
4084             {
4085                 struct target_ipc_kludge {
4086                     abi_long msgp;
4087                     abi_long msgtyp;
4088                 } *tmp;
4089 
4090                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4091                     ret = -TARGET_EFAULT;
4092                     break;
4093                 }
4094 
4095                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4096 
4097                 unlock_user_struct(tmp, ptr, 0);
4098                 break;
4099             }
4100         default:
4101             ret = do_msgrcv(first, ptr, second, fifth, third);
4102         }
4103         break;
4104 
4105     case IPCOP_shmat:
4106         switch (version) {
4107         default:
4108         {
4109             abi_ulong raddr;
4110             raddr = do_shmat(cpu_env, first, ptr, second);
4111             if (is_error(raddr))
4112                 return get_errno(raddr);
4113             if (put_user_ual(raddr, third))
4114                 return -TARGET_EFAULT;
4115             break;
4116         }
4117         case 1:
4118             ret = -TARGET_EINVAL;
4119             break;
4120         }
4121         break;
4122     case IPCOP_shmdt:
4123         ret = do_shmdt(ptr);
4124         break;
4125 
4126     case IPCOP_shmget:
4127         /* IPC_* flag values are the same on all linux platforms */
4128         ret = get_errno(shmget(first, second, third));
4129         break;
4130 
4131     /* IPC_* and SHM_* command values are the same on all linux platforms */
4132     case IPCOP_shmctl:
4133         ret = do_shmctl(first, second, ptr);
4134         break;
4135     default:
4136         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4137         ret = -TARGET_ENOSYS;
4138         break;
4139     }
4140     return ret;
4141 }
4142 #endif
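
/*
 * For reference, the "call" argument decoded above packs an IPC interface
 * version in the upper 16 bits and the operation number in the lower 16
 * bits, i.e. call == (version << 16) | op; this is the encoding that
 * IPCOP_CALL(1, IPCOP_msgrcv) in do_msgrcv() relies on when forwarding to
 * the host ipc() syscall.  Version 0 is the historical layout in which
 * msgrcv receives its msgp/msgtyp pair through the extra indirection
 * struct handled above.
 */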
4143 
4144 /* kernel structure types definitions */
4145 
4146 #define STRUCT(name, ...) STRUCT_ ## name,
4147 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4148 enum {
4149 #include "syscall_types.h"
4150 STRUCT_MAX
4151 };
4152 #undef STRUCT
4153 #undef STRUCT_SPECIAL
4154 
4155 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4156 #define STRUCT_SPECIAL(name)
4157 #include "syscall_types.h"
4158 #undef STRUCT
4159 #undef STRUCT_SPECIAL
4160 
4161 typedef struct IOCTLEntry IOCTLEntry;
4162 
4163 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4164                              int fd, int cmd, abi_long arg);
4165 
4166 struct IOCTLEntry {
4167     int target_cmd;
4168     unsigned int host_cmd;
4169     const char *name;
4170     int access;
4171     do_ioctl_fn *do_ioctl;
4172     const argtype arg_type[5];
4173 };
4174 
4175 #define IOC_R 0x0001
4176 #define IOC_W 0x0002
4177 #define IOC_RW (IOC_R | IOC_W)
4178 
4179 #define MAX_STRUCT_SIZE 4096
4180 
4181 #ifdef CONFIG_FIEMAP
4182 /* So fiemap access checks don't overflow on 32 bit systems.
4183  * This is very slightly smaller than the limit imposed by
4184  * the underlying kernel.
4185  */
4186 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4187                             / sizeof(struct fiemap_extent))
4188 
4189 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4190                                        int fd, int cmd, abi_long arg)
4191 {
4192     /* The parameter for this ioctl is a struct fiemap followed
4193      * by an array of struct fiemap_extent whose size is set
4194      * in fiemap->fm_extent_count. The array is filled in by the
4195      * ioctl.
4196      */
4197     int target_size_in, target_size_out;
4198     struct fiemap *fm;
4199     const argtype *arg_type = ie->arg_type;
4200     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4201     void *argptr, *p;
4202     abi_long ret;
4203     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4204     uint32_t outbufsz;
4205     int free_fm = 0;
4206 
4207     assert(arg_type[0] == TYPE_PTR);
4208     assert(ie->access == IOC_RW);
4209     arg_type++;
4210     target_size_in = thunk_type_size(arg_type, 0);
4211     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4212     if (!argptr) {
4213         return -TARGET_EFAULT;
4214     }
4215     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4216     unlock_user(argptr, arg, 0);
4217     fm = (struct fiemap *)buf_temp;
4218     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4219         return -TARGET_EINVAL;
4220     }
4221 
4222     outbufsz = sizeof (*fm) +
4223         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4224 
4225     if (outbufsz > MAX_STRUCT_SIZE) {
4226         /* We can't fit all the extents into the fixed size buffer.
4227          * Allocate one that is large enough and use it instead.
4228          */
4229         fm = g_try_malloc(outbufsz);
4230         if (!fm) {
4231             return -TARGET_ENOMEM;
4232         }
4233         memcpy(fm, buf_temp, sizeof(struct fiemap));
4234         free_fm = 1;
4235     }
4236     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4237     if (!is_error(ret)) {
4238         target_size_out = target_size_in;
4239         /* An extent_count of 0 means we were only counting the extents
4240          * so there are no structs to copy
4241          */
4242         if (fm->fm_extent_count != 0) {
4243             target_size_out += fm->fm_mapped_extents * extent_size;
4244         }
4245         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4246         if (!argptr) {
4247             ret = -TARGET_EFAULT;
4248         } else {
4249             /* Convert the struct fiemap */
4250             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4251             if (fm->fm_extent_count != 0) {
4252                 p = argptr + target_size_in;
4253                 /* ...and then all the struct fiemap_extents */
4254                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4255                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4256                                   THUNK_TARGET);
4257                     p += extent_size;
4258                 }
4259             }
4260             unlock_user(argptr, arg, target_size_out);
4261         }
4262     }
4263     if (free_fm) {
4264         g_free(fm);
4265     }
4266     return ret;
4267 }
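
/*
 * For reference, a guest program exercising this handler typically issues
 * the ioctl as below; the code above sizes its bounce buffer from
 * fm_extent_count and converts back fm_mapped_extents extents.
 * Illustrative guest-side sketch, not part of this file:
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                n * sizeof(struct fiemap_extent));
 *     fm->fm_start = 0;
 *     fm->fm_length = ~0ULL;
 *     fm->fm_extent_count = n;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * After the call, fm->fm_mapped_extents entries of fm->fm_extents[] are
 * valid; passing fm_extent_count == 0 only counts the extents.
 */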
4268 #endif
4269 
4270 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4271                                 int fd, int cmd, abi_long arg)
4272 {
4273     const argtype *arg_type = ie->arg_type;
4274     int target_size;
4275     void *argptr;
4276     int ret;
4277     struct ifconf *host_ifconf;
4278     uint32_t outbufsz;
4279     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4280     int target_ifreq_size;
4281     int nb_ifreq;
4282     int free_buf = 0;
4283     int i;
4284     int target_ifc_len;
4285     abi_long target_ifc_buf;
4286     int host_ifc_len;
4287     char *host_ifc_buf;
4288 
4289     assert(arg_type[0] == TYPE_PTR);
4290     assert(ie->access == IOC_RW);
4291 
4292     arg_type++;
4293     target_size = thunk_type_size(arg_type, 0);
4294 
4295     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4296     if (!argptr)
4297         return -TARGET_EFAULT;
4298     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4299     unlock_user(argptr, arg, 0);
4300 
4301     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4302     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4303     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4304 
4305     if (target_ifc_buf != 0) {
4306         target_ifc_len = host_ifconf->ifc_len;
4307         nb_ifreq = target_ifc_len / target_ifreq_size;
4308         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4309 
4310         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4311         if (outbufsz > MAX_STRUCT_SIZE) {
4312             /*
4313              * We can't fit all the extents into the fixed size buffer.
4314              * Allocate one that is large enough and use it instead.
4315              */
4316             host_ifconf = malloc(outbufsz);
4317             if (!host_ifconf) {
4318                 return -TARGET_ENOMEM;
4319             }
4320             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4321             free_buf = 1;
4322         }
4323         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4324 
4325         host_ifconf->ifc_len = host_ifc_len;
4326     } else {
4327       host_ifc_buf = NULL;
4328     }
4329     host_ifconf->ifc_buf = host_ifc_buf;
4330 
4331     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4332     if (!is_error(ret)) {
4333         /* convert host ifc_len to target ifc_len */
4334 
4335         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4336         target_ifc_len = nb_ifreq * target_ifreq_size;
4337         host_ifconf->ifc_len = target_ifc_len;
4338 
4339         /* restore target ifc_buf */
4340 
4341         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4342 
4343         /* copy struct ifconf to target user */
4344 
4345         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4346         if (!argptr)
4347             return -TARGET_EFAULT;
4348         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4349         unlock_user(argptr, arg, target_size);
4350 
4351         if (target_ifc_buf != 0) {
4352             /* copy ifreq[] to target user */
4353             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4354             for (i = 0; i < nb_ifreq ; i++) {
4355                 thunk_convert(argptr + i * target_ifreq_size,
4356                               host_ifc_buf + i * sizeof(struct ifreq),
4357                               ifreq_arg_type, THUNK_TARGET);
4358             }
4359             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4360         }
4361     }
4362 
4363     if (free_buf) {
4364         free(host_ifconf);
4365     }
4366 
4367     return ret;
4368 }
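
/*
 * For reference, the guest-side pattern this handler thunks is the classic
 * SIOCGIFCONF interface probe.  Illustrative guest-side sketch, not part
 * of this file:
 *
 *     struct ifreq ifr[16];
 *     struct ifconf ifc;
 *     ifc.ifc_len = sizeof(ifr);
 *     ifc.ifc_req = ifr;
 *     ioctl(sock, SIOCGIFCONF, &ifc);
 *
 * On return ifc.ifc_len reports how many bytes of ifr[] were filled in;
 * passing a NULL ifc_buf, which the code above also handles, asks the
 * kernel for the required buffer length instead.
 */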
4369 
4370 #if defined(CONFIG_USBFS)
4371 #if HOST_LONG_BITS > 64
4372 #error USBDEVFS thunks do not support >64 bit hosts yet.
4373 #endif
4374 struct live_urb {
4375     uint64_t target_urb_adr;
4376     uint64_t target_buf_adr;
4377     char *target_buf_ptr;
4378     struct usbdevfs_urb host_urb;
4379 };
4380 
4381 static GHashTable *usbdevfs_urb_hashtable(void)
4382 {
4383     static GHashTable *urb_hashtable;
4384 
4385     if (!urb_hashtable) {
4386         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4387     }
4388     return urb_hashtable;
4389 }
4390 
4391 static void urb_hashtable_insert(struct live_urb *urb)
4392 {
4393     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4394     g_hash_table_insert(urb_hashtable, urb, urb);
4395 }
4396 
4397 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4398 {
4399     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4400     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4401 }
4402 
4403 static void urb_hashtable_remove(struct live_urb *urb)
4404 {
4405     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4406     g_hash_table_remove(urb_hashtable, urb);
4407 }
4408 
4409 static abi_long
4410 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4411                           int fd, int cmd, abi_long arg)
4412 {
4413     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4414     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4415     struct live_urb *lurb;
4416     void *argptr;
4417     uint64_t hurb;
4418     int target_size;
4419     uintptr_t target_urb_adr;
4420     abi_long ret;
4421 
4422     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4423 
4424     memset(buf_temp, 0, sizeof(uint64_t));
4425     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4426     if (is_error(ret)) {
4427         return ret;
4428     }
4429 
4430     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4431     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4432     if (!lurb->target_urb_adr) {
4433         return -TARGET_EFAULT;
4434     }
4435     urb_hashtable_remove(lurb);
4436     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4437         lurb->host_urb.buffer_length);
4438     lurb->target_buf_ptr = NULL;
4439 
4440     /* restore the guest buffer pointer */
4441     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4442 
4443     /* update the guest urb struct */
4444     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4445     if (!argptr) {
4446         g_free(lurb);
4447         return -TARGET_EFAULT;
4448     }
4449     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4450     unlock_user(argptr, lurb->target_urb_adr, target_size);
4451 
4452     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4453     /* write back the urb handle */
4454     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4455     if (!argptr) {
4456         g_free(lurb);
4457         return -TARGET_EFAULT;
4458     }
4459 
4460     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4461     target_urb_adr = lurb->target_urb_adr;
4462     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4463     unlock_user(argptr, arg, target_size);
4464 
4465     g_free(lurb);
4466     return ret;
4467 }
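
/*
 * The pointer arithmetic above is the usual container_of idiom: the kernel
 * returns the address of lurb->host_urb, and subtracting
 * offsetof(struct live_urb, host_urb) recovers the enclosing live_urb.
 * Generic form of the same computation, shown only for illustration:
 *
 *     #define container_of(ptr, type, member) \
 *         ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *     lurb = container_of((struct usbdevfs_urb *)(uintptr_t)hurb,
 *                         struct live_urb, host_urb);
 */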
4468 
4469 static abi_long
4470 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4471                              uint8_t *buf_temp __attribute__((unused)),
4472                              int fd, int cmd, abi_long arg)
4473 {
4474     struct live_urb *lurb;
4475 
4476     /* map target address back to host URB with metadata. */
4477     lurb = urb_hashtable_lookup(arg);
4478     if (!lurb) {
4479         return -TARGET_EFAULT;
4480     }
4481     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4482 }
4483 
4484 static abi_long
4485 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4486                             int fd, int cmd, abi_long arg)
4487 {
4488     const argtype *arg_type = ie->arg_type;
4489     int target_size;
4490     abi_long ret;
4491     void *argptr;
4492     int rw_dir;
4493     struct live_urb *lurb;
4494 
4495     /*
4496      * each submitted URB needs to map to a unique ID for the
4497      * kernel, and that unique ID needs to be a pointer to
4498      * host memory.  hence, we need to malloc for each URB.
4499      * isochronous transfers have a variable length struct.
4500      */
4501     arg_type++;
4502     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4503 
4504     /* construct host copy of urb and metadata */
4505     lurb = g_try_malloc0(sizeof(struct live_urb));
4506     if (!lurb) {
4507         return -TARGET_ENOMEM;
4508     }
4509 
4510     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4511     if (!argptr) {
4512         g_free(lurb);
4513         return -TARGET_EFAULT;
4514     }
4515     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4516     unlock_user(argptr, arg, 0);
4517 
4518     lurb->target_urb_adr = arg;
4519     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4520 
4521     /* buffer space used depends on endpoint type so lock the entire buffer */
4522     /* control type urbs should check the buffer contents for true direction */
4523     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4524     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4525         lurb->host_urb.buffer_length, 1);
4526     if (lurb->target_buf_ptr == NULL) {
4527         g_free(lurb);
4528         return -TARGET_EFAULT;
4529     }
4530 
4531     /* update buffer pointer in host copy */
4532     lurb->host_urb.buffer = lurb->target_buf_ptr;
4533 
4534     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4535     if (is_error(ret)) {
4536         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4537         g_free(lurb);
4538     } else {
4539         urb_hashtable_insert(lurb);
4540     }
4541 
4542     return ret;
4543 }
4544 #endif /* CONFIG_USBFS */
4545 
4546 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4547                             int cmd, abi_long arg)
4548 {
4549     void *argptr;
4550     struct dm_ioctl *host_dm;
4551     abi_long guest_data;
4552     uint32_t guest_data_size;
4553     int target_size;
4554     const argtype *arg_type = ie->arg_type;
4555     abi_long ret;
4556     void *big_buf = NULL;
4557     char *host_data;
4558 
4559     arg_type++;
4560     target_size = thunk_type_size(arg_type, 0);
4561     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4562     if (!argptr) {
4563         ret = -TARGET_EFAULT;
4564         goto out;
4565     }
4566     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4567     unlock_user(argptr, arg, 0);
4568 
4569     /* buf_temp is too small, so fetch things into a bigger buffer */
4570     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4571     memcpy(big_buf, buf_temp, target_size);
4572     buf_temp = big_buf;
4573     host_dm = big_buf;
4574 
4575     guest_data = arg + host_dm->data_start;
4576     if ((guest_data - arg) < 0) {
4577         ret = -TARGET_EINVAL;
4578         goto out;
4579     }
4580     guest_data_size = host_dm->data_size - host_dm->data_start;
4581     host_data = (char*)host_dm + host_dm->data_start;
4582 
4583     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4584     if (!argptr) {
4585         ret = -TARGET_EFAULT;
4586         goto out;
4587     }
4588 
4589     switch (ie->host_cmd) {
4590     case DM_REMOVE_ALL:
4591     case DM_LIST_DEVICES:
4592     case DM_DEV_CREATE:
4593     case DM_DEV_REMOVE:
4594     case DM_DEV_SUSPEND:
4595     case DM_DEV_STATUS:
4596     case DM_DEV_WAIT:
4597     case DM_TABLE_STATUS:
4598     case DM_TABLE_CLEAR:
4599     case DM_TABLE_DEPS:
4600     case DM_LIST_VERSIONS:
4601         /* no input data */
4602         break;
4603     case DM_DEV_RENAME:
4604     case DM_DEV_SET_GEOMETRY:
4605         /* data contains only strings */
4606         memcpy(host_data, argptr, guest_data_size);
4607         break;
4608     case DM_TARGET_MSG:
4609         memcpy(host_data, argptr, guest_data_size);
4610         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4611         break;
4612     case DM_TABLE_LOAD:
4613     {
4614         void *gspec = argptr;
4615         void *cur_data = host_data;
4616         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4617         int spec_size = thunk_type_size(arg_type, 0);
4618         int i;
4619 
4620         for (i = 0; i < host_dm->target_count; i++) {
4621             struct dm_target_spec *spec = cur_data;
4622             uint32_t next;
4623             int slen;
4624 
4625             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4626             slen = strlen((char*)gspec + spec_size) + 1;
4627             next = spec->next;
4628             spec->next = sizeof(*spec) + slen;
4629             strcpy((char*)&spec[1], gspec + spec_size);
4630             gspec += next;
4631             cur_data += spec->next;
4632         }
4633         break;
4634     }
4635     default:
4636         ret = -TARGET_EINVAL;
4637         unlock_user(argptr, guest_data, 0);
4638         goto out;
4639     }
4640     unlock_user(argptr, guest_data, 0);
4641 
4642     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4643     if (!is_error(ret)) {
4644         guest_data = arg + host_dm->data_start;
4645         guest_data_size = host_dm->data_size - host_dm->data_start;
4646         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4647         switch (ie->host_cmd) {
4648         case DM_REMOVE_ALL:
4649         case DM_DEV_CREATE:
4650         case DM_DEV_REMOVE:
4651         case DM_DEV_RENAME:
4652         case DM_DEV_SUSPEND:
4653         case DM_DEV_STATUS:
4654         case DM_TABLE_LOAD:
4655         case DM_TABLE_CLEAR:
4656         case DM_TARGET_MSG:
4657         case DM_DEV_SET_GEOMETRY:
4658             /* no return data */
4659             break;
4660         case DM_LIST_DEVICES:
4661         {
4662             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4663             uint32_t remaining_data = guest_data_size;
4664             void *cur_data = argptr;
4665             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4666             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4667 
4668             while (1) {
4669                 uint32_t next = nl->next;
4670                 if (next) {
4671                     nl->next = nl_size + (strlen(nl->name) + 1);
4672                 }
4673                 if (remaining_data < nl->next) {
4674                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4675                     break;
4676                 }
4677                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4678                 strcpy(cur_data + nl_size, nl->name);
4679                 cur_data += nl->next;
4680                 remaining_data -= nl->next;
4681                 if (!next) {
4682                     break;
4683                 }
4684                 nl = (void*)nl + next;
4685             }
4686             break;
4687         }
4688         case DM_DEV_WAIT:
4689         case DM_TABLE_STATUS:
4690         {
4691             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4692             void *cur_data = argptr;
4693             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4694             int spec_size = thunk_type_size(arg_type, 0);
4695             int i;
4696 
4697             for (i = 0; i < host_dm->target_count; i++) {
4698                 uint32_t next = spec->next;
4699                 int slen = strlen((char*)&spec[1]) + 1;
4700                 spec->next = (cur_data - argptr) + spec_size + slen;
4701                 if (guest_data_size < spec->next) {
4702                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4703                     break;
4704                 }
4705                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4706                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4707                 cur_data = argptr + spec->next;
4708                 spec = (void*)host_dm + host_dm->data_start + next;
4709             }
4710             break;
4711         }
4712         case DM_TABLE_DEPS:
4713         {
4714             void *hdata = (void*)host_dm + host_dm->data_start;
4715             int count = *(uint32_t*)hdata;
4716             uint64_t *hdev = hdata + 8;
4717             uint64_t *gdev = argptr + 8;
4718             int i;
4719 
4720             *(uint32_t*)argptr = tswap32(count);
4721             for (i = 0; i < count; i++) {
4722                 *gdev = tswap64(*hdev);
4723                 gdev++;
4724                 hdev++;
4725             }
4726             break;
4727         }
4728         case DM_LIST_VERSIONS:
4729         {
4730             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4731             uint32_t remaining_data = guest_data_size;
4732             void *cur_data = argptr;
4733             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4734             int vers_size = thunk_type_size(arg_type, 0);
4735 
4736             while (1) {
4737                 uint32_t next = vers->next;
4738                 if (next) {
4739                     vers->next = vers_size + (strlen(vers->name) + 1);
4740                 }
4741                 if (remaining_data < vers->next) {
4742                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4743                     break;
4744                 }
4745                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4746                 strcpy(cur_data + vers_size, vers->name);
4747                 cur_data += vers->next;
4748                 remaining_data -= vers->next;
4749                 if (!next) {
4750                     break;
4751                 }
4752                 vers = (void*)vers + next;
4753             }
4754             break;
4755         }
4756         default:
4757             unlock_user(argptr, guest_data, 0);
4758             ret = -TARGET_EINVAL;
4759             goto out;
4760         }
4761         unlock_user(argptr, guest_data, guest_data_size);
4762 
4763         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4764         if (!argptr) {
4765             ret = -TARGET_EFAULT;
4766             goto out;
4767         }
4768         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4769         unlock_user(argptr, arg, target_size);
4770     }
4771 out:
4772     g_free(big_buf);
4773     return ret;
4774 }
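/*
 * Data layout handled above (a sketch, not normative): a device-mapper
 * ioctl buffer is a struct dm_ioctl header followed, at data_start, by a
 * command-specific payload; for DM_TABLE_LOAD that payload is a chain of
 * records,
 *
 *     [ dm_ioctl ][ dm_target_spec ][ "params\0" ][ dm_target_spec ] ...
 *
 * with each record's ->next field locating the one after it, which is why
 * the conversions above walk the payload record by record instead of
 * converting it as one flat struct.
 */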
4775 
4776 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4777                                int cmd, abi_long arg)
4778 {
4779     void *argptr;
4780     int target_size;
4781     const argtype *arg_type = ie->arg_type;
4782     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4783     abi_long ret;
4784 
4785     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4786     struct blkpg_partition host_part;
4787 
4788     /* Read and convert blkpg */
4789     arg_type++;
4790     target_size = thunk_type_size(arg_type, 0);
4791     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4792     if (!argptr) {
4793         ret = -TARGET_EFAULT;
4794         goto out;
4795     }
4796     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4797     unlock_user(argptr, arg, 0);
4798 
4799     switch (host_blkpg->op) {
4800     case BLKPG_ADD_PARTITION:
4801     case BLKPG_DEL_PARTITION:
4802         /* payload is struct blkpg_partition */
4803         break;
4804     default:
4805         /* Unknown opcode */
4806         ret = -TARGET_EINVAL;
4807         goto out;
4808     }
4809 
4810     /* Read and convert blkpg->data */
4811     arg = (abi_long)(uintptr_t)host_blkpg->data;
4812     target_size = thunk_type_size(part_arg_type, 0);
4813     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4814     if (!argptr) {
4815         ret = -TARGET_EFAULT;
4816         goto out;
4817     }
4818     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4819     unlock_user(argptr, arg, 0);
4820 
4821     /* Swizzle the data pointer to our local copy and call! */
4822     host_blkpg->data = &host_part;
4823     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4824 
4825 out:
4826     return ret;
4827 }
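/*
 * Pointer-swizzling sketch for the function above: the guest hands us
 *
 *     struct blkpg_ioctl_arg { int op; int flags; int datalen; void *data; };
 *
 * where data is a guest address.  The outer struct is thunk-converted
 * first, the guest blkpg_partition that data points to is fetched into
 * host_part, and only then is data repointed at the host-side copy before
 * the real ioctl is issued.
 */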
4828 
4829 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4830                                 int fd, int cmd, abi_long arg)
4831 {
4832     const argtype *arg_type = ie->arg_type;
4833     const StructEntry *se;
4834     const argtype *field_types;
4835     const int *dst_offsets, *src_offsets;
4836     int target_size;
4837     void *argptr;
4838     abi_ulong *target_rt_dev_ptr = NULL;
4839     unsigned long *host_rt_dev_ptr = NULL;
4840     abi_long ret;
4841     int i;
4842 
4843     assert(ie->access == IOC_W);
4844     assert(*arg_type == TYPE_PTR);
4845     arg_type++;
4846     assert(*arg_type == TYPE_STRUCT);
4847     target_size = thunk_type_size(arg_type, 0);
4848     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4849     if (!argptr) {
4850         return -TARGET_EFAULT;
4851     }
4852     arg_type++;
4853     assert(*arg_type == (int)STRUCT_rtentry);
4854     se = struct_entries + *arg_type++;
4855     assert(se->convert[0] == NULL);
4856     /* convert struct here to be able to catch rt_dev string */
4857     field_types = se->field_types;
4858     dst_offsets = se->field_offsets[THUNK_HOST];
4859     src_offsets = se->field_offsets[THUNK_TARGET];
4860     for (i = 0; i < se->nb_fields; i++) {
4861         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4862             assert(*field_types == TYPE_PTRVOID);
4863             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4864             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4865             if (*target_rt_dev_ptr != 0) {
4866                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4867                                                   tswapal(*target_rt_dev_ptr));
4868                 if (!*host_rt_dev_ptr) {
4869                     unlock_user(argptr, arg, 0);
4870                     return -TARGET_EFAULT;
4871                 }
4872             } else {
4873                 *host_rt_dev_ptr = 0;
4874             }
4875             field_types++;
4876             continue;
4877         }
4878         field_types = thunk_convert(buf_temp + dst_offsets[i],
4879                                     argptr + src_offsets[i],
4880                                     field_types, THUNK_HOST);
4881     }
4882     unlock_user(argptr, arg, 0);
4883 
4884     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4885 
4886     assert(host_rt_dev_ptr != NULL);
4887     assert(target_rt_dev_ptr != NULL);
4888     if (*host_rt_dev_ptr != 0) {
4889         unlock_user((void *)*host_rt_dev_ptr,
4890                     *target_rt_dev_ptr, 0);
4891     }
4892     return ret;
4893 }
4894 
4895 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4896                                      int fd, int cmd, abi_long arg)
4897 {
4898     int sig = target_to_host_signal(arg);
4899     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4900 }
4901 
4902 #ifdef TIOCGPTPEER
4903 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4904                                      int fd, int cmd, abi_long arg)
4905 {
4906     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4907     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4908 }
4909 #endif
4910 
4911 static IOCTLEntry ioctl_entries[] = {
4912 #define IOCTL(cmd, access, ...) \
4913     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4914 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4915     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4916 #define IOCTL_IGNORE(cmd) \
4917     { TARGET_ ## cmd, 0, #cmd },
4918 #include "ioctls.h"
4919     { 0, 0, },
4920 };
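/*
 * Expansion sketch: assuming ioctls.h contains a line such as
 *
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 *
 * the IOCTL() macro above turns it into
 *
 *     { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
 *
 * i.e. the target and host command numbers, a printable name, the access
 * direction, no special handler, and the thunk description of the argument.
 */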
4921 
4922 /* ??? Implement proper locking for ioctls.  */
4923 /* do_ioctl() Must return target values and target errnos. */
4924 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4925 {
4926     const IOCTLEntry *ie;
4927     const argtype *arg_type;
4928     abi_long ret;
4929     uint8_t buf_temp[MAX_STRUCT_SIZE];
4930     int target_size;
4931     void *argptr;
4932 
4933     ie = ioctl_entries;
4934     for(;;) {
4935         if (ie->target_cmd == 0) {
4936             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4937             return -TARGET_ENOSYS;
4938         }
4939         if (ie->target_cmd == cmd)
4940             break;
4941         ie++;
4942     }
4943     arg_type = ie->arg_type;
4944     if (ie->do_ioctl) {
4945         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4946     } else if (!ie->host_cmd) {
4947         /* Some architectures define BSD ioctls in their headers
4948            that are not implemented in Linux.  */
4949         return -TARGET_ENOSYS;
4950     }
4951 
4952     switch(arg_type[0]) {
4953     case TYPE_NULL:
4954         /* no argument */
4955         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4956         break;
4957     case TYPE_PTRVOID:
4958     case TYPE_INT:
4959         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4960         break;
4961     case TYPE_PTR:
4962         arg_type++;
4963         target_size = thunk_type_size(arg_type, 0);
4964         switch(ie->access) {
4965         case IOC_R:
4966             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4967             if (!is_error(ret)) {
4968                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4969                 if (!argptr)
4970                     return -TARGET_EFAULT;
4971                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4972                 unlock_user(argptr, arg, target_size);
4973             }
4974             break;
4975         case IOC_W:
4976             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4977             if (!argptr)
4978                 return -TARGET_EFAULT;
4979             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4980             unlock_user(argptr, arg, 0);
4981             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4982             break;
4983         default:
4984         case IOC_RW:
4985             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4986             if (!argptr)
4987                 return -TARGET_EFAULT;
4988             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4989             unlock_user(argptr, arg, 0);
4990             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4991             if (!is_error(ret)) {
4992                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4993                 if (!argptr)
4994                     return -TARGET_EFAULT;
4995                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4996                 unlock_user(argptr, arg, target_size);
4997             }
4998             break;
4999         }
5000         break;
5001     default:
5002         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5003                  (long)cmd, arg_type[0]);
5004         ret = -TARGET_ENOSYS;
5005         break;
5006     }
5007     return ret;
5008 }
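/*
 * Worked example (sketch): a guest doing
 *
 *     struct winsize ws;
 *     ioctl(tty_fd, TIOCGWINSZ, &ws);
 *
 * takes the TYPE_PTR/IOC_R path above (assuming the usual
 * IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize))) entry):
 * the host ioctl fills buf_temp in host format, and only on success is
 * the struct converted with THUNK_TARGET and copied back into the
 * guest's ws.
 */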
5009 
5010 static const bitmask_transtbl iflag_tbl[] = {
5011         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5012         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5013         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5014         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5015         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5016         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5017         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5018         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5019         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5020         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5021         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5022         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5023         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5024         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5025         { 0, 0, 0, 0 }
5026 };
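/*
 * How these tables are read (sketch): each row gives a target mask, the
 * target bits to match, then the corresponding host mask and host bits.
 * For { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, target_to_host_bitmask()
 * ORs ICRNL into the host value whenever TARGET_ICRNL is set in the target
 * value.  Single-bit flags repeat the same value as mask and bits, while
 * multi-bit fields (NLDLY, CRDLY, CBAUD, CSIZE in the tables below) list
 * one row per possible value under a wider mask.
 */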
5027 
5028 static const bitmask_transtbl oflag_tbl[] = {
5029 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5030 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5031 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5032 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5033 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5034 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5035 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5036 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5037 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5038 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5039 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5040 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5041 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5042 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5043 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5044 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5045 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5046 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5047 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5048 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5049 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5050 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5051 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5052 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5053 	{ 0, 0, 0, 0 }
5054 };
5055 
5056 static const bitmask_transtbl cflag_tbl[] = {
5057 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5058 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5059 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5060 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5061 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5062 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5063 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5064 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5065 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5066 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5067 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5068 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5069 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5070 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5071 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5072 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5073 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5074 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5075 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5076 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5077 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5078 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5079 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5080 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5081 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5082 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5083 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5084 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5085 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5086 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5087 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5088 	{ 0, 0, 0, 0 }
5089 };
5090 
5091 static const bitmask_transtbl lflag_tbl[] = {
5092 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5093 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5094 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5095 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5096 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5097 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5098 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5099 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5100 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5101 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5102 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5103 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5104 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5105 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5106 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5107 	{ 0, 0, 0, 0 }
5108 };
5109 
5110 static void target_to_host_termios (void *dst, const void *src)
5111 {
5112     struct host_termios *host = dst;
5113     const struct target_termios *target = src;
5114 
5115     host->c_iflag =
5116         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5117     host->c_oflag =
5118         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5119     host->c_cflag =
5120         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5121     host->c_lflag =
5122         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5123     host->c_line = target->c_line;
5124 
5125     memset(host->c_cc, 0, sizeof(host->c_cc));
5126     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5127     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5128     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5129     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5130     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5131     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5132     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5133     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5134     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5135     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5136     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5137     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5138     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5139     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5140     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5141     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5142     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5143 }
5144 
5145 static void host_to_target_termios (void *dst, const void *src)
5146 {
5147     struct target_termios *target = dst;
5148     const struct host_termios *host = src;
5149 
5150     target->c_iflag =
5151         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5152     target->c_oflag =
5153         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5154     target->c_cflag =
5155         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5156     target->c_lflag =
5157         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5158     target->c_line = host->c_line;
5159 
5160     memset(target->c_cc, 0, sizeof(target->c_cc));
5161     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5162     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5163     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5164     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5165     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5166     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5167     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5168     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5169     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5170     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5171     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5172     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5173     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5174     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5175     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5176     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5177     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5178 }
5179 
5180 static const StructEntry struct_termios_def = {
5181     .convert = { host_to_target_termios, target_to_host_termios },
5182     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5183     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5184 };
5185 
5186 static bitmask_transtbl mmap_flags_tbl[] = {
5187     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5188     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5189     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5190     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5191       MAP_ANONYMOUS, MAP_ANONYMOUS },
5192     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5193       MAP_GROWSDOWN, MAP_GROWSDOWN },
5194     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5195       MAP_DENYWRITE, MAP_DENYWRITE },
5196     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5197       MAP_EXECUTABLE, MAP_EXECUTABLE },
5198     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5199     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5200       MAP_NORESERVE, MAP_NORESERVE },
5201     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5202     /* MAP_STACK has been ignored by the kernel for quite some time.
5203        Recognize it for the target only so that we do not pass it
5204        through to the host.  */
5205     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5206     { 0, 0, 0, 0 }
5207 };
5208 
5209 #if defined(TARGET_I386)
5210 
5211 /* NOTE: there is really only one LDT shared by all the threads */
5212 static uint8_t *ldt_table;
5213 
5214 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5215 {
5216     int size;
5217     void *p;
5218 
5219     if (!ldt_table)
5220         return 0;
5221     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5222     if (size > bytecount)
5223         size = bytecount;
5224     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5225     if (!p)
5226         return -TARGET_EFAULT;
5227     /* ??? Should this be byteswapped?  */
5228     memcpy(p, ldt_table, size);
5229     unlock_user(p, ptr, size);
5230     return size;
5231 }
5232 
5233 /* XXX: add locking support */
5234 static abi_long write_ldt(CPUX86State *env,
5235                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5236 {
5237     struct target_modify_ldt_ldt_s ldt_info;
5238     struct target_modify_ldt_ldt_s *target_ldt_info;
5239     int seg_32bit, contents, read_exec_only, limit_in_pages;
5240     int seg_not_present, useable, lm;
5241     uint32_t *lp, entry_1, entry_2;
5242 
5243     if (bytecount != sizeof(ldt_info))
5244         return -TARGET_EINVAL;
5245     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5246         return -TARGET_EFAULT;
5247     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5248     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5249     ldt_info.limit = tswap32(target_ldt_info->limit);
5250     ldt_info.flags = tswap32(target_ldt_info->flags);
5251     unlock_user_struct(target_ldt_info, ptr, 0);
5252 
5253     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5254         return -TARGET_EINVAL;
5255     seg_32bit = ldt_info.flags & 1;
5256     contents = (ldt_info.flags >> 1) & 3;
5257     read_exec_only = (ldt_info.flags >> 3) & 1;
5258     limit_in_pages = (ldt_info.flags >> 4) & 1;
5259     seg_not_present = (ldt_info.flags >> 5) & 1;
5260     useable = (ldt_info.flags >> 6) & 1;
5261 #ifdef TARGET_ABI32
5262     lm = 0;
5263 #else
5264     lm = (ldt_info.flags >> 7) & 1;
5265 #endif
5266     if (contents == 3) {
5267         if (oldmode)
5268             return -TARGET_EINVAL;
5269         if (seg_not_present == 0)
5270             return -TARGET_EINVAL;
5271     }
5272     /* allocate the LDT */
5273     if (!ldt_table) {
5274         env->ldt.base = target_mmap(0,
5275                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5276                                     PROT_READ|PROT_WRITE,
5277                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5278         if (env->ldt.base == -1)
5279             return -TARGET_ENOMEM;
5280         memset(g2h(env->ldt.base), 0,
5281                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5282         env->ldt.limit = 0xffff;
5283         ldt_table = g2h(env->ldt.base);
5284     }
5285 
5286     /* NOTE: same code as Linux kernel */
5287     /* Allow LDTs to be cleared by the user. */
5288     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5289         if (oldmode ||
5290             (contents == 0		&&
5291              read_exec_only == 1	&&
5292              seg_32bit == 0		&&
5293              limit_in_pages == 0	&&
5294              seg_not_present == 1	&&
5295              useable == 0 )) {
5296             entry_1 = 0;
5297             entry_2 = 0;
5298             goto install;
5299         }
5300     }
5301 
5302     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5303         (ldt_info.limit & 0x0ffff);
5304     entry_2 = (ldt_info.base_addr & 0xff000000) |
5305         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5306         (ldt_info.limit & 0xf0000) |
5307         ((read_exec_only ^ 1) << 9) |
5308         (contents << 10) |
5309         ((seg_not_present ^ 1) << 15) |
5310         (seg_32bit << 22) |
5311         (limit_in_pages << 23) |
5312         (lm << 21) |
5313         0x7000;
5314     if (!oldmode)
5315         entry_2 |= (useable << 20);
5316 
5317     /* Install the new entry ...  */
5318 install:
5319     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5320     lp[0] = tswap32(entry_1);
5321     lp[1] = tswap32(entry_2);
5322     return 0;
5323 }
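/*
 * Bit-layout sketch for entry_1/entry_2 above (the standard x86 descriptor
 * encoding, also built by do_set_thread_area() below):
 *
 *     entry_1 = base[15:0] << 16 | limit[15:0]
 *     entry_2 = base[31:24]            (bits 24-31)
 *             | G  = limit_in_pages    (bit 23)
 *             | D/B = seg_32bit        (bit 22)
 *             | L  = lm                (bit 21)
 *             | AVL = useable          (bit 20, skipped in oldmode)
 *             | limit[19:16]           (bits 16-19)
 *             | P  = !seg_not_present  (bit 15)
 *             | DPL = 3, S = 1         (the 0x7000 constant)
 *             | type                   (contents << 10, !read_exec_only << 9)
 *             | base[23:16]            (bits 0-7)
 */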
5324 
5325 /* specific and weird i386 syscalls */
5326 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5327                               unsigned long bytecount)
5328 {
5329     abi_long ret;
5330 
5331     switch (func) {
5332     case 0:
5333         ret = read_ldt(ptr, bytecount);
5334         break;
5335     case 1:
5336         ret = write_ldt(env, ptr, bytecount, 1);
5337         break;
5338     case 0x11:
5339         ret = write_ldt(env, ptr, bytecount, 0);
5340         break;
5341     default:
5342         ret = -TARGET_ENOSYS;
5343         break;
5344     }
5345     return ret;
5346 }
5347 
5348 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5349 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5350 {
5351     uint64_t *gdt_table = g2h(env->gdt.base);
5352     struct target_modify_ldt_ldt_s ldt_info;
5353     struct target_modify_ldt_ldt_s *target_ldt_info;
5354     int seg_32bit, contents, read_exec_only, limit_in_pages;
5355     int seg_not_present, useable, lm;
5356     uint32_t *lp, entry_1, entry_2;
5357     int i;
5358 
5359     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5360     if (!target_ldt_info)
5361         return -TARGET_EFAULT;
5362     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5363     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5364     ldt_info.limit = tswap32(target_ldt_info->limit);
5365     ldt_info.flags = tswap32(target_ldt_info->flags);
5366     if (ldt_info.entry_number == -1) {
5367         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5368             if (gdt_table[i] == 0) {
5369                 ldt_info.entry_number = i;
5370                 target_ldt_info->entry_number = tswap32(i);
5371                 break;
5372             }
5373         }
5374     }
5375     unlock_user_struct(target_ldt_info, ptr, 1);
5376 
5377     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5378         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5379            return -TARGET_EINVAL;
5380     seg_32bit = ldt_info.flags & 1;
5381     contents = (ldt_info.flags >> 1) & 3;
5382     read_exec_only = (ldt_info.flags >> 3) & 1;
5383     limit_in_pages = (ldt_info.flags >> 4) & 1;
5384     seg_not_present = (ldt_info.flags >> 5) & 1;
5385     useable = (ldt_info.flags >> 6) & 1;
5386 #ifdef TARGET_ABI32
5387     lm = 0;
5388 #else
5389     lm = (ldt_info.flags >> 7) & 1;
5390 #endif
5391 
5392     if (contents == 3) {
5393         if (seg_not_present == 0)
5394             return -TARGET_EINVAL;
5395     }
5396 
5397     /* NOTE: same code as Linux kernel */
5398     /* Allow LDTs to be cleared by the user. */
5399     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5400         if ((contents == 0             &&
5401              read_exec_only == 1       &&
5402              seg_32bit == 0            &&
5403              limit_in_pages == 0       &&
5404              seg_not_present == 1      &&
5405              useable == 0 )) {
5406             entry_1 = 0;
5407             entry_2 = 0;
5408             goto install;
5409         }
5410     }
5411 
5412     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5413         (ldt_info.limit & 0x0ffff);
5414     entry_2 = (ldt_info.base_addr & 0xff000000) |
5415         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5416         (ldt_info.limit & 0xf0000) |
5417         ((read_exec_only ^ 1) << 9) |
5418         (contents << 10) |
5419         ((seg_not_present ^ 1) << 15) |
5420         (seg_32bit << 22) |
5421         (limit_in_pages << 23) |
5422         (useable << 20) |
5423         (lm << 21) |
5424         0x7000;
5425 
5426     /* Install the new entry ...  */
5427 install:
5428     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5429     lp[0] = tswap32(entry_1);
5430     lp[1] = tswap32(entry_2);
5431     return 0;
5432 }
5433 
5434 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5435 {
5436     struct target_modify_ldt_ldt_s *target_ldt_info;
5437     uint64_t *gdt_table = g2h(env->gdt.base);
5438     uint32_t base_addr, limit, flags;
5439     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5440     int seg_not_present, useable, lm;
5441     uint32_t *lp, entry_1, entry_2;
5442 
5443     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5444     if (!target_ldt_info)
5445         return -TARGET_EFAULT;
5446     idx = tswap32(target_ldt_info->entry_number);
5447     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5448         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5449         unlock_user_struct(target_ldt_info, ptr, 1);
5450         return -TARGET_EINVAL;
5451     }
5452     lp = (uint32_t *)(gdt_table + idx);
5453     entry_1 = tswap32(lp[0]);
5454     entry_2 = tswap32(lp[1]);
5455 
5456     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5457     contents = (entry_2 >> 10) & 3;
5458     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5459     seg_32bit = (entry_2 >> 22) & 1;
5460     limit_in_pages = (entry_2 >> 23) & 1;
5461     useable = (entry_2 >> 20) & 1;
5462 #ifdef TARGET_ABI32
5463     lm = 0;
5464 #else
5465     lm = (entry_2 >> 21) & 1;
5466 #endif
5467     flags = (seg_32bit << 0) | (contents << 1) |
5468         (read_exec_only << 3) | (limit_in_pages << 4) |
5469         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5470     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5471     base_addr = (entry_1 >> 16) |
5472         (entry_2 & 0xff000000) |
5473         ((entry_2 & 0xff) << 16);
5474     target_ldt_info->base_addr = tswapal(base_addr);
5475     target_ldt_info->limit = tswap32(limit);
5476     target_ldt_info->flags = tswap32(flags);
5477     unlock_user_struct(target_ldt_info, ptr, 1);
5478     return 0;
5479 }
5480 #endif /* TARGET_I386 && TARGET_ABI32 */
5481 
5482 #ifndef TARGET_ABI32
5483 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5484 {
5485     abi_long ret = 0;
5486     abi_ulong val;
5487     int idx;
5488 
5489     switch(code) {
5490     case TARGET_ARCH_SET_GS:
5491     case TARGET_ARCH_SET_FS:
5492         if (code == TARGET_ARCH_SET_GS)
5493             idx = R_GS;
5494         else
5495             idx = R_FS;
5496         cpu_x86_load_seg(env, idx, 0);
5497         env->segs[idx].base = addr;
5498         break;
5499     case TARGET_ARCH_GET_GS:
5500     case TARGET_ARCH_GET_FS:
5501         if (code == TARGET_ARCH_GET_GS)
5502             idx = R_GS;
5503         else
5504             idx = R_FS;
5505         val = env->segs[idx].base;
5506         if (put_user(val, addr, abi_ulong))
5507             ret = -TARGET_EFAULT;
5508         break;
5509     default:
5510         ret = -TARGET_EINVAL;
5511         break;
5512     }
5513     return ret;
5514 }
5515 #endif
5516 
5517 #endif /* defined(TARGET_I386) */
5518 
5519 #define NEW_STACK_SIZE 0x40000
5520 
5521 
5522 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5523 typedef struct {
5524     CPUArchState *env;
5525     pthread_mutex_t mutex;
5526     pthread_cond_t cond;
5527     pthread_t thread;
5528     uint32_t tid;
5529     abi_ulong child_tidptr;
5530     abi_ulong parent_tidptr;
5531     sigset_t sigmask;
5532 } new_thread_info;
5533 
5534 static void *clone_func(void *arg)
5535 {
5536     new_thread_info *info = arg;
5537     CPUArchState *env;
5538     CPUState *cpu;
5539     TaskState *ts;
5540 
5541     rcu_register_thread();
5542     tcg_register_thread();
5543     env = info->env;
5544     cpu = env_cpu(env);
5545     thread_cpu = cpu;
5546     ts = (TaskState *)cpu->opaque;
5547     info->tid = sys_gettid();
5548     task_settid(ts);
5549     if (info->child_tidptr)
5550         put_user_u32(info->tid, info->child_tidptr);
5551     if (info->parent_tidptr)
5552         put_user_u32(info->tid, info->parent_tidptr);
5553     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5554     /* Enable signals.  */
5555     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5556     /* Signal to the parent that we're ready.  */
5557     pthread_mutex_lock(&info->mutex);
5558     pthread_cond_broadcast(&info->cond);
5559     pthread_mutex_unlock(&info->mutex);
5560     /* Wait until the parent has finished initializing the tls state.  */
5561     pthread_mutex_lock(&clone_lock);
5562     pthread_mutex_unlock(&clone_lock);
5563     cpu_loop(env);
5564     /* never exits */
5565     return NULL;
5566 }
5567 
5568 /* do_fork() Must return host values and target errnos (unlike most
5569    do_*() functions). */
5570 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5571                    abi_ulong parent_tidptr, target_ulong newtls,
5572                    abi_ulong child_tidptr)
5573 {
5574     CPUState *cpu = env_cpu(env);
5575     int ret;
5576     TaskState *ts;
5577     CPUState *new_cpu;
5578     CPUArchState *new_env;
5579     sigset_t sigmask;
5580 
5581     flags &= ~CLONE_IGNORED_FLAGS;
5582 
5583     /* Emulate vfork() with fork() */
5584     if (flags & CLONE_VFORK)
5585         flags &= ~(CLONE_VFORK | CLONE_VM);
5586 
5587     if (flags & CLONE_VM) {
5588         TaskState *parent_ts = (TaskState *)cpu->opaque;
5589         new_thread_info info;
5590         pthread_attr_t attr;
5591 
5592         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5593             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5594             return -TARGET_EINVAL;
5595         }
5596 
5597         ts = g_new0(TaskState, 1);
5598         init_task_state(ts);
5599 
5600         /* Grab a mutex so that thread setup appears atomic.  */
5601         pthread_mutex_lock(&clone_lock);
5602 
5603         /* we create a new CPU instance. */
5604         new_env = cpu_copy(env);
5605         /* Init regs that differ from the parent.  */
5606         cpu_clone_regs(new_env, newsp);
5607         new_cpu = env_cpu(new_env);
5608         new_cpu->opaque = ts;
5609         ts->bprm = parent_ts->bprm;
5610         ts->info = parent_ts->info;
5611         ts->signal_mask = parent_ts->signal_mask;
5612 
5613         if (flags & CLONE_CHILD_CLEARTID) {
5614             ts->child_tidptr = child_tidptr;
5615         }
5616 
5617         if (flags & CLONE_SETTLS) {
5618             cpu_set_tls (new_env, newtls);
5619         }
5620 
5621         memset(&info, 0, sizeof(info));
5622         pthread_mutex_init(&info.mutex, NULL);
5623         pthread_mutex_lock(&info.mutex);
5624         pthread_cond_init(&info.cond, NULL);
5625         info.env = new_env;
5626         if (flags & CLONE_CHILD_SETTID) {
5627             info.child_tidptr = child_tidptr;
5628         }
5629         if (flags & CLONE_PARENT_SETTID) {
5630             info.parent_tidptr = parent_tidptr;
5631         }
5632 
5633         ret = pthread_attr_init(&attr);
5634         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5635         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5636         /* It is not safe to deliver signals until the child has finished
5637            initializing, so temporarily block all signals.  */
5638         sigfillset(&sigmask);
5639         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5640         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5641 
5642         /* If this is our first additional thread, we need to ensure we
5643          * generate code for parallel execution and flush old translations.
5644          */
5645         if (!parallel_cpus) {
5646             parallel_cpus = true;
5647             tb_flush(cpu);
5648         }
5649 
5650         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5651         /* TODO: Free new CPU state if thread creation failed.  */
5652 
5653         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5654         pthread_attr_destroy(&attr);
5655         if (ret == 0) {
5656             /* Wait for the child to initialize.  */
5657             pthread_cond_wait(&info.cond, &info.mutex);
5658             ret = info.tid;
5659         } else {
5660             ret = -1;
5661         }
5662         pthread_mutex_unlock(&info.mutex);
5663         pthread_cond_destroy(&info.cond);
5664         pthread_mutex_destroy(&info.mutex);
5665         pthread_mutex_unlock(&clone_lock);
5666     } else {
5667         /* if CLONE_VM is not set, we consider it a fork */
5668         if (flags & CLONE_INVALID_FORK_FLAGS) {
5669             return -TARGET_EINVAL;
5670         }
5671 
5672         /* We can't support custom termination signals */
5673         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5674             return -TARGET_EINVAL;
5675         }
5676 
5677         if (block_signals()) {
5678             return -TARGET_ERESTARTSYS;
5679         }
5680 
5681         fork_start();
5682         ret = fork();
5683         if (ret == 0) {
5684             /* Child Process.  */
5685             cpu_clone_regs(env, newsp);
5686             fork_end(1);
5687             /* There is a race condition here.  The parent process could
5688                theoretically read the TID in the child process before the child
5689                tid is set.  This would require using either ptrace
5690                (not implemented) or having *_tidptr point at a shared memory
5691                mapping.  We can't repeat the spinlock hack used above because
5692                the child process gets its own copy of the lock.  */
5693             if (flags & CLONE_CHILD_SETTID)
5694                 put_user_u32(sys_gettid(), child_tidptr);
5695             if (flags & CLONE_PARENT_SETTID)
5696                 put_user_u32(sys_gettid(), parent_tidptr);
5697             ts = (TaskState *)cpu->opaque;
5698             if (flags & CLONE_SETTLS)
5699                 cpu_set_tls (env, newtls);
5700             if (flags & CLONE_CHILD_CLEARTID)
5701                 ts->child_tidptr = child_tidptr;
5702         } else {
5703             fork_end(0);
5704         }
5705     }
5706     return ret;
5707 }
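/*
 * Typical flow (sketch): when a guest libc creates a thread it issues
 * clone() with something like
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 *
 * which lands in the CLONE_VM branch above and becomes a host
 * pthread_create() of clone_func(), while a plain fork()/vfork() from the
 * guest takes the second branch and becomes a host fork().
 */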
5708 
5709 /* warning: doesn't handle Linux-specific flags... */
5710 static int target_to_host_fcntl_cmd(int cmd)
5711 {
5712     int ret;
5713 
5714     switch(cmd) {
5715     case TARGET_F_DUPFD:
5716     case TARGET_F_GETFD:
5717     case TARGET_F_SETFD:
5718     case TARGET_F_GETFL:
5719     case TARGET_F_SETFL:
5720         ret = cmd;
5721         break;
5722     case TARGET_F_GETLK:
5723         ret = F_GETLK64;
5724         break;
5725     case TARGET_F_SETLK:
5726         ret = F_SETLK64;
5727         break;
5728     case TARGET_F_SETLKW:
5729         ret = F_SETLKW64;
5730         break;
5731     case TARGET_F_GETOWN:
5732         ret = F_GETOWN;
5733         break;
5734     case TARGET_F_SETOWN:
5735         ret = F_SETOWN;
5736         break;
5737     case TARGET_F_GETSIG:
5738         ret = F_GETSIG;
5739         break;
5740     case TARGET_F_SETSIG:
5741         ret = F_SETSIG;
5742         break;
5743 #if TARGET_ABI_BITS == 32
5744     case TARGET_F_GETLK64:
5745         ret = F_GETLK64;
5746         break;
5747     case TARGET_F_SETLK64:
5748         ret = F_SETLK64;
5749         break;
5750     case TARGET_F_SETLKW64:
5751         ret = F_SETLKW64;
5752         break;
5753 #endif
5754     case TARGET_F_SETLEASE:
5755         ret = F_SETLEASE;
5756         break;
5757     case TARGET_F_GETLEASE:
5758         ret = F_GETLEASE;
5759         break;
5760 #ifdef F_DUPFD_CLOEXEC
5761     case TARGET_F_DUPFD_CLOEXEC:
5762         ret = F_DUPFD_CLOEXEC;
5763         break;
5764 #endif
5765     case TARGET_F_NOTIFY:
5766         ret = F_NOTIFY;
5767         break;
5768 #ifdef F_GETOWN_EX
5769     case TARGET_F_GETOWN_EX:
5770         ret = F_GETOWN_EX;
5771         break;
5772 #endif
5773 #ifdef F_SETOWN_EX
5774     case TARGET_F_SETOWN_EX:
5775         ret = F_SETOWN_EX;
5776         break;
5777 #endif
5778 #ifdef F_SETPIPE_SZ
5779     case TARGET_F_SETPIPE_SZ:
5780         ret = F_SETPIPE_SZ;
5781         break;
5782     case TARGET_F_GETPIPE_SZ:
5783         ret = F_GETPIPE_SZ;
5784         break;
5785 #endif
5786     default:
5787         ret = -TARGET_EINVAL;
5788         break;
5789     }
5790 
5791 #if defined(__powerpc64__)
5792     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13
5793      * and 14, which the kernel does not support. The glibc fcntl wrapper
5794      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
5795      * the syscall directly, adjust to what the kernel supports.
5796      */
5797     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5798         ret -= F_GETLK64 - 5;
5799     }
5800 #endif
5801 
5802     return ret;
5803 }
5804 
5805 #define FLOCK_TRANSTBL \
5806     switch (type) { \
5807     TRANSTBL_CONVERT(F_RDLCK); \
5808     TRANSTBL_CONVERT(F_WRLCK); \
5809     TRANSTBL_CONVERT(F_UNLCK); \
5810     TRANSTBL_CONVERT(F_EXLCK); \
5811     TRANSTBL_CONVERT(F_SHLCK); \
5812     }
5813 
5814 static int target_to_host_flock(int type)
5815 {
5816 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5817     FLOCK_TRANSTBL
5818 #undef  TRANSTBL_CONVERT
5819     return -TARGET_EINVAL;
5820 }
5821 
5822 static int host_to_target_flock(int type)
5823 {
5824 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5825     FLOCK_TRANSTBL
5826 #undef  TRANSTBL_CONVERT
5827     /* if we don't know how to convert the value coming
5828      * from the host, we copy it to the target field as-is
5829      */
5830     return type;
5831 }
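/*
 * Expansion sketch: inside target_to_host_flock() the
 * TRANSTBL_CONVERT(F_RDLCK) entry becomes
 *
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *
 * and inside host_to_target_flock() the same entry becomes
 *
 *     case F_RDLCK: return TARGET_F_RDLCK;
 *
 * so a single FLOCK_TRANSTBL list drives both directions of the conversion.
 */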
5832 
5833 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5834                                             abi_ulong target_flock_addr)
5835 {
5836     struct target_flock *target_fl;
5837     int l_type;
5838 
5839     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5840         return -TARGET_EFAULT;
5841     }
5842 
5843     __get_user(l_type, &target_fl->l_type);
5844     l_type = target_to_host_flock(l_type);
5845     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
5846         return l_type;
5847     }
5848     fl->l_type = l_type;
5849     __get_user(fl->l_whence, &target_fl->l_whence);
5850     __get_user(fl->l_start, &target_fl->l_start);
5851     __get_user(fl->l_len, &target_fl->l_len);
5852     __get_user(fl->l_pid, &target_fl->l_pid);
5853     unlock_user_struct(target_fl, target_flock_addr, 0);
5854     return 0;
5855 }
5856 
5857 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5858                                           const struct flock64 *fl)
5859 {
5860     struct target_flock *target_fl;
5861     short l_type;
5862 
5863     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5864         return -TARGET_EFAULT;
5865     }
5866 
5867     l_type = host_to_target_flock(fl->l_type);
5868     __put_user(l_type, &target_fl->l_type);
5869     __put_user(fl->l_whence, &target_fl->l_whence);
5870     __put_user(fl->l_start, &target_fl->l_start);
5871     __put_user(fl->l_len, &target_fl->l_len);
5872     __put_user(fl->l_pid, &target_fl->l_pid);
5873     unlock_user_struct(target_fl, target_flock_addr, 1);
5874     return 0;
5875 }
5876 
5877 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5878 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5879 
5880 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5881 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5882                                                    abi_ulong target_flock_addr)
5883 {
5884     struct target_oabi_flock64 *target_fl;
5885     int l_type;
5886 
5887     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5888         return -TARGET_EFAULT;
5889     }
5890 
5891     __get_user(l_type, &target_fl->l_type);
5892     l_type = target_to_host_flock(l_type);
5893     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
5894         return l_type;
5895     }
5896     fl->l_type = l_type;
5897     __get_user(fl->l_whence, &target_fl->l_whence);
5898     __get_user(fl->l_start, &target_fl->l_start);
5899     __get_user(fl->l_len, &target_fl->l_len);
5900     __get_user(fl->l_pid, &target_fl->l_pid);
5901     unlock_user_struct(target_fl, target_flock_addr, 0);
5902     return 0;
5903 }
5904 
5905 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5906                                                  const struct flock64 *fl)
5907 {
5908     struct target_oabi_flock64 *target_fl;
5909     short l_type;
5910 
5911     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5912         return -TARGET_EFAULT;
5913     }
5914 
5915     l_type = host_to_target_flock(fl->l_type);
5916     __put_user(l_type, &target_fl->l_type);
5917     __put_user(fl->l_whence, &target_fl->l_whence);
5918     __put_user(fl->l_start, &target_fl->l_start);
5919     __put_user(fl->l_len, &target_fl->l_len);
5920     __put_user(fl->l_pid, &target_fl->l_pid);
5921     unlock_user_struct(target_fl, target_flock_addr, 1);
5922     return 0;
5923 }
5924 #endif
5925 
5926 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5927                                               abi_ulong target_flock_addr)
5928 {
5929     struct target_flock64 *target_fl;
5930     int l_type;
5931 
5932     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5933         return -TARGET_EFAULT;
5934     }
5935 
5936     __get_user(l_type, &target_fl->l_type);
5937     l_type = target_to_host_flock(l_type);
5938     if (l_type < 0) {
             unlock_user_struct(target_fl, target_flock_addr, 0);
5939         return l_type;
5940     }
5941     fl->l_type = l_type;
5942     __get_user(fl->l_whence, &target_fl->l_whence);
5943     __get_user(fl->l_start, &target_fl->l_start);
5944     __get_user(fl->l_len, &target_fl->l_len);
5945     __get_user(fl->l_pid, &target_fl->l_pid);
5946     unlock_user_struct(target_fl, target_flock_addr, 0);
5947     return 0;
5948 }
5949 
5950 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5951                                             const struct flock64 *fl)
5952 {
5953     struct target_flock64 *target_fl;
5954     short l_type;
5955 
5956     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5957         return -TARGET_EFAULT;
5958     }
5959 
5960     l_type = host_to_target_flock(fl->l_type);
5961     __put_user(l_type, &target_fl->l_type);
5962     __put_user(fl->l_whence, &target_fl->l_whence);
5963     __put_user(fl->l_start, &target_fl->l_start);
5964     __put_user(fl->l_len, &target_fl->l_len);
5965     __put_user(fl->l_pid, &target_fl->l_pid);
5966     unlock_user_struct(target_fl, target_flock_addr, 1);
5967     return 0;
5968 }
5969 
5970 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5971 {
5972     struct flock64 fl64;
5973 #ifdef F_GETOWN_EX
5974     struct f_owner_ex fox;
5975     struct target_f_owner_ex *target_fox;
5976 #endif
5977     abi_long ret;
5978     int host_cmd = target_to_host_fcntl_cmd(cmd);
5979 
5980     if (host_cmd == -TARGET_EINVAL)
5981         return host_cmd;
5982 
5983     switch(cmd) {
5984     case TARGET_F_GETLK:
5985         ret = copy_from_user_flock(&fl64, arg);
5986         if (ret) {
5987             return ret;
5988         }
5989         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5990         if (ret == 0) {
5991             ret = copy_to_user_flock(arg, &fl64);
5992         }
5993         break;
5994 
5995     case TARGET_F_SETLK:
5996     case TARGET_F_SETLKW:
5997         ret = copy_from_user_flock(&fl64, arg);
5998         if (ret) {
5999             return ret;
6000         }
6001         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6002         break;
6003 
6004     case TARGET_F_GETLK64:
6005         ret = copy_from_user_flock64(&fl64, arg);
6006         if (ret) {
6007             return ret;
6008         }
6009         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6010         if (ret == 0) {
6011             ret = copy_to_user_flock64(arg, &fl64);
6012         }
6013         break;
6014     case TARGET_F_SETLK64:
6015     case TARGET_F_SETLKW64:
6016         ret = copy_from_user_flock64(&fl64, arg);
6017         if (ret) {
6018             return ret;
6019         }
6020         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6021         break;
6022 
6023     case TARGET_F_GETFL:
6024         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6025         if (ret >= 0) {
6026             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6027         }
6028         break;
6029 
6030     case TARGET_F_SETFL:
6031         ret = get_errno(safe_fcntl(fd, host_cmd,
6032                                    target_to_host_bitmask(arg,
6033                                                           fcntl_flags_tbl)));
6034         break;
6035 
6036 #ifdef F_GETOWN_EX
6037     case TARGET_F_GETOWN_EX:
6038         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6039         if (ret >= 0) {
6040             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6041                 return -TARGET_EFAULT;
6042             target_fox->type = tswap32(fox.type);
6043             target_fox->pid = tswap32(fox.pid);
6044             unlock_user_struct(target_fox, arg, 1);
6045         }
6046         break;
6047 #endif
6048 
6049 #ifdef F_SETOWN_EX
6050     case TARGET_F_SETOWN_EX:
6051         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6052             return -TARGET_EFAULT;
6053         fox.type = tswap32(target_fox->type);
6054         fox.pid = tswap32(target_fox->pid);
6055         unlock_user_struct(target_fox, arg, 0);
6056         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6057         break;
6058 #endif
6059 
6060     case TARGET_F_SETOWN:
6061     case TARGET_F_GETOWN:
6062     case TARGET_F_SETSIG:
6063     case TARGET_F_GETSIG:
6064     case TARGET_F_SETLEASE:
6065     case TARGET_F_GETLEASE:
6066     case TARGET_F_SETPIPE_SZ:
6067     case TARGET_F_GETPIPE_SZ:
6068         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6069         break;
6070 
6071     default:
6072         ret = get_errno(safe_fcntl(fd, cmd, arg));
6073         break;
6074     }
6075     return ret;
6076 }
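/*
 * Why F_GETFL/F_SETFL go through fcntl_flags_tbl (sketch): the numeric
 * values of the open flags differ between ABIs; for instance a MIPS
 * guest's O_NONBLOCK (TARGET_O_NONBLOCK here) is 0x80 while the x86_64
 * host value is 0x800, so
 *
 *     fcntl(fd, F_SETFL, 0x80)        guest view
 *     safe_fcntl(fd, F_SETFL, 0x800)  what must reach the host
 *
 * and, symmetrically, the raw host F_GETFL result has to be translated
 * before being returned to the guest.
 */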
6077 
6078 #ifdef USE_UID16
6079 
6080 static inline int high2lowuid(int uid)
6081 {
6082     if (uid > 65535)
6083         return 65534;
6084     else
6085         return uid;
6086 }
6087 
6088 static inline int high2lowgid(int gid)
6089 {
6090     if (gid > 65535)
6091         return 65534;
6092     else
6093         return gid;
6094 }
6095 
6096 static inline int low2highuid(int uid)
6097 {
6098     if ((int16_t)uid == -1)
6099         return -1;
6100     else
6101         return uid;
6102 }
6103 
6104 static inline int low2highgid(int gid)
6105 {
6106     if ((int16_t)gid == -1)
6107         return -1;
6108     else
6109         return gid;
6110 }
6111 static inline int tswapid(int id)
6112 {
6113     return tswap16(id);
6114 }
6115 
6116 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6117 
6118 #else /* !USE_UID16 */
6119 static inline int high2lowuid(int uid)
6120 {
6121     return uid;
6122 }
6123 static inline int high2lowgid(int gid)
6124 {
6125     return gid;
6126 }
6127 static inline int low2highuid(int uid)
6128 {
6129     return uid;
6130 }
6131 static inline int low2highgid(int gid)
6132 {
6133     return gid;
6134 }
6135 static inline int tswapid(int id)
6136 {
6137     return tswap32(id);
6138 }
6139 
6140 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6141 
6142 #endif /* USE_UID16 */
6143 
6144 /* We must do direct syscalls for setting UID/GID, because we want to
6145  * implement the Linux system call semantics of "change only for this thread",
6146  * not the libc/POSIX semantics of "change for all threads in process".
6147  * (See http://ewontfix.com/17/ for more details.)
6148  * We use the 32-bit version of the syscalls if present; if it is not
6149  * then either the host architecture supports 32-bit UIDs natively with
6150  * the standard syscall, or the 16-bit UID is the best we can do.
6151  */
6152 #ifdef __NR_setuid32
6153 #define __NR_sys_setuid __NR_setuid32
6154 #else
6155 #define __NR_sys_setuid __NR_setuid
6156 #endif
6157 #ifdef __NR_setgid32
6158 #define __NR_sys_setgid __NR_setgid32
6159 #else
6160 #define __NR_sys_setgid __NR_setgid
6161 #endif
6162 #ifdef __NR_setresuid32
6163 #define __NR_sys_setresuid __NR_setresuid32
6164 #else
6165 #define __NR_sys_setresuid __NR_setresuid
6166 #endif
6167 #ifdef __NR_setresgid32
6168 #define __NR_sys_setresgid __NR_setresgid32
6169 #else
6170 #define __NR_sys_setresgid __NR_setresgid
6171 #endif
6172 
6173 _syscall1(int, sys_setuid, uid_t, uid)
6174 _syscall1(int, sys_setgid, gid_t, gid)
6175 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6176 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
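/*
 * Sketch of the difference this makes: calling the libc setuid() here
 * would change the UID of every thread in the QEMU process (glibc
 * broadcasts the change to all threads with an internal signal), whereas
 * the guest expects the raw Linux semantics of
 *
 *     syscall(__NR_sys_setuid, uid);   (affects only the calling thread)
 *
 * which is what the sys_setuid()/sys_setgid() wrappers above provide.
 */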
6177 
6178 void syscall_init(void)
6179 {
6180     IOCTLEntry *ie;
6181     const argtype *arg_type;
6182     int size;
6183     int i;
6184 
6185     thunk_init(STRUCT_MAX);
6186 
6187 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6188 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6189 #include "syscall_types.h"
6190 #undef STRUCT
6191 #undef STRUCT_SPECIAL
6192 
6193     /* Build the target_to_host_errno_table[] from
6194      * host_to_target_errno_table[]. */
6195     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6196         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6197     }
6198 
6199     /* we patch the ioctl size if necessary. We rely on the fact that
6200        no ioctl has all the bits at '1' in the size field */
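    /*
     * Worked example (illustrative): an entry whose target_cmd was
     * declared with all size bits set, i.e. with
     * (TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT) in the command, has
     * those bits replaced below by thunk_type_size() of the struct its
     * argument points to, so the final command number matches what the
     * guest's own kernel headers would encode for that ABI.
     */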
6201     ie = ioctl_entries;
6202     while (ie->target_cmd != 0) {
6203         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6204             TARGET_IOC_SIZEMASK) {
6205             arg_type = ie->arg_type;
6206             if (arg_type[0] != TYPE_PTR) {
6207                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6208                         ie->target_cmd);
6209                 exit(1);
6210             }
6211             arg_type++;
6212             size = thunk_type_size(arg_type, 0);
6213             ie->target_cmd = (ie->target_cmd &
6214                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6215                 (size << TARGET_IOC_SIZESHIFT);
6216         }
6217 
6218         /* automatic consistency check if same arch */
6219 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6220     (defined(__x86_64__) && defined(TARGET_X86_64))
6221         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6222             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6223                     ie->name, ie->target_cmd, ie->host_cmd);
6224         }
6225 #endif
6226         ie++;
6227     }
6228 }
6229 
6230 #if TARGET_ABI_BITS == 32
6231 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6232 {
6233 #ifdef TARGET_WORDS_BIGENDIAN
6234     return ((uint64_t)word0 << 32) | word1;
6235 #else
6236     return ((uint64_t)word1 << 32) | word0;
6237 #endif
6238 }
6239 #else /* TARGET_ABI_BITS == 32 */
6240 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6241 {
6242     return word0;
6243 }
6244 #endif /* TARGET_ABI_BITS != 32 */
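
/*
 * Worked example (32-bit ABI, little-endian target): an offset of
 * 0x123456789 arrives as word0 = 0x23456789 and word1 = 0x00000001,
 * and target_offset64() reassembles ((uint64_t)word1 << 32) | word0,
 * giving back 0x123456789.  A big-endian target passes the halves the
 * other way round, which the TARGET_WORDS_BIGENDIAN branch handles.
 */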
6245 
6246 #ifdef TARGET_NR_truncate64
6247 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6248                                          abi_long arg2,
6249                                          abi_long arg3,
6250                                          abi_long arg4)
6251 {
6252     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6253         arg2 = arg3;
6254         arg3 = arg4;
6255     }
6256     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6257 }
6258 #endif
6259 
6260 #ifdef TARGET_NR_ftruncate64
6261 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6262                                           abi_long arg2,
6263                                           abi_long arg3,
6264                                           abi_long arg4)
6265 {
6266     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6267         arg2 = arg3;
6268         arg3 = arg4;
6269     }
6270     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6271 }
6272 #endif
6273 
6274 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6275                                                abi_ulong target_addr)
6276 {
6277     struct target_timespec *target_ts;
6278 
6279     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6280         return -TARGET_EFAULT;
6281     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6282     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6283     unlock_user_struct(target_ts, target_addr, 0);
6284     return 0;
6285 }
6286 
6287 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6288                                                struct timespec *host_ts)
6289 {
6290     struct target_timespec *target_ts;
6291 
6292     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6293         return -TARGET_EFAULT;
6294     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6295     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6296     unlock_user_struct(target_ts, target_addr, 1);
6297     return 0;
6298 }
6299 
6300 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6301                                                  abi_ulong target_addr)
6302 {
6303     struct target_itimerspec *target_itspec;
6304 
6305     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6306         return -TARGET_EFAULT;
6307     }
6308 
6309     host_itspec->it_interval.tv_sec =
6310                             tswapal(target_itspec->it_interval.tv_sec);
6311     host_itspec->it_interval.tv_nsec =
6312                             tswapal(target_itspec->it_interval.tv_nsec);
6313     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6314     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6315 
6316     unlock_user_struct(target_itspec, target_addr, 1);
6317     return 0;
6318 }
6319 
6320 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6321                                                struct itimerspec *host_its)
6322 {
6323     struct target_itimerspec *target_itspec;
6324 
6325     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6326         return -TARGET_EFAULT;
6327     }
6328 
6329     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6330     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6331 
6332     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6333     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6334 
6335     unlock_user_struct(target_itspec, target_addr, 0);
6336     return 0;
6337 }
6338 
6339 static inline abi_long target_to_host_timex(struct timex *host_tx,
6340                                             abi_long target_addr)
6341 {
6342     struct target_timex *target_tx;
6343 
6344     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6345         return -TARGET_EFAULT;
6346     }
6347 
6348     __get_user(host_tx->modes, &target_tx->modes);
6349     __get_user(host_tx->offset, &target_tx->offset);
6350     __get_user(host_tx->freq, &target_tx->freq);
6351     __get_user(host_tx->maxerror, &target_tx->maxerror);
6352     __get_user(host_tx->esterror, &target_tx->esterror);
6353     __get_user(host_tx->status, &target_tx->status);
6354     __get_user(host_tx->constant, &target_tx->constant);
6355     __get_user(host_tx->precision, &target_tx->precision);
6356     __get_user(host_tx->tolerance, &target_tx->tolerance);
6357     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6358     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6359     __get_user(host_tx->tick, &target_tx->tick);
6360     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6361     __get_user(host_tx->jitter, &target_tx->jitter);
6362     __get_user(host_tx->shift, &target_tx->shift);
6363     __get_user(host_tx->stabil, &target_tx->stabil);
6364     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6365     __get_user(host_tx->calcnt, &target_tx->calcnt);
6366     __get_user(host_tx->errcnt, &target_tx->errcnt);
6367     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6368     __get_user(host_tx->tai, &target_tx->tai);
6369 
6370     unlock_user_struct(target_tx, target_addr, 0);
6371     return 0;
6372 }
6373 
6374 static inline abi_long host_to_target_timex(abi_long target_addr,
6375                                             struct timex *host_tx)
6376 {
6377     struct target_timex *target_tx;
6378 
6379     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6380         return -TARGET_EFAULT;
6381     }
6382 
6383     __put_user(host_tx->modes, &target_tx->modes);
6384     __put_user(host_tx->offset, &target_tx->offset);
6385     __put_user(host_tx->freq, &target_tx->freq);
6386     __put_user(host_tx->maxerror, &target_tx->maxerror);
6387     __put_user(host_tx->esterror, &target_tx->esterror);
6388     __put_user(host_tx->status, &target_tx->status);
6389     __put_user(host_tx->constant, &target_tx->constant);
6390     __put_user(host_tx->precision, &target_tx->precision);
6391     __put_user(host_tx->tolerance, &target_tx->tolerance);
6392     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6393     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6394     __put_user(host_tx->tick, &target_tx->tick);
6395     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6396     __put_user(host_tx->jitter, &target_tx->jitter);
6397     __put_user(host_tx->shift, &target_tx->shift);
6398     __put_user(host_tx->stabil, &target_tx->stabil);
6399     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6400     __put_user(host_tx->calcnt, &target_tx->calcnt);
6401     __put_user(host_tx->errcnt, &target_tx->errcnt);
6402     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6403     __put_user(host_tx->tai, &target_tx->tai);
6404 
6405     unlock_user_struct(target_tx, target_addr, 1);
6406     return 0;
6407 }
6408 
6409 
6410 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6411                                                abi_ulong target_addr)
6412 {
6413     struct target_sigevent *target_sevp;
6414 
6415     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6416         return -TARGET_EFAULT;
6417     }
6418 
6419     /* This union is awkward on 64 bit systems because it has a 32 bit
6420      * integer and a pointer in it; we follow the conversion approach
6421      * used for handling sigval types in signal.c so the guest should get
6422      * the correct value back even if we did a 64 bit byteswap and it's
6423      * using the 32 bit integer.
6424      */
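    /* For example, a guest that stored the 32-bit value 0x1234 in
     * sigev_value.sival_int still reads 0x1234 back later, because the
     * same pointer-width tswapal() treatment is applied symmetrically
     * here and when the value is delivered back to the guest.
     */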
6425     host_sevp->sigev_value.sival_ptr =
6426         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6427     host_sevp->sigev_signo =
6428         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6429     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6430     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6431 
6432     unlock_user_struct(target_sevp, target_addr, 1);
6433     return 0;
6434 }
6435 
6436 #if defined(TARGET_NR_mlockall)
6437 static inline int target_to_host_mlockall_arg(int arg)
6438 {
6439     int result = 0;
6440 
6441     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6442         result |= MCL_CURRENT;
6443     }
6444     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6445         result |= MCL_FUTURE;
6446     }
6447     return result;
6448 }
6449 #endif
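
/*
 * E.g. a guest mlockall(MCL_CURRENT | MCL_FUTURE) call passes the
 * target's encoding of those two flags here and gets back the host's
 * MCL_CURRENT | MCL_FUTURE, so the host syscall sees the right bits even
 * if the numeric flag values differ between the two architectures.
 */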
6450 
6451 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6452      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6453      defined(TARGET_NR_newfstatat))
6454 static inline abi_long host_to_target_stat64(void *cpu_env,
6455                                              abi_ulong target_addr,
6456                                              struct stat *host_st)
6457 {
6458 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6459     if (((CPUARMState *)cpu_env)->eabi) {
6460         struct target_eabi_stat64 *target_st;
6461 
6462         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6463             return -TARGET_EFAULT;
6464         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6465         __put_user(host_st->st_dev, &target_st->st_dev);
6466         __put_user(host_st->st_ino, &target_st->st_ino);
6467 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6468         __put_user(host_st->st_ino, &target_st->__st_ino);
6469 #endif
6470         __put_user(host_st->st_mode, &target_st->st_mode);
6471         __put_user(host_st->st_nlink, &target_st->st_nlink);
6472         __put_user(host_st->st_uid, &target_st->st_uid);
6473         __put_user(host_st->st_gid, &target_st->st_gid);
6474         __put_user(host_st->st_rdev, &target_st->st_rdev);
6475         __put_user(host_st->st_size, &target_st->st_size);
6476         __put_user(host_st->st_blksize, &target_st->st_blksize);
6477         __put_user(host_st->st_blocks, &target_st->st_blocks);
6478         __put_user(host_st->st_atime, &target_st->target_st_atime);
6479         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6480         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6481 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6482         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6483         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6484         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6485 #endif
6486         unlock_user_struct(target_st, target_addr, 1);
6487     } else
6488 #endif
6489     {
6490 #if defined(TARGET_HAS_STRUCT_STAT64)
6491         struct target_stat64 *target_st;
6492 #else
6493         struct target_stat *target_st;
6494 #endif
6495 
6496         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6497             return -TARGET_EFAULT;
6498         memset(target_st, 0, sizeof(*target_st));
6499         __put_user(host_st->st_dev, &target_st->st_dev);
6500         __put_user(host_st->st_ino, &target_st->st_ino);
6501 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6502         __put_user(host_st->st_ino, &target_st->__st_ino);
6503 #endif
6504         __put_user(host_st->st_mode, &target_st->st_mode);
6505         __put_user(host_st->st_nlink, &target_st->st_nlink);
6506         __put_user(host_st->st_uid, &target_st->st_uid);
6507         __put_user(host_st->st_gid, &target_st->st_gid);
6508         __put_user(host_st->st_rdev, &target_st->st_rdev);
6509         /* XXX: better use of kernel struct */
6510         __put_user(host_st->st_size, &target_st->st_size);
6511         __put_user(host_st->st_blksize, &target_st->st_blksize);
6512         __put_user(host_st->st_blocks, &target_st->st_blocks);
6513         __put_user(host_st->st_atime, &target_st->target_st_atime);
6514         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6515         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6516 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6517         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6518         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6519         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6520 #endif
6521         unlock_user_struct(target_st, target_addr, 1);
6522     }
6523 
6524     return 0;
6525 }
6526 #endif
6527 
6528 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6529 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6530                                             abi_ulong target_addr)
6531 {
6532     struct target_statx *target_stx;
6533 
6534     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6535         return -TARGET_EFAULT;
6536     }
6537     memset(target_stx, 0, sizeof(*target_stx));
6538 
6539     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6540     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6541     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6542     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6543     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6544     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6545     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6546     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6547     __put_user(host_stx->stx_size, &target_stx->stx_size);
6548     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6549     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6550     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6551     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6552     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6553     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6554     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6555     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6556     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6557     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6558     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6559     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6560     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6561     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6562 
6563     unlock_user_struct(target_stx, target_addr, 1);
6564 
6565     return 0;
6566 }
6567 #endif
6568 
6569 
6570 /* ??? Using host futex calls even when target atomic operations
6571    are not really atomic probably breaks things.  However, implementing
6572    futexes locally would make futexes shared between multiple processes
6573    tricky, and futexes are probably useless in that case anyway because
6574    guest atomic operations won't work either.  */
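/* Illustrative example: a guest futex(uaddr, FUTEX_WAIT_BITSET |
   FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, ...) arrives with all of
   those bits in 'op'.  Masking with FUTEX_CMD_MASK strips the modifier
   flags so base_op selects the FUTEX_WAIT_BITSET case, while the unmasked
   'op' (modifiers included) is what is actually passed on to the host
   futex syscall.  */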
6575 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6576                     target_ulong uaddr2, int val3)
6577 {
6578     struct timespec ts, *pts;
6579     int base_op;
6580 
6581     /* ??? We assume FUTEX_* constants are the same on both host
6582        and target.  */
6583 #ifdef FUTEX_CMD_MASK
6584     base_op = op & FUTEX_CMD_MASK;
6585 #else
6586     base_op = op;
6587 #endif
6588     switch (base_op) {
6589     case FUTEX_WAIT:
6590     case FUTEX_WAIT_BITSET:
6591         if (timeout) {
6592             pts = &ts;
6593             if (target_to_host_timespec(pts, timeout)) return -TARGET_EFAULT;
6594         } else {
6595             pts = NULL;
6596         }
6597         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6598                          pts, NULL, val3));
6599     case FUTEX_WAKE:
6600         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6601     case FUTEX_FD:
6602         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6603     case FUTEX_REQUEUE:
6604     case FUTEX_CMP_REQUEUE:
6605     case FUTEX_WAKE_OP:
6606         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6607            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6608            But the prototype takes a `struct timespec *'; insert casts
6609            to satisfy the compiler.  We do not need to tswap TIMEOUT
6610            since it's not compared to guest memory.  */
6611         pts = (struct timespec *)(uintptr_t) timeout;
6612         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6613                                     g2h(uaddr2),
6614                                     (base_op == FUTEX_CMP_REQUEUE
6615                                      ? tswap32(val3)
6616                                      : val3)));
6617     default:
6618         return -TARGET_ENOSYS;
6619     }
6620 }
6621 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6622 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6623                                      abi_long handle, abi_long mount_id,
6624                                      abi_long flags)
6625 {
6626     struct file_handle *target_fh;
6627     struct file_handle *fh;
6628     int mid = 0;
6629     abi_long ret;
6630     char *name;
6631     unsigned int size, total_size;
6632 
6633     if (get_user_s32(size, handle)) {
6634         return -TARGET_EFAULT;
6635     }
6636 
6637     name = lock_user_string(pathname);
6638     if (!name) {
6639         return -TARGET_EFAULT;
6640     }
6641 
6642     total_size = sizeof(struct file_handle) + size;
6643     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6644     if (!target_fh) {
6645         unlock_user(name, pathname, 0);
6646         return -TARGET_EFAULT;
6647     }
6648 
6649     fh = g_malloc0(total_size);
6650     fh->handle_bytes = size;
6651 
6652     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6653     unlock_user(name, pathname, 0);
6654 
6655     /* man name_to_handle_at(2):
6656      * Other than the use of the handle_bytes field, the caller should treat
6657      * the file_handle structure as an opaque data type
6658      */
6659 
6660     memcpy(target_fh, fh, total_size);
6661     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6662     target_fh->handle_type = tswap32(fh->handle_type);
6663     g_free(fh);
6664     unlock_user(target_fh, handle, total_size);
6665 
6666     if (put_user_s32(mid, mount_id)) {
6667         return -TARGET_EFAULT;
6668     }
6669 
6670     return ret;
6671 
6672 }
6673 #endif
6674 
6675 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6676 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6677                                      abi_long flags)
6678 {
6679     struct file_handle *target_fh;
6680     struct file_handle *fh;
6681     unsigned int size, total_size;
6682     abi_long ret;
6683 
6684     if (get_user_s32(size, handle)) {
6685         return -TARGET_EFAULT;
6686     }
6687 
6688     total_size = sizeof(struct file_handle) + size;
6689     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6690     if (!target_fh) {
6691         return -TARGET_EFAULT;
6692     }
6693 
6694     fh = g_memdup(target_fh, total_size);
6695     fh->handle_bytes = size;
6696     fh->handle_type = tswap32(target_fh->handle_type);
6697 
6698     ret = get_errno(open_by_handle_at(mount_fd, fh,
6699                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6700 
6701     g_free(fh);
6702 
6703     unlock_user(target_fh, handle, total_size);
6704 
6705     return ret;
6706 }
6707 #endif
6708 
6709 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6710 
6711 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6712 {
6713     int host_flags;
6714     target_sigset_t *target_mask;
6715     sigset_t host_mask;
6716     abi_long ret;
6717 
6718     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6719         return -TARGET_EINVAL;
6720     }
6721     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6722         return -TARGET_EFAULT;
6723     }
6724 
6725     target_to_host_sigset(&host_mask, target_mask);
6726 
6727     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6728 
6729     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6730     if (ret >= 0) {
6731         fd_trans_register(ret, &target_signalfd_trans);
6732     }
6733 
6734     unlock_user_struct(target_mask, mask, 0);
6735 
6736     return ret;
6737 }
6738 #endif
6739 
6740 /* Map host to target signal numbers for the wait family of syscalls.
6741    Assume all other status bits are the same.  */
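/* For example, if a host child was killed by a signal whose host number
   differs from the target's numbering, the low 7 bits of the status word
   are rewritten to the target signal number while the core-dump bit and
   the rest of the word pass through unchanged; for a stopped child only
   the signal in bits 8-15 is remapped and the 0x7f marker in the low
   byte is preserved.  */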
6742 int host_to_target_waitstatus(int status)
6743 {
6744     if (WIFSIGNALED(status)) {
6745         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6746     }
6747     if (WIFSTOPPED(status)) {
6748         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6749                | (status & 0xff);
6750     }
6751     return status;
6752 }
6753 
6754 static int open_self_cmdline(void *cpu_env, int fd)
6755 {
6756     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6757     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6758     int i;
6759 
6760     for (i = 0; i < bprm->argc; i++) {
6761         size_t len = strlen(bprm->argv[i]) + 1;
6762 
6763         if (write(fd, bprm->argv[i], len) != len) {
6764             return -1;
6765         }
6766     }
6767 
6768     return 0;
6769 }
6770 
6771 static int open_self_maps(void *cpu_env, int fd)
6772 {
6773     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6774     TaskState *ts = cpu->opaque;
6775     FILE *fp;
6776     char *line = NULL;
6777     size_t len = 0;
6778     ssize_t read;
6779 
6780     fp = fopen("/proc/self/maps", "r");
6781     if (fp == NULL) {
6782         return -1;
6783     }
6784 
6785     while ((read = getline(&line, &len, fp)) != -1) {
6786         int fields, dev_maj, dev_min, inode;
6787         uint64_t min, max, offset;
6788         char flag_r, flag_w, flag_x, flag_p;
6789         char path[512] = "";
6790         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6791                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6792                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6793 
6794         if ((fields < 10) || (fields > 11)) {
6795             continue;
6796         }
6797         if (h2g_valid(min)) {
6798             int flags = page_get_flags(h2g(min));
6799             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6800             if (page_check_range(h2g(min), max - min, flags) == -1) {
6801                 continue;
6802             }
6803             if (h2g(min) == ts->info->stack_limit) {
6804                 pstrcpy(path, sizeof(path), "      [stack]");
6805             }
6806             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6807                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6808                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6809                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6810                     path[0] ? "         " : "", path);
6811         }
6812     }
6813 
6814     free(line);
6815     fclose(fp);
6816 
6817     return 0;
6818 }
6819 
6820 static int open_self_stat(void *cpu_env, int fd)
6821 {
6822     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6823     TaskState *ts = cpu->opaque;
6824     abi_ulong start_stack = ts->info->start_stack;
6825     int i;
6826 
6827     for (i = 0; i < 44; i++) {
6828       char buf[128];
6829       int len;
6830       uint64_t val = 0;
6831 
6832       if (i == 0) {
6833         /* pid */
6834         val = getpid();
6835         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6836       } else if (i == 1) {
6837         /* app name */
6838         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6839       } else if (i == 27) {
6840         /* stack bottom */
6841         val = start_stack;
6842         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6843       } else {
6844         /* for the rest, there is MasterCard */
6845         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6846       }
6847 
6848       len = strlen(buf);
6849       if (write(fd, buf, len) != len) {
6850           return -1;
6851       }
6852     }
6853 
6854     return 0;
6855 }
6856 
6857 static int open_self_auxv(void *cpu_env, int fd)
6858 {
6859     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6860     TaskState *ts = cpu->opaque;
6861     abi_ulong auxv = ts->info->saved_auxv;
6862     abi_ulong len = ts->info->auxv_len;
6863     char *ptr;
6864 
6865     /*
6866      * The auxiliary vector is stored on the target process's stack.
6867      * Read the whole auxv vector in and copy it to the file.
6868      */
6869     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6870     if (ptr != NULL) {
6871         while (len > 0) {
6872             ssize_t r;
6873             r = write(fd, ptr, len);
6874             if (r <= 0) {
6875                 break;
6876             }
6877             len -= r;
6878             ptr += r;
6879         }
6880         lseek(fd, 0, SEEK_SET);
6881         unlock_user(ptr, auxv, len);
6882     }
6883 
6884     return 0;
6885 }
6886 
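/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, whether spelled via "self" or via our numeric pid.  For
 * instance, "/proc/self/maps" and "/proc/<our pid>/maps" both match the
 * entry "maps", while another pid's maps or a different entry does not.
 */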
6887 static int is_proc_myself(const char *filename, const char *entry)
6888 {
6889     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6890         filename += strlen("/proc/");
6891         if (!strncmp(filename, "self/", strlen("self/"))) {
6892             filename += strlen("self/");
6893         } else if (*filename >= '1' && *filename <= '9') {
6894             char myself[80];
6895             snprintf(myself, sizeof(myself), "%d/", getpid());
6896             if (!strncmp(filename, myself, strlen(myself))) {
6897                 filename += strlen(myself);
6898             } else {
6899                 return 0;
6900             }
6901         } else {
6902             return 0;
6903         }
6904         if (!strcmp(filename, entry)) {
6905             return 1;
6906         }
6907     }
6908     return 0;
6909 }
6910 
6911 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6912     defined(TARGET_SPARC) || defined(TARGET_M68K)
6913 static int is_proc(const char *filename, const char *entry)
6914 {
6915     return strcmp(filename, entry) == 0;
6916 }
6917 #endif
6918 
6919 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6920 static int open_net_route(void *cpu_env, int fd)
6921 {
6922     FILE *fp;
6923     char *line = NULL;
6924     size_t len = 0;
6925     ssize_t read;
6926 
6927     fp = fopen("/proc/net/route", "r");
6928     if (fp == NULL) {
6929         return -1;
6930     }
6931 
6932     /* read header */
6933 
6934     read = getline(&line, &len, fp);
6935     dprintf(fd, "%s", line);
6936 
6937     /* read routes */
6938 
6939     while ((read = getline(&line, &len, fp)) != -1) {
6940         char iface[16];
6941         uint32_t dest, gw, mask;
6942         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6943         int fields;
6944 
6945         fields = sscanf(line,
6946                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6947                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6948                         &mask, &mtu, &window, &irtt);
6949         if (fields != 11) {
6950             continue;
6951         }
6952         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6953                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6954                 metric, tswap32(mask), mtu, window, irtt);
6955     }
6956 
6957     free(line);
6958     fclose(fp);
6959 
6960     return 0;
6961 }
6962 #endif
6963 
6964 #if defined(TARGET_SPARC)
6965 static int open_cpuinfo(void *cpu_env, int fd)
6966 {
6967     dprintf(fd, "type\t\t: sun4u\n");
6968     return 0;
6969 }
6970 #endif
6971 
6972 #if defined(TARGET_M68K)
6973 static int open_hardware(void *cpu_env, int fd)
6974 {
6975     dprintf(fd, "Model:\t\tqemu-m68k\n");
6976     return 0;
6977 }
6978 #endif
6979 
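/* do_openat() intercepts a handful of /proc paths (see the fakes[] table
 * below) whose host contents would be wrong for the guest.  For example,
 * a guest open of "/proc/self/maps" never touches the host file:
 * open_self_maps() writes a guest-view maps listing into an unlinked
 * temporary file and that descriptor is returned instead.  Everything
 * else falls through to a normal openat() on the host.
 */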
6980 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6981 {
6982     struct fake_open {
6983         const char *filename;
6984         int (*fill)(void *cpu_env, int fd);
6985         int (*cmp)(const char *s1, const char *s2);
6986     };
6987     const struct fake_open *fake_open;
6988     static const struct fake_open fakes[] = {
6989         { "maps", open_self_maps, is_proc_myself },
6990         { "stat", open_self_stat, is_proc_myself },
6991         { "auxv", open_self_auxv, is_proc_myself },
6992         { "cmdline", open_self_cmdline, is_proc_myself },
6993 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6994         { "/proc/net/route", open_net_route, is_proc },
6995 #endif
6996 #if defined(TARGET_SPARC)
6997         { "/proc/cpuinfo", open_cpuinfo, is_proc },
6998 #endif
6999 #if defined(TARGET_M68K)
7000         { "/proc/hardware", open_hardware, is_proc },
7001 #endif
7002         { NULL, NULL, NULL }
7003     };
7004 
7005     if (is_proc_myself(pathname, "exe")) {
7006         int execfd = qemu_getauxval(AT_EXECFD);
7007         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7008     }
7009 
7010     for (fake_open = fakes; fake_open->filename; fake_open++) {
7011         if (fake_open->cmp(pathname, fake_open->filename)) {
7012             break;
7013         }
7014     }
7015 
7016     if (fake_open->filename) {
7017         const char *tmpdir;
7018         char filename[PATH_MAX];
7019         int fd, r;
7020 
7021         /* create a temporary file to hold the faked contents */
7022         tmpdir = getenv("TMPDIR");
7023         if (!tmpdir)
7024             tmpdir = "/tmp";
7025         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7026         fd = mkstemp(filename);
7027         if (fd < 0) {
7028             return fd;
7029         }
7030         unlink(filename);
7031 
7032         if ((r = fake_open->fill(cpu_env, fd))) {
7033             int e = errno;
7034             close(fd);
7035             errno = e;
7036             return r;
7037         }
7038         lseek(fd, 0, SEEK_SET);
7039 
7040         return fd;
7041     }
7042 
7043     return safe_openat(dirfd, path(pathname), flags, mode);
7044 }
7045 
7046 #define TIMER_MAGIC 0x0caf0000
7047 #define TIMER_MAGIC_MASK 0xffff0000
7048 
7049 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
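/* For example, a timer in slot 3 is handed to the guest as
 * (TIMER_MAGIC | 3) == 0x0caf0003; get_timer_id() checks the magic and
 * recovers index 3, while anything without the 0x0caf prefix (or with an
 * index beyond g_posix_timers[]) is rejected with -TARGET_EINVAL.
 */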
7050 static target_timer_t get_timer_id(abi_long arg)
7051 {
7052     target_timer_t timerid = arg;
7053 
7054     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7055         return -TARGET_EINVAL;
7056     }
7057 
7058     timerid &= 0xffff;
7059 
7060     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7061         return -TARGET_EINVAL;
7062     }
7063 
7064     return timerid;
7065 }
7066 
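/* The two helpers below repack a CPU affinity bitmap between the guest's
 * abi_ulong-sized words (in guest byte order) and the host's unsigned
 * long words.  As a sketch: for a 32-bit guest on a 64-bit host, guest
 * bit 0 of word 0 and bit 1 of word 1 describe CPUs 0 and 33, and they
 * land in bits 0 and 33 of a single host word (and conversely on the way
 * back).
 */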
7067 static int target_to_host_cpu_mask(unsigned long *host_mask,
7068                                    size_t host_size,
7069                                    abi_ulong target_addr,
7070                                    size_t target_size)
7071 {
7072     unsigned target_bits = sizeof(abi_ulong) * 8;
7073     unsigned host_bits = sizeof(*host_mask) * 8;
7074     abi_ulong *target_mask;
7075     unsigned i, j;
7076 
7077     assert(host_size >= target_size);
7078 
7079     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7080     if (!target_mask) {
7081         return -TARGET_EFAULT;
7082     }
7083     memset(host_mask, 0, host_size);
7084 
7085     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7086         unsigned bit = i * target_bits;
7087         abi_ulong val;
7088 
7089         __get_user(val, &target_mask[i]);
7090         for (j = 0; j < target_bits; j++, bit++) {
7091             if (val & (1UL << j)) {
7092                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7093             }
7094         }
7095     }
7096 
7097     unlock_user(target_mask, target_addr, 0);
7098     return 0;
7099 }
7100 
7101 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7102                                    size_t host_size,
7103                                    abi_ulong target_addr,
7104                                    size_t target_size)
7105 {
7106     unsigned target_bits = sizeof(abi_ulong) * 8;
7107     unsigned host_bits = sizeof(*host_mask) * 8;
7108     abi_ulong *target_mask;
7109     unsigned i, j;
7110 
7111     assert(host_size >= target_size);
7112 
7113     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7114     if (!target_mask) {
7115         return -TARGET_EFAULT;
7116     }
7117 
7118     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7119         unsigned bit = i * target_bits;
7120         abi_ulong val = 0;
7121 
7122         for (j = 0; j < target_bits; j++, bit++) {
7123             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7124                 val |= 1UL << j;
7125             }
7126         }
7127         __put_user(val, &target_mask[i]);
7128     }
7129 
7130     unlock_user(target_mask, target_addr, target_size);
7131     return 0;
7132 }
7133 
7134 /* This is an internal helper for do_syscall so that it is easier
7135  * to have a single return point where actions such as logging
7136  * of syscall results can be performed.
7137  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7138  */
7139 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7140                             abi_long arg2, abi_long arg3, abi_long arg4,
7141                             abi_long arg5, abi_long arg6, abi_long arg7,
7142                             abi_long arg8)
7143 {
7144     CPUState *cpu = env_cpu(cpu_env);
7145     abi_long ret;
7146 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7147     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7148     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7149     || defined(TARGET_NR_statx)
7150     struct stat st;
7151 #endif
7152 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7153     || defined(TARGET_NR_fstatfs)
7154     struct statfs stfs;
7155 #endif
7156     void *p;
7157 
7158     switch(num) {
7159     case TARGET_NR_exit:
7160         /* In old applications this may be used to implement _exit(2).
7161            However in threaded applications it is used for thread termination,
7162            and _exit_group is used for application termination.
7163            Do thread termination if we have more than one thread.  */
7164 
7165         if (block_signals()) {
7166             return -TARGET_ERESTARTSYS;
7167         }
7168 
7169         cpu_list_lock();
7170 
7171         if (CPU_NEXT(first_cpu)) {
7172             TaskState *ts;
7173 
7174             /* Remove the CPU from the list.  */
7175             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7176 
7177             cpu_list_unlock();
7178 
7179             ts = cpu->opaque;
7180             if (ts->child_tidptr) {
7181                 put_user_u32(0, ts->child_tidptr);
7182                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7183                           NULL, NULL, 0);
7184             }
7185             thread_cpu = NULL;
7186             object_unref(OBJECT(cpu));
7187             g_free(ts);
7188             rcu_unregister_thread();
7189             pthread_exit(NULL);
7190         }
7191 
7192         cpu_list_unlock();
7193         preexit_cleanup(cpu_env, arg1);
7194         _exit(arg1);
7195         return 0; /* avoid warning */
7196     case TARGET_NR_read:
7197         if (arg2 == 0 && arg3 == 0) {
7198             return get_errno(safe_read(arg1, 0, 0));
7199         } else {
7200             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7201                 return -TARGET_EFAULT;
7202             ret = get_errno(safe_read(arg1, p, arg3));
7203             if (ret >= 0 &&
7204                 fd_trans_host_to_target_data(arg1)) {
7205                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7206             }
7207             unlock_user(p, arg2, ret);
7208         }
7209         return ret;
7210     case TARGET_NR_write:
7211         if (arg2 == 0 && arg3 == 0) {
7212             return get_errno(safe_write(arg1, 0, 0));
7213         }
7214         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7215             return -TARGET_EFAULT;
7216         if (fd_trans_target_to_host_data(arg1)) {
7217             void *copy = g_malloc(arg3);
7218             memcpy(copy, p, arg3);
7219             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7220             if (ret >= 0) {
7221                 ret = get_errno(safe_write(arg1, copy, ret));
7222             }
7223             g_free(copy);
7224         } else {
7225             ret = get_errno(safe_write(arg1, p, arg3));
7226         }
7227         unlock_user(p, arg2, 0);
7228         return ret;
7229 
7230 #ifdef TARGET_NR_open
7231     case TARGET_NR_open:
7232         if (!(p = lock_user_string(arg1)))
7233             return -TARGET_EFAULT;
7234         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7235                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7236                                   arg3));
7237         fd_trans_unregister(ret);
7238         unlock_user(p, arg1, 0);
7239         return ret;
7240 #endif
7241     case TARGET_NR_openat:
7242         if (!(p = lock_user_string(arg2)))
7243             return -TARGET_EFAULT;
7244         ret = get_errno(do_openat(cpu_env, arg1, p,
7245                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7246                                   arg4));
7247         fd_trans_unregister(ret);
7248         unlock_user(p, arg2, 0);
7249         return ret;
7250 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7251     case TARGET_NR_name_to_handle_at:
7252         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7253         return ret;
7254 #endif
7255 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7256     case TARGET_NR_open_by_handle_at:
7257         ret = do_open_by_handle_at(arg1, arg2, arg3);
7258         fd_trans_unregister(ret);
7259         return ret;
7260 #endif
7261     case TARGET_NR_close:
7262         fd_trans_unregister(arg1);
7263         return get_errno(close(arg1));
7264 
7265     case TARGET_NR_brk:
7266         return do_brk(arg1);
7267 #ifdef TARGET_NR_fork
7268     case TARGET_NR_fork:
7269         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7270 #endif
7271 #ifdef TARGET_NR_waitpid
7272     case TARGET_NR_waitpid:
7273         {
7274             int status;
7275             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7276             if (!is_error(ret) && arg2 && ret
7277                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7278                 return -TARGET_EFAULT;
7279         }
7280         return ret;
7281 #endif
7282 #ifdef TARGET_NR_waitid
7283     case TARGET_NR_waitid:
7284         {
7285             siginfo_t info;
7286             info.si_pid = 0;
7287             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7288             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7289                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7290                     return -TARGET_EFAULT;
7291                 host_to_target_siginfo(p, &info);
7292                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7293             }
7294         }
7295         return ret;
7296 #endif
7297 #ifdef TARGET_NR_creat /* not on alpha */
7298     case TARGET_NR_creat:
7299         if (!(p = lock_user_string(arg1)))
7300             return -TARGET_EFAULT;
7301         ret = get_errno(creat(p, arg2));
7302         fd_trans_unregister(ret);
7303         unlock_user(p, arg1, 0);
7304         return ret;
7305 #endif
7306 #ifdef TARGET_NR_link
7307     case TARGET_NR_link:
7308         {
7309             void * p2;
7310             p = lock_user_string(arg1);
7311             p2 = lock_user_string(arg2);
7312             if (!p || !p2)
7313                 ret = -TARGET_EFAULT;
7314             else
7315                 ret = get_errno(link(p, p2));
7316             unlock_user(p2, arg2, 0);
7317             unlock_user(p, arg1, 0);
7318         }
7319         return ret;
7320 #endif
7321 #if defined(TARGET_NR_linkat)
7322     case TARGET_NR_linkat:
7323         {
7324             void * p2 = NULL;
7325             if (!arg2 || !arg4)
7326                 return -TARGET_EFAULT;
7327             p  = lock_user_string(arg2);
7328             p2 = lock_user_string(arg4);
7329             if (!p || !p2)
7330                 ret = -TARGET_EFAULT;
7331             else
7332                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7333             unlock_user(p, arg2, 0);
7334             unlock_user(p2, arg4, 0);
7335         }
7336         return ret;
7337 #endif
7338 #ifdef TARGET_NR_unlink
7339     case TARGET_NR_unlink:
7340         if (!(p = lock_user_string(arg1)))
7341             return -TARGET_EFAULT;
7342         ret = get_errno(unlink(p));
7343         unlock_user(p, arg1, 0);
7344         return ret;
7345 #endif
7346 #if defined(TARGET_NR_unlinkat)
7347     case TARGET_NR_unlinkat:
7348         if (!(p = lock_user_string(arg2)))
7349             return -TARGET_EFAULT;
7350         ret = get_errno(unlinkat(arg1, p, arg3));
7351         unlock_user(p, arg2, 0);
7352         return ret;
7353 #endif
7354     case TARGET_NR_execve:
7355         {
7356             char **argp, **envp;
7357             int argc, envc;
7358             abi_ulong gp;
7359             abi_ulong guest_argp;
7360             abi_ulong guest_envp;
7361             abi_ulong addr;
7362             char **q;
7363             int total_size = 0;
7364 
7365             argc = 0;
7366             guest_argp = arg2;
7367             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7368                 if (get_user_ual(addr, gp))
7369                     return -TARGET_EFAULT;
7370                 if (!addr)
7371                     break;
7372                 argc++;
7373             }
7374             envc = 0;
7375             guest_envp = arg3;
7376             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7377                 if (get_user_ual(addr, gp))
7378                     return -TARGET_EFAULT;
7379                 if (!addr)
7380                     break;
7381                 envc++;
7382             }
7383 
7384             argp = g_new0(char *, argc + 1);
7385             envp = g_new0(char *, envc + 1);
7386 
7387             for (gp = guest_argp, q = argp; gp;
7388                   gp += sizeof(abi_ulong), q++) {
7389                 if (get_user_ual(addr, gp))
7390                     goto execve_efault;
7391                 if (!addr)
7392                     break;
7393                 if (!(*q = lock_user_string(addr)))
7394                     goto execve_efault;
7395                 total_size += strlen(*q) + 1;
7396             }
7397             *q = NULL;
7398 
7399             for (gp = guest_envp, q = envp; gp;
7400                   gp += sizeof(abi_ulong), q++) {
7401                 if (get_user_ual(addr, gp))
7402                     goto execve_efault;
7403                 if (!addr)
7404                     break;
7405                 if (!(*q = lock_user_string(addr)))
7406                     goto execve_efault;
7407                 total_size += strlen(*q) + 1;
7408             }
7409             *q = NULL;
7410 
7411             if (!(p = lock_user_string(arg1)))
7412                 goto execve_efault;
7413             /* Although execve() is not an interruptible syscall it is
7414              * a special case where we must use the safe_syscall wrapper:
7415              * if we allow a signal to happen before we make the host
7416              * syscall then we will 'lose' it, because at the point of
7417              * execve the process leaves QEMU's control. So we use the
7418              * safe syscall wrapper to ensure that we either take the
7419              * signal as a guest signal, or else it does not happen
7420              * before the execve completes and makes it the other
7421              * program's problem.
7422              */
7423             ret = get_errno(safe_execve(p, argp, envp));
7424             unlock_user(p, arg1, 0);
7425 
7426             goto execve_end;
7427 
7428         execve_efault:
7429             ret = -TARGET_EFAULT;
7430 
7431         execve_end:
7432             for (gp = guest_argp, q = argp; *q;
7433                   gp += sizeof(abi_ulong), q++) {
7434                 if (get_user_ual(addr, gp)
7435                     || !addr)
7436                     break;
7437                 unlock_user(*q, addr, 0);
7438             }
7439             for (gp = guest_envp, q = envp; *q;
7440                   gp += sizeof(abi_ulong), q++) {
7441                 if (get_user_ual(addr, gp)
7442                     || !addr)
7443                     break;
7444                 unlock_user(*q, addr, 0);
7445             }
7446 
7447             g_free(argp);
7448             g_free(envp);
7449         }
7450         return ret;
7451     case TARGET_NR_chdir:
7452         if (!(p = lock_user_string(arg1)))
7453             return -TARGET_EFAULT;
7454         ret = get_errno(chdir(p));
7455         unlock_user(p, arg1, 0);
7456         return ret;
7457 #ifdef TARGET_NR_time
7458     case TARGET_NR_time:
7459         {
7460             time_t host_time;
7461             ret = get_errno(time(&host_time));
7462             if (!is_error(ret)
7463                 && arg1
7464                 && put_user_sal(host_time, arg1))
7465                 return -TARGET_EFAULT;
7466         }
7467         return ret;
7468 #endif
7469 #ifdef TARGET_NR_mknod
7470     case TARGET_NR_mknod:
7471         if (!(p = lock_user_string(arg1)))
7472             return -TARGET_EFAULT;
7473         ret = get_errno(mknod(p, arg2, arg3));
7474         unlock_user(p, arg1, 0);
7475         return ret;
7476 #endif
7477 #if defined(TARGET_NR_mknodat)
7478     case TARGET_NR_mknodat:
7479         if (!(p = lock_user_string(arg2)))
7480             return -TARGET_EFAULT;
7481         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7482         unlock_user(p, arg2, 0);
7483         return ret;
7484 #endif
7485 #ifdef TARGET_NR_chmod
7486     case TARGET_NR_chmod:
7487         if (!(p = lock_user_string(arg1)))
7488             return -TARGET_EFAULT;
7489         ret = get_errno(chmod(p, arg2));
7490         unlock_user(p, arg1, 0);
7491         return ret;
7492 #endif
7493 #ifdef TARGET_NR_lseek
7494     case TARGET_NR_lseek:
7495         return get_errno(lseek(arg1, arg2, arg3));
7496 #endif
7497 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7498     /* Alpha specific */
7499     case TARGET_NR_getxpid:
7500         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7501         return get_errno(getpid());
7502 #endif
7503 #ifdef TARGET_NR_getpid
7504     case TARGET_NR_getpid:
7505         return get_errno(getpid());
7506 #endif
7507     case TARGET_NR_mount:
7508         {
7509             /* need to look at the data field */
7510             void *p2, *p3;
7511 
7512             if (arg1) {
7513                 p = lock_user_string(arg1);
7514                 if (!p) {
7515                     return -TARGET_EFAULT;
7516                 }
7517             } else {
7518                 p = NULL;
7519             }
7520 
7521             p2 = lock_user_string(arg2);
7522             if (!p2) {
7523                 if (arg1) {
7524                     unlock_user(p, arg1, 0);
7525                 }
7526                 return -TARGET_EFAULT;
7527             }
7528 
7529             if (arg3) {
7530                 p3 = lock_user_string(arg3);
7531                 if (!p3) {
7532                     if (arg1) {
7533                         unlock_user(p, arg1, 0);
7534                     }
7535                     unlock_user(p2, arg2, 0);
7536                     return -TARGET_EFAULT;
7537                 }
7538             } else {
7539                 p3 = NULL;
7540             }
7541 
7542             /* FIXME - arg5 should be locked, but it isn't clear how to
7543              * do that since it's not guaranteed to be a NULL-terminated
7544              * string.
7545              */
7546             if (!arg5) {
7547                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7548             } else {
7549                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7550             }
7551             ret = get_errno(ret);
7552 
7553             if (arg1) {
7554                 unlock_user(p, arg1, 0);
7555             }
7556             unlock_user(p2, arg2, 0);
7557             if (arg3) {
7558                 unlock_user(p3, arg3, 0);
7559             }
7560         }
7561         return ret;
7562 #ifdef TARGET_NR_umount
7563     case TARGET_NR_umount:
7564         if (!(p = lock_user_string(arg1)))
7565             return -TARGET_EFAULT;
7566         ret = get_errno(umount(p));
7567         unlock_user(p, arg1, 0);
7568         return ret;
7569 #endif
7570 #ifdef TARGET_NR_stime /* not on alpha */
7571     case TARGET_NR_stime:
7572         {
7573             time_t host_time;
7574             if (get_user_sal(host_time, arg1))
7575                 return -TARGET_EFAULT;
7576             return get_errno(stime(&host_time));
7577         }
7578 #endif
7579 #ifdef TARGET_NR_alarm /* not on alpha */
7580     case TARGET_NR_alarm:
7581         return alarm(arg1);
7582 #endif
7583 #ifdef TARGET_NR_pause /* not on alpha */
7584     case TARGET_NR_pause:
7585         if (!block_signals()) {
7586             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7587         }
7588         return -TARGET_EINTR;
7589 #endif
7590 #ifdef TARGET_NR_utime
7591     case TARGET_NR_utime:
7592         {
7593             struct utimbuf tbuf, *host_tbuf;
7594             struct target_utimbuf *target_tbuf;
7595             if (arg2) {
7596                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7597                     return -TARGET_EFAULT;
7598                 tbuf.actime = tswapal(target_tbuf->actime);
7599                 tbuf.modtime = tswapal(target_tbuf->modtime);
7600                 unlock_user_struct(target_tbuf, arg2, 0);
7601                 host_tbuf = &tbuf;
7602             } else {
7603                 host_tbuf = NULL;
7604             }
7605             if (!(p = lock_user_string(arg1)))
7606                 return -TARGET_EFAULT;
7607             ret = get_errno(utime(p, host_tbuf));
7608             unlock_user(p, arg1, 0);
7609         }
7610         return ret;
7611 #endif
7612 #ifdef TARGET_NR_utimes
7613     case TARGET_NR_utimes:
7614         {
7615             struct timeval *tvp, tv[2];
7616             if (arg2) {
7617                 if (copy_from_user_timeval(&tv[0], arg2)
7618                     || copy_from_user_timeval(&tv[1],
7619                                               arg2 + sizeof(struct target_timeval)))
7620                     return -TARGET_EFAULT;
7621                 tvp = tv;
7622             } else {
7623                 tvp = NULL;
7624             }
7625             if (!(p = lock_user_string(arg1)))
7626                 return -TARGET_EFAULT;
7627             ret = get_errno(utimes(p, tvp));
7628             unlock_user(p, arg1, 0);
7629         }
7630         return ret;
7631 #endif
7632 #if defined(TARGET_NR_futimesat)
7633     case TARGET_NR_futimesat:
7634         {
7635             struct timeval *tvp, tv[2];
7636             if (arg3) {
7637                 if (copy_from_user_timeval(&tv[0], arg3)
7638                     || copy_from_user_timeval(&tv[1],
7639                                               arg3 + sizeof(struct target_timeval)))
7640                     return -TARGET_EFAULT;
7641                 tvp = tv;
7642             } else {
7643                 tvp = NULL;
7644             }
7645             if (!(p = lock_user_string(arg2))) {
7646                 return -TARGET_EFAULT;
7647             }
7648             ret = get_errno(futimesat(arg1, path(p), tvp));
7649             unlock_user(p, arg2, 0);
7650         }
7651         return ret;
7652 #endif
7653 #ifdef TARGET_NR_access
7654     case TARGET_NR_access:
7655         if (!(p = lock_user_string(arg1))) {
7656             return -TARGET_EFAULT;
7657         }
7658         ret = get_errno(access(path(p), arg2));
7659         unlock_user(p, arg1, 0);
7660         return ret;
7661 #endif
7662 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7663     case TARGET_NR_faccessat:
7664         if (!(p = lock_user_string(arg2))) {
7665             return -TARGET_EFAULT;
7666         }
7667         ret = get_errno(faccessat(arg1, p, arg3, 0));
7668         unlock_user(p, arg2, 0);
7669         return ret;
7670 #endif
7671 #ifdef TARGET_NR_nice /* not on alpha */
7672     case TARGET_NR_nice:
7673         return get_errno(nice(arg1));
7674 #endif
7675     case TARGET_NR_sync:
7676         sync();
7677         return 0;
7678 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7679     case TARGET_NR_syncfs:
7680         return get_errno(syncfs(arg1));
7681 #endif
7682     case TARGET_NR_kill:
7683         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7684 #ifdef TARGET_NR_rename
7685     case TARGET_NR_rename:
7686         {
7687             void *p2;
7688             p = lock_user_string(arg1);
7689             p2 = lock_user_string(arg2);
7690             if (!p || !p2)
7691                 ret = -TARGET_EFAULT;
7692             else
7693                 ret = get_errno(rename(p, p2));
7694             unlock_user(p2, arg2, 0);
7695             unlock_user(p, arg1, 0);
7696         }
7697         return ret;
7698 #endif
7699 #if defined(TARGET_NR_renameat)
7700     case TARGET_NR_renameat:
7701         {
7702             void *p2;
7703             p  = lock_user_string(arg2);
7704             p2 = lock_user_string(arg4);
7705             if (!p || !p2)
7706                 ret = -TARGET_EFAULT;
7707             else
7708                 ret = get_errno(renameat(arg1, p, arg3, p2));
7709             unlock_user(p2, arg4, 0);
7710             unlock_user(p, arg2, 0);
7711         }
7712         return ret;
7713 #endif
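    /*
     * renameat2 goes through the sys_renameat2() wrapper rather than a
     * direct libc call, since the host libc may not provide renameat2().
     */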
7714 #if defined(TARGET_NR_renameat2)
7715     case TARGET_NR_renameat2:
7716         {
7717             void *p2;
7718             p  = lock_user_string(arg2);
7719             p2 = lock_user_string(arg4);
7720             if (!p || !p2) {
7721                 ret = -TARGET_EFAULT;
7722             } else {
7723                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7724             }
7725             unlock_user(p2, arg4, 0);
7726             unlock_user(p, arg2, 0);
7727         }
7728         return ret;
7729 #endif
7730 #ifdef TARGET_NR_mkdir
7731     case TARGET_NR_mkdir:
7732         if (!(p = lock_user_string(arg1)))
7733             return -TARGET_EFAULT;
7734         ret = get_errno(mkdir(p, arg2));
7735         unlock_user(p, arg1, 0);
7736         return ret;
7737 #endif
7738 #if defined(TARGET_NR_mkdirat)
7739     case TARGET_NR_mkdirat:
7740         if (!(p = lock_user_string(arg2)))
7741             return -TARGET_EFAULT;
7742         ret = get_errno(mkdirat(arg1, p, arg3));
7743         unlock_user(p, arg2, 0);
7744         return ret;
7745 #endif
7746 #ifdef TARGET_NR_rmdir
7747     case TARGET_NR_rmdir:
7748         if (!(p = lock_user_string(arg1)))
7749             return -TARGET_EFAULT;
7750         ret = get_errno(rmdir(p));
7751         unlock_user(p, arg1, 0);
7752         return ret;
7753 #endif
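    /*
     * For the dup family below, fd_trans_dup() copies any fd translator
     * state (see fd-trans.c) from the old descriptor to the new one, so
     * that translated fds (for example netlink sockets) keep being
     * translated after duplication.
     */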
7754     case TARGET_NR_dup:
7755         ret = get_errno(dup(arg1));
7756         if (ret >= 0) {
7757             fd_trans_dup(arg1, ret);
7758         }
7759         return ret;
7760 #ifdef TARGET_NR_pipe
7761     case TARGET_NR_pipe:
7762         return do_pipe(cpu_env, arg1, 0, 0);
7763 #endif
7764 #ifdef TARGET_NR_pipe2
7765     case TARGET_NR_pipe2:
7766         return do_pipe(cpu_env, arg1,
7767                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7768 #endif
7769     case TARGET_NR_times:
7770         {
7771             struct target_tms *tmsp;
7772             struct tms tms;
7773             ret = get_errno(times(&tms));
7774             if (arg1) {
7775                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7776                 if (!tmsp)
7777                     return -TARGET_EFAULT;
7778                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7779                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7780                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7781                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7782             }
7783             if (!is_error(ret))
7784                 ret = host_to_target_clock_t(ret);
7785         }
7786         return ret;
7787     case TARGET_NR_acct:
7788         if (arg1 == 0) {
7789             ret = get_errno(acct(NULL));
7790         } else {
7791             if (!(p = lock_user_string(arg1))) {
7792                 return -TARGET_EFAULT;
7793             }
7794             ret = get_errno(acct(path(p)));
7795             unlock_user(p, arg1, 0);
7796         }
7797         return ret;
7798 #ifdef TARGET_NR_umount2
7799     case TARGET_NR_umount2:
7800         if (!(p = lock_user_string(arg1)))
7801             return -TARGET_EFAULT;
7802         ret = get_errno(umount2(p, arg2));
7803         unlock_user(p, arg1, 0);
7804         return ret;
7805 #endif
7806     case TARGET_NR_ioctl:
7807         return do_ioctl(arg1, arg2, arg3);
7808 #ifdef TARGET_NR_fcntl
7809     case TARGET_NR_fcntl:
7810         return do_fcntl(arg1, arg2, arg3);
7811 #endif
7812     case TARGET_NR_setpgid:
7813         return get_errno(setpgid(arg1, arg2));
7814     case TARGET_NR_umask:
7815         return get_errno(umask(arg1));
7816     case TARGET_NR_chroot:
7817         if (!(p = lock_user_string(arg1)))
7818             return -TARGET_EFAULT;
7819         ret = get_errno(chroot(p));
7820         unlock_user(p, arg1, 0);
7821         return ret;
7822 #ifdef TARGET_NR_dup2
7823     case TARGET_NR_dup2:
7824         ret = get_errno(dup2(arg1, arg2));
7825         if (ret >= 0) {
7826             fd_trans_dup(arg1, arg2);
7827         }
7828         return ret;
7829 #endif
7830 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7831     case TARGET_NR_dup3:
7832     {
7833         int host_flags;
7834 
7835         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
7837         }
7838         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7839         ret = get_errno(dup3(arg1, arg2, host_flags));
7840         if (ret >= 0) {
7841             fd_trans_dup(arg1, arg2);
7842         }
7843         return ret;
7844     }
7845 #endif
7846 #ifdef TARGET_NR_getppid /* not on alpha */
7847     case TARGET_NR_getppid:
7848         return get_errno(getppid());
7849 #endif
7850 #ifdef TARGET_NR_getpgrp
7851     case TARGET_NR_getpgrp:
7852         return get_errno(getpgrp());
7853 #endif
7854     case TARGET_NR_setsid:
7855         return get_errno(setsid());
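    /*
     * The old sigaction syscall needs per-target handling because the
     * guest's struct old_sigaction differs: Alpha has no sa_restorer
     * field, and MIPS carries a full sigset array rather than a single
     * word.
     */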
7856 #ifdef TARGET_NR_sigaction
7857     case TARGET_NR_sigaction:
7858         {
7859 #if defined(TARGET_ALPHA)
7860             struct target_sigaction act, oact, *pact = 0;
7861             struct target_old_sigaction *old_act;
7862             if (arg2) {
7863                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7864                     return -TARGET_EFAULT;
7865                 act._sa_handler = old_act->_sa_handler;
7866                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7867                 act.sa_flags = old_act->sa_flags;
7868                 act.sa_restorer = 0;
7869                 unlock_user_struct(old_act, arg2, 0);
7870                 pact = &act;
7871             }
7872             ret = get_errno(do_sigaction(arg1, pact, &oact));
7873             if (!is_error(ret) && arg3) {
7874                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7875                     return -TARGET_EFAULT;
7876                 old_act->_sa_handler = oact._sa_handler;
7877                 old_act->sa_mask = oact.sa_mask.sig[0];
7878                 old_act->sa_flags = oact.sa_flags;
7879                 unlock_user_struct(old_act, arg3, 1);
7880             }
7881 #elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
7909 #else
7910             struct target_old_sigaction *old_act;
7911             struct target_sigaction act, oact, *pact;
7912             if (arg2) {
7913                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7914                     return -TARGET_EFAULT;
7915                 act._sa_handler = old_act->_sa_handler;
7916                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7917                 act.sa_flags = old_act->sa_flags;
7918                 act.sa_restorer = old_act->sa_restorer;
7919 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7920                 act.ka_restorer = 0;
7921 #endif
7922                 unlock_user_struct(old_act, arg2, 0);
7923                 pact = &act;
7924             } else {
7925                 pact = NULL;
7926             }
7927             ret = get_errno(do_sigaction(arg1, pact, &oact));
7928             if (!is_error(ret) && arg3) {
7929                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7930                     return -TARGET_EFAULT;
7931                 old_act->_sa_handler = oact._sa_handler;
7932                 old_act->sa_mask = oact.sa_mask.sig[0];
7933                 old_act->sa_flags = oact.sa_flags;
7934                 old_act->sa_restorer = oact.sa_restorer;
7935                 unlock_user_struct(old_act, arg3, 1);
7936             }
7937 #endif
7938         }
7939         return ret;
7940 #endif
7941     case TARGET_NR_rt_sigaction:
7942         {
7943 #if defined(TARGET_ALPHA)
7944             /* For Alpha and SPARC this is a 5 argument syscall, with
7945              * a 'restorer' parameter which must be copied into the
7946              * sa_restorer field of the sigaction struct.
7947              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7948              * and arg5 is the sigsetsize.
7949              * Alpha also has a separate rt_sigaction struct that it uses
7950              * here; SPARC uses the usual sigaction struct.
7951              */
7952             struct target_rt_sigaction *rt_act;
7953             struct target_sigaction act, oact, *pact = 0;
7954 
7955             if (arg4 != sizeof(target_sigset_t)) {
7956                 return -TARGET_EINVAL;
7957             }
7958             if (arg2) {
7959                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7960                     return -TARGET_EFAULT;
7961                 act._sa_handler = rt_act->_sa_handler;
7962                 act.sa_mask = rt_act->sa_mask;
7963                 act.sa_flags = rt_act->sa_flags;
7964                 act.sa_restorer = arg5;
7965                 unlock_user_struct(rt_act, arg2, 0);
7966                 pact = &act;
7967             }
7968             ret = get_errno(do_sigaction(arg1, pact, &oact));
7969             if (!is_error(ret) && arg3) {
7970                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7971                     return -TARGET_EFAULT;
7972                 rt_act->_sa_handler = oact._sa_handler;
7973                 rt_act->sa_mask = oact.sa_mask;
7974                 rt_act->sa_flags = oact.sa_flags;
7975                 unlock_user_struct(rt_act, arg3, 1);
7976             }
7977 #else
7978 #ifdef TARGET_SPARC
7979             target_ulong restorer = arg4;
7980             target_ulong sigsetsize = arg5;
7981 #else
7982             target_ulong sigsetsize = arg4;
7983 #endif
7984             struct target_sigaction *act;
7985             struct target_sigaction *oact;
7986 
7987             if (sigsetsize != sizeof(target_sigset_t)) {
7988                 return -TARGET_EINVAL;
7989             }
7990             if (arg2) {
7991                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7992                     return -TARGET_EFAULT;
7993                 }
7994 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7995                 act->ka_restorer = restorer;
7996 #endif
7997             } else {
7998                 act = NULL;
7999             }
8000             if (arg3) {
8001                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8002                     ret = -TARGET_EFAULT;
8003                     goto rt_sigaction_fail;
8004                 }
8005             } else
8006                 oact = NULL;
8007             ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
8009             if (act)
8010                 unlock_user_struct(act, arg2, 0);
8011             if (oact)
8012                 unlock_user_struct(oact, arg3, 1);
8013 #endif
8014         }
8015         return ret;
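    /*
     * sgetmask/ssetmask are the ancient single-word signal mask calls;
     * they go through the "old" sigset conversion helpers, which only
     * deal with the first word of the mask.
     */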
8016 #ifdef TARGET_NR_sgetmask /* not on alpha */
8017     case TARGET_NR_sgetmask:
8018         {
8019             sigset_t cur_set;
8020             abi_ulong target_set;
8021             ret = do_sigprocmask(0, NULL, &cur_set);
8022             if (!ret) {
8023                 host_to_target_old_sigset(&target_set, &cur_set);
8024                 ret = target_set;
8025             }
8026         }
8027         return ret;
8028 #endif
8029 #ifdef TARGET_NR_ssetmask /* not on alpha */
8030     case TARGET_NR_ssetmask:
8031         {
8032             sigset_t set, oset;
8033             abi_ulong target_set = arg1;
8034             target_to_host_old_sigset(&set, &target_set);
8035             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8036             if (!ret) {
8037                 host_to_target_old_sigset(&target_set, &oset);
8038                 ret = target_set;
8039             }
8040         }
8041         return ret;
8042 #endif
8043 #ifdef TARGET_NR_sigprocmask
8044     case TARGET_NR_sigprocmask:
8045         {
8046 #if defined(TARGET_ALPHA)
8047             sigset_t set, oldset;
8048             abi_ulong mask;
8049             int how;
8050 
8051             switch (arg1) {
8052             case TARGET_SIG_BLOCK:
8053                 how = SIG_BLOCK;
8054                 break;
8055             case TARGET_SIG_UNBLOCK:
8056                 how = SIG_UNBLOCK;
8057                 break;
8058             case TARGET_SIG_SETMASK:
8059                 how = SIG_SETMASK;
8060                 break;
8061             default:
8062                 return -TARGET_EINVAL;
8063             }
8064             mask = arg2;
8065             target_to_host_old_sigset(&set, &mask);
8066 
8067             ret = do_sigprocmask(how, &set, &oldset);
8068             if (!is_error(ret)) {
8069                 host_to_target_old_sigset(&mask, &oldset);
8070                 ret = mask;
8071                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8072             }
8073 #else
8074             sigset_t set, oldset, *set_ptr;
8075             int how;
8076 
8077             if (arg2) {
8078                 switch (arg1) {
8079                 case TARGET_SIG_BLOCK:
8080                     how = SIG_BLOCK;
8081                     break;
8082                 case TARGET_SIG_UNBLOCK:
8083                     how = SIG_UNBLOCK;
8084                     break;
8085                 case TARGET_SIG_SETMASK:
8086                     how = SIG_SETMASK;
8087                     break;
8088                 default:
8089                     return -TARGET_EINVAL;
8090                 }
8091                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8092                     return -TARGET_EFAULT;
8093                 target_to_host_old_sigset(&set, p);
8094                 unlock_user(p, arg2, 0);
8095                 set_ptr = &set;
8096             } else {
8097                 how = 0;
8098                 set_ptr = NULL;
8099             }
8100             ret = do_sigprocmask(how, set_ptr, &oldset);
8101             if (!is_error(ret) && arg3) {
8102                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8103                     return -TARGET_EFAULT;
8104                 host_to_target_old_sigset(p, &oldset);
8105                 unlock_user(p, arg3, sizeof(target_sigset_t));
8106             }
8107 #endif
8108         }
8109         return ret;
8110 #endif
8111     case TARGET_NR_rt_sigprocmask:
8112         {
8113             int how = arg1;
8114             sigset_t set, oldset, *set_ptr;
8115 
8116             if (arg4 != sizeof(target_sigset_t)) {
8117                 return -TARGET_EINVAL;
8118             }
8119 
8120             if (arg2) {
8121                 switch(how) {
8122                 case TARGET_SIG_BLOCK:
8123                     how = SIG_BLOCK;
8124                     break;
8125                 case TARGET_SIG_UNBLOCK:
8126                     how = SIG_UNBLOCK;
8127                     break;
8128                 case TARGET_SIG_SETMASK:
8129                     how = SIG_SETMASK;
8130                     break;
8131                 default:
8132                     return -TARGET_EINVAL;
8133                 }
8134                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8135                     return -TARGET_EFAULT;
8136                 target_to_host_sigset(&set, p);
8137                 unlock_user(p, arg2, 0);
8138                 set_ptr = &set;
8139             } else {
8140                 how = 0;
8141                 set_ptr = NULL;
8142             }
8143             ret = do_sigprocmask(how, set_ptr, &oldset);
8144             if (!is_error(ret) && arg3) {
8145                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8146                     return -TARGET_EFAULT;
8147                 host_to_target_sigset(p, &oldset);
8148                 unlock_user(p, arg3, sizeof(target_sigset_t));
8149             }
8150         }
8151         return ret;
8152 #ifdef TARGET_NR_sigpending
8153     case TARGET_NR_sigpending:
8154         {
8155             sigset_t set;
8156             ret = get_errno(sigpending(&set));
8157             if (!is_error(ret)) {
8158                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8159                     return -TARGET_EFAULT;
8160                 host_to_target_old_sigset(p, &set);
8161                 unlock_user(p, arg1, sizeof(target_sigset_t));
8162             }
8163         }
8164         return ret;
8165 #endif
8166     case TARGET_NR_rt_sigpending:
8167         {
8168             sigset_t set;
8169 
            /* Yes, this check is >, not != like most. We follow the
             * kernel's logic here: it implements NR_sigpending through
             * the same code path, and in that case the old_sigset_t is
             * smaller in size.
             */
8175             if (arg2 > sizeof(target_sigset_t)) {
8176                 return -TARGET_EINVAL;
8177             }
8178 
8179             ret = get_errno(sigpending(&set));
8180             if (!is_error(ret)) {
8181                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8182                     return -TARGET_EFAULT;
8183                 host_to_target_sigset(p, &set);
8184                 unlock_user(p, arg1, sizeof(target_sigset_t));
8185             }
8186         }
8187         return ret;
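    /*
     * For sigsuspend/rt_sigsuspend the requested mask is stashed in the
     * TaskState and in_sigsuspend is set, so that the signal delivery
     * code can restore the original mask once a signal has been taken.
     */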
8188 #ifdef TARGET_NR_sigsuspend
8189     case TARGET_NR_sigsuspend:
8190         {
8191             TaskState *ts = cpu->opaque;
8192 #if defined(TARGET_ALPHA)
8193             abi_ulong mask = arg1;
8194             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8195 #else
8196             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8197                 return -TARGET_EFAULT;
8198             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8199             unlock_user(p, arg1, 0);
8200 #endif
8201             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8202                                                SIGSET_T_SIZE));
8203             if (ret != -TARGET_ERESTARTSYS) {
8204                 ts->in_sigsuspend = 1;
8205             }
8206         }
8207         return ret;
8208 #endif
8209     case TARGET_NR_rt_sigsuspend:
8210         {
8211             TaskState *ts = cpu->opaque;
8212 
8213             if (arg2 != sizeof(target_sigset_t)) {
8214                 return -TARGET_EINVAL;
8215             }
8216             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8217                 return -TARGET_EFAULT;
8218             target_to_host_sigset(&ts->sigsuspend_mask, p);
8219             unlock_user(p, arg1, 0);
8220             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8221                                                SIGSET_T_SIZE));
8222             if (ret != -TARGET_ERESTARTSYS) {
8223                 ts->in_sigsuspend = 1;
8224             }
8225         }
8226         return ret;
8227     case TARGET_NR_rt_sigtimedwait:
8228         {
8229             sigset_t set;
8230             struct timespec uts, *puts;
8231             siginfo_t uinfo;
8232 
8233             if (arg4 != sizeof(target_sigset_t)) {
8234                 return -TARGET_EINVAL;
8235             }
8236 
8237             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8238                 return -TARGET_EFAULT;
8239             target_to_host_sigset(&set, p);
8240             unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
8247             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8248                                                  SIGSET_T_SIZE));
8249             if (!is_error(ret)) {
8250                 if (arg2) {
8251                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8252                                   0);
8253                     if (!p) {
8254                         return -TARGET_EFAULT;
8255                     }
8256                     host_to_target_siginfo(p, &uinfo);
8257                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8258                 }
8259                 ret = host_to_target_signal(ret);
8260             }
8261         }
8262         return ret;
8263     case TARGET_NR_rt_sigqueueinfo:
8264         {
8265             siginfo_t uinfo;
8266 
8267             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8268             if (!p) {
8269                 return -TARGET_EFAULT;
8270             }
8271             target_to_host_siginfo(&uinfo, p);
8272             unlock_user(p, arg3, 0);
8273             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8274         }
8275         return ret;
8276     case TARGET_NR_rt_tgsigqueueinfo:
8277         {
8278             siginfo_t uinfo;
8279 
8280             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8281             if (!p) {
8282                 return -TARGET_EFAULT;
8283             }
8284             target_to_host_siginfo(&uinfo, p);
8285             unlock_user(p, arg4, 0);
8286             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8287         }
8288         return ret;
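    /*
     * sigreturn/rt_sigreturn restore CPU state from the signal frame.
     * block_signals() keeps further guest signals from being taken while
     * that happens; if one was already pending we return ERESTARTSYS so
     * the syscall is retried after it has been delivered.
     */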
8289 #ifdef TARGET_NR_sigreturn
8290     case TARGET_NR_sigreturn:
8291         if (block_signals()) {
8292             return -TARGET_ERESTARTSYS;
8293         }
8294         return do_sigreturn(cpu_env);
8295 #endif
8296     case TARGET_NR_rt_sigreturn:
8297         if (block_signals()) {
8298             return -TARGET_ERESTARTSYS;
8299         }
8300         return do_rt_sigreturn(cpu_env);
8301     case TARGET_NR_sethostname:
8302         if (!(p = lock_user_string(arg1)))
8303             return -TARGET_EFAULT;
8304         ret = get_errno(sethostname(p, arg2));
8305         unlock_user(p, arg1, 0);
8306         return ret;
8307 #ifdef TARGET_NR_setrlimit
8308     case TARGET_NR_setrlimit:
8309         {
8310             int resource = target_to_host_resource(arg1);
8311             struct target_rlimit *target_rlim;
8312             struct rlimit rlim;
8313             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8314                 return -TARGET_EFAULT;
8315             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8316             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8317             unlock_user_struct(target_rlim, arg2, 0);
8318             /*
8319              * If we just passed through resource limit settings for memory then
8320              * they would also apply to QEMU's own allocations, and QEMU will
8321              * crash or hang or die if its allocations fail. Ideally we would
8322              * track the guest allocations in QEMU and apply the limits ourselves.
8323              * For now, just tell the guest the call succeeded but don't actually
8324              * limit anything.
8325              */
8326             if (resource != RLIMIT_AS &&
8327                 resource != RLIMIT_DATA &&
8328                 resource != RLIMIT_STACK) {
8329                 return get_errno(setrlimit(resource, &rlim));
8330             } else {
8331                 return 0;
8332             }
8333         }
8334 #endif
8335 #ifdef TARGET_NR_getrlimit
8336     case TARGET_NR_getrlimit:
8337         {
8338             int resource = target_to_host_resource(arg1);
8339             struct target_rlimit *target_rlim;
8340             struct rlimit rlim;
8341 
8342             ret = get_errno(getrlimit(resource, &rlim));
8343             if (!is_error(ret)) {
8344                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8345                     return -TARGET_EFAULT;
8346                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8347                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8348                 unlock_user_struct(target_rlim, arg2, 1);
8349             }
8350         }
8351         return ret;
8352 #endif
8353     case TARGET_NR_getrusage:
8354         {
8355             struct rusage rusage;
8356             ret = get_errno(getrusage(arg1, &rusage));
8357             if (!is_error(ret)) {
8358                 ret = host_to_target_rusage(arg2, &rusage);
8359             }
8360         }
8361         return ret;
8362     case TARGET_NR_gettimeofday:
8363         {
8364             struct timeval tv;
8365             ret = get_errno(gettimeofday(&tv, NULL));
8366             if (!is_error(ret)) {
8367                 if (copy_to_user_timeval(arg1, &tv))
8368                     return -TARGET_EFAULT;
8369             }
8370         }
8371         return ret;
8372     case TARGET_NR_settimeofday:
8373         {
8374             struct timeval tv, *ptv = NULL;
8375             struct timezone tz, *ptz = NULL;
8376 
8377             if (arg1) {
8378                 if (copy_from_user_timeval(&tv, arg1)) {
8379                     return -TARGET_EFAULT;
8380                 }
8381                 ptv = &tv;
8382             }
8383 
8384             if (arg2) {
8385                 if (copy_from_user_timezone(&tz, arg2)) {
8386                     return -TARGET_EFAULT;
8387                 }
8388                 ptz = &tz;
8389             }
8390 
8391             return get_errno(settimeofday(ptv, ptz));
8392         }
8393 #if defined(TARGET_NR_select)
8394     case TARGET_NR_select:
8395 #if defined(TARGET_WANT_NI_OLD_SELECT)
        /* Some architectures used to have old_select here
         * but now return ENOSYS for it.
         */
8399         ret = -TARGET_ENOSYS;
8400 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8401         ret = do_old_select(arg1);
8402 #else
8403         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8404 #endif
8405         return ret;
8406 #endif
8407 #ifdef TARGET_NR_pselect6
8408     case TARGET_NR_pselect6:
8409         {
8410             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8411             fd_set rfds, wfds, efds;
8412             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8413             struct timespec ts, *ts_ptr;
8414 
8415             /*
8416              * The 6th arg is actually two args smashed together,
8417              * so we cannot use the C library.
8418              */
8419             sigset_t set;
8420             struct {
8421                 sigset_t *set;
8422                 size_t size;
8423             } sig, *sig_ptr;
8424 
8425             abi_ulong arg_sigset, arg_sigsize, *arg7;
8426             target_sigset_t *target_sigset;
8427 
8428             n = arg1;
8429             rfd_addr = arg2;
8430             wfd_addr = arg3;
8431             efd_addr = arg4;
8432             ts_addr = arg5;
8433 
8434             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8435             if (ret) {
8436                 return ret;
8437             }
8438             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8439             if (ret) {
8440                 return ret;
8441             }
8442             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8443             if (ret) {
8444                 return ret;
8445             }
8446 
8447             /*
8448              * This takes a timespec, and not a timeval, so we cannot
8449              * use the do_select() helper ...
8450              */
8451             if (ts_addr) {
8452                 if (target_to_host_timespec(&ts, ts_addr)) {
8453                     return -TARGET_EFAULT;
8454                 }
8455                 ts_ptr = &ts;
8456             } else {
8457                 ts_ptr = NULL;
8458             }
8459 
8460             /* Extract the two packed args for the sigset */
8461             if (arg6) {
8462                 sig_ptr = &sig;
8463                 sig.size = SIGSET_T_SIZE;
8464 
8465                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8466                 if (!arg7) {
8467                     return -TARGET_EFAULT;
8468                 }
8469                 arg_sigset = tswapal(arg7[0]);
8470                 arg_sigsize = tswapal(arg7[1]);
8471                 unlock_user(arg7, arg6, 0);
8472 
8473                 if (arg_sigset) {
8474                     sig.set = &set;
8475                     if (arg_sigsize != sizeof(*target_sigset)) {
8476                         /* Like the kernel, we enforce correct size sigsets */
8477                         return -TARGET_EINVAL;
8478                     }
8479                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8480                                               sizeof(*target_sigset), 1);
8481                     if (!target_sigset) {
8482                         return -TARGET_EFAULT;
8483                     }
8484                     target_to_host_sigset(&set, target_sigset);
8485                     unlock_user(target_sigset, arg_sigset, 0);
8486                 } else {
8487                     sig.set = NULL;
8488                 }
8489             } else {
8490                 sig_ptr = NULL;
8491             }
8492 
8493             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8494                                           ts_ptr, sig_ptr));
8495 
8496             if (!is_error(ret)) {
8497                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8498                     return -TARGET_EFAULT;
8499                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8500                     return -TARGET_EFAULT;
8501                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8502                     return -TARGET_EFAULT;
8503 
8504                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8505                     return -TARGET_EFAULT;
8506             }
8507         }
8508         return ret;
8509 #endif
8510 #ifdef TARGET_NR_symlink
8511     case TARGET_NR_symlink:
8512         {
8513             void *p2;
8514             p = lock_user_string(arg1);
8515             p2 = lock_user_string(arg2);
8516             if (!p || !p2)
8517                 ret = -TARGET_EFAULT;
8518             else
8519                 ret = get_errno(symlink(p, p2));
8520             unlock_user(p2, arg2, 0);
8521             unlock_user(p, arg1, 0);
8522         }
8523         return ret;
8524 #endif
8525 #if defined(TARGET_NR_symlinkat)
8526     case TARGET_NR_symlinkat:
8527         {
8528             void *p2;
8529             p  = lock_user_string(arg1);
8530             p2 = lock_user_string(arg3);
8531             if (!p || !p2)
8532                 ret = -TARGET_EFAULT;
8533             else
8534                 ret = get_errno(symlinkat(p, arg2, p2));
8535             unlock_user(p2, arg3, 0);
8536             unlock_user(p, arg1, 0);
8537         }
8538         return ret;
8539 #endif
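    /*
     * readlink/readlinkat special-case /proc/self/exe (is_proc_myself) so
     * that the guest sees the path of the emulated binary (exec_path)
     * rather than the path of QEMU itself.
     */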
8540 #ifdef TARGET_NR_readlink
8541     case TARGET_NR_readlink:
8542         {
8543             void *p2;
8544             p = lock_user_string(arg1);
8545             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8546             if (!p || !p2) {
8547                 ret = -TARGET_EFAULT;
8548             } else if (!arg3) {
8549                 /* Short circuit this for the magic exe check. */
8550                 ret = -TARGET_EINVAL;
8551             } else if (is_proc_myself((const char *)p, "exe")) {
8552                 char real[PATH_MAX], *temp;
8553                 temp = realpath(exec_path, real);
8554                 /* Return value is # of bytes that we wrote to the buffer. */
8555                 if (temp == NULL) {
8556                     ret = get_errno(-1);
8557                 } else {
8558                     /* Don't worry about sign mismatch as earlier mapping
8559                      * logic would have thrown a bad address error. */
8560                     ret = MIN(strlen(real), arg3);
8561                     /* We cannot NUL terminate the string. */
8562                     memcpy(p2, real, ret);
8563                 }
8564             } else {
8565                 ret = get_errno(readlink(path(p), p2, arg3));
8566             }
8567             unlock_user(p2, arg2, ret);
8568             unlock_user(p, arg1, 0);
8569         }
8570         return ret;
8571 #endif
8572 #if defined(TARGET_NR_readlinkat)
8573     case TARGET_NR_readlinkat:
8574         {
8575             void *p2;
8576             p  = lock_user_string(arg2);
8577             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8578             if (!p || !p2) {
8579                 ret = -TARGET_EFAULT;
8580             } else if (is_proc_myself((const char *)p, "exe")) {
8581                 char real[PATH_MAX], *temp;
8582                 temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
8584                 snprintf((char *)p2, arg4, "%s", real);
8585             } else {
8586                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8587             }
8588             unlock_user(p2, arg3, ret);
8589             unlock_user(p, arg2, 0);
8590         }
8591         return ret;
8592 #endif
8593 #ifdef TARGET_NR_swapon
8594     case TARGET_NR_swapon:
8595         if (!(p = lock_user_string(arg1)))
8596             return -TARGET_EFAULT;
8597         ret = get_errno(swapon(p, arg2));
8598         unlock_user(p, arg1, 0);
8599         return ret;
8600 #endif
8601     case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 (the command string) is only used with RESTART2 */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
8613         return ret;
8614 #ifdef TARGET_NR_mmap
8615     case TARGET_NR_mmap:
8616 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8617     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8618     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8619     || defined(TARGET_S390X)
8620         {
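            /*
             * On these targets the old mmap syscall takes a single guest
             * pointer to a block of six arguments rather than passing
             * them in registers, so unpack that block first.
             */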
8621             abi_ulong *v;
8622             abi_ulong v1, v2, v3, v4, v5, v6;
8623             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8624                 return -TARGET_EFAULT;
8625             v1 = tswapal(v[0]);
8626             v2 = tswapal(v[1]);
8627             v3 = tswapal(v[2]);
8628             v4 = tswapal(v[3]);
8629             v5 = tswapal(v[4]);
8630             v6 = tswapal(v[5]);
8631             unlock_user(v, arg1, 0);
8632             ret = get_errno(target_mmap(v1, v2, v3,
8633                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8634                                         v5, v6));
8635         }
8636 #else
8637         ret = get_errno(target_mmap(arg1, arg2, arg3,
8638                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8639                                     arg5,
8640                                     arg6));
8641 #endif
8642         return ret;
8643 #endif
8644 #ifdef TARGET_NR_mmap2
8645     case TARGET_NR_mmap2:
8646 #ifndef MMAP_SHIFT
8647 #define MMAP_SHIFT 12
8648 #endif
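        /*
         * mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes
         * (4096 unless the target overrides MMAP_SHIFT), so convert it
         * back to a byte offset for target_mmap().
         */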
8649         ret = target_mmap(arg1, arg2, arg3,
8650                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8651                           arg5, arg6 << MMAP_SHIFT);
8652         return get_errno(ret);
8653 #endif
8654     case TARGET_NR_munmap:
8655         return get_errno(target_munmap(arg1, arg2));
8656     case TARGET_NR_mprotect:
8657         {
8658             TaskState *ts = cpu->opaque;
8659             /* Special hack to detect libc making the stack executable.  */
8660             if ((arg3 & PROT_GROWSDOWN)
8661                 && arg1 >= ts->info->stack_limit
8662                 && arg1 <= ts->info->start_stack) {
8663                 arg3 &= ~PROT_GROWSDOWN;
8664                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8665                 arg1 = ts->info->stack_limit;
8666             }
8667         }
8668         return get_errno(target_mprotect(arg1, arg2, arg3));
8669 #ifdef TARGET_NR_mremap
8670     case TARGET_NR_mremap:
8671         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8672 #endif
8673         /* ??? msync/mlock/munlock are broken for softmmu.  */
8674 #ifdef TARGET_NR_msync
8675     case TARGET_NR_msync:
8676         return get_errno(msync(g2h(arg1), arg2, arg3));
8677 #endif
8678 #ifdef TARGET_NR_mlock
8679     case TARGET_NR_mlock:
8680         return get_errno(mlock(g2h(arg1), arg2));
8681 #endif
8682 #ifdef TARGET_NR_munlock
8683     case TARGET_NR_munlock:
8684         return get_errno(munlock(g2h(arg1), arg2));
8685 #endif
8686 #ifdef TARGET_NR_mlockall
8687     case TARGET_NR_mlockall:
8688         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8689 #endif
8690 #ifdef TARGET_NR_munlockall
8691     case TARGET_NR_munlockall:
8692         return get_errno(munlockall());
8693 #endif
8694 #ifdef TARGET_NR_truncate
8695     case TARGET_NR_truncate:
8696         if (!(p = lock_user_string(arg1)))
8697             return -TARGET_EFAULT;
8698         ret = get_errno(truncate(p, arg2));
8699         unlock_user(p, arg1, 0);
8700         return ret;
8701 #endif
8702 #ifdef TARGET_NR_ftruncate
8703     case TARGET_NR_ftruncate:
8704         return get_errno(ftruncate(arg1, arg2));
8705 #endif
8706     case TARGET_NR_fchmod:
8707         return get_errno(fchmod(arg1, arg2));
8708 #if defined(TARGET_NR_fchmodat)
8709     case TARGET_NR_fchmodat:
8710         if (!(p = lock_user_string(arg2)))
8711             return -TARGET_EFAULT;
8712         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8713         unlock_user(p, arg2, 0);
8714         return ret;
8715 #endif
8716     case TARGET_NR_getpriority:
8717         /* Note that negative values are valid for getpriority, so we must
8718            differentiate based on errno settings.  */
8719         errno = 0;
8720         ret = getpriority(arg1, arg2);
8721         if (ret == -1 && errno != 0) {
8722             return -host_to_target_errno(errno);
8723         }
8724 #ifdef TARGET_ALPHA
8725         /* Return value is the unbiased priority.  Signal no error.  */
8726         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8727 #else
8728         /* Return value is a biased priority to avoid negative numbers.  */
8729         ret = 20 - ret;
8730 #endif
8731         return ret;
8732     case TARGET_NR_setpriority:
8733         return get_errno(setpriority(arg1, arg2, arg3));
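    /*
     * statfs/fstatfs and their 64-bit variants share the host-to-target
     * conversion code via the convert_statfs/convert_statfs64 labels
     * below.
     */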
8734 #ifdef TARGET_NR_statfs
8735     case TARGET_NR_statfs:
8736         if (!(p = lock_user_string(arg1))) {
8737             return -TARGET_EFAULT;
8738         }
8739         ret = get_errno(statfs(path(p), &stfs));
8740         unlock_user(p, arg1, 0);
8741     convert_statfs:
8742         if (!is_error(ret)) {
8743             struct target_statfs *target_stfs;
8744 
8745             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8746                 return -TARGET_EFAULT;
8747             __put_user(stfs.f_type, &target_stfs->f_type);
8748             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8749             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8750             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8751             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8752             __put_user(stfs.f_files, &target_stfs->f_files);
8753             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8754             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8755             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8756             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8757             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8758 #ifdef _STATFS_F_FLAGS
8759             __put_user(stfs.f_flags, &target_stfs->f_flags);
8760 #else
8761             __put_user(0, &target_stfs->f_flags);
8762 #endif
8763             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8764             unlock_user_struct(target_stfs, arg2, 1);
8765         }
8766         return ret;
8767 #endif
8768 #ifdef TARGET_NR_fstatfs
8769     case TARGET_NR_fstatfs:
8770         ret = get_errno(fstatfs(arg1, &stfs));
8771         goto convert_statfs;
8772 #endif
8773 #ifdef TARGET_NR_statfs64
8774     case TARGET_NR_statfs64:
8775         if (!(p = lock_user_string(arg1))) {
8776             return -TARGET_EFAULT;
8777         }
8778         ret = get_errno(statfs(path(p), &stfs));
8779         unlock_user(p, arg1, 0);
8780     convert_statfs64:
8781         if (!is_error(ret)) {
8782             struct target_statfs64 *target_stfs;
8783 
8784             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8785                 return -TARGET_EFAULT;
8786             __put_user(stfs.f_type, &target_stfs->f_type);
8787             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8788             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8789             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8790             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8791             __put_user(stfs.f_files, &target_stfs->f_files);
8792             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8793             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8794             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8795             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8796             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8797             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8798             unlock_user_struct(target_stfs, arg3, 1);
8799         }
8800         return ret;
8801     case TARGET_NR_fstatfs64:
8802         ret = get_errno(fstatfs(arg1, &stfs));
8803         goto convert_statfs64;
8804 #endif
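    /*
     * The socket family of syscalls is dispatched to do_* helpers which
     * convert sockaddr structures and ancillary data between guest and
     * host layouts.
     */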
8805 #ifdef TARGET_NR_socketcall
8806     case TARGET_NR_socketcall:
8807         return do_socketcall(arg1, arg2);
8808 #endif
8809 #ifdef TARGET_NR_accept
8810     case TARGET_NR_accept:
8811         return do_accept4(arg1, arg2, arg3, 0);
8812 #endif
8813 #ifdef TARGET_NR_accept4
8814     case TARGET_NR_accept4:
8815         return do_accept4(arg1, arg2, arg3, arg4);
8816 #endif
8817 #ifdef TARGET_NR_bind
8818     case TARGET_NR_bind:
8819         return do_bind(arg1, arg2, arg3);
8820 #endif
8821 #ifdef TARGET_NR_connect
8822     case TARGET_NR_connect:
8823         return do_connect(arg1, arg2, arg3);
8824 #endif
8825 #ifdef TARGET_NR_getpeername
8826     case TARGET_NR_getpeername:
8827         return do_getpeername(arg1, arg2, arg3);
8828 #endif
8829 #ifdef TARGET_NR_getsockname
8830     case TARGET_NR_getsockname:
8831         return do_getsockname(arg1, arg2, arg3);
8832 #endif
8833 #ifdef TARGET_NR_getsockopt
8834     case TARGET_NR_getsockopt:
8835         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8836 #endif
8837 #ifdef TARGET_NR_listen
8838     case TARGET_NR_listen:
8839         return get_errno(listen(arg1, arg2));
8840 #endif
8841 #ifdef TARGET_NR_recv
8842     case TARGET_NR_recv:
8843         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8844 #endif
8845 #ifdef TARGET_NR_recvfrom
8846     case TARGET_NR_recvfrom:
8847         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8848 #endif
8849 #ifdef TARGET_NR_recvmsg
8850     case TARGET_NR_recvmsg:
8851         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8852 #endif
8853 #ifdef TARGET_NR_send
8854     case TARGET_NR_send:
8855         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8856 #endif
8857 #ifdef TARGET_NR_sendmsg
8858     case TARGET_NR_sendmsg:
8859         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8860 #endif
8861 #ifdef TARGET_NR_sendmmsg
8862     case TARGET_NR_sendmmsg:
8863         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8864     case TARGET_NR_recvmmsg:
8865         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8866 #endif
8867 #ifdef TARGET_NR_sendto
8868     case TARGET_NR_sendto:
8869         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8870 #endif
8871 #ifdef TARGET_NR_shutdown
8872     case TARGET_NR_shutdown:
8873         return get_errno(shutdown(arg1, arg2));
8874 #endif
8875 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8876     case TARGET_NR_getrandom:
8877         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8878         if (!p) {
8879             return -TARGET_EFAULT;
8880         }
8881         ret = get_errno(getrandom(p, arg2, arg3));
8882         unlock_user(p, arg1, ret);
8883         return ret;
8884 #endif
8885 #ifdef TARGET_NR_socket
8886     case TARGET_NR_socket:
8887         return do_socket(arg1, arg2, arg3);
8888 #endif
8889 #ifdef TARGET_NR_socketpair
8890     case TARGET_NR_socketpair:
8891         return do_socketpair(arg1, arg2, arg3, arg4);
8892 #endif
8893 #ifdef TARGET_NR_setsockopt
8894     case TARGET_NR_setsockopt:
8895         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8896 #endif
8897 #if defined(TARGET_NR_syslog)
8898     case TARGET_NR_syslog:
8899         {
            int len = arg3;
8901 
8902             switch (arg1) {
8903             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8904             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8905             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8906             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8907             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8908             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8909             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8910             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8911                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8912             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8913             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8914             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8915                 {
8916                     if (len < 0) {
8917                         return -TARGET_EINVAL;
8918                     }
8919                     if (len == 0) {
8920                         return 0;
8921                     }
8922                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8923                     if (!p) {
8924                         return -TARGET_EFAULT;
8925                     }
8926                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8927                     unlock_user(p, arg2, arg3);
8928                 }
8929                 return ret;
8930             default:
8931                 return -TARGET_EINVAL;
8932             }
8933         }
8934         break;
8935 #endif
8936     case TARGET_NR_setitimer:
8937         {
8938             struct itimerval value, ovalue, *pvalue;
8939 
8940             if (arg2) {
8941                 pvalue = &value;
8942                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8943                     || copy_from_user_timeval(&pvalue->it_value,
8944                                               arg2 + sizeof(struct target_timeval)))
8945                     return -TARGET_EFAULT;
8946             } else {
8947                 pvalue = NULL;
8948             }
8949             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8950             if (!is_error(ret) && arg3) {
8951                 if (copy_to_user_timeval(arg3,
8952                                          &ovalue.it_interval)
8953                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8954                                             &ovalue.it_value))
8955                     return -TARGET_EFAULT;
8956             }
8957         }
8958         return ret;
8959     case TARGET_NR_getitimer:
8960         {
8961             struct itimerval value;
8962 
8963             ret = get_errno(getitimer(arg1, &value));
8964             if (!is_error(ret) && arg2) {
8965                 if (copy_to_user_timeval(arg2,
8966                                          &value.it_interval)
8967                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8968                                             &value.it_value))
8969                     return -TARGET_EFAULT;
8970             }
8971         }
8972         return ret;
8973 #ifdef TARGET_NR_stat
8974     case TARGET_NR_stat:
8975         if (!(p = lock_user_string(arg1))) {
8976             return -TARGET_EFAULT;
8977         }
8978         ret = get_errno(stat(path(p), &st));
8979         unlock_user(p, arg1, 0);
8980         goto do_stat;
8981 #endif
8982 #ifdef TARGET_NR_lstat
8983     case TARGET_NR_lstat:
8984         if (!(p = lock_user_string(arg1))) {
8985             return -TARGET_EFAULT;
8986         }
8987         ret = get_errno(lstat(path(p), &st));
8988         unlock_user(p, arg1, 0);
8989         goto do_stat;
8990 #endif
8991 #ifdef TARGET_NR_fstat
8992     case TARGET_NR_fstat:
8993         {
8994             ret = get_errno(fstat(arg1, &st));
8995 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8996         do_stat:
8997 #endif
8998             if (!is_error(ret)) {
8999                 struct target_stat *target_st;
9000 
9001                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9002                     return -TARGET_EFAULT;
9003                 memset(target_st, 0, sizeof(*target_st));
9004                 __put_user(st.st_dev, &target_st->st_dev);
9005                 __put_user(st.st_ino, &target_st->st_ino);
9006                 __put_user(st.st_mode, &target_st->st_mode);
9007                 __put_user(st.st_uid, &target_st->st_uid);
9008                 __put_user(st.st_gid, &target_st->st_gid);
9009                 __put_user(st.st_nlink, &target_st->st_nlink);
9010                 __put_user(st.st_rdev, &target_st->st_rdev);
9011                 __put_user(st.st_size, &target_st->st_size);
9012                 __put_user(st.st_blksize, &target_st->st_blksize);
9013                 __put_user(st.st_blocks, &target_st->st_blocks);
9014                 __put_user(st.st_atime, &target_st->target_st_atime);
9015                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9016                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9017 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9018     defined(TARGET_STAT_HAVE_NSEC)
9019                 __put_user(st.st_atim.tv_nsec,
9020                            &target_st->target_st_atime_nsec);
9021                 __put_user(st.st_mtim.tv_nsec,
9022                            &target_st->target_st_mtime_nsec);
9023                 __put_user(st.st_ctim.tv_nsec,
9024                            &target_st->target_st_ctime_nsec);
9025 #endif
9026                 unlock_user_struct(target_st, arg2, 1);
9027             }
9028         }
9029         return ret;
9030 #endif
9031     case TARGET_NR_vhangup:
9032         return get_errno(vhangup());
9033 #ifdef TARGET_NR_syscall
9034     case TARGET_NR_syscall:
9035         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9036                           arg6, arg7, arg8, 0);
9037 #endif
9038     case TARGET_NR_wait4:
9039         {
9040             int status;
9041             abi_long status_ptr = arg2;
9042             struct rusage rusage, *rusage_ptr;
9043             abi_ulong target_rusage = arg4;
9044             abi_long rusage_err;
9045             if (target_rusage)
9046                 rusage_ptr = &rusage;
9047             else
9048                 rusage_ptr = NULL;
9049             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9050             if (!is_error(ret)) {
9051                 if (status_ptr && ret) {
9052                     status = host_to_target_waitstatus(status);
9053                     if (put_user_s32(status, status_ptr))
9054                         return -TARGET_EFAULT;
9055                 }
9056                 if (target_rusage) {
9057                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9058                     if (rusage_err) {
9059                         ret = rusage_err;
9060                     }
9061                 }
9062             }
9063         }
9064         return ret;
9065 #ifdef TARGET_NR_swapoff
9066     case TARGET_NR_swapoff:
9067         if (!(p = lock_user_string(arg1)))
9068             return -TARGET_EFAULT;
9069         ret = get_errno(swapoff(p));
9070         unlock_user(p, arg1, 0);
9071         return ret;
9072 #endif
9073     case TARGET_NR_sysinfo:
9074         {
9075             struct target_sysinfo *target_value;
9076             struct sysinfo value;
9077             ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1) {
9080                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9081                     return -TARGET_EFAULT;
9082                 __put_user(value.uptime, &target_value->uptime);
9083                 __put_user(value.loads[0], &target_value->loads[0]);
9084                 __put_user(value.loads[1], &target_value->loads[1]);
9085                 __put_user(value.loads[2], &target_value->loads[2]);
9086                 __put_user(value.totalram, &target_value->totalram);
9087                 __put_user(value.freeram, &target_value->freeram);
9088                 __put_user(value.sharedram, &target_value->sharedram);
9089                 __put_user(value.bufferram, &target_value->bufferram);
9090                 __put_user(value.totalswap, &target_value->totalswap);
9091                 __put_user(value.freeswap, &target_value->freeswap);
9092                 __put_user(value.procs, &target_value->procs);
9093                 __put_user(value.totalhigh, &target_value->totalhigh);
9094                 __put_user(value.freehigh, &target_value->freehigh);
9095                 __put_user(value.mem_unit, &target_value->mem_unit);
9096                 unlock_user_struct(target_value, arg1, 1);
9097             }
9098         }
9099         return ret;
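    /*
     * On targets that only provide the old ipc() multiplexer, do_ipc()
     * decodes the call number and dispatches to the individual SysV IPC
     * handlers.
     */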
9100 #ifdef TARGET_NR_ipc
9101     case TARGET_NR_ipc:
9102         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9103 #endif
9104 #ifdef TARGET_NR_semget
9105     case TARGET_NR_semget:
9106         return get_errno(semget(arg1, arg2, arg3));
9107 #endif
9108 #ifdef TARGET_NR_semop
9109     case TARGET_NR_semop:
9110         return do_semop(arg1, arg2, arg3);
9111 #endif
9112 #ifdef TARGET_NR_semctl
9113     case TARGET_NR_semctl:
9114         return do_semctl(arg1, arg2, arg3, arg4);
9115 #endif
9116 #ifdef TARGET_NR_msgctl
9117     case TARGET_NR_msgctl:
9118         return do_msgctl(arg1, arg2, arg3);
9119 #endif
9120 #ifdef TARGET_NR_msgget
9121     case TARGET_NR_msgget:
9122         return get_errno(msgget(arg1, arg2));
9123 #endif
9124 #ifdef TARGET_NR_msgrcv
9125     case TARGET_NR_msgrcv:
9126         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9127 #endif
9128 #ifdef TARGET_NR_msgsnd
9129     case TARGET_NR_msgsnd:
9130         return do_msgsnd(arg1, arg2, arg3, arg4);
9131 #endif
9132 #ifdef TARGET_NR_shmget
9133     case TARGET_NR_shmget:
9134         return get_errno(shmget(arg1, arg2, arg3));
9135 #endif
9136 #ifdef TARGET_NR_shmctl
9137     case TARGET_NR_shmctl:
9138         return do_shmctl(arg1, arg2, arg3);
9139 #endif
9140 #ifdef TARGET_NR_shmat
9141     case TARGET_NR_shmat:
9142         return do_shmat(cpu_env, arg1, arg2, arg3);
9143 #endif
9144 #ifdef TARGET_NR_shmdt
9145     case TARGET_NR_shmdt:
9146         return do_shmdt(arg1);
9147 #endif
9148     case TARGET_NR_fsync:
9149         return get_errno(fsync(arg1));
9150     case TARGET_NR_clone:
9151         /* Linux manages to have three different orderings for its
9152          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9153          * match the kernel's CONFIG_CLONE_* settings.
9154          * Microblaze is further special in that it uses a sixth
9155          * implicit argument to clone for the TLS pointer.
9156          */
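        /* As a reading aid (and assuming do_fork() takes its arguments in
         * the order flags, newsp, parent_tidptr, tls, child_tidptr), the
         * guest argument layouts handled below are roughly:
         *   default:    flags, newsp, parent_tidptr, child_tidptr, tls
         *   BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         *   Microblaze: flags, newsp, (arg3 unused here), parent_tidptr,
         *               child_tidptr, tls
         */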
9157 #if defined(TARGET_MICROBLAZE)
9158         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9159 #elif defined(TARGET_CLONE_BACKWARDS)
9160         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9161 #elif defined(TARGET_CLONE_BACKWARDS2)
9162         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9163 #else
9164         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9165 #endif
9166         return ret;
9167 #ifdef __NR_exit_group
9168         /* new thread calls */
9169     case TARGET_NR_exit_group:
9170         preexit_cleanup(cpu_env, arg1);
9171         return get_errno(exit_group(arg1));
9172 #endif
9173     case TARGET_NR_setdomainname:
9174         if (!(p = lock_user_string(arg1)))
9175             return -TARGET_EFAULT;
9176         ret = get_errno(setdomainname(p, arg2));
9177         unlock_user(p, arg1, 0);
9178         return ret;
9179     case TARGET_NR_uname:
9180         /* no need to transcode because we use the linux syscall */
9181         {
9182             struct new_utsname * buf;
9183 
9184             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9185                 return -TARGET_EFAULT;
9186             ret = get_errno(sys_uname(buf));
9187             if (!is_error(ret)) {
9188                 /* Overwrite the native machine name with whatever is being
9189                    emulated. */
9190                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9191                           sizeof(buf->machine));
9192                 /* Allow the user to override the reported release.  */
9193                 if (qemu_uname_release && *qemu_uname_release) {
9194                     g_strlcpy(buf->release, qemu_uname_release,
9195                               sizeof(buf->release));
9196                 }
9197             }
9198             unlock_user_struct(buf, arg1, 1);
9199         }
9200         return ret;
9201 #ifdef TARGET_I386
9202     case TARGET_NR_modify_ldt:
9203         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9204 #if !defined(TARGET_X86_64)
9205     case TARGET_NR_vm86:
9206         return do_vm86(cpu_env, arg1, arg2);
9207 #endif
9208 #endif
9209     case TARGET_NR_adjtimex:
9210         {
9211             struct timex host_buf;
9212 
9213             if (target_to_host_timex(&host_buf, arg1) != 0) {
9214                 return -TARGET_EFAULT;
9215             }
9216             ret = get_errno(adjtimex(&host_buf));
9217             if (!is_error(ret)) {
9218                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9219                     return -TARGET_EFAULT;
9220                 }
9221             }
9222         }
9223         return ret;
9224 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9225     case TARGET_NR_clock_adjtime:
9226         {
9227             struct timex htx, *phtx = &htx;
9228 
9229             if (target_to_host_timex(phtx, arg2) != 0) {
9230                 return -TARGET_EFAULT;
9231             }
9232             ret = get_errno(clock_adjtime(arg1, phtx));
9233             if (!is_error(ret) && phtx) {
9234                 if (host_to_target_timex(arg2, phtx) != 0) {
9235                     return -TARGET_EFAULT;
9236                 }
9237             }
9238         }
9239         return ret;
9240 #endif
9241     case TARGET_NR_getpgid:
9242         return get_errno(getpgid(arg1));
9243     case TARGET_NR_fchdir:
9244         return get_errno(fchdir(arg1));
9245     case TARGET_NR_personality:
9246         return get_errno(personality(arg1));
9247 #ifdef TARGET_NR__llseek /* Not on alpha */
9248     case TARGET_NR__llseek:
9249         {
9250             int64_t res;
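            /* arg2 and arg3 carry the high and low 32 bits of the 64-bit
             * offset; e.g. offset_high = 1, offset_low = 0 seeks to
             * 0x100000000 (4 GiB).
             */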
9251 #if !defined(__NR_llseek)
9252             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9253             if (res == -1) {
9254                 ret = get_errno(res);
9255             } else {
9256                 ret = 0;
9257             }
9258 #else
9259             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9260 #endif
9261             if ((ret == 0) && put_user_s64(res, arg4)) {
9262                 return -TARGET_EFAULT;
9263             }
9264         }
9265         return ret;
9266 #endif
9267 #ifdef TARGET_NR_getdents
9268     case TARGET_NR_getdents:
9269 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9270 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
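        /* The host linux_dirent carries 64-bit d_ino/d_off here while the
         * target expects 32-bit fields, so each host record is wider than
         * its target counterpart.  Read into a temporary host buffer and
         * repack the records into the guest buffer one by one.
         */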
9271         {
9272             struct target_dirent *target_dirp;
9273             struct linux_dirent *dirp;
9274             abi_long count = arg3;
9275 
9276             dirp = g_try_malloc(count);
9277             if (!dirp) {
9278                 return -TARGET_ENOMEM;
9279             }
9280 
9281             ret = get_errno(sys_getdents(arg1, dirp, count));
9282             if (!is_error(ret)) {
9283                 struct linux_dirent *de;
9284                 struct target_dirent *tde;
9285                 int len = ret;
9286                 int reclen, treclen;
9287                 int count1, tnamelen;
9288 
9289                 count1 = 0;
9290                 de = dirp;
9291                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9292                     return -TARGET_EFAULT;
9293                 tde = target_dirp;
9294                 while (len > 0) {
9295                     reclen = de->d_reclen;
9296                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9297                     assert(tnamelen >= 0);
9298                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9299                     assert(count1 + treclen <= count);
9300                     tde->d_reclen = tswap16(treclen);
9301                     tde->d_ino = tswapal(de->d_ino);
9302                     tde->d_off = tswapal(de->d_off);
9303                     memcpy(tde->d_name, de->d_name, tnamelen);
9304                     de = (struct linux_dirent *)((char *)de + reclen);
9305                     len -= reclen;
9306                     tde = (struct target_dirent *)((char *)tde + treclen);
9307                     count1 += treclen;
9308                 }
9309                 ret = count1;
9310                 unlock_user(target_dirp, arg2, ret);
9311             }
9312             g_free(dirp);
9313         }
9314 #else
9315         {
9316             struct linux_dirent *dirp;
9317             abi_long count = arg3;
9318 
9319             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9320                 return -TARGET_EFAULT;
9321             ret = get_errno(sys_getdents(arg1, dirp, count));
9322             if (!is_error(ret)) {
9323                 struct linux_dirent *de;
9324                 int len = ret;
9325                 int reclen;
9326                 de = dirp;
9327                 while (len > 0) {
9328                     reclen = de->d_reclen;
9329                     if (reclen > len)
9330                         break;
9331                     de->d_reclen = tswap16(reclen);
9332                     tswapls(&de->d_ino);
9333                     tswapls(&de->d_off);
9334                     de = (struct linux_dirent *)((char *)de + reclen);
9335                     len -= reclen;
9336                 }
9337             }
9338             unlock_user(dirp, arg2, ret);
9339         }
9340 #endif
9341 #else
9342         /* Implement getdents in terms of getdents64 */
9343         {
9344             struct linux_dirent64 *dirp;
9345             abi_long count = arg3;
9346 
9347             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9348             if (!dirp) {
9349                 return -TARGET_EFAULT;
9350             }
9351             ret = get_errno(sys_getdents64(arg1, dirp, count));
9352             if (!is_error(ret)) {
9353                 /* Convert the dirent64 structs to target dirent.  We do this
9354                  * in-place, since we can guarantee that a target_dirent is no
9355                  * larger than a dirent64; however this means we have to be
9356                  * careful to read everything before writing in the new format.
9357                  */
9358                 struct linux_dirent64 *de;
9359                 struct target_dirent *tde;
9360                 int len = ret;
9361                 int tlen = 0;
9362 
9363                 de = dirp;
9364                 tde = (struct target_dirent *)dirp;
9365                 while (len > 0) {
9366                     int namelen, treclen;
9367                     int reclen = de->d_reclen;
9368                     uint64_t ino = de->d_ino;
9369                     int64_t off = de->d_off;
9370                     uint8_t type = de->d_type;
9371 
9372                     namelen = strlen(de->d_name);
9373                     treclen = offsetof(struct target_dirent, d_name)
9374                         + namelen + 2;
9375                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
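                    /* The "+ 2" above reserves space for the trailing NUL of
                     * d_name plus the d_type byte stored in the last byte of
                     * the record (see below); e.g. a 5-character name on a
                     * typical 32-bit target gives 10 + 5 + 2 = 17, rounded
                     * up to 20 by the abi_long alignment.
                     */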
9376 
9377                     memmove(tde->d_name, de->d_name, namelen + 1);
9378                     tde->d_ino = tswapal(ino);
9379                     tde->d_off = tswapal(off);
9380                     tde->d_reclen = tswap16(treclen);
9381                     /* The target_dirent type is in what was formerly a padding
9382                      * byte at the end of the structure.
9383                      */
9384                     *(((char *)tde) + treclen - 1) = type;
9385 
9386                     de = (struct linux_dirent64 *)((char *)de + reclen);
9387                     tde = (struct target_dirent *)((char *)tde + treclen);
9388                     len -= reclen;
9389                     tlen += treclen;
9390                 }
9391                 ret = tlen;
9392             }
9393             unlock_user(dirp, arg2, ret);
9394         }
9395 #endif
9396         return ret;
9397 #endif /* TARGET_NR_getdents */
9398 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9399     case TARGET_NR_getdents64:
9400         {
9401             struct linux_dirent64 *dirp;
9402             abi_long count = arg3;
9403             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9404                 return -TARGET_EFAULT;
9405             ret = get_errno(sys_getdents64(arg1, dirp, count));
9406             if (!is_error(ret)) {
9407                 struct linux_dirent64 *de;
9408                 int len = ret;
9409                 int reclen;
9410                 de = dirp;
9411                 while (len > 0) {
9412                     reclen = de->d_reclen;
9413                     if (reclen > len)
9414                         break;
9415                     de->d_reclen = tswap16(reclen);
9416                     tswap64s((uint64_t *)&de->d_ino);
9417                     tswap64s((uint64_t *)&de->d_off);
9418                     de = (struct linux_dirent64 *)((char *)de + reclen);
9419                     len -= reclen;
9420                 }
9421             }
9422             unlock_user(dirp, arg2, ret);
9423         }
9424         return ret;
9425 #endif /* TARGET_NR_getdents64 */
9426 #if defined(TARGET_NR__newselect)
9427     case TARGET_NR__newselect:
9428         return do_select(arg1, arg2, arg3, arg4, arg5);
9429 #endif
9430 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9431 # ifdef TARGET_NR_poll
9432     case TARGET_NR_poll:
9433 # endif
9434 # ifdef TARGET_NR_ppoll
9435     case TARGET_NR_ppoll:
9436 # endif
9437         {
9438             struct target_pollfd *target_pfd;
9439             unsigned int nfds = arg2;
9440             struct pollfd *pfd;
9441             unsigned int i;
9442 
9443             pfd = NULL;
9444             target_pfd = NULL;
9445             if (nfds) {
9446                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9447                     return -TARGET_EINVAL;
9448                 }
9449 
9450                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9451                                        sizeof(struct target_pollfd) * nfds, 1);
9452                 if (!target_pfd) {
9453                     return -TARGET_EFAULT;
9454                 }
9455 
9456                 pfd = alloca(sizeof(struct pollfd) * nfds);
9457                 for (i = 0; i < nfds; i++) {
9458                     pfd[i].fd = tswap32(target_pfd[i].fd);
9459                     pfd[i].events = tswap16(target_pfd[i].events);
9460                 }
9461             }
9462 
9463             switch (num) {
9464 # ifdef TARGET_NR_ppoll
9465             case TARGET_NR_ppoll:
9466             {
9467                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9468                 target_sigset_t *target_set;
9469                 sigset_t _set, *set = &_set;
9470 
9471                 if (arg3) {
9472                     if (target_to_host_timespec(timeout_ts, arg3)) {
9473                         unlock_user(target_pfd, arg1, 0);
9474                         return -TARGET_EFAULT;
9475                     }
9476                 } else {
9477                     timeout_ts = NULL;
9478                 }
9479 
9480                 if (arg4) {
9481                     if (arg5 != sizeof(target_sigset_t)) {
9482                         unlock_user(target_pfd, arg1, 0);
9483                         return -TARGET_EINVAL;
9484                     }
9485 
9486                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9487                     if (!target_set) {
9488                         unlock_user(target_pfd, arg1, 0);
9489                         return -TARGET_EFAULT;
9490                     }
9491                     target_to_host_sigset(set, target_set);
9492                 } else {
9493                     set = NULL;
9494                 }
9495 
9496                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9497                                            set, SIGSET_T_SIZE));
9498 
9499                 if (!is_error(ret) && arg3) {
9500                     host_to_target_timespec(arg3, timeout_ts);
9501                 }
9502                 if (arg4) {
9503                     unlock_user(target_set, arg4, 0);
9504                 }
9505                 break;
9506             }
9507 # endif
9508 # ifdef TARGET_NR_poll
9509             case TARGET_NR_poll:
9510             {
9511                 struct timespec ts, *pts;
9512 
9513                 if (arg3 >= 0) {
9514                     /* Convert ms to secs, ns */
9515                     ts.tv_sec = arg3 / 1000;
9516                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
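                    /* e.g. a 2500 ms timeout becomes tv_sec = 2,
                     * tv_nsec = 500000000. */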
9517                     pts = &ts;
9518                 } else {
9519                     /* A negative poll() timeout means "infinite" */
9520                     pts = NULL;
9521                 }
9522                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9523                 break;
9524             }
9525 # endif
9526             default:
9527                 g_assert_not_reached();
9528             }
9529 
9530             if (!is_error(ret)) {
9531                 for (i = 0; i < nfds; i++) {
9532                     target_pfd[i].revents = tswap16(pfd[i].revents);
9533                 }
9534             }
9535             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9536         }
9537         return ret;
9538 #endif
9539     case TARGET_NR_flock:
9540         /* NOTE: the flock constant seems to be the same for every
9541            Linux platform */
9542         return get_errno(safe_flock(arg1, arg2));
9543     case TARGET_NR_readv:
9544         {
9545             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9546             if (vec != NULL) {
9547                 ret = get_errno(safe_readv(arg1, vec, arg3));
9548                 unlock_iovec(vec, arg2, arg3, 1);
9549             } else {
9550                 ret = -host_to_target_errno(errno);
9551             }
9552         }
9553         return ret;
9554     case TARGET_NR_writev:
9555         {
9556             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9557             if (vec != NULL) {
9558                 ret = get_errno(safe_writev(arg1, vec, arg3));
9559                 unlock_iovec(vec, arg2, arg3, 0);
9560             } else {
9561                 ret = -host_to_target_errno(errno);
9562             }
9563         }
9564         return ret;
9565 #if defined(TARGET_NR_preadv)
9566     case TARGET_NR_preadv:
9567         {
9568             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9569             if (vec != NULL) {
9570                 unsigned long low, high;
9571 
9572                 target_to_host_low_high(arg4, arg5, &low, &high);
9573                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9574                 unlock_iovec(vec, arg2, arg3, 1);
9575             } else {
9576                 ret = -host_to_target_errno(errno);
9577             }
9578         }
9579         return ret;
9580 #endif
9581 #if defined(TARGET_NR_pwritev)
9582     case TARGET_NR_pwritev:
9583         {
9584             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9585             if (vec != NULL) {
9586                 unsigned long low, high;
9587 
9588                 target_to_host_low_high(arg4, arg5, &low, &high);
9589                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9590                 unlock_iovec(vec, arg2, arg3, 0);
9591             } else {
9592                 ret = -host_to_target_errno(errno);
9593             }
9594         }
9595         return ret;
9596 #endif
9597     case TARGET_NR_getsid:
9598         return get_errno(getsid(arg1));
9599 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9600     case TARGET_NR_fdatasync:
9601         return get_errno(fdatasync(arg1));
9602 #endif
9603 #ifdef TARGET_NR__sysctl
9604     case TARGET_NR__sysctl:
9605         /* We don't implement this, but ENOTDIR is always a safe
9606            return value. */
9607         return -TARGET_ENOTDIR;
9608 #endif
9609     case TARGET_NR_sched_getaffinity:
9610         {
9611             unsigned int mask_size;
9612             unsigned long *mask;
9613 
9614             /*
9615              * sched_getaffinity needs multiples of ulong, so we need to take
9616              * care of mismatches between target ulong and host ulong sizes.
9617              */
9618             if (arg2 & (sizeof(abi_ulong) - 1)) {
9619                 return -TARGET_EINVAL;
9620             }
9621             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
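            /* Round the guest-supplied length up to a whole number of host
             * longs; e.g. a 32-bit guest passing arg2 = 4 on a 64-bit host
             * gets mask_size = 8.
             */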
9622 
9623             mask = alloca(mask_size);
9624             memset(mask, 0, mask_size);
9625             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9626 
9627             if (!is_error(ret)) {
9628                 if (ret > arg2) {
9629                     /* More data was returned than the caller's buffer can hold.
9630                      * This only happens if sizeof(abi_long) < sizeof(long)
9631                      * and the caller passed us a buffer holding an odd number
9632                      * of abi_longs. If the host kernel is actually using the
9633                      * extra 4 bytes then fail EINVAL; otherwise we can just
9634                      * ignore them and only copy the interesting part.
9635                      */
9636                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9637                     if (numcpus > arg2 * 8) {
9638                         return -TARGET_EINVAL;
9639                     }
9640                     ret = arg2;
9641                 }
9642 
9643                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9644                     return -TARGET_EFAULT;
9645                 }
9646             }
9647         }
9648         return ret;
9649     case TARGET_NR_sched_setaffinity:
9650         {
9651             unsigned int mask_size;
9652             unsigned long *mask;
9653 
9654             /*
9655              * sched_setaffinity needs multiples of ulong, so we need to take
9656              * care of mismatches between target ulong and host ulong sizes.
9657              */
9658             if (arg2 & (sizeof(abi_ulong) - 1)) {
9659                 return -TARGET_EINVAL;
9660             }
9661             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9662             mask = alloca(mask_size);
9663 
9664             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9665             if (ret) {
9666                 return ret;
9667             }
9668 
9669             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9670         }
9671     case TARGET_NR_getcpu:
9672         {
9673             unsigned cpu, node;
9674             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9675                                        arg2 ? &node : NULL,
9676                                        NULL));
9677             if (is_error(ret)) {
9678                 return ret;
9679             }
9680             if (arg1 && put_user_u32(cpu, arg1)) {
9681                 return -TARGET_EFAULT;
9682             }
9683             if (arg2 && put_user_u32(node, arg2)) {
9684                 return -TARGET_EFAULT;
9685             }
9686         }
9687         return ret;
9688     case TARGET_NR_sched_setparam:
9689         {
9690             struct sched_param *target_schp;
9691             struct sched_param schp;
9692 
9693             if (arg2 == 0) {
9694                 return -TARGET_EINVAL;
9695             }
9696             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9697                 return -TARGET_EFAULT;
9698             schp.sched_priority = tswap32(target_schp->sched_priority);
9699             unlock_user_struct(target_schp, arg2, 0);
9700             return get_errno(sched_setparam(arg1, &schp));
9701         }
9702     case TARGET_NR_sched_getparam:
9703         {
9704             struct sched_param *target_schp;
9705             struct sched_param schp;
9706 
9707             if (arg2 == 0) {
9708                 return -TARGET_EINVAL;
9709             }
9710             ret = get_errno(sched_getparam(arg1, &schp));
9711             if (!is_error(ret)) {
9712                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9713                     return -TARGET_EFAULT;
9714                 target_schp->sched_priority = tswap32(schp.sched_priority);
9715                 unlock_user_struct(target_schp, arg2, 1);
9716             }
9717         }
9718         return ret;
9719     case TARGET_NR_sched_setscheduler:
9720         {
9721             struct sched_param *target_schp;
9722             struct sched_param schp;
9723             if (arg3 == 0) {
9724                 return -TARGET_EINVAL;
9725             }
9726             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9727                 return -TARGET_EFAULT;
9728             schp.sched_priority = tswap32(target_schp->sched_priority);
9729             unlock_user_struct(target_schp, arg3, 0);
9730             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9731         }
9732     case TARGET_NR_sched_getscheduler:
9733         return get_errno(sched_getscheduler(arg1));
9734     case TARGET_NR_sched_yield:
9735         return get_errno(sched_yield());
9736     case TARGET_NR_sched_get_priority_max:
9737         return get_errno(sched_get_priority_max(arg1));
9738     case TARGET_NR_sched_get_priority_min:
9739         return get_errno(sched_get_priority_min(arg1));
9740     case TARGET_NR_sched_rr_get_interval:
9741         {
9742             struct timespec ts;
9743             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9744             if (!is_error(ret)) {
9745                 ret = host_to_target_timespec(arg2, &ts);
9746             }
9747         }
9748         return ret;
9749     case TARGET_NR_nanosleep:
9750         {
9751             struct timespec req, rem;
9752             target_to_host_timespec(&req, arg1);
9753             ret = get_errno(safe_nanosleep(&req, &rem));
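            /* The kernel only fills in rem when the sleep is interrupted
             * (EINTR), which is why it is copied back to the guest only on
             * the error path.
             */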
9754             if (is_error(ret) && arg2) {
9755                 host_to_target_timespec(arg2, &rem);
9756             }
9757         }
9758         return ret;
9759     case TARGET_NR_prctl:
9760         switch (arg1) {
9761         case PR_GET_PDEATHSIG:
9762         {
9763             int deathsig;
9764             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9765             if (!is_error(ret) && arg2
9766                 && put_user_ual(deathsig, arg2)) {
9767                 return -TARGET_EFAULT;
9768             }
9769             return ret;
9770         }
9771 #ifdef PR_GET_NAME
9772         case PR_GET_NAME:
9773         {
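            /* The kernel's task name (comm) is at most 16 bytes including
             * the trailing NUL, hence the fixed-size buffer used here and
             * for PR_SET_NAME below. */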
9774             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9775             if (!name) {
9776                 return -TARGET_EFAULT;
9777             }
9778             ret = get_errno(prctl(arg1, (unsigned long)name,
9779                                   arg3, arg4, arg5));
9780             unlock_user(name, arg2, 16);
9781             return ret;
9782         }
9783         case PR_SET_NAME:
9784         {
9785             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9786             if (!name) {
9787                 return -TARGET_EFAULT;
9788             }
9789             ret = get_errno(prctl(arg1, (unsigned long)name,
9790                                   arg3, arg4, arg5));
9791             unlock_user(name, arg2, 0);
9792             return ret;
9793         }
9794 #endif
9795 #ifdef TARGET_MIPS
9796         case TARGET_PR_GET_FP_MODE:
9797         {
9798             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9799             ret = 0;
9800             if (env->CP0_Status & (1 << CP0St_FR)) {
9801                 ret |= TARGET_PR_FP_MODE_FR;
9802             }
9803             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9804                 ret |= TARGET_PR_FP_MODE_FRE;
9805             }
9806             return ret;
9807         }
9808         case TARGET_PR_SET_FP_MODE:
9809         {
9810             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9811             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9812             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9813             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9814             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9815 
9816             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9817                                             TARGET_PR_FP_MODE_FRE;
9818 
9819             /* If nothing to change, return right away, successfully.  */
9820             if (old_fr == new_fr && old_fre == new_fre) {
9821                 return 0;
9822             }
9823             /* Check the value is valid */
9824             if (arg2 & ~known_bits) {
9825                 return -TARGET_EOPNOTSUPP;
9826             }
9827             /* Setting FRE without FR is not supported.  */
9828             if (new_fre && !new_fr) {
9829                 return -TARGET_EOPNOTSUPP;
9830             }
9831             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9832                 /* FR1 is not supported */
9833                 return -TARGET_EOPNOTSUPP;
9834             }
9835             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9836                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9837                 /* cannot set FR=0 */
9838                 return -TARGET_EOPNOTSUPP;
9839             }
9840             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9841                 /* Cannot set FRE=1 */
9842                 return -TARGET_EOPNOTSUPP;
9843             }
9844 
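            /* Switching FR changes how double-precision values map onto the
             * FP register file, so repack the registers: moving to FR=1
             * folds each odd register into the upper half of the preceding
             * even register, and moving back to FR=0 splits it out again.
             */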
9845             int i;
9846             fpr_t *fpr = env->active_fpu.fpr;
9847             for (i = 0; i < 32 ; i += 2) {
9848                 if (!old_fr && new_fr) {
9849                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9850                 } else if (old_fr && !new_fr) {
9851                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9852                 }
9853             }
9854 
9855             if (new_fr) {
9856                 env->CP0_Status |= (1 << CP0St_FR);
9857                 env->hflags |= MIPS_HFLAG_F64;
9858             } else {
9859                 env->CP0_Status &= ~(1 << CP0St_FR);
9860                 env->hflags &= ~MIPS_HFLAG_F64;
9861             }
9862             if (new_fre) {
9863                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9864                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9865                     env->hflags |= MIPS_HFLAG_FRE;
9866                 }
9867             } else {
9868                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9869                 env->hflags &= ~MIPS_HFLAG_FRE;
9870             }
9871 
9872             return 0;
9873         }
9874 #endif /* MIPS */
9875 #ifdef TARGET_AARCH64
9876         case TARGET_PR_SVE_SET_VL:
9877             /*
9878              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9879              * PR_SVE_VL_INHERIT.  Note the kernel definition
9880              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9881              * even though the current architectural maximum is VQ=16.
9882              */
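            /* arg2 is the requested vector length in bytes; e.g. arg2 = 32
             * asks for VQ = 2 (256-bit vectors).  The value actually set,
             * possibly clamped to the CPU's sve_max_vq, is returned in
             * bytes.
             */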
9883             ret = -TARGET_EINVAL;
9884             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9885                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9886                 CPUARMState *env = cpu_env;
9887                 ARMCPU *cpu = env_archcpu(env);
9888                 uint32_t vq, old_vq;
9889 
9890                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9891                 vq = MAX(arg2 / 16, 1);
9892                 vq = MIN(vq, cpu->sve_max_vq);
9893 
9894                 if (vq < old_vq) {
9895                     aarch64_sve_narrow_vq(env, vq);
9896                 }
9897                 env->vfp.zcr_el[1] = vq - 1;
9898                 ret = vq * 16;
9899             }
9900             return ret;
9901         case TARGET_PR_SVE_GET_VL:
9902             ret = -TARGET_EINVAL;
9903             {
9904                 ARMCPU *cpu = env_archcpu(cpu_env);
9905                 if (cpu_isar_feature(aa64_sve, cpu)) {
9906                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9907                 }
9908             }
9909             return ret;
9910         case TARGET_PR_PAC_RESET_KEYS:
9911             {
9912                 CPUARMState *env = cpu_env;
9913                 ARMCPU *cpu = env_archcpu(env);
9914 
9915                 if (arg3 || arg4 || arg5) {
9916                     return -TARGET_EINVAL;
9917                 }
9918                 if (cpu_isar_feature(aa64_pauth, cpu)) {
9919                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9920                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9921                                TARGET_PR_PAC_APGAKEY);
9922                     int ret = 0;
9923                     Error *err = NULL;
9924 
9925                     if (arg2 == 0) {
9926                         arg2 = all;
9927                     } else if (arg2 & ~all) {
9928                         return -TARGET_EINVAL;
9929                     }
9930                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
9931                         ret |= qemu_guest_getrandom(&env->keys.apia,
9932                                                     sizeof(ARMPACKey), &err);
9933                     }
9934                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
9935                         ret |= qemu_guest_getrandom(&env->keys.apib,
9936                                                     sizeof(ARMPACKey), &err);
9937                     }
9938                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
9939                         ret |= qemu_guest_getrandom(&env->keys.apda,
9940                                                     sizeof(ARMPACKey), &err);
9941                     }
9942                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
9943                         ret |= qemu_guest_getrandom(&env->keys.apdb,
9944                                                     sizeof(ARMPACKey), &err);
9945                     }
9946                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
9947                         ret |= qemu_guest_getrandom(&env->keys.apga,
9948                                                     sizeof(ARMPACKey), &err);
9949                     }
9950                     if (ret != 0) {
9951                         /*
9952                          * Some unknown failure in the crypto.  The best
9953                          * we can do is log it and fail the syscall.
9954                          * The real syscall cannot fail this way.
9955                          */
9956                         qemu_log_mask(LOG_UNIMP,
9957                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
9958                                       error_get_pretty(err));
9959                         error_free(err);
9960                         return -TARGET_EIO;
9961                     }
9962                     return 0;
9963                 }
9964             }
9965             return -TARGET_EINVAL;
9966 #endif /* AARCH64 */
9967         case PR_GET_SECCOMP:
9968         case PR_SET_SECCOMP:
9969             /* Disable seccomp to prevent the target from disabling syscalls
9970              * that we need. */
9971             return -TARGET_EINVAL;
9972         default:
9973             /* Most prctl options have no pointer arguments */
9974             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9975         }
9976         break;
9977 #ifdef TARGET_NR_arch_prctl
9978     case TARGET_NR_arch_prctl:
9979 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9980         return do_arch_prctl(cpu_env, arg1, arg2);
9981 #else
9982 #error unreachable
9983 #endif
9984 #endif
9985 #ifdef TARGET_NR_pread64
9986     case TARGET_NR_pread64:
9987         if (regpairs_aligned(cpu_env, num)) {
9988             arg4 = arg5;
9989             arg5 = arg6;
9990         }
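        /* Some 32-bit ABIs pass 64-bit syscall arguments in aligned register
         * pairs, inserting a pad slot; on those targets the offset halves
         * arrive in arg5/arg6 instead of arg4/arg5, hence the shift above.
         */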
9991         if (arg2 == 0 && arg3 == 0) {
9992             /* Special-case NULL buffer and zero length, which should succeed */
9993             p = 0;
9994         } else {
9995             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9996             if (!p) {
9997                 return -TARGET_EFAULT;
9998             }
9999         }
10000         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10001         unlock_user(p, arg2, ret);
10002         return ret;
10003     case TARGET_NR_pwrite64:
10004         if (regpairs_aligned(cpu_env, num)) {
10005             arg4 = arg5;
10006             arg5 = arg6;
10007         }
10008         if (arg2 == 0 && arg3 == 0) {
10009             /* Special-case NULL buffer and zero length, which should succeed */
10010             p = 0;
10011         } else {
10012             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10013             if (!p) {
10014                 return -TARGET_EFAULT;
10015             }
10016         }
10017         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10018         unlock_user(p, arg2, 0);
10019         return ret;
10020 #endif
10021     case TARGET_NR_getcwd:
10022         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10023             return -TARGET_EFAULT;
10024         ret = get_errno(sys_getcwd1(p, arg2));
10025         unlock_user(p, arg1, ret);
10026         return ret;
10027     case TARGET_NR_capget:
10028     case TARGET_NR_capset:
10029     {
10030         struct target_user_cap_header *target_header;
10031         struct target_user_cap_data *target_data = NULL;
10032         struct __user_cap_header_struct header;
10033         struct __user_cap_data_struct data[2];
10034         struct __user_cap_data_struct *dataptr = NULL;
10035         int i, target_datalen;
10036         int data_items = 1;
10037 
10038         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10039             return -TARGET_EFAULT;
10040         }
10041         header.version = tswap32(target_header->version);
10042         header.pid = tswap32(target_header->pid);
10043 
10044         if (header.version != _LINUX_CAPABILITY_VERSION) {
10045             /* Versions 2 and up take a pointer to two user_data structs */
10046             data_items = 2;
10047         }
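        /* The original (v1) capability ABI uses a single 32-bit set per
         * field; later versions (v2/v3) use two, covering capabilities 0-63,
         * so they read or write two user_cap_data structs.
         */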
10048 
10049         target_datalen = sizeof(*target_data) * data_items;
10050 
10051         if (arg2) {
10052             if (num == TARGET_NR_capget) {
10053                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10054             } else {
10055                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10056             }
10057             if (!target_data) {
10058                 unlock_user_struct(target_header, arg1, 0);
10059                 return -TARGET_EFAULT;
10060             }
10061 
10062             if (num == TARGET_NR_capset) {
10063                 for (i = 0; i < data_items; i++) {
10064                     data[i].effective = tswap32(target_data[i].effective);
10065                     data[i].permitted = tswap32(target_data[i].permitted);
10066                     data[i].inheritable = tswap32(target_data[i].inheritable);
10067                 }
10068             }
10069 
10070             dataptr = data;
10071         }
10072 
10073         if (num == TARGET_NR_capget) {
10074             ret = get_errno(capget(&header, dataptr));
10075         } else {
10076             ret = get_errno(capset(&header, dataptr));
10077         }
10078 
10079         /* The kernel always updates version for both capget and capset */
10080         target_header->version = tswap32(header.version);
10081         unlock_user_struct(target_header, arg1, 1);
10082 
10083         if (arg2) {
10084             if (num == TARGET_NR_capget) {
10085                 for (i = 0; i < data_items; i++) {
10086                     target_data[i].effective = tswap32(data[i].effective);
10087                     target_data[i].permitted = tswap32(data[i].permitted);
10088                     target_data[i].inheritable = tswap32(data[i].inheritable);
10089                 }
10090                 unlock_user(target_data, arg2, target_datalen);
10091             } else {
10092                 unlock_user(target_data, arg2, 0);
10093             }
10094         }
10095         return ret;
10096     }
10097     case TARGET_NR_sigaltstack:
10098         return do_sigaltstack(arg1, arg2,
10099                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10100 
10101 #ifdef CONFIG_SENDFILE
10102 #ifdef TARGET_NR_sendfile
10103     case TARGET_NR_sendfile:
10104     {
10105         off_t *offp = NULL;
10106         off_t off;
10107         if (arg3) {
10108             ret = get_user_sal(off, arg3);
10109             if (is_error(ret)) {
10110                 return ret;
10111             }
10112             offp = &off;
10113         }
10114         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10115         if (!is_error(ret) && arg3) {
10116             abi_long ret2 = put_user_sal(off, arg3);
10117             if (is_error(ret2)) {
10118                 ret = ret2;
10119             }
10120         }
10121         return ret;
10122     }
10123 #endif
10124 #ifdef TARGET_NR_sendfile64
10125     case TARGET_NR_sendfile64:
10126     {
10127         off_t *offp = NULL;
10128         off_t off;
10129         if (arg3) {
10130             ret = get_user_s64(off, arg3);
10131             if (is_error(ret)) {
10132                 return ret;
10133             }
10134             offp = &off;
10135         }
10136         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10137         if (!is_error(ret) && arg3) {
10138             abi_long ret2 = put_user_s64(off, arg3);
10139             if (is_error(ret2)) {
10140                 ret = ret2;
10141             }
10142         }
10143         return ret;
10144     }
10145 #endif
10146 #endif
10147 #ifdef TARGET_NR_vfork
10148     case TARGET_NR_vfork:
10149         return get_errno(do_fork(cpu_env,
10150                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10151                          0, 0, 0, 0));
10152 #endif
10153 #ifdef TARGET_NR_ugetrlimit
10154     case TARGET_NR_ugetrlimit:
10155     {
10156         struct rlimit rlim;
10157         int resource = target_to_host_resource(arg1);
10158         ret = get_errno(getrlimit(resource, &rlim));
10159         if (!is_error(ret)) {
10160             struct target_rlimit *target_rlim;
10161             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10162                 return -TARGET_EFAULT;
10163             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10164             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10165             unlock_user_struct(target_rlim, arg2, 1);
10166         }
10167         return ret;
10168     }
10169 #endif
10170 #ifdef TARGET_NR_truncate64
10171     case TARGET_NR_truncate64:
10172         if (!(p = lock_user_string(arg1)))
10173             return -TARGET_EFAULT;
10174         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10175         unlock_user(p, arg1, 0);
10176         return ret;
10177 #endif
10178 #ifdef TARGET_NR_ftruncate64
10179     case TARGET_NR_ftruncate64:
10180         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10181 #endif
10182 #ifdef TARGET_NR_stat64
10183     case TARGET_NR_stat64:
10184         if (!(p = lock_user_string(arg1))) {
10185             return -TARGET_EFAULT;
10186         }
10187         ret = get_errno(stat(path(p), &st));
10188         unlock_user(p, arg1, 0);
10189         if (!is_error(ret))
10190             ret = host_to_target_stat64(cpu_env, arg2, &st);
10191         return ret;
10192 #endif
10193 #ifdef TARGET_NR_lstat64
10194     case TARGET_NR_lstat64:
10195         if (!(p = lock_user_string(arg1))) {
10196             return -TARGET_EFAULT;
10197         }
10198         ret = get_errno(lstat(path(p), &st));
10199         unlock_user(p, arg1, 0);
10200         if (!is_error(ret))
10201             ret = host_to_target_stat64(cpu_env, arg2, &st);
10202         return ret;
10203 #endif
10204 #ifdef TARGET_NR_fstat64
10205     case TARGET_NR_fstat64:
10206         ret = get_errno(fstat(arg1, &st));
10207         if (!is_error(ret))
10208             ret = host_to_target_stat64(cpu_env, arg2, &st);
10209         return ret;
10210 #endif
10211 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10212 #ifdef TARGET_NR_fstatat64
10213     case TARGET_NR_fstatat64:
10214 #endif
10215 #ifdef TARGET_NR_newfstatat
10216     case TARGET_NR_newfstatat:
10217 #endif
10218         if (!(p = lock_user_string(arg2))) {
10219             return -TARGET_EFAULT;
10220         }
10221         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10222         unlock_user(p, arg2, 0);
10223         if (!is_error(ret))
10224             ret = host_to_target_stat64(cpu_env, arg3, &st);
10225         return ret;
10226 #endif
10227 #if defined(TARGET_NR_statx)
10228     case TARGET_NR_statx:
10229         {
10230             struct target_statx *target_stx;
10231             int dirfd = arg1;
10232             int flags = arg3;
10233 
10234             p = lock_user_string(arg2);
10235             if (p == NULL) {
10236                 return -TARGET_EFAULT;
10237             }
10238 #if defined(__NR_statx)
10239             {
10240                 /*
10241                  * It is assumed that struct statx is architecture independent.
10242                  */
10243                 struct target_statx host_stx;
10244                 int mask = arg4;
10245 
10246                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10247                 if (!is_error(ret)) {
10248                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10249                         unlock_user(p, arg2, 0);
10250                         return -TARGET_EFAULT;
10251                     }
10252                 }
10253 
10254                 if (ret != -TARGET_ENOSYS) {
10255                     unlock_user(p, arg2, 0);
10256                     return ret;
10257                 }
10258             }
10259 #endif
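            /* Fall back to fstatat() when the host has no statx syscall (or
             * it returned ENOSYS) and fill in only the basic statx fields;
             * nanosecond timestamp components are left at zero here.
             */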
10260             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10261             unlock_user(p, arg2, 0);
10262 
10263             if (!is_error(ret)) {
10264                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10265                     return -TARGET_EFAULT;
10266                 }
10267                 memset(target_stx, 0, sizeof(*target_stx));
10268                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10269                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10270                 __put_user(st.st_ino, &target_stx->stx_ino);
10271                 __put_user(st.st_mode, &target_stx->stx_mode);
10272                 __put_user(st.st_uid, &target_stx->stx_uid);
10273                 __put_user(st.st_gid, &target_stx->stx_gid);
10274                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10275                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10276                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10277                 __put_user(st.st_size, &target_stx->stx_size);
10278                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10279                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10280                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10281                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10282                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10283                 unlock_user_struct(target_stx, arg5, 1);
10284             }
10285         }
10286         return ret;
10287 #endif
10288 #ifdef TARGET_NR_lchown
10289     case TARGET_NR_lchown:
10290         if (!(p = lock_user_string(arg1)))
10291             return -TARGET_EFAULT;
10292         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10293         unlock_user(p, arg1, 0);
10294         return ret;
10295 #endif
10296 #ifdef TARGET_NR_getuid
10297     case TARGET_NR_getuid:
10298         return get_errno(high2lowuid(getuid()));
10299 #endif
10300 #ifdef TARGET_NR_getgid
10301     case TARGET_NR_getgid:
10302         return get_errno(high2lowgid(getgid()));
10303 #endif
10304 #ifdef TARGET_NR_geteuid
10305     case TARGET_NR_geteuid:
10306         return get_errno(high2lowuid(geteuid()));
10307 #endif
10308 #ifdef TARGET_NR_getegid
10309     case TARGET_NR_getegid:
10310         return get_errno(high2lowgid(getegid()));
10311 #endif
10312     case TARGET_NR_setreuid:
10313         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10314     case TARGET_NR_setregid:
10315         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10316     case TARGET_NR_getgroups:
10317         {
10318             int gidsetsize = arg1;
10319             target_id *target_grouplist;
10320             gid_t *grouplist;
10321             int i;
10322 
10323             grouplist = alloca(gidsetsize * sizeof(gid_t));
10324             ret = get_errno(getgroups(gidsetsize, grouplist));
10325             if (gidsetsize == 0)
10326                 return ret;
10327             if (!is_error(ret)) {
10328                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10329                 if (!target_grouplist)
10330                     return -TARGET_EFAULT;
10331                 for (i = 0; i < ret; i++)
10332                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10333                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10334             }
10335         }
10336         return ret;
10337     case TARGET_NR_setgroups:
10338         {
10339             int gidsetsize = arg1;
10340             target_id *target_grouplist;
10341             gid_t *grouplist = NULL;
10342             int i;
10343             if (gidsetsize) {
10344                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10345                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10346                 if (!target_grouplist) {
10347                     return -TARGET_EFAULT;
10348                 }
10349                 for (i = 0; i < gidsetsize; i++) {
10350                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10351                 }
10352                 unlock_user(target_grouplist, arg2, 0);
10353             }
10354             return get_errno(setgroups(gidsetsize, grouplist));
10355         }
10356     case TARGET_NR_fchown:
10357         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10358 #if defined(TARGET_NR_fchownat)
10359     case TARGET_NR_fchownat:
10360         if (!(p = lock_user_string(arg2)))
10361             return -TARGET_EFAULT;
10362         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10363                                  low2highgid(arg4), arg5));
10364         unlock_user(p, arg2, 0);
10365         return ret;
10366 #endif
10367 #ifdef TARGET_NR_setresuid
10368     case TARGET_NR_setresuid:
10369         return get_errno(sys_setresuid(low2highuid(arg1),
10370                                        low2highuid(arg2),
10371                                        low2highuid(arg3)));
10372 #endif
10373 #ifdef TARGET_NR_getresuid
10374     case TARGET_NR_getresuid:
10375         {
10376             uid_t ruid, euid, suid;
10377             ret = get_errno(getresuid(&ruid, &euid, &suid));
10378             if (!is_error(ret)) {
10379                 if (put_user_id(high2lowuid(ruid), arg1)
10380                     || put_user_id(high2lowuid(euid), arg2)
10381                     || put_user_id(high2lowuid(suid), arg3))
10382                     return -TARGET_EFAULT;
10383             }
10384         }
10385         return ret;
10386 #endif
10387 #ifdef TARGET_NR_getresgid
10388     case TARGET_NR_setresgid:
10389         return get_errno(sys_setresgid(low2highgid(arg1),
10390                                        low2highgid(arg2),
10391                                        low2highgid(arg3)));
10392 #endif
10393 #ifdef TARGET_NR_getresgid
10394     case TARGET_NR_getresgid:
10395         {
10396             gid_t rgid, egid, sgid;
10397             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10398             if (!is_error(ret)) {
10399                 if (put_user_id(high2lowgid(rgid), arg1)
10400                     || put_user_id(high2lowgid(egid), arg2)
10401                     || put_user_id(high2lowgid(sgid), arg3))
10402                     return -TARGET_EFAULT;
10403             }
10404         }
10405         return ret;
10406 #endif
10407 #ifdef TARGET_NR_chown
10408     case TARGET_NR_chown:
10409         if (!(p = lock_user_string(arg1)))
10410             return -TARGET_EFAULT;
10411         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10412         unlock_user(p, arg1, 0);
10413         return ret;
10414 #endif
10415     case TARGET_NR_setuid:
10416         return get_errno(sys_setuid(low2highuid(arg1)));
10417     case TARGET_NR_setgid:
10418         return get_errno(sys_setgid(low2highgid(arg1)));
10419     case TARGET_NR_setfsuid:
10420         return get_errno(setfsuid(arg1));
10421     case TARGET_NR_setfsgid:
10422         return get_errno(setfsgid(arg1));
10423 
10424 #ifdef TARGET_NR_lchown32
10425     case TARGET_NR_lchown32:
10426         if (!(p = lock_user_string(arg1)))
10427             return -TARGET_EFAULT;
10428         ret = get_errno(lchown(p, arg2, arg3));
10429         unlock_user(p, arg1, 0);
10430         return ret;
10431 #endif
10432 #ifdef TARGET_NR_getuid32
10433     case TARGET_NR_getuid32:
10434         return get_errno(getuid());
10435 #endif
10436 
10437 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10438     /* Alpha specific */
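    /* Alpha's getx*id calls return a pair of values: the real id in the
     * normal return register and, by convention, the effective id in a4,
     * which is why IR_A4 is set here before returning the real id.
     */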
10439     case TARGET_NR_getxuid:
10440         {
10441             uid_t euid;
10442             euid = geteuid();
10443             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10444         }
10445         return get_errno(getuid());
10446 #endif
10447 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10448     /* Alpha specific */
10449     case TARGET_NR_getxgid:
10450         {
10451             gid_t egid;
10452             egid = getegid();
10453             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10454         }
10455         return get_errno(getgid());
10456 #endif
10457 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10458     /* Alpha specific */
10459     case TARGET_NR_osf_getsysinfo:
10460         ret = -TARGET_EOPNOTSUPP;
10461         switch (arg1) {
10462           case TARGET_GSI_IEEE_FP_CONTROL:
10463             {
10464                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10465                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10466 
10467                 swcr &= ~SWCR_STATUS_MASK;
10468                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10469 
10470                 if (put_user_u64(swcr, arg2))
10471                     return -TARGET_EFAULT;
10472                 ret = 0;
10473             }
10474             break;
10475 
10476           /* case GSI_IEEE_STATE_AT_SIGNAL:
10477              -- Not implemented in linux kernel.
10478              case GSI_UACPROC:
10479              -- Retrieves current unaligned access state; not much used.
10480              case GSI_PROC_TYPE:
10481              -- Retrieves implver information; surely not used.
10482              case GSI_GET_HWRPB:
10483              -- Grabs a copy of the HWRPB; surely not used.
10484           */
10485         }
10486         return ret;
10487 #endif
10488 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10489     /* Alpha specific */
10490     case TARGET_NR_osf_setsysinfo:
10491         ret = -TARGET_EOPNOTSUPP;
10492         switch (arg1) {
10493           case TARGET_SSI_IEEE_FP_CONTROL:
10494             {
10495                 uint64_t swcr, fpcr;
10496 
10497                 if (get_user_u64(swcr, arg2)) {
10498                     return -TARGET_EFAULT;
10499                 }
10500 
10501                 /*
10502                  * The kernel calls swcr_update_status to update the
10503                  * status bits from the fpcr at every point that it
10504                  * could be queried.  Therefore, we store the status
10505                  * bits only in FPCR.
10506                  */
10507                 ((CPUAlphaState *)cpu_env)->swcr
10508                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10509 
10510                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10511                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10512                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10513                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10514                 ret = 0;
10515             }
10516             break;
10517 
10518           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10519             {
10520                 uint64_t exc, fpcr, fex;
10521 
10522                 if (get_user_u64(exc, arg2)) {
10523                     return -TARGET_EFAULT;
10524                 }
10525                 exc &= SWCR_STATUS_MASK;
10526                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10527 
10528                 /* Old exceptions are not signaled.  */
10529                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10530                 fex = exc & ~fex;
10531                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10532                 fex &= ((CPUArchState *)cpu_env)->swcr;
10533 
10534                 /* Update the hardware fpcr.  */
10535                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10536                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10537 
10538                 if (fex) {
10539                     int si_code = TARGET_FPE_FLTUNK;
10540                     target_siginfo_t info;
10541 
10542                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10543                         si_code = TARGET_FPE_FLTUND;
10544                     }
10545                     if (fex & SWCR_TRAP_ENABLE_INE) {
10546                         si_code = TARGET_FPE_FLTRES;
10547                     }
10548                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10549                         si_code = TARGET_FPE_FLTUND;
10550                     }
10551                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10552                         si_code = TARGET_FPE_FLTOVF;
10553                     }
10554                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10555                         si_code = TARGET_FPE_FLTDIV;
10556                     }
10557                     if (fex & SWCR_TRAP_ENABLE_INV) {
10558                         si_code = TARGET_FPE_FLTINV;
10559                     }
10560 
10561                     info.si_signo = SIGFPE;
10562                     info.si_errno = 0;
10563                     info.si_code = si_code;
10564                     info._sifields._sigfault._addr
10565                         = ((CPUArchState *)cpu_env)->pc;
10566                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10567                                  QEMU_SI_FAULT, &info);
10568                 }
10569                 ret = 0;
10570             }
10571             break;
10572 
10573           /* case SSI_NVPAIRS:
10574              -- Used with SSIN_UACPROC to enable unaligned accesses.
10575              case SSI_IEEE_STATE_AT_SIGNAL:
10576              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10577              -- Not implemented in linux kernel
10578           */
10579         }
10580         return ret;
10581 #endif
10582 #ifdef TARGET_NR_osf_sigprocmask
10583     /* Alpha specific.  */
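    /*
     * Unlike POSIX sigprocmask(), the OSF/1 variant passes the new mask by
     * value in arg2 and returns the previous mask as the syscall result,
     * hence the conversion through the old-style sigset helpers.
     */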
10584     case TARGET_NR_osf_sigprocmask:
10585         {
10586             abi_ulong mask;
10587             int how;
10588             sigset_t set, oldset;
10589 
            switch (arg1) {
10591             case TARGET_SIG_BLOCK:
10592                 how = SIG_BLOCK;
10593                 break;
10594             case TARGET_SIG_UNBLOCK:
10595                 how = SIG_UNBLOCK;
10596                 break;
10597             case TARGET_SIG_SETMASK:
10598                 how = SIG_SETMASK;
10599                 break;
10600             default:
10601                 return -TARGET_EINVAL;
10602             }
10603             mask = arg2;
10604             target_to_host_old_sigset(&set, &mask);
10605             ret = do_sigprocmask(how, &set, &oldset);
10606             if (!ret) {
10607                 host_to_target_old_sigset(&mask, &oldset);
10608                 ret = mask;
10609             }
10610         }
10611         return ret;
10612 #endif
10613 
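    /*
     * The *32 variants below exist for 32-bit targets whose original
     * uid/gid syscalls were limited to 16-bit IDs; they take full 32-bit
     * IDs and therefore map directly onto the host calls.
     */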
10614 #ifdef TARGET_NR_getgid32
10615     case TARGET_NR_getgid32:
10616         return get_errno(getgid());
10617 #endif
10618 #ifdef TARGET_NR_geteuid32
10619     case TARGET_NR_geteuid32:
10620         return get_errno(geteuid());
10621 #endif
10622 #ifdef TARGET_NR_getegid32
10623     case TARGET_NR_getegid32:
10624         return get_errno(getegid());
10625 #endif
10626 #ifdef TARGET_NR_setreuid32
10627     case TARGET_NR_setreuid32:
10628         return get_errno(setreuid(arg1, arg2));
10629 #endif
10630 #ifdef TARGET_NR_setregid32
10631     case TARGET_NR_setregid32:
10632         return get_errno(setregid(arg1, arg2));
10633 #endif
10634 #ifdef TARGET_NR_getgroups32
10635     case TARGET_NR_getgroups32:
10636         {
10637             int gidsetsize = arg1;
10638             uint32_t *target_grouplist;
10639             gid_t *grouplist;
10640             int i;
10641 
10642             grouplist = alloca(gidsetsize * sizeof(gid_t));
10643             ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                return ret;
            }
10646             if (!is_error(ret)) {
10647                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10648                 if (!target_grouplist) {
10649                     return -TARGET_EFAULT;
10650                 }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
10653                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10654             }
10655         }
10656         return ret;
10657 #endif
10658 #ifdef TARGET_NR_setgroups32
10659     case TARGET_NR_setgroups32:
10660         {
10661             int gidsetsize = arg1;
10662             uint32_t *target_grouplist;
10663             gid_t *grouplist;
10664             int i;
10665 
10666             grouplist = alloca(gidsetsize * sizeof(gid_t));
10667             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10668             if (!target_grouplist) {
10669                 return -TARGET_EFAULT;
10670             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
10673             unlock_user(target_grouplist, arg2, 0);
10674             return get_errno(setgroups(gidsetsize, grouplist));
10675         }
10676 #endif
10677 #ifdef TARGET_NR_fchown32
10678     case TARGET_NR_fchown32:
10679         return get_errno(fchown(arg1, arg2, arg3));
10680 #endif
10681 #ifdef TARGET_NR_setresuid32
10682     case TARGET_NR_setresuid32:
10683         return get_errno(sys_setresuid(arg1, arg2, arg3));
10684 #endif
10685 #ifdef TARGET_NR_getresuid32
10686     case TARGET_NR_getresuid32:
10687         {
10688             uid_t ruid, euid, suid;
10689             ret = get_errno(getresuid(&ruid, &euid, &suid));
10690             if (!is_error(ret)) {
10691                 if (put_user_u32(ruid, arg1)
10692                     || put_user_u32(euid, arg2)
10693                     || put_user_u32(suid, arg3))
10694                     return -TARGET_EFAULT;
10695             }
10696         }
10697         return ret;
10698 #endif
10699 #ifdef TARGET_NR_setresgid32
10700     case TARGET_NR_setresgid32:
10701         return get_errno(sys_setresgid(arg1, arg2, arg3));
10702 #endif
10703 #ifdef TARGET_NR_getresgid32
10704     case TARGET_NR_getresgid32:
10705         {
10706             gid_t rgid, egid, sgid;
10707             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10708             if (!is_error(ret)) {
10709                 if (put_user_u32(rgid, arg1)
10710                     || put_user_u32(egid, arg2)
10711                     || put_user_u32(sgid, arg3))
10712                     return -TARGET_EFAULT;
10713             }
10714         }
10715         return ret;
10716 #endif
10717 #ifdef TARGET_NR_chown32
10718     case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
10721         ret = get_errno(chown(p, arg2, arg3));
10722         unlock_user(p, arg1, 0);
10723         return ret;
10724 #endif
10725 #ifdef TARGET_NR_setuid32
10726     case TARGET_NR_setuid32:
10727         return get_errno(sys_setuid(arg1));
10728 #endif
10729 #ifdef TARGET_NR_setgid32
10730     case TARGET_NR_setgid32:
10731         return get_errno(sys_setgid(arg1));
10732 #endif
10733 #ifdef TARGET_NR_setfsuid32
10734     case TARGET_NR_setfsuid32:
10735         return get_errno(setfsuid(arg1));
10736 #endif
10737 #ifdef TARGET_NR_setfsgid32
10738     case TARGET_NR_setfsgid32:
10739         return get_errno(setfsgid(arg1));
10740 #endif
10741 #ifdef TARGET_NR_mincore
10742     case TARGET_NR_mincore:
10743         {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            abi_ulong vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);

            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* The result vector is one byte per page, not a string, so
             * lock it as a write buffer of that size, assuming the target
             * page size is not larger than the host's.
             */
            p = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, vec_len);
            }
            unlock_user(a, arg1, 0);
10756         }
10757         return ret;
10758 #endif
10759 #ifdef TARGET_NR_arm_fadvise64_64
10760     case TARGET_NR_arm_fadvise64_64:
10761         /* arm_fadvise64_64 looks like fadvise64_64 but
10762          * with different argument order: fd, advice, offset, len
10763          * rather than the usual fd, offset, len, advice.
10764          * Note that offset and len are both 64-bit so appear as
10765          * pairs of 32-bit registers.
10766          */
10767         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10768                             target_offset64(arg5, arg6), arg2);
10769         return -host_to_target_errno(ret);
10770 #endif
10771 
10772 #if TARGET_ABI_BITS == 32
10773 
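    /*
     * On several 32-bit ABIs a 64-bit syscall argument must sit in an
     * aligned register pair, which inserts a padding slot before it.
     * regpairs_aligned() reports when that happened so the arguments can
     * be shifted back down before the 64-bit values are reassembled with
     * target_offset64().
     */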
10774 #ifdef TARGET_NR_fadvise64_64
10775     case TARGET_NR_fadvise64_64:
10776 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10777         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10778         ret = arg2;
10779         arg2 = arg3;
10780         arg3 = arg4;
10781         arg4 = arg5;
10782         arg5 = arg6;
10783         arg6 = ret;
10784 #else
10785         /* 6 args: fd, offset (high, low), len (high, low), advice */
10786         if (regpairs_aligned(cpu_env, num)) {
10787             /* offset is in (3,4), len in (5,6) and advice in 7 */
10788             arg2 = arg3;
10789             arg3 = arg4;
10790             arg4 = arg5;
10791             arg5 = arg6;
10792             arg6 = arg7;
10793         }
10794 #endif
10795         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10796                             target_offset64(arg4, arg5), arg6);
10797         return -host_to_target_errno(ret);
10798 #endif
10799 
10800 #ifdef TARGET_NR_fadvise64
10801     case TARGET_NR_fadvise64:
10802         /* 5 args: fd, offset (high, low), len, advice */
10803         if (regpairs_aligned(cpu_env, num)) {
10804             /* offset is in (3,4), len in 5 and advice in 6 */
10805             arg2 = arg3;
10806             arg3 = arg4;
10807             arg4 = arg5;
10808             arg5 = arg6;
10809         }
10810         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10811         return -host_to_target_errno(ret);
10812 #endif
10813 
10814 #else /* not a 32-bit ABI */
10815 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10816 #ifdef TARGET_NR_fadvise64_64
10817     case TARGET_NR_fadvise64_64:
10818 #endif
10819 #ifdef TARGET_NR_fadvise64
10820     case TARGET_NR_fadvise64:
10821 #endif
10822 #ifdef TARGET_S390X
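        /*
         * s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 instead of the
         * generic 4/5, so remap the guest's advice value onto the host
         * definitions and turn the guest's 4/5 into invalid values.
         */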
10823         switch (arg4) {
10824         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10825         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10826         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10827         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10828         default: break;
10829         }
10830 #endif
10831         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10832 #endif
10833 #endif /* end of 64-bit ABI fadvise handling */
10834 
10835 #ifdef TARGET_NR_madvise
10836     case TARGET_NR_madvise:
10837         /* A straight passthrough may not be safe because qemu sometimes
10838            turns private file-backed mappings into anonymous mappings.
10839            This will break MADV_DONTNEED.
10840            This is a hint, so ignoring and returning success is ok.  */
10841         return 0;
10842 #endif
10843 #if TARGET_ABI_BITS == 32
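    /*
     * fcntl64 on 32-bit ABIs: the 64-bit locking commands carry a
     * struct flock64 whose layout differs between target and host (and
     * between arm EABI and OABI), so convert it in both directions and
     * hand any other command to the generic do_fcntl() path.
     */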
10844     case TARGET_NR_fcntl64:
10845     {
        int cmd;
        struct flock64 fl;
10848         from_flock64_fn *copyfrom = copy_from_user_flock64;
10849         to_flock64_fn *copyto = copy_to_user_flock64;
10850 
10851 #ifdef TARGET_ARM
10852         if (!((CPUARMState *)cpu_env)->eabi) {
10853             copyfrom = copy_from_user_oabi_flock64;
10854             copyto = copy_to_user_oabi_flock64;
10855         }
10856 #endif
10857 
        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
10864         case TARGET_F_GETLK64:
10865             ret = copyfrom(&fl, arg3);
10866             if (ret) {
10867                 break;
10868             }
10869             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10870             if (ret == 0) {
10871                 ret = copyto(arg3, &fl);
10872             }
            break;
10874 
10875         case TARGET_F_SETLK64:
10876         case TARGET_F_SETLKW64:
10877             ret = copyfrom(&fl, arg3);
10878             if (ret) {
10879                 break;
10880             }
10881             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
10883         default:
10884             ret = do_fcntl(arg1, arg2, arg3);
10885             break;
10886         }
10887         return ret;
10888     }
10889 #endif
10890 #ifdef TARGET_NR_cacheflush
10891     case TARGET_NR_cacheflush:
10892         /* self-modifying code is handled automatically, so nothing needed */
10893         return 0;
10894 #endif
10895 #ifdef TARGET_NR_getpagesize
10896     case TARGET_NR_getpagesize:
10897         return TARGET_PAGE_SIZE;
10898 #endif
10899     case TARGET_NR_gettid:
10900         return get_errno(sys_gettid());
10901 #ifdef TARGET_NR_readahead
10902     case TARGET_NR_readahead:
10903 #if TARGET_ABI_BITS == 32
10904         if (regpairs_aligned(cpu_env, num)) {
10905             arg2 = arg3;
10906             arg3 = arg4;
10907             arg4 = arg5;
10908         }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10910 #else
10911         ret = get_errno(readahead(arg1, arg2, arg3));
10912 #endif
10913         return ret;
10914 #endif
10915 #ifdef CONFIG_ATTR
10916 #ifdef TARGET_NR_setxattr
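    /*
     * Extended attribute syscalls: lock the guest name/value buffers, call
     * the matching host xattr function, and fail with -TARGET_EFAULT if
     * any guest pointer cannot be locked.  A NULL value buffer is passed
     * through unchanged so the usual size-probing convention (calling with
     * size 0) keeps working.
     */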
10917     case TARGET_NR_listxattr:
10918     case TARGET_NR_llistxattr:
10919     {
10920         void *p, *b = 0;
10921         if (arg2) {
10922             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10923             if (!b) {
10924                 return -TARGET_EFAULT;
10925             }
10926         }
10927         p = lock_user_string(arg1);
10928         if (p) {
10929             if (num == TARGET_NR_listxattr) {
10930                 ret = get_errno(listxattr(p, b, arg3));
10931             } else {
10932                 ret = get_errno(llistxattr(p, b, arg3));
10933             }
10934         } else {
10935             ret = -TARGET_EFAULT;
10936         }
10937         unlock_user(p, arg1, 0);
10938         unlock_user(b, arg2, arg3);
10939         return ret;
10940     }
10941     case TARGET_NR_flistxattr:
10942     {
10943         void *b = 0;
10944         if (arg2) {
10945             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10946             if (!b) {
10947                 return -TARGET_EFAULT;
10948             }
10949         }
10950         ret = get_errno(flistxattr(arg1, b, arg3));
10951         unlock_user(b, arg2, arg3);
10952         return ret;
10953     }
10954     case TARGET_NR_setxattr:
10955     case TARGET_NR_lsetxattr:
10956         {
10957             void *p, *n, *v = 0;
10958             if (arg3) {
10959                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10960                 if (!v) {
10961                     return -TARGET_EFAULT;
10962                 }
10963             }
10964             p = lock_user_string(arg1);
10965             n = lock_user_string(arg2);
10966             if (p && n) {
10967                 if (num == TARGET_NR_setxattr) {
10968                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
10969                 } else {
10970                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10971                 }
10972             } else {
10973                 ret = -TARGET_EFAULT;
10974             }
10975             unlock_user(p, arg1, 0);
10976             unlock_user(n, arg2, 0);
10977             unlock_user(v, arg3, 0);
10978         }
10979         return ret;
10980     case TARGET_NR_fsetxattr:
10981         {
10982             void *n, *v = 0;
10983             if (arg3) {
10984                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10985                 if (!v) {
10986                     return -TARGET_EFAULT;
10987                 }
10988             }
10989             n = lock_user_string(arg2);
10990             if (n) {
10991                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10992             } else {
10993                 ret = -TARGET_EFAULT;
10994             }
10995             unlock_user(n, arg2, 0);
10996             unlock_user(v, arg3, 0);
10997         }
10998         return ret;
10999     case TARGET_NR_getxattr:
11000     case TARGET_NR_lgetxattr:
11001         {
11002             void *p, *n, *v = 0;
11003             if (arg3) {
11004                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11005                 if (!v) {
11006                     return -TARGET_EFAULT;
11007                 }
11008             }
11009             p = lock_user_string(arg1);
11010             n = lock_user_string(arg2);
11011             if (p && n) {
11012                 if (num == TARGET_NR_getxattr) {
11013                     ret = get_errno(getxattr(p, n, v, arg4));
11014                 } else {
11015                     ret = get_errno(lgetxattr(p, n, v, arg4));
11016                 }
11017             } else {
11018                 ret = -TARGET_EFAULT;
11019             }
11020             unlock_user(p, arg1, 0);
11021             unlock_user(n, arg2, 0);
11022             unlock_user(v, arg3, arg4);
11023         }
11024         return ret;
11025     case TARGET_NR_fgetxattr:
11026         {
11027             void *n, *v = 0;
11028             if (arg3) {
11029                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11030                 if (!v) {
11031                     return -TARGET_EFAULT;
11032                 }
11033             }
11034             n = lock_user_string(arg2);
11035             if (n) {
11036                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11037             } else {
11038                 ret = -TARGET_EFAULT;
11039             }
11040             unlock_user(n, arg2, 0);
11041             unlock_user(v, arg3, arg4);
11042         }
11043         return ret;
11044     case TARGET_NR_removexattr:
11045     case TARGET_NR_lremovexattr:
11046         {
11047             void *p, *n;
11048             p = lock_user_string(arg1);
11049             n = lock_user_string(arg2);
11050             if (p && n) {
11051                 if (num == TARGET_NR_removexattr) {
11052                     ret = get_errno(removexattr(p, n));
11053                 } else {
11054                     ret = get_errno(lremovexattr(p, n));
11055                 }
11056             } else {
11057                 ret = -TARGET_EFAULT;
11058             }
11059             unlock_user(p, arg1, 0);
11060             unlock_user(n, arg2, 0);
11061         }
11062         return ret;
11063     case TARGET_NR_fremovexattr:
11064         {
11065             void *n;
11066             n = lock_user_string(arg2);
11067             if (n) {
11068                 ret = get_errno(fremovexattr(arg1, n));
11069             } else {
11070                 ret = -TARGET_EFAULT;
11071             }
11072             unlock_user(n, arg2, 0);
11073         }
11074         return ret;
11075 #endif
11076 #endif /* CONFIG_ATTR */
11077 #ifdef TARGET_NR_set_thread_area
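    /*
     * set_thread_area: each target keeps its TLS pointer somewhere
     * different.  MIPS uses the CP0 UserLocal register, CRIS a PR_PID
     * register whose low bits must be clear, 32-bit x86 a thread-area
     * descriptor, and m68k simply records the value in the TaskState.
     */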
11078     case TARGET_NR_set_thread_area:
11079 #if defined(TARGET_MIPS)
11080       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11081       return 0;
11082 #elif defined(TARGET_CRIS)
11083       if (arg1 & 0xff)
11084           ret = -TARGET_EINVAL;
11085       else {
11086           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11087           ret = 0;
11088       }
11089       return ret;
11090 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11091       return do_set_thread_area(cpu_env, arg1);
11092 #elif defined(TARGET_M68K)
11093       {
11094           TaskState *ts = cpu->opaque;
11095           ts->tp_value = arg1;
11096           return 0;
11097       }
11098 #else
11099       return -TARGET_ENOSYS;
11100 #endif
11101 #endif
11102 #ifdef TARGET_NR_get_thread_area
11103     case TARGET_NR_get_thread_area:
11104 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11105         return do_get_thread_area(cpu_env, arg1);
11106 #elif defined(TARGET_M68K)
11107         {
11108             TaskState *ts = cpu->opaque;
11109             return ts->tp_value;
11110         }
11111 #else
11112         return -TARGET_ENOSYS;
11113 #endif
11114 #endif
11115 #ifdef TARGET_NR_getdomainname
11116     case TARGET_NR_getdomainname:
11117         return -TARGET_ENOSYS;
11118 #endif
11119 
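    /*
     * POSIX clock syscalls: struct timespec differs between guest and host
     * ABIs, so every call goes through target_to_host_timespec() or
     * host_to_target_timespec(), and a conversion failure is reported as
     * -TARGET_EFAULT.
     */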
11120 #ifdef TARGET_NR_clock_settime
11121     case TARGET_NR_clock_settime:
11122     {
11123         struct timespec ts;
11124 
11125         ret = target_to_host_timespec(&ts, arg2);
11126         if (!is_error(ret)) {
11127             ret = get_errno(clock_settime(arg1, &ts));
11128         }
11129         return ret;
11130     }
11131 #endif
11132 #ifdef TARGET_NR_clock_gettime
11133     case TARGET_NR_clock_gettime:
11134     {
11135         struct timespec ts;
11136         ret = get_errno(clock_gettime(arg1, &ts));
11137         if (!is_error(ret)) {
11138             ret = host_to_target_timespec(arg2, &ts);
11139         }
11140         return ret;
11141     }
11142 #endif
11143 #ifdef TARGET_NR_clock_getres
11144     case TARGET_NR_clock_getres:
11145     {
11146         struct timespec ts;
11147         ret = get_errno(clock_getres(arg1, &ts));
11148         if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
11150         }
11151         return ret;
11152     }
11153 #endif
11154 #ifdef TARGET_NR_clock_nanosleep
11155     case TARGET_NR_clock_nanosleep:
11156     {
        struct timespec ts;

        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            host_to_target_timespec(arg4, &ts);
        }
11163 
11164 #if defined(TARGET_PPC)
11165         /* clock_nanosleep is odd in that it returns positive errno values.
11166          * On PPC, CR0 bit 3 should be set in such a situation. */
11167         if (ret && ret != -TARGET_ERESTARTSYS) {
11168             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11169         }
11170 #endif
11171         return ret;
11172     }
11173 #endif
11174 
11175 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11176     case TARGET_NR_set_tid_address:
11177         return get_errno(set_tid_address((int *)g2h(arg1)));
11178 #endif
11179 
11180     case TARGET_NR_tkill:
11181         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11182 
11183     case TARGET_NR_tgkill:
11184         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11185                          target_to_host_signal(arg3)));
11186 
11187 #ifdef TARGET_NR_set_robust_list
11188     case TARGET_NR_set_robust_list:
11189     case TARGET_NR_get_robust_list:
11190         /* The ABI for supporting robust futexes has userspace pass
11191          * the kernel a pointer to a linked list which is updated by
11192          * userspace after the syscall; the list is walked by the kernel
11193          * when the thread exits. Since the linked list in QEMU guest
11194          * memory isn't a valid linked list for the host and we have
11195          * no way to reliably intercept the thread-death event, we can't
11196          * support these. Silently return ENOSYS so that guest userspace
11197          * falls back to a non-robust futex implementation (which should
11198          * be OK except in the corner case of the guest crashing while
11199          * holding a mutex that is shared with another process via
11200          * shared memory).
11201          */
11202         return -TARGET_ENOSYS;
11203 #endif
11204 
11205 #if defined(TARGET_NR_utimensat)
11206     case TARGET_NR_utimensat:
11207         {
11208             struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3) ||
                    target_to_host_timespec(ts + 1,
                                            arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
11219                 if (!(p = lock_user_string(arg2))) {
11220                     return -TARGET_EFAULT;
11221                 }
11222                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11223                 unlock_user(p, arg2, 0);
11224             }
11225         }
11226         return ret;
11227 #endif
11228     case TARGET_NR_futex:
11229         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11230 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11231     case TARGET_NR_inotify_init:
11232         ret = get_errno(sys_inotify_init());
11233         if (ret >= 0) {
11234             fd_trans_register(ret, &target_inotify_trans);
11235         }
11236         return ret;
11237 #endif
11238 #ifdef CONFIG_INOTIFY1
11239 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11240     case TARGET_NR_inotify_init1:
11241         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11242                                           fcntl_flags_tbl)));
11243         if (ret >= 0) {
11244             fd_trans_register(ret, &target_inotify_trans);
11245         }
11246         return ret;
11247 #endif
11248 #endif
11249 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11250     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
11254         return ret;
11255 #endif
11256 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11257     case TARGET_NR_inotify_rm_watch:
11258         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11259 #endif
11260 
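    /*
     * POSIX message queues are forwarded to the host mq_* API.  mq_attr
     * structures are converted with copy_from/to_user_mq_attr() and the
     * timed send/receive timeouts via the timespec helpers; mq_notify is
     * not implemented.
     */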
11261 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11262     case TARGET_NR_mq_open:
11263         {
11264             struct mq_attr posix_mq_attr;
11265             struct mq_attr *pposix_mq_attr;
11266             int host_flags;
11267 
11268             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11269             pposix_mq_attr = NULL;
11270             if (arg4) {
11271                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11272                     return -TARGET_EFAULT;
11273                 }
11274                 pposix_mq_attr = &posix_mq_attr;
11275             }
11276             p = lock_user_string(arg1 - 1);
11277             if (!p) {
11278                 return -TARGET_EFAULT;
11279             }
11280             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11281             unlock_user (p, arg1, 0);
11282         }
11283         return ret;
11284 
11285     case TARGET_NR_mq_unlink:
11286         p = lock_user_string(arg1 - 1);
11287         if (!p) {
11288             return -TARGET_EFAULT;
11289         }
11290         ret = get_errno(mq_unlink(p));
11291         unlock_user (p, arg1, 0);
11292         return ret;
11293 
11294     case TARGET_NR_mq_timedsend:
11295         {
11296             struct timespec ts;
11297 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
11300                 target_to_host_timespec(&ts, arg5);
11301                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11302                 host_to_target_timespec(arg5, &ts);
11303             } else {
11304                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11305             }
11306             unlock_user (p, arg2, arg3);
11307         }
11308         return ret;
11309 
11310     case TARGET_NR_mq_timedreceive:
11311         {
11312             struct timespec ts;
11313             unsigned int prio;
11314 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
11317                 target_to_host_timespec(&ts, arg5);
11318                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11319                                                      &prio, &ts));
11320                 host_to_target_timespec(arg5, &ts);
11321             } else {
11322                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11323                                                      &prio, NULL));
11324             }
11325             unlock_user (p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
11328         }
11329         return ret;
11330 
11331     /* Not implemented for now... */
11332 /*     case TARGET_NR_mq_notify: */
11333 /*         break; */
11334 
11335     case TARGET_NR_mq_getsetattr:
11336         {
11337             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11338             ret = 0;
            if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
11341                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11342                                            &posix_mq_attr_out));
11343             } else if (arg3 != 0) {
11344                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11345             }
11346             if (ret == 0 && arg3 != 0) {
11347                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11348             }
11349         }
11350         return ret;
11351 #endif
11352 
11353 #ifdef CONFIG_SPLICE
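    /*
     * tee/splice/vmsplice mostly pass straight through to the host.  For
     * splice, the optional input/output offsets are read from guest memory
     * as 64-bit values and written back afterwards; vmsplice translates
     * the guest iovec with lock_iovec() first.
     */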
11354 #ifdef TARGET_NR_tee
11355     case TARGET_NR_tee:
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        return ret;
11360 #endif
11361 #ifdef TARGET_NR_splice
11362     case TARGET_NR_splice:
11363         {
11364             loff_t loff_in, loff_out;
11365             loff_t *ploff_in = NULL, *ploff_out = NULL;
11366             if (arg2) {
11367                 if (get_user_u64(loff_in, arg2)) {
11368                     return -TARGET_EFAULT;
11369                 }
11370                 ploff_in = &loff_in;
11371             }
11372             if (arg4) {
11373                 if (get_user_u64(loff_out, arg4)) {
11374                     return -TARGET_EFAULT;
11375                 }
11376                 ploff_out = &loff_out;
11377             }
11378             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11379             if (arg2) {
11380                 if (put_user_u64(loff_in, arg2)) {
11381                     return -TARGET_EFAULT;
11382                 }
11383             }
11384             if (arg4) {
11385                 if (put_user_u64(loff_out, arg4)) {
11386                     return -TARGET_EFAULT;
11387                 }
11388             }
11389         }
11390         return ret;
11391 #endif
11392 #ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
11394         {
11395             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11396             if (vec != NULL) {
11397                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11398                 unlock_iovec(vec, arg2, arg3, 0);
11399             } else {
11400                 ret = -host_to_target_errno(errno);
11401             }
11402         }
11403         return ret;
11404 #endif
11405 #endif /* CONFIG_SPLICE */
11406 #ifdef CONFIG_EVENTFD
11407 #if defined(TARGET_NR_eventfd)
11408     case TARGET_NR_eventfd:
11409         ret = get_errno(eventfd(arg1, 0));
11410         if (ret >= 0) {
11411             fd_trans_register(ret, &target_eventfd_trans);
11412         }
11413         return ret;
11414 #endif
11415 #if defined(TARGET_NR_eventfd2)
11416     case TARGET_NR_eventfd2:
11417     {
11418         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11419         if (arg2 & TARGET_O_NONBLOCK) {
11420             host_flags |= O_NONBLOCK;
11421         }
11422         if (arg2 & TARGET_O_CLOEXEC) {
11423             host_flags |= O_CLOEXEC;
11424         }
11425         ret = get_errno(eventfd(arg1, host_flags));
11426         if (ret >= 0) {
11427             fd_trans_register(ret, &target_eventfd_trans);
11428         }
11429         return ret;
11430     }
11431 #endif
11432 #endif /* CONFIG_EVENTFD  */
11433 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11434     case TARGET_NR_fallocate:
11435 #if TARGET_ABI_BITS == 32
11436         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11437                                   target_offset64(arg5, arg6)));
11438 #else
11439         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11440 #endif
11441         return ret;
11442 #endif
11443 #if defined(CONFIG_SYNC_FILE_RANGE)
11444 #if defined(TARGET_NR_sync_file_range)
11445     case TARGET_NR_sync_file_range:
11446 #if TARGET_ABI_BITS == 32
11447 #if defined(TARGET_MIPS)
11448         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11449                                         target_offset64(arg5, arg6), arg7));
11450 #else
11451         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11452                                         target_offset64(arg4, arg5), arg6));
11453 #endif /* !TARGET_MIPS */
11454 #else
11455         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11456 #endif
11457         return ret;
11458 #endif
11459 #if defined(TARGET_NR_sync_file_range2)
11460     case TARGET_NR_sync_file_range2:
11461         /* This is like sync_file_range but the arguments are reordered */
11462 #if TARGET_ABI_BITS == 32
11463         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11464                                         target_offset64(arg5, arg6), arg2));
11465 #else
11466         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11467 #endif
11468         return ret;
11469 #endif
11470 #endif
11471 #if defined(TARGET_NR_signalfd4)
11472     case TARGET_NR_signalfd4:
11473         return do_signalfd4(arg1, arg2, arg4);
11474 #endif
11475 #if defined(TARGET_NR_signalfd)
11476     case TARGET_NR_signalfd:
11477         return do_signalfd4(arg1, arg2, 0);
11478 #endif
11479 #if defined(CONFIG_EPOLL)
11480 #if defined(TARGET_NR_epoll_create)
11481     case TARGET_NR_epoll_create:
11482         return get_errno(epoll_create(arg1));
11483 #endif
11484 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11485     case TARGET_NR_epoll_create1:
11486         return get_errno(epoll_create1(arg1));
11487 #endif
11488 #if defined(TARGET_NR_epoll_ctl)
11489     case TARGET_NR_epoll_ctl:
11490     {
11491         struct epoll_event ep;
11492         struct epoll_event *epp = 0;
11493         if (arg4) {
11494             struct target_epoll_event *target_ep;
11495             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11496                 return -TARGET_EFAULT;
11497             }
11498             ep.events = tswap32(target_ep->events);
11499             /* The epoll_data_t union is just opaque data to the kernel,
11500              * so we transfer all 64 bits across and need not worry what
11501              * actual data type it is.
11502              */
11503             ep.data.u64 = tswap64(target_ep->data.u64);
11504             unlock_user_struct(target_ep, arg4, 0);
11505             epp = &ep;
11506         }
11507         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11508     }
11509 #endif
11510 
11511 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
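    /*
     * epoll_wait and epoll_pwait share one implementation built on
     * safe_epoll_pwait(): the kernel fills a host-side event array which
     * is then byte-swapped into the guest's target_epoll_event layout,
     * and plain epoll_wait simply passes a NULL signal mask.
     */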
11512 #if defined(TARGET_NR_epoll_wait)
11513     case TARGET_NR_epoll_wait:
11514 #endif
11515 #if defined(TARGET_NR_epoll_pwait)
11516     case TARGET_NR_epoll_pwait:
11517 #endif
11518     {
11519         struct target_epoll_event *target_ep;
11520         struct epoll_event *ep;
11521         int epfd = arg1;
11522         int maxevents = arg3;
11523         int timeout = arg4;
11524 
11525         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11526             return -TARGET_EINVAL;
11527         }
11528 
11529         target_ep = lock_user(VERIFY_WRITE, arg2,
11530                               maxevents * sizeof(struct target_epoll_event), 1);
11531         if (!target_ep) {
11532             return -TARGET_EFAULT;
11533         }
11534 
11535         ep = g_try_new(struct epoll_event, maxevents);
11536         if (!ep) {
11537             unlock_user(target_ep, arg2, 0);
11538             return -TARGET_ENOMEM;
11539         }
11540 
11541         switch (num) {
11542 #if defined(TARGET_NR_epoll_pwait)
11543         case TARGET_NR_epoll_pwait:
11544         {
11545             target_sigset_t *target_set;
11546             sigset_t _set, *set = &_set;
11547 
11548             if (arg5) {
11549                 if (arg6 != sizeof(target_sigset_t)) {
11550                     ret = -TARGET_EINVAL;
11551                     break;
11552                 }
11553 
11554                 target_set = lock_user(VERIFY_READ, arg5,
11555                                        sizeof(target_sigset_t), 1);
11556                 if (!target_set) {
11557                     ret = -TARGET_EFAULT;
11558                     break;
11559                 }
11560                 target_to_host_sigset(set, target_set);
11561                 unlock_user(target_set, arg5, 0);
11562             } else {
11563                 set = NULL;
11564             }
11565 
11566             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11567                                              set, SIGSET_T_SIZE));
11568             break;
11569         }
11570 #endif
11571 #if defined(TARGET_NR_epoll_wait)
11572         case TARGET_NR_epoll_wait:
11573             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11574                                              NULL, 0));
11575             break;
11576 #endif
11577         default:
11578             ret = -TARGET_ENOSYS;
11579         }
11580         if (!is_error(ret)) {
11581             int i;
11582             for (i = 0; i < ret; i++) {
11583                 target_ep[i].events = tswap32(ep[i].events);
11584                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11585             }
11586             unlock_user(target_ep, arg2,
11587                         ret * sizeof(struct target_epoll_event));
11588         } else {
11589             unlock_user(target_ep, arg2, 0);
11590         }
11591         g_free(ep);
11592         return ret;
11593     }
11594 #endif
11595 #endif
11596 #ifdef TARGET_NR_prlimit64
11597     case TARGET_NR_prlimit64:
11598     {
11599         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11600         struct target_rlimit64 *target_rnew, *target_rold;
11601         struct host_rlimit64 rnew, rold, *rnewp = 0;
11602         int resource = target_to_host_resource(arg2);
11603         if (arg3) {
11604             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11605                 return -TARGET_EFAULT;
11606             }
11607             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11608             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11609             unlock_user_struct(target_rnew, arg3, 0);
11610             rnewp = &rnew;
11611         }
11612 
11613         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11614         if (!is_error(ret) && arg4) {
11615             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11616                 return -TARGET_EFAULT;
11617             }
11618             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11619             target_rold->rlim_max = tswap64(rold.rlim_max);
11620             unlock_user_struct(target_rold, arg4, 1);
11621         }
11622         return ret;
11623     }
11624 #endif
11625 #ifdef TARGET_NR_gethostname
11626     case TARGET_NR_gethostname:
11627     {
11628         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11629         if (name) {
11630             ret = get_errno(gethostname(name, arg2));
11631             unlock_user(name, arg1, arg2);
11632         } else {
11633             ret = -TARGET_EFAULT;
11634         }
11635         return ret;
11636     }
11637 #endif
11638 #ifdef TARGET_NR_atomic_cmpxchg_32
11639     case TARGET_NR_atomic_cmpxchg_32:
11640     {
11641         /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* mem_value was never read; don't fall through and use it.  */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
11657     }
11658 #endif
11659 #ifdef TARGET_NR_atomic_barrier
11660     case TARGET_NR_atomic_barrier:
11661         /* Like the kernel implementation and the
11662            qemu arm barrier, no-op this? */
11663         return 0;
11664 #endif
11665 
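    /*
     * POSIX timers: the timer id handed back to the guest encodes
     * TIMER_MAGIC plus an index into the g_posix_timers[] table of host
     * timer_t handles; get_timer_id() validates and decodes that value
     * for the timer_settime/gettime/getoverrun/delete cases below.
     */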
11666 #ifdef TARGET_NR_timer_create
11667     case TARGET_NR_timer_create:
11668     {
11669         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11670 
11671         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11672 
11673         int clkid = arg1;
11674         int timer_index = next_free_host_timer();
11675 
11676         if (timer_index < 0) {
11677             ret = -TARGET_EAGAIN;
11678         } else {
            timer_t *phtimer = g_posix_timers + timer_index;
11680 
11681             if (arg2) {
11682                 phost_sevp = &host_sevp;
11683                 ret = target_to_host_sigevent(phost_sevp, arg2);
11684                 if (ret != 0) {
11685                     return ret;
11686                 }
11687             }
11688 
11689             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11690             if (ret) {
11691                 phtimer = NULL;
11692             } else {
11693                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11694                     return -TARGET_EFAULT;
11695                 }
11696             }
11697         }
11698         return ret;
11699     }
11700 #endif
11701 
11702 #ifdef TARGET_NR_timer_settime
11703     case TARGET_NR_timer_settime:
11704     {
11705         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11706          * struct itimerspec * old_value */
11707         target_timer_t timerid = get_timer_id(arg1);
11708 
11709         if (timerid < 0) {
11710             ret = timerid;
11711         } else if (arg3 == 0) {
11712             ret = -TARGET_EINVAL;
11713         } else {
11714             timer_t htimer = g_posix_timers[timerid];
11715             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11716 
11717             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11718                 return -TARGET_EFAULT;
11719             }
11720             ret = get_errno(
11721                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11722             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11723                 return -TARGET_EFAULT;
11724             }
11725         }
11726         return ret;
11727     }
11728 #endif
11729 
11730 #ifdef TARGET_NR_timer_gettime
11731     case TARGET_NR_timer_gettime:
11732     {
11733         /* args: timer_t timerid, struct itimerspec *curr_value */
11734         target_timer_t timerid = get_timer_id(arg1);
11735 
11736         if (timerid < 0) {
11737             ret = timerid;
11738         } else if (!arg2) {
11739             ret = -TARGET_EFAULT;
11740         } else {
11741             timer_t htimer = g_posix_timers[timerid];
11742             struct itimerspec hspec;
11743             ret = get_errno(timer_gettime(htimer, &hspec));
11744 
11745             if (host_to_target_itimerspec(arg2, &hspec)) {
11746                 ret = -TARGET_EFAULT;
11747             }
11748         }
11749         return ret;
11750     }
11751 #endif
11752 
11753 #ifdef TARGET_NR_timer_getoverrun
11754     case TARGET_NR_timer_getoverrun:
11755     {
11756         /* args: timer_t timerid */
11757         target_timer_t timerid = get_timer_id(arg1);
11758 
11759         if (timerid < 0) {
11760             ret = timerid;
11761         } else {
11762             timer_t htimer = g_posix_timers[timerid];
11763             ret = get_errno(timer_getoverrun(htimer));
11764         }
11766         return ret;
11767     }
11768 #endif
11769 
11770 #ifdef TARGET_NR_timer_delete
11771     case TARGET_NR_timer_delete:
11772     {
11773         /* args: timer_t timerid */
11774         target_timer_t timerid = get_timer_id(arg1);
11775 
11776         if (timerid < 0) {
11777             ret = timerid;
11778         } else {
11779             timer_t htimer = g_posix_timers[timerid];
11780             ret = get_errno(timer_delete(htimer));
11781             g_posix_timers[timerid] = 0;
11782         }
11783         return ret;
11784     }
11785 #endif
11786 
11787 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11788     case TARGET_NR_timerfd_create:
11789         return get_errno(timerfd_create(arg1,
11790                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11791 #endif
11792 
11793 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11794     case TARGET_NR_timerfd_gettime:
11795         {
11796             struct itimerspec its_curr;
11797 
11798             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11799 
11800             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11801                 return -TARGET_EFAULT;
11802             }
11803         }
11804         return ret;
11805 #endif
11806 
11807 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11808     case TARGET_NR_timerfd_settime:
11809         {
11810             struct itimerspec its_new, its_old, *p_new;
11811 
11812             if (arg3) {
11813                 if (target_to_host_itimerspec(&its_new, arg3)) {
11814                     return -TARGET_EFAULT;
11815                 }
11816                 p_new = &its_new;
11817             } else {
11818                 p_new = NULL;
11819             }
11820 
11821             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11822 
11823             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11824                 return -TARGET_EFAULT;
11825             }
11826         }
11827         return ret;
11828 #endif
11829 
11830 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11831     case TARGET_NR_ioprio_get:
11832         return get_errno(ioprio_get(arg1, arg2));
11833 #endif
11834 
11835 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11836     case TARGET_NR_ioprio_set:
11837         return get_errno(ioprio_set(arg1, arg2, arg3));
11838 #endif
11839 
11840 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11841     case TARGET_NR_setns:
11842         return get_errno(setns(arg1, arg2));
11843 #endif
11844 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11845     case TARGET_NR_unshare:
11846         return get_errno(unshare(arg1));
11847 #endif
11848 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11849     case TARGET_NR_kcmp:
11850         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11851 #endif
11852 #ifdef TARGET_NR_swapcontext
11853     case TARGET_NR_swapcontext:
11854         /* PowerPC specific.  */
11855         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11856 #endif
11857 
11858     default:
11859         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11860         return -TARGET_ENOSYS;
11861     }
11862     return ret;
11863 }
11864 
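/*
 * do_syscall() is the entry point used by the per-target cpu loops.  It
 * only handles tracing, optional strace logging and the DEBUG_ERESTARTSYS
 * restart-testing hack; the actual emulation lives in do_syscall1().
 */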
11865 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11866                     abi_long arg2, abi_long arg3, abi_long arg4,
11867                     abi_long arg5, abi_long arg6, abi_long arg7,
11868                     abi_long arg8)
11869 {
11870     CPUState *cpu = env_cpu(cpu_env);
11871     abi_long ret;
11872 
11873 #ifdef DEBUG_ERESTARTSYS
11874     /* Debug-only code for exercising the syscall-restart code paths
11875      * in the per-architecture cpu main loops: restart every syscall
11876      * the guest makes once before letting it through.
11877      */
11878     {
11879         static bool flag;
11880         flag = !flag;
11881         if (flag) {
11882             return -TARGET_ERESTARTSYS;
11883         }
11884     }
11885 #endif
11886 
11887     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11888                              arg5, arg6, arg7, arg8);
11889 
11890     if (unlikely(do_strace)) {
11891         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11892         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11893                           arg5, arg6, arg7, arg8);
11894         print_syscall_ret(num, ret);
11895     } else {
11896         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11897                           arg5, arg6, arg7, arg8);
11898     }
11899 
11900     trace_guest_user_syscall_ret(cpu, num, ret);
11901     return ret;
11902 }
11903