1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "qapi/error.h"
116 #include "fd-trans.h"
117 
118 #ifndef CLONE_IO
119 #define CLONE_IO                0x80000000      /* Clone io context */
120 #endif
121 
122 /* We can't directly call the host clone syscall, because this will
123  * badly confuse libc (breaking mutexes, for example). So we must
124  * divide clone flags into:
125  *  * flag combinations that look like pthread_create()
126  *  * flag combinations that look like fork()
127  *  * flags we can implement within QEMU itself
128  *  * flags we can't support and will return an error for
129  */
130 /* For thread creation, all these flags must be present; for
131  * fork, none must be present.
132  */
133 #define CLONE_THREAD_FLAGS                              \
134     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
135      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 
137 /* These flags are ignored:
138  * CLONE_DETACHED is now ignored by the kernel;
139  * CLONE_IO is just an optimisation hint to the I/O scheduler
140  */
141 #define CLONE_IGNORED_FLAGS                     \
142     (CLONE_DETACHED | CLONE_IO)
143 
144 /* Flags for fork which we can implement within QEMU itself */
145 #define CLONE_OPTIONAL_FORK_FLAGS               \
146     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
147      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 
149 /* Flags for thread creation which we can implement within QEMU itself */
150 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
151     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
152      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 
154 #define CLONE_INVALID_FORK_FLAGS                                        \
155     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 
157 #define CLONE_INVALID_THREAD_FLAGS                                      \
158     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
159        CLONE_IGNORED_FLAGS))
160 
161 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
162  * have almost all been allocated. We cannot support any of
163  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
164  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
165  * The checks against the invalid thread masks above will catch these.
166  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167  */
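
/* For example, a glibc pthread_create() typically passes
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so do_fork() treats it as thread creation;
 * a fork()-style clone carrying only an exit signal in CSIGNAL matches none
 * of CLONE_THREAD_FLAGS and is handled as a fork.
 */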
168 
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170  * once. This exercises the codepaths for restart.
171  */
172 //#define DEBUG_ERESTARTSYS
173 
174 //#include <linux/msdos_fs.h>
175 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
176 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
177 
178 #undef _syscall0
179 #undef _syscall1
180 #undef _syscall2
181 #undef _syscall3
182 #undef _syscall4
183 #undef _syscall5
184 #undef _syscall6
185 
186 #define _syscall0(type,name)		\
187 static type name (void)			\
188 {					\
189 	return syscall(__NR_##name);	\
190 }
191 
192 #define _syscall1(type,name,type1,arg1)		\
193 static type name (type1 arg1)			\
194 {						\
195 	return syscall(__NR_##name, arg1);	\
196 }
197 
198 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
199 static type name (type1 arg1,type2 arg2)		\
200 {							\
201 	return syscall(__NR_##name, arg1, arg2);	\
202 }
203 
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
205 static type name (type1 arg1,type2 arg2,type3 arg3)		\
206 {								\
207 	return syscall(__NR_##name, arg1, arg2, arg3);		\
208 }
209 
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
212 {										\
213 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
214 }
215 
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
217 		  type5,arg5)							\
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
219 {										\
220 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
221 }
222 
223 
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
225 		  type5,arg5,type6,arg6)					\
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
227                   type6 arg6)							\
228 {										\
229 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
230 }
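
/* As an illustration, the invocation
 *     _syscall1(int, exit_group, int, error_code)
 * used further down expands to
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 * i.e. a thin static wrapper that invokes the host syscall directly,
 * bypassing any libc wrapper of the same name.
 */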
231 
232 
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
240 #define __NR_sys_syslog __NR_syslog
241 #define __NR_sys_futex __NR_futex
242 #define __NR_sys_inotify_init __NR_inotify_init
243 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
244 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #define __NR_sys_statx __NR_statx
246 
247 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
248 #define __NR__llseek __NR_lseek
249 #endif
250 
251 /* Newer kernel ports have llseek() instead of _llseek() */
252 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
253 #define TARGET_NR__llseek TARGET_NR_llseek
254 #endif
255 
256 #define __NR_sys_gettid __NR_gettid
257 _syscall0(int, sys_gettid)
258 
259 /* For the 64-bit guest on 32-bit host case we must emulate
260  * getdents using getdents64, because otherwise the host
261  * might hand us back more dirent records than we can fit
262  * into the guest buffer after structure format conversion.
263  * Otherwise we emulate getdents with getdents if the host has it.
264  */
265 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
266 #define EMULATE_GETDENTS_WITH_GETDENTS
267 #endif
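
/* Rough illustration of the sizing problem described above: on a 32-bit
 * host, struct linux_dirent starts with two native unsigned longs (4 bytes
 * each), while a 64-bit guest expects 8-byte fields, so each record grows
 * during conversion and a host buffer that was "full" may no longer fit in
 * the guest buffer.  Emulating via getdents64, whose fields have fixed
 * 64-bit widths, avoids that.
 */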
268 
269 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
270 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
271 #endif
272 #if (defined(TARGET_NR_getdents) && \
273       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
274     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
275 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
276 #endif
277 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
278 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
279           loff_t *, res, uint, wh);
280 #endif
281 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
282 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
283           siginfo_t *, uinfo)
284 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
285 #ifdef __NR_exit_group
286 _syscall1(int,exit_group,int,error_code)
287 #endif
288 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
289 _syscall1(int,set_tid_address,int *,tidptr)
290 #endif
291 #if defined(TARGET_NR_futex) && defined(__NR_futex)
292 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
293           const struct timespec *,timeout,int *,uaddr2,int,val3)
294 #endif
295 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
296 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
297           unsigned long *, user_mask_ptr);
298 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
299 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_getcpu __NR_getcpu
302 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
303 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
304           void *, arg);
305 _syscall2(int, capget, struct __user_cap_header_struct *, header,
306           struct __user_cap_data_struct *, data);
307 _syscall2(int, capset, struct __user_cap_header_struct *, header,
308           struct __user_cap_data_struct *, data);
309 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
310 _syscall2(int, ioprio_get, int, which, int, who)
311 #endif
312 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
313 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
314 #endif
315 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
316 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
317 #endif
318 
319 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
320 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
321           unsigned long, idx1, unsigned long, idx2)
322 #endif
323 
324 /*
325  * It is assumed that struct statx is architecture independent.
326  */
327 #if defined(TARGET_NR_statx) && defined(__NR_statx)
328 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
329           unsigned int, mask, struct target_statx *, statxbuf)
330 #endif
331 
332 static bitmask_transtbl fcntl_flags_tbl[] = {
333   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
334   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
335   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
336   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
337   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
338   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
339   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
340   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
341   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
342   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
343   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
344   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
345   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
346 #if defined(O_DIRECT)
347   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
348 #endif
349 #if defined(O_NOATIME)
350   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
351 #endif
352 #if defined(O_CLOEXEC)
353   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
354 #endif
355 #if defined(O_PATH)
356   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
357 #endif
358 #if defined(O_TMPFILE)
359   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
360 #endif
361   /* Don't terminate the list prematurely on 64-bit host+guest.  */
362 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
363   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
364 #endif
365   { 0, 0, 0, 0 }
366 };
367 
368 static int sys_getcwd1(char *buf, size_t size)
369 {
370   if (getcwd(buf, size) == NULL) {
371       /* getcwd() sets errno */
372       return (-1);
373   }
374   return strlen(buf)+1;
375 }
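
/* Note: like the raw getcwd syscall (and unlike the glibc wrapper, which
 * returns a pointer), this returns the number of bytes placed in buf
 * including the terminating NUL on success, e.g. 5 for "/tmp", and -1 with
 * errno set on failure.
 */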
376 
377 #ifdef TARGET_NR_utimensat
378 #if defined(__NR_utimensat)
379 #define __NR_sys_utimensat __NR_utimensat
380 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
381           const struct timespec *,tsp,int,flags)
382 #else
383 static int sys_utimensat(int dirfd, const char *pathname,
384                          const struct timespec times[2], int flags)
385 {
386     errno = ENOSYS;
387     return -1;
388 }
389 #endif
390 #endif /* TARGET_NR_utimensat */
391 
392 #ifdef TARGET_NR_renameat2
393 #if defined(__NR_renameat2)
394 #define __NR_sys_renameat2 __NR_renameat2
395 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
396           const char *, new, unsigned int, flags)
397 #else
398 static int sys_renameat2(int oldfd, const char *old,
399                          int newfd, const char *new, int flags)
400 {
401     if (flags == 0) {
402         return renameat(oldfd, old, newfd, new);
403     }
404     errno = ENOSYS;
405     return -1;
406 }
407 #endif
408 #endif /* TARGET_NR_renameat2 */
409 
410 #ifdef CONFIG_INOTIFY
411 #include <sys/inotify.h>
412 
413 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
414 static int sys_inotify_init(void)
415 {
416   return (inotify_init());
417 }
418 #endif
419 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
420 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
421 {
422   return (inotify_add_watch(fd, pathname, mask));
423 }
424 #endif
425 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
426 static int sys_inotify_rm_watch(int fd, int32_t wd)
427 {
428   return (inotify_rm_watch(fd, wd));
429 }
430 #endif
431 #ifdef CONFIG_INOTIFY1
432 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
433 static int sys_inotify_init1(int flags)
434 {
435   return (inotify_init1(flags));
436 }
437 #endif
438 #endif
439 #else
440 /* Userspace can usually survive runtime without inotify */
441 #undef TARGET_NR_inotify_init
442 #undef TARGET_NR_inotify_init1
443 #undef TARGET_NR_inotify_add_watch
444 #undef TARGET_NR_inotify_rm_watch
445 #endif /* CONFIG_INOTIFY  */
446 
447 #if defined(TARGET_NR_prlimit64)
448 #ifndef __NR_prlimit64
449 # define __NR_prlimit64 -1
450 #endif
451 #define __NR_sys_prlimit64 __NR_prlimit64
452 /* The glibc rlimit structure may not be that used by the underlying syscall */
453 struct host_rlimit64 {
454     uint64_t rlim_cur;
455     uint64_t rlim_max;
456 };
457 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
458           const struct host_rlimit64 *, new_limit,
459           struct host_rlimit64 *, old_limit)
460 #endif
461 
462 
463 #if defined(TARGET_NR_timer_create)
464 /* Maximum of 32 active POSIX timers allowed at any one time. */
465 static timer_t g_posix_timers[32] = { 0, } ;
466 
467 static inline int next_free_host_timer(void)
468 {
469     int k ;
470     /* FIXME: Does finding the next free slot require a lock? */
471     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
472         if (g_posix_timers[k] == 0) {
473             g_posix_timers[k] = (timer_t) 1;
474             return k;
475         }
476     }
477     return -1;
478 }
479 #endif
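
/* A slot value of 0 means "free"; the placeholder value 1 written above
 * marks the slot as claimed until timer_create() stores the real host
 * timer_t there, and the returned index is later used to form the
 * guest-visible timer id.  Typical use (sketch, error value illustrative):
 *
 *     int timerid = next_free_host_timer();
 *     if (timerid < 0) {
 *         return -TARGET_EAGAIN;
 *     }
 */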
480 
481 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
482 #ifdef TARGET_ARM
483 static inline int regpairs_aligned(void *cpu_env, int num)
484 {
485     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
486 }
487 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
490 /* SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
491  * of registers, which translates to the same as ARM/MIPS, because we start with
492  * r3 as arg1 */
493 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
494 #elif defined(TARGET_SH4)
495 /* SH4 doesn't align register pairs, except for p{read,write}64 */
496 static inline int regpairs_aligned(void *cpu_env, int num)
497 {
498     switch (num) {
499     case TARGET_NR_pread64:
500     case TARGET_NR_pwrite64:
501         return 1;
502 
503     default:
504         return 0;
505     }
506 }
507 #elif defined(TARGET_XTENSA)
508 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
509 #else
510 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
511 #endif
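
/* Concrete example of the alignment rule: for pread64(fd, buf, count, offset)
 * on ARM EABI, the 64-bit offset must start in an even-numbered register, so
 * a padding argument is inserted and the offset arrives as the next two
 * syscall arguments.  When regpairs_aligned() returns 1, the argument
 * handling later in this file skips that padding slot before reassembling
 * the low/high halves into a single 64-bit value.
 */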
512 
513 #define ERRNO_TABLE_SIZE 1200
514 
515 /* target_to_host_errno_table[] is initialized from
516  * host_to_target_errno_table[] in syscall_init(). */
517 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
518 };
519 
520 /*
521  * This list is the union of errno values overridden in asm-<arch>/errno.h
522  * minus the errnos that are not actually generic to all archs.
523  */
524 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
525     [EAGAIN]		= TARGET_EAGAIN,
526     [EIDRM]		= TARGET_EIDRM,
527     [ECHRNG]		= TARGET_ECHRNG,
528     [EL2NSYNC]		= TARGET_EL2NSYNC,
529     [EL3HLT]		= TARGET_EL3HLT,
530     [EL3RST]		= TARGET_EL3RST,
531     [ELNRNG]		= TARGET_ELNRNG,
532     [EUNATCH]		= TARGET_EUNATCH,
533     [ENOCSI]		= TARGET_ENOCSI,
534     [EL2HLT]		= TARGET_EL2HLT,
535     [EDEADLK]		= TARGET_EDEADLK,
536     [ENOLCK]		= TARGET_ENOLCK,
537     [EBADE]		= TARGET_EBADE,
538     [EBADR]		= TARGET_EBADR,
539     [EXFULL]		= TARGET_EXFULL,
540     [ENOANO]		= TARGET_ENOANO,
541     [EBADRQC]		= TARGET_EBADRQC,
542     [EBADSLT]		= TARGET_EBADSLT,
543     [EBFONT]		= TARGET_EBFONT,
544     [ENOSTR]		= TARGET_ENOSTR,
545     [ENODATA]		= TARGET_ENODATA,
546     [ETIME]		= TARGET_ETIME,
547     [ENOSR]		= TARGET_ENOSR,
548     [ENONET]		= TARGET_ENONET,
549     [ENOPKG]		= TARGET_ENOPKG,
550     [EREMOTE]		= TARGET_EREMOTE,
551     [ENOLINK]		= TARGET_ENOLINK,
552     [EADV]		= TARGET_EADV,
553     [ESRMNT]		= TARGET_ESRMNT,
554     [ECOMM]		= TARGET_ECOMM,
555     [EPROTO]		= TARGET_EPROTO,
556     [EDOTDOT]		= TARGET_EDOTDOT,
557     [EMULTIHOP]		= TARGET_EMULTIHOP,
558     [EBADMSG]		= TARGET_EBADMSG,
559     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
560     [EOVERFLOW]		= TARGET_EOVERFLOW,
561     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
562     [EBADFD]		= TARGET_EBADFD,
563     [EREMCHG]		= TARGET_EREMCHG,
564     [ELIBACC]		= TARGET_ELIBACC,
565     [ELIBBAD]		= TARGET_ELIBBAD,
566     [ELIBSCN]		= TARGET_ELIBSCN,
567     [ELIBMAX]		= TARGET_ELIBMAX,
568     [ELIBEXEC]		= TARGET_ELIBEXEC,
569     [EILSEQ]		= TARGET_EILSEQ,
570     [ENOSYS]		= TARGET_ENOSYS,
571     [ELOOP]		= TARGET_ELOOP,
572     [ERESTART]		= TARGET_ERESTART,
573     [ESTRPIPE]		= TARGET_ESTRPIPE,
574     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
575     [EUSERS]		= TARGET_EUSERS,
576     [ENOTSOCK]		= TARGET_ENOTSOCK,
577     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
578     [EMSGSIZE]		= TARGET_EMSGSIZE,
579     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
580     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
581     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
582     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
583     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
584     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
585     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
586     [EADDRINUSE]	= TARGET_EADDRINUSE,
587     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
588     [ENETDOWN]		= TARGET_ENETDOWN,
589     [ENETUNREACH]	= TARGET_ENETUNREACH,
590     [ENETRESET]		= TARGET_ENETRESET,
591     [ECONNABORTED]	= TARGET_ECONNABORTED,
592     [ECONNRESET]	= TARGET_ECONNRESET,
593     [ENOBUFS]		= TARGET_ENOBUFS,
594     [EISCONN]		= TARGET_EISCONN,
595     [ENOTCONN]		= TARGET_ENOTCONN,
596     [EUCLEAN]		= TARGET_EUCLEAN,
597     [ENOTNAM]		= TARGET_ENOTNAM,
598     [ENAVAIL]		= TARGET_ENAVAIL,
599     [EISNAM]		= TARGET_EISNAM,
600     [EREMOTEIO]		= TARGET_EREMOTEIO,
601     [EDQUOT]            = TARGET_EDQUOT,
602     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
603     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
604     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
605     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
606     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
607     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
608     [EALREADY]		= TARGET_EALREADY,
609     [EINPROGRESS]	= TARGET_EINPROGRESS,
610     [ESTALE]		= TARGET_ESTALE,
611     [ECANCELED]		= TARGET_ECANCELED,
612     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
613     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
614 #ifdef ENOKEY
615     [ENOKEY]		= TARGET_ENOKEY,
616 #endif
617 #ifdef EKEYEXPIRED
618     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
619 #endif
620 #ifdef EKEYREVOKED
621     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
622 #endif
623 #ifdef EKEYREJECTED
624     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
625 #endif
626 #ifdef EOWNERDEAD
627     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
628 #endif
629 #ifdef ENOTRECOVERABLE
630     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
631 #endif
632 #ifdef ENOMSG
633     [ENOMSG]            = TARGET_ENOMSG,
634 #endif
635 #ifdef ERFKILL
636     [ERFKILL]           = TARGET_ERFKILL,
637 #endif
638 #ifdef EHWPOISON
639     [EHWPOISON]         = TARGET_EHWPOISON,
640 #endif
641 };
642 
643 static inline int host_to_target_errno(int err)
644 {
645     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
646         host_to_target_errno_table[err]) {
647         return host_to_target_errno_table[err];
648     }
649     return err;
650 }
651 
652 static inline int target_to_host_errno(int err)
653 {
654     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
655         target_to_host_errno_table[err]) {
656         return target_to_host_errno_table[err];
657     }
658     return err;
659 }
660 
661 static inline abi_long get_errno(abi_long ret)
662 {
663     if (ret == -1)
664         return -host_to_target_errno(errno);
665     else
666         return ret;
667 }
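
/* Typical call pattern (sketch): a host syscall result is wrapped so that
 * failures come back as negative *target* errno values, e.g.
 *
 *     ret = get_errno(read(fd, p, count));
 *
 * returns -TARGET_EBADF if the host read() failed with EBADF, and the byte
 * count unchanged on success.
 */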
668 
669 const char *target_strerror(int err)
670 {
671     if (err == TARGET_ERESTARTSYS) {
672         return "To be restarted";
673     }
674     if (err == TARGET_QEMU_ESIGRETURN) {
675         return "Successful exit from sigreturn";
676     }
677 
678     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
679         return NULL;
680     }
681     return strerror(target_to_host_errno(err));
682 }
683 
684 #define safe_syscall0(type, name) \
685 static type safe_##name(void) \
686 { \
687     return safe_syscall(__NR_##name); \
688 }
689 
690 #define safe_syscall1(type, name, type1, arg1) \
691 static type safe_##name(type1 arg1) \
692 { \
693     return safe_syscall(__NR_##name, arg1); \
694 }
695 
696 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
697 static type safe_##name(type1 arg1, type2 arg2) \
698 { \
699     return safe_syscall(__NR_##name, arg1, arg2); \
700 }
701 
702 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
703 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
706 }
707 
708 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
711 { \
712     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
713 }
714 
715 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
716     type4, arg4, type5, arg5) \
717 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
718     type5 arg5) \
719 { \
720     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
721 }
722 
723 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
724     type4, arg4, type5, arg5, type6, arg6) \
725 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
726     type5 arg5, type6 arg6) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
729 }
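
/* The safe_* wrappers generated above all funnel through safe_syscall(),
 * which is designed so that a guest signal arriving just before the host
 * syscall is entered makes the call fail with TARGET_ERESTARTSYS instead of
 * blocking with the signal unhandled; see the safe_syscall() documentation
 * for the exact contract.  Callers use them in the usual pattern, e.g.
 *
 *     ret = get_errno(safe_read(fd, p, count));
 */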
730 
731 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
732 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
733 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
734               int, flags, mode_t, mode)
735 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
736               struct rusage *, rusage)
737 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
738               int, options, struct rusage *, rusage)
739 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
740 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
741               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
742 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
743               struct timespec *, tsp, const sigset_t *, sigmask,
744               size_t, sigsetsize)
745 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
746               int, maxevents, int, timeout, const sigset_t *, sigmask,
747               size_t, sigsetsize)
748 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
751 safe_syscall2(int, kill, pid_t, pid, int, sig)
752 safe_syscall2(int, tkill, int, tid, int, sig)
753 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
754 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
755 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
759               unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
761               socklen_t, addrlen)
762 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
763               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
764 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
765               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
766 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
767 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
768 safe_syscall2(int, flock, int, fd, int, operation)
769 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
770               const struct timespec *, uts, size_t, sigsetsize)
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 safe_syscall2(int, nanosleep, const struct timespec *, req,
774               struct timespec *, rem)
775 #ifdef TARGET_NR_clock_nanosleep
776 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
777               const struct timespec *, req, struct timespec *, rem)
778 #endif
779 #ifdef __NR_ipc
780 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
781               void *, ptr, long, fifth)
782 #endif
783 #ifdef __NR_msgsnd
784 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
785               int, flags)
786 #endif
787 #ifdef __NR_msgrcv
788 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
789               long, msgtype, int, flags)
790 #endif
791 #ifdef __NR_semtimedop
792 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
793               unsigned, nsops, const struct timespec *, timeout)
794 #endif
795 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
796 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
797               size_t, len, unsigned, prio, const struct timespec *, timeout)
798 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
799               size_t, len, unsigned *, prio, const struct timespec *, timeout)
800 #endif
801 /* We do ioctl like this rather than via safe_syscall3 to preserve the
802  * "third argument might be integer or pointer or not present" behaviour of
803  * the libc function.
804  */
805 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
806 /* Similarly for fcntl. Note that callers must always:
807  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
808  *  use the flock64 struct rather than unsuffixed flock
809  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
810  */
811 #ifdef __NR_fcntl64
812 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
813 #else
814 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
815 #endif
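
/* So, per the rule above, a locking request is issued as (sketch):
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * which resolves to fcntl64 on 32-bit hosts and to plain fcntl (where
 * F_GETLK64 == F_GETLK and flock64 matches flock) on 64-bit hosts.
 */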
816 
817 static inline int host_to_target_sock_type(int host_type)
818 {
819     int target_type;
820 
821     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
822     case SOCK_DGRAM:
823         target_type = TARGET_SOCK_DGRAM;
824         break;
825     case SOCK_STREAM:
826         target_type = TARGET_SOCK_STREAM;
827         break;
828     default:
829         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
830         break;
831     }
832 
833 #if defined(SOCK_CLOEXEC)
834     if (host_type & SOCK_CLOEXEC) {
835         target_type |= TARGET_SOCK_CLOEXEC;
836     }
837 #endif
838 
839 #if defined(SOCK_NONBLOCK)
840     if (host_type & SOCK_NONBLOCK) {
841         target_type |= TARGET_SOCK_NONBLOCK;
842     }
843 #endif
844 
845     return target_type;
846 }
847 
848 static abi_ulong target_brk;
849 static abi_ulong target_original_brk;
850 static abi_ulong brk_page;
851 
852 void target_set_brk(abi_ulong new_brk)
853 {
854     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
855     brk_page = HOST_PAGE_ALIGN(target_brk);
856 }
857 
858 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
859 #define DEBUGF_BRK(message, args...)
860 
861 /* do_brk() must return target values and target errnos. */
862 abi_long do_brk(abi_ulong new_brk)
863 {
864     abi_long mapped_addr;
865     abi_ulong new_alloc_size;
866 
867     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
868 
869     if (!new_brk) {
870         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
871         return target_brk;
872     }
873     if (new_brk < target_original_brk) {
874         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
875                    target_brk);
876         return target_brk;
877     }
878 
879     /* If the new brk is less than the highest page reserved to the
880      * target heap allocation, set it and we're almost done...  */
881     if (new_brk <= brk_page) {
882         /* Heap contents are initialized to zero, as for anonymous
883          * mapped pages.  */
884         if (new_brk > target_brk) {
885             memset(g2h(target_brk), 0, new_brk - target_brk);
886         }
887         target_brk = new_brk;
888         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
889         return target_brk;
890     }
891 
892     /* We need to allocate more memory after the brk... Note that
893      * we don't use MAP_FIXED because that will map over the top of
894      * any existing mapping (like the one with the host libc or qemu
895      * itself); instead we treat "mapped but at wrong address" as
896      * a failure and unmap again.
897      */
898     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
899     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
900                                         PROT_READ|PROT_WRITE,
901                                         MAP_ANON|MAP_PRIVATE, 0, 0));
902 
903     if (mapped_addr == brk_page) {
904         /* Heap contents are initialized to zero, as for anonymous
905          * mapped pages.  Technically the new pages are already
906          * initialized to zero since they *are* anonymous mapped
907          * pages, however we have to take care with the contents that
908          * come from the remaining part of the previous page: it may
909          * contains garbage data due to a previous heap usage (grown
910          * then shrunken).  */
911         memset(g2h(target_brk), 0, brk_page - target_brk);
912 
913         target_brk = new_brk;
914         brk_page = HOST_PAGE_ALIGN(target_brk);
915         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
916             target_brk);
917         return target_brk;
918     } else if (mapped_addr != -1) {
919         /* Mapped but at wrong address, meaning there wasn't actually
920          * enough space for this brk.
921          */
922         target_munmap(mapped_addr, new_alloc_size);
923         mapped_addr = -1;
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
925     } else {
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
928     }
929 
930 #if defined(TARGET_ALPHA)
931     /* We (partially) emulate OSF/1 on Alpha, which requires we
932        return a proper errno, not an unchanged brk value.  */
933     return -TARGET_ENOMEM;
934 #endif
935     /* For everything else, return the previous break. */
936     return target_brk;
937 }
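
/* Worked example of the growth path above (4 KiB host pages assumed):
 * if brk_page is 0x50000 and the guest asks for new_brk = 0x52345, then
 * new_alloc_size = HOST_PAGE_ALIGN(0x2345) = 0x3000, target_mmap() is asked
 * for three pages at 0x50000, and on success target_brk becomes 0x52345
 * while brk_page moves up to 0x53000.
 */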
938 
939 static inline abi_long copy_from_user_fdset(fd_set *fds,
940                                             abi_ulong target_fds_addr,
941                                             int n)
942 {
943     int i, nw, j, k;
944     abi_ulong b, *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_READ,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  1)))
951         return -TARGET_EFAULT;
952 
953     FD_ZERO(fds);
954     k = 0;
955     for (i = 0; i < nw; i++) {
956         /* grab the abi_ulong */
957         __get_user(b, &target_fds[i]);
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             /* check the bit inside the abi_ulong */
960             if ((b >> j) & 1)
961                 FD_SET(k, fds);
962             k++;
963         }
964     }
965 
966     unlock_user(target_fds, target_fds_addr, 0);
967 
968     return 0;
969 }
970 
971 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
972                                                  abi_ulong target_fds_addr,
973                                                  int n)
974 {
975     if (target_fds_addr) {
976         if (copy_from_user_fdset(fds, target_fds_addr, n))
977             return -TARGET_EFAULT;
978         *fds_ptr = fds;
979     } else {
980         *fds_ptr = NULL;
981     }
982     return 0;
983 }
984 
985 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
986                                           const fd_set *fds,
987                                           int n)
988 {
989     int i, nw, j, k;
990     abi_long v;
991     abi_ulong *target_fds;
992 
993     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
994     if (!(target_fds = lock_user(VERIFY_WRITE,
995                                  target_fds_addr,
996                                  sizeof(abi_ulong) * nw,
997                                  0)))
998         return -TARGET_EFAULT;
999 
1000     k = 0;
1001     for (i = 0; i < nw; i++) {
1002         v = 0;
1003         for (j = 0; j < TARGET_ABI_BITS; j++) {
1004             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1005             k++;
1006         }
1007         __put_user(v, &target_fds[i]);
1008     }
1009 
1010     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1011 
1012     return 0;
1013 }
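
/* Example of the repacking above: with TARGET_ABI_BITS == 32, host fd 33
 * being set in *fds ends up as bit 1 of target_fds[1] (k == 33 gives
 * i == 1, j == 1), and the word is then stored in target byte order by
 * __put_user.
 */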
1014 
1015 #if defined(__alpha__)
1016 #define HOST_HZ 1024
1017 #else
1018 #define HOST_HZ 100
1019 #endif
1020 
1021 static inline abi_long host_to_target_clock_t(long ticks)
1022 {
1023 #if HOST_HZ == TARGET_HZ
1024     return ticks;
1025 #else
1026     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1027 #endif
1028 }
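
/* Example: for a host with HOST_HZ == 100 and a target whose TARGET_HZ is
 * (say) 1024, 50 host ticks are reported to the guest as
 * (50 * 1024) / 100 = 512 target ticks; when the two rates match, the value
 * is passed through untouched.
 */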
1029 
1030 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1031                                              const struct rusage *rusage)
1032 {
1033     struct target_rusage *target_rusage;
1034 
1035     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1036         return -TARGET_EFAULT;
1037     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1038     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1039     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1040     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1041     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1042     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1043     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1044     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1045     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1046     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1047     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1048     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1049     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1050     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1051     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1052     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1053     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1054     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1055     unlock_user_struct(target_rusage, target_addr, 1);
1056 
1057     return 0;
1058 }
1059 
1060 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1061 {
1062     abi_ulong target_rlim_swap;
1063     rlim_t result;
1064 
1065     target_rlim_swap = tswapal(target_rlim);
1066     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1067         return RLIM_INFINITY;
1068 
1069     result = target_rlim_swap;
1070     if (target_rlim_swap != (rlim_t)result)
1071         return RLIM_INFINITY;
1072 
1073     return result;
1074 }
1075 
1076 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     abi_ulong result;
1080 
1081     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1082         target_rlim_swap = TARGET_RLIM_INFINITY;
1083     else
1084         target_rlim_swap = rlim;
1085     result = tswapal(target_rlim_swap);
1086 
1087     return result;
1088 }
1089 
1090 static inline int target_to_host_resource(int code)
1091 {
1092     switch (code) {
1093     case TARGET_RLIMIT_AS:
1094         return RLIMIT_AS;
1095     case TARGET_RLIMIT_CORE:
1096         return RLIMIT_CORE;
1097     case TARGET_RLIMIT_CPU:
1098         return RLIMIT_CPU;
1099     case TARGET_RLIMIT_DATA:
1100         return RLIMIT_DATA;
1101     case TARGET_RLIMIT_FSIZE:
1102         return RLIMIT_FSIZE;
1103     case TARGET_RLIMIT_LOCKS:
1104         return RLIMIT_LOCKS;
1105     case TARGET_RLIMIT_MEMLOCK:
1106         return RLIMIT_MEMLOCK;
1107     case TARGET_RLIMIT_MSGQUEUE:
1108         return RLIMIT_MSGQUEUE;
1109     case TARGET_RLIMIT_NICE:
1110         return RLIMIT_NICE;
1111     case TARGET_RLIMIT_NOFILE:
1112         return RLIMIT_NOFILE;
1113     case TARGET_RLIMIT_NPROC:
1114         return RLIMIT_NPROC;
1115     case TARGET_RLIMIT_RSS:
1116         return RLIMIT_RSS;
1117     case TARGET_RLIMIT_RTPRIO:
1118         return RLIMIT_RTPRIO;
1119     case TARGET_RLIMIT_SIGPENDING:
1120         return RLIMIT_SIGPENDING;
1121     case TARGET_RLIMIT_STACK:
1122         return RLIMIT_STACK;
1123     default:
1124         return code;
1125     }
1126 }
1127 
1128 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1129                                               abi_ulong target_tv_addr)
1130 {
1131     struct target_timeval *target_tv;
1132 
1133     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1134         return -TARGET_EFAULT;
1135     }
1136 
1137     __get_user(tv->tv_sec, &target_tv->tv_sec);
1138     __get_user(tv->tv_usec, &target_tv->tv_usec);
1139 
1140     unlock_user_struct(target_tv, target_tv_addr, 0);
1141 
1142     return 0;
1143 }
1144 
1145 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1146                                             const struct timeval *tv)
1147 {
1148     struct target_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1163                                              const struct timeval *tv)
1164 {
1165     struct target__kernel_sock_timeval *target_tv;
1166 
1167     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1168         return -TARGET_EFAULT;
1169     }
1170 
1171     __put_user(tv->tv_sec, &target_tv->tv_sec);
1172     __put_user(tv->tv_usec, &target_tv->tv_usec);
1173 
1174     unlock_user_struct(target_tv, target_tv_addr, 1);
1175 
1176     return 0;
1177 }
1178 
1179 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1180                                                abi_ulong target_addr)
1181 {
1182     struct target_timespec *target_ts;
1183 
1184     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1185         return -TARGET_EFAULT;
1186     }
1187     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1188     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189     unlock_user_struct(target_ts, target_addr, 0);
1190     return 0;
1191 }
1192 
1193 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1194                                                struct timespec *host_ts)
1195 {
1196     struct target_timespec *target_ts;
1197 
1198     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1199         return -TARGET_EFAULT;
1200     }
1201     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1202     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1203     unlock_user_struct(target_ts, target_addr, 1);
1204     return 0;
1205 }
1206 
1207 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1208                                                  struct timespec *host_ts)
1209 {
1210     struct target__kernel_timespec *target_ts;
1211 
1212     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1213         return -TARGET_EFAULT;
1214     }
1215     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1216     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1217     unlock_user_struct(target_ts, target_addr, 1);
1218     return 0;
1219 }
1220 
1221 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1222                                                abi_ulong target_tz_addr)
1223 {
1224     struct target_timezone *target_tz;
1225 
1226     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1227         return -TARGET_EFAULT;
1228     }
1229 
1230     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1231     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1232 
1233     unlock_user_struct(target_tz, target_tz_addr, 0);
1234 
1235     return 0;
1236 }
1237 
1238 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1239 #include <mqueue.h>
1240 
1241 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1242                                               abi_ulong target_mq_attr_addr)
1243 {
1244     struct target_mq_attr *target_mq_attr;
1245 
1246     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1247                           target_mq_attr_addr, 1))
1248         return -TARGET_EFAULT;
1249 
1250     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1251     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1252     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1253     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1254 
1255     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1256 
1257     return 0;
1258 }
1259 
1260 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1261                                             const struct mq_attr *attr)
1262 {
1263     struct target_mq_attr *target_mq_attr;
1264 
1265     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1266                           target_mq_attr_addr, 0))
1267         return -TARGET_EFAULT;
1268 
1269     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1270     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1271     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1272     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1273 
1274     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1281 /* do_select() must return target values and target errnos. */
1282 static abi_long do_select(int n,
1283                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1284                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1285 {
1286     fd_set rfds, wfds, efds;
1287     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1288     struct timeval tv;
1289     struct timespec ts, *ts_ptr;
1290     abi_long ret;
1291 
1292     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1293     if (ret) {
1294         return ret;
1295     }
1296     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1297     if (ret) {
1298         return ret;
1299     }
1300     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1301     if (ret) {
1302         return ret;
1303     }
1304 
1305     if (target_tv_addr) {
1306         if (copy_from_user_timeval(&tv, target_tv_addr))
1307             return -TARGET_EFAULT;
1308         ts.tv_sec = tv.tv_sec;
1309         ts.tv_nsec = tv.tv_usec * 1000;
1310         ts_ptr = &ts;
1311     } else {
1312         ts_ptr = NULL;
1313     }
1314 
1315     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1316                                   ts_ptr, NULL));
1317 
1318     if (!is_error(ret)) {
1319         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1320             return -TARGET_EFAULT;
1321         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1322             return -TARGET_EFAULT;
1323         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1324             return -TARGET_EFAULT;
1325 
1326         if (target_tv_addr) {
1327             tv.tv_sec = ts.tv_sec;
1328             tv.tv_usec = ts.tv_nsec / 1000;
1329             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1330                 return -TARGET_EFAULT;
1331             }
1332         }
1333     }
1334 
1335     return ret;
1336 }
1337 
1338 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1339 static abi_long do_old_select(abi_ulong arg1)
1340 {
1341     struct target_sel_arg_struct *sel;
1342     abi_ulong inp, outp, exp, tvp;
1343     long nsel;
1344 
1345     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1346         return -TARGET_EFAULT;
1347     }
1348 
1349     nsel = tswapal(sel->n);
1350     inp = tswapal(sel->inp);
1351     outp = tswapal(sel->outp);
1352     exp = tswapal(sel->exp);
1353     tvp = tswapal(sel->tvp);
1354 
1355     unlock_user_struct(sel, arg1, 0);
1356 
1357     return do_select(nsel, inp, outp, exp, tvp);
1358 }
1359 #endif
1360 #endif
1361 
1362 static abi_long do_pipe2(int host_pipe[], int flags)
1363 {
1364 #ifdef CONFIG_PIPE2
1365     return pipe2(host_pipe, flags);
1366 #else
1367     return -ENOSYS;
1368 #endif
1369 }
1370 
1371 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1372                         int flags, int is_pipe2)
1373 {
1374     int host_pipe[2];
1375     abi_long ret;
1376     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1377 
1378     if (is_error(ret))
1379         return get_errno(ret);
1380 
1381     /* Several targets have special calling conventions for the original
1382        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1383     if (!is_pipe2) {
1384 #if defined(TARGET_ALPHA)
1385         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1386         return host_pipe[0];
1387 #elif defined(TARGET_MIPS)
1388         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1389         return host_pipe[0];
1390 #elif defined(TARGET_SH4)
1391         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1392         return host_pipe[0];
1393 #elif defined(TARGET_SPARC)
1394         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1395         return host_pipe[0];
1396 #endif
1397     }
1398 
1399     if (put_user_s32(host_pipe[0], pipedes)
1400         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1401         return -TARGET_EFAULT;
1402     return get_errno(ret);
1403 }
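
/* To recap the two conventions handled above: for plain pipe() on Alpha,
 * MIPS, SH4 and SPARC the two descriptors are returned in registers
 * (host_pipe[0] as the syscall result, host_pipe[1] in a second register),
 * whereas every other target, and every pipe2() caller, gets both
 * descriptors written back into the guest's pipedes array.
 */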
1404 
1405 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1406                                               abi_ulong target_addr,
1407                                               socklen_t len)
1408 {
1409     struct target_ip_mreqn *target_smreqn;
1410 
1411     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1412     if (!target_smreqn)
1413         return -TARGET_EFAULT;
1414     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1415     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1416     if (len == sizeof(struct target_ip_mreqn))
1417         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1418     unlock_user(target_smreqn, target_addr, 0);
1419 
1420     return 0;
1421 }
1422 
1423 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1424                                                abi_ulong target_addr,
1425                                                socklen_t len)
1426 {
1427     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1428     sa_family_t sa_family;
1429     struct target_sockaddr *target_saddr;
1430 
1431     if (fd_trans_target_to_host_addr(fd)) {
1432         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1433     }
1434 
1435     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1436     if (!target_saddr)
1437         return -TARGET_EFAULT;
1438 
1439     sa_family = tswap16(target_saddr->sa_family);
1440 
1441     /* Oops. The caller might send an incomplete sun_path; sun_path
1442      * must be terminated by \0 (see the manual page), but
1443      * unfortunately it is quite common to specify sockaddr_un
1444      * length as "strlen(x->sun_path)" while it should be
1445      * "strlen(...) + 1". We'll fix that here if needed.
1446      * The Linux kernel has a similar feature.
1447      */
1448 
1449     if (sa_family == AF_UNIX) {
1450         if (len < unix_maxlen && len > 0) {
1451             char *cp = (char*)target_saddr;
1452 
1453             if ( cp[len-1] && !cp[len] )
1454                 len++;
1455         }
1456         if (len > unix_maxlen)
1457             len = unix_maxlen;
1458     }
1459 
1460     memcpy(addr, target_saddr, len);
1461     addr->sa_family = sa_family;
1462     if (sa_family == AF_NETLINK) {
1463         struct sockaddr_nl *nladdr;
1464 
1465         nladdr = (struct sockaddr_nl *)addr;
1466         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1467         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1468     } else if (sa_family == AF_PACKET) {
1469         struct target_sockaddr_ll *lladdr;
1470 
1471         lladdr = (struct target_sockaddr_ll *)addr;
1472         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1473         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1474     }
1475     unlock_user(target_saddr, target_addr, 0);
1476 
1477     return 0;
1478 }
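
/* Example of the AF_UNIX length fixup above: if the guest binds to
 * "/tmp/sock" but passes len == offsetof(sockaddr_un, sun_path) + 9
 * (i.e. strlen() without the NUL), and the byte at cp[len] is the
 * terminating '\0', len is bumped by one so the host kernel sees a
 * properly terminated path.
 */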
1479 
1480 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1481                                                struct sockaddr *addr,
1482                                                socklen_t len)
1483 {
1484     struct target_sockaddr *target_saddr;
1485 
1486     if (len == 0) {
1487         return 0;
1488     }
1489     assert(addr);
1490 
1491     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1492     if (!target_saddr)
1493         return -TARGET_EFAULT;
1494     memcpy(target_saddr, addr, len);
1495     if (len >= offsetof(struct target_sockaddr, sa_family) +
1496         sizeof(target_saddr->sa_family)) {
1497         target_saddr->sa_family = tswap16(addr->sa_family);
1498     }
1499     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1500         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1501         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1502         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1503     } else if (addr->sa_family == AF_PACKET) {
1504         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1505         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1506         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1507     } else if (addr->sa_family == AF_INET6 &&
1508                len >= sizeof(struct target_sockaddr_in6)) {
1509         struct target_sockaddr_in6 *target_in6 =
1510                (struct target_sockaddr_in6 *)target_saddr;
1511         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1512     }
1513     unlock_user(target_saddr, target_addr, len);
1514 
1515     return 0;
1516 }
1517 
1518 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1519                                            struct target_msghdr *target_msgh)
1520 {
1521     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1522     abi_long msg_controllen;
1523     abi_ulong target_cmsg_addr;
1524     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1525     socklen_t space = 0;
1526 
1527     msg_controllen = tswapal(target_msgh->msg_controllen);
1528     if (msg_controllen < sizeof (struct target_cmsghdr))
1529         goto the_end;
1530     target_cmsg_addr = tswapal(target_msgh->msg_control);
1531     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1532     target_cmsg_start = target_cmsg;
1533     if (!target_cmsg)
1534         return -TARGET_EFAULT;
1535 
1536     while (cmsg && target_cmsg) {
1537         void *data = CMSG_DATA(cmsg);
1538         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1539 
1540         int len = tswapal(target_cmsg->cmsg_len)
1541             - sizeof(struct target_cmsghdr);
1542 
1543         space += CMSG_SPACE(len);
1544         if (space > msgh->msg_controllen) {
1545             space -= CMSG_SPACE(len);
1546             /* This is a QEMU bug, since we allocated the payload
1547              * area ourselves (unlike overflow in host-to-target
1548              * conversion, which is just the guest giving us a buffer
1549              * that's too small). It can't happen for the payload types
1550              * we currently support; if it becomes an issue in future
1551              * we would need to improve our allocation strategy to
1552              * something more intelligent than "twice the size of the
1553              * target buffer we're reading from".
1554              */
1555             gemu_log("Host cmsg overflow\n");
1556             break;
1557         }
1558 
1559         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1560             cmsg->cmsg_level = SOL_SOCKET;
1561         } else {
1562             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1563         }
1564         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1565         cmsg->cmsg_len = CMSG_LEN(len);
1566 
1567         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1568             int *fd = (int *)data;
1569             int *target_fd = (int *)target_data;
1570             int i, numfds = len / sizeof(int);
1571 
1572             for (i = 0; i < numfds; i++) {
1573                 __get_user(fd[i], target_fd + i);
1574             }
1575         } else if (cmsg->cmsg_level == SOL_SOCKET
1576                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1577             struct ucred *cred = (struct ucred *)data;
1578             struct target_ucred *target_cred =
1579                 (struct target_ucred *)target_data;
1580 
1581             __get_user(cred->pid, &target_cred->pid);
1582             __get_user(cred->uid, &target_cred->uid);
1583             __get_user(cred->gid, &target_cred->gid);
1584         } else {
1585             gemu_log("Unsupported ancillary data: %d/%d\n",
1586                                         cmsg->cmsg_level, cmsg->cmsg_type);
1587             memcpy(data, target_data, len);
1588         }
1589 
1590         cmsg = CMSG_NXTHDR(msgh, cmsg);
1591         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1592                                          target_cmsg_start);
1593     }
1594     unlock_user(target_cmsg, target_cmsg_addr, 0);
1595  the_end:
1596     msgh->msg_controllen = space;
1597     return 0;
1598 }
1599 
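/* Convert host ancillary data in msgh back into guest format in
 * target_msgh.  Payloads whose target layout differs (e.g. SO_TIMESTAMP's
 * struct timeval) are resized via tgt_len, and MSG_CTRUNC is reported to
 * the guest when its control buffer is too small to hold the result.
 */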
1600 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1601                                            struct msghdr *msgh)
1602 {
1603     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1604     abi_long msg_controllen;
1605     abi_ulong target_cmsg_addr;
1606     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1607     socklen_t space = 0;
1608 
1609     msg_controllen = tswapal(target_msgh->msg_controllen);
1610     if (msg_controllen < sizeof (struct target_cmsghdr))
1611         goto the_end;
1612     target_cmsg_addr = tswapal(target_msgh->msg_control);
1613     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1614     target_cmsg_start = target_cmsg;
1615     if (!target_cmsg)
1616         return -TARGET_EFAULT;
1617 
1618     while (cmsg && target_cmsg) {
1619         void *data = CMSG_DATA(cmsg);
1620         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1621 
1622         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1623         int tgt_len, tgt_space;
1624 
1625         /* We never copy a half-header but may copy half-data;
1626          * this is Linux's behaviour in put_cmsg(). Note that
1627          * truncation here is a guest problem (which we report
1628          * to the guest via the CTRUNC bit), unlike truncation
1629          * in target_to_host_cmsg, which is a QEMU bug.
1630          */
1631         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1632             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1633             break;
1634         }
1635 
1636         if (cmsg->cmsg_level == SOL_SOCKET) {
1637             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1638         } else {
1639             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1640         }
1641         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1642 
1643         /* Payload types which need a different size of payload on
1644          * the target must adjust tgt_len here.
1645          */
1646         tgt_len = len;
1647         switch (cmsg->cmsg_level) {
1648         case SOL_SOCKET:
1649             switch (cmsg->cmsg_type) {
1650             case SO_TIMESTAMP:
1651                 tgt_len = sizeof(struct target_timeval);
1652                 break;
1653             default:
1654                 break;
1655             }
1656             break;
1657         default:
1658             break;
1659         }
1660 
1661         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1662             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1663             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1664         }
1665 
1666         /* We must now copy-and-convert len bytes of payload
1667          * into tgt_len bytes of destination space. Bear in mind
1668          * that in both source and destination we may be dealing
1669          * with a truncated value!
1670          */
1671         switch (cmsg->cmsg_level) {
1672         case SOL_SOCKET:
1673             switch (cmsg->cmsg_type) {
1674             case SCM_RIGHTS:
1675             {
1676                 int *fd = (int *)data;
1677                 int *target_fd = (int *)target_data;
1678                 int i, numfds = tgt_len / sizeof(int);
1679 
1680                 for (i = 0; i < numfds; i++) {
1681                     __put_user(fd[i], target_fd + i);
1682                 }
1683                 break;
1684             }
1685             case SO_TIMESTAMP:
1686             {
1687                 struct timeval *tv = (struct timeval *)data;
1688                 struct target_timeval *target_tv =
1689                     (struct target_timeval *)target_data;
1690 
1691                 if (len != sizeof(struct timeval) ||
1692                     tgt_len != sizeof(struct target_timeval)) {
1693                     goto unimplemented;
1694                 }
1695 
1696                 /* copy struct timeval to target */
1697                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1698                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1699                 break;
1700             }
1701             case SCM_CREDENTIALS:
1702             {
1703                 struct ucred *cred = (struct ucred *)data;
1704                 struct target_ucred *target_cred =
1705                     (struct target_ucred *)target_data;
1706 
1707                 __put_user(cred->pid, &target_cred->pid);
1708                 __put_user(cred->uid, &target_cred->uid);
1709                 __put_user(cred->gid, &target_cred->gid);
1710                 break;
1711             }
1712             default:
1713                 goto unimplemented;
1714             }
1715             break;
1716 
1717         case SOL_IP:
1718             switch (cmsg->cmsg_type) {
1719             case IP_TTL:
1720             {
1721                 uint32_t *v = (uint32_t *)data;
1722                 uint32_t *t_int = (uint32_t *)target_data;
1723 
1724                 if (len != sizeof(uint32_t) ||
1725                     tgt_len != sizeof(uint32_t)) {
1726                     goto unimplemented;
1727                 }
1728                 __put_user(*v, t_int);
1729                 break;
1730             }
1731             case IP_RECVERR:
1732             {
1733                 struct errhdr_t {
1734                    struct sock_extended_err ee;
1735                    struct sockaddr_in offender;
1736                 };
1737                 struct errhdr_t *errh = (struct errhdr_t *)data;
1738                 struct errhdr_t *target_errh =
1739                     (struct errhdr_t *)target_data;
1740 
1741                 if (len != sizeof(struct errhdr_t) ||
1742                     tgt_len != sizeof(struct errhdr_t)) {
1743                     goto unimplemented;
1744                 }
1745                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1746                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1747                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1748                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1749                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1750                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1751                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1752                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1753                     (void *) &errh->offender, sizeof(errh->offender));
1754                 break;
1755             }
1756             default:
1757                 goto unimplemented;
1758             }
1759             break;
1760 
1761         case SOL_IPV6:
1762             switch (cmsg->cmsg_type) {
1763             case IPV6_HOPLIMIT:
1764             {
1765                 uint32_t *v = (uint32_t *)data;
1766                 uint32_t *t_int = (uint32_t *)target_data;
1767 
1768                 if (len != sizeof(uint32_t) ||
1769                     tgt_len != sizeof(uint32_t)) {
1770                     goto unimplemented;
1771                 }
1772                 __put_user(*v, t_int);
1773                 break;
1774             }
1775             case IPV6_RECVERR:
1776             {
1777                 struct errhdr6_t {
1778                    struct sock_extended_err ee;
1779                    struct sockaddr_in6 offender;
1780                 };
1781                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1782                 struct errhdr6_t *target_errh =
1783                     (struct errhdr6_t *)target_data;
1784 
1785                 if (len != sizeof(struct errhdr6_t) ||
1786                     tgt_len != sizeof(struct errhdr6_t)) {
1787                     goto unimplemented;
1788                 }
1789                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1790                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1791                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1792                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1793                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1794                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1795                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1796                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1797                     (void *) &errh->offender, sizeof(errh->offender));
1798                 break;
1799             }
1800             default:
1801                 goto unimplemented;
1802             }
1803             break;
1804 
1805         default:
1806         unimplemented:
1807             gemu_log("Unsupported ancillary data: %d/%d\n",
1808                                         cmsg->cmsg_level, cmsg->cmsg_type);
1809             memcpy(target_data, data, MIN(len, tgt_len));
1810             if (tgt_len > len) {
1811                 memset(target_data + len, 0, tgt_len - len);
1812             }
1813         }
1814 
1815         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1816         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1817         if (msg_controllen < tgt_space) {
1818             tgt_space = msg_controllen;
1819         }
1820         msg_controllen -= tgt_space;
1821         space += tgt_space;
1822         cmsg = CMSG_NXTHDR(msgh, cmsg);
1823         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1824                                          target_cmsg_start);
1825     }
1826     unlock_user(target_cmsg, target_cmsg_addr, space);
1827  the_end:
1828     target_msgh->msg_controllen = tswapal(space);
1829     return 0;
1830 }
1831 
1832 /* do_setsockopt() Must return target values and target errnos. */
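/* The pattern below: option values are copied in from guest memory and
 * converted to host byte order and layout where needed, then handed to
 * the host setsockopt().  Socket-level options arrive as
 * TARGET_SOL_SOCKET/TARGET_SO_* and are remapped to the host
 * SOL_SOCKET/SO_* constants, which differ between architectures.
 */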
1833 static abi_long do_setsockopt(int sockfd, int level, int optname,
1834                               abi_ulong optval_addr, socklen_t optlen)
1835 {
1836     abi_long ret;
1837     int val;
1838     struct ip_mreqn *ip_mreq;
1839     struct ip_mreq_source *ip_mreq_source;
1840 
1841     switch(level) {
1842     case SOL_TCP:
1843         /* TCP options all take an 'int' value.  */
1844         if (optlen < sizeof(uint32_t))
1845             return -TARGET_EINVAL;
1846 
1847         if (get_user_u32(val, optval_addr))
1848             return -TARGET_EFAULT;
1849         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1850         break;
1851     case SOL_IP:
1852         switch(optname) {
1853         case IP_TOS:
1854         case IP_TTL:
1855         case IP_HDRINCL:
1856         case IP_ROUTER_ALERT:
1857         case IP_RECVOPTS:
1858         case IP_RETOPTS:
1859         case IP_PKTINFO:
1860         case IP_MTU_DISCOVER:
1861         case IP_RECVERR:
1862         case IP_RECVTTL:
1863         case IP_RECVTOS:
1864 #ifdef IP_FREEBIND
1865         case IP_FREEBIND:
1866 #endif
1867         case IP_MULTICAST_TTL:
1868         case IP_MULTICAST_LOOP:
1869             val = 0;
1870             if (optlen >= sizeof(uint32_t)) {
1871                 if (get_user_u32(val, optval_addr))
1872                     return -TARGET_EFAULT;
1873             } else if (optlen >= 1) {
1874                 if (get_user_u8(val, optval_addr))
1875                     return -TARGET_EFAULT;
1876             }
1877             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1878             break;
1879         case IP_ADD_MEMBERSHIP:
1880         case IP_DROP_MEMBERSHIP:
1881             if (optlen < sizeof (struct target_ip_mreq) ||
1882                 optlen > sizeof (struct target_ip_mreqn))
1883                 return -TARGET_EINVAL;
1884 
1885             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1886             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1887             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1888             break;
1889 
1890         case IP_BLOCK_SOURCE:
1891         case IP_UNBLOCK_SOURCE:
1892         case IP_ADD_SOURCE_MEMBERSHIP:
1893         case IP_DROP_SOURCE_MEMBERSHIP:
1894             if (optlen != sizeof (struct target_ip_mreq_source))
1895                 return -TARGET_EINVAL;
1896 
1897             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1898             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1899             unlock_user(ip_mreq_source, optval_addr, 0);
1900             break;
1901 
1902         default:
1903             goto unimplemented;
1904         }
1905         break;
1906     case SOL_IPV6:
1907         switch (optname) {
1908         case IPV6_MTU_DISCOVER:
1909         case IPV6_MTU:
1910         case IPV6_V6ONLY:
1911         case IPV6_RECVPKTINFO:
1912         case IPV6_UNICAST_HOPS:
1913         case IPV6_MULTICAST_HOPS:
1914         case IPV6_MULTICAST_LOOP:
1915         case IPV6_RECVERR:
1916         case IPV6_RECVHOPLIMIT:
1917         case IPV6_2292HOPLIMIT:
1918         case IPV6_CHECKSUM:
1919         case IPV6_ADDRFORM:
1920         case IPV6_2292PKTINFO:
1921         case IPV6_RECVTCLASS:
1922         case IPV6_RECVRTHDR:
1923         case IPV6_2292RTHDR:
1924         case IPV6_RECVHOPOPTS:
1925         case IPV6_2292HOPOPTS:
1926         case IPV6_RECVDSTOPTS:
1927         case IPV6_2292DSTOPTS:
1928         case IPV6_TCLASS:
1929 #ifdef IPV6_RECVPATHMTU
1930         case IPV6_RECVPATHMTU:
1931 #endif
1932 #ifdef IPV6_TRANSPARENT
1933         case IPV6_TRANSPARENT:
1934 #endif
1935 #ifdef IPV6_FREEBIND
1936         case IPV6_FREEBIND:
1937 #endif
1938 #ifdef IPV6_RECVORIGDSTADDR
1939         case IPV6_RECVORIGDSTADDR:
1940 #endif
1941             val = 0;
1942             if (optlen < sizeof(uint32_t)) {
1943                 return -TARGET_EINVAL;
1944             }
1945             if (get_user_u32(val, optval_addr)) {
1946                 return -TARGET_EFAULT;
1947             }
1948             ret = get_errno(setsockopt(sockfd, level, optname,
1949                                        &val, sizeof(val)));
1950             break;
1951         case IPV6_PKTINFO:
1952         {
1953             struct in6_pktinfo pki;
1954 
1955             if (optlen < sizeof(pki)) {
1956                 return -TARGET_EINVAL;
1957             }
1958 
1959             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1960                 return -TARGET_EFAULT;
1961             }
1962 
1963             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1964 
1965             ret = get_errno(setsockopt(sockfd, level, optname,
1966                                        &pki, sizeof(pki)));
1967             break;
1968         }
1969         case IPV6_ADD_MEMBERSHIP:
1970         case IPV6_DROP_MEMBERSHIP:
1971         {
1972             struct ipv6_mreq ipv6mreq;
1973 
1974             if (optlen < sizeof(ipv6mreq)) {
1975                 return -TARGET_EINVAL;
1976             }
1977 
1978             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1979                 return -TARGET_EFAULT;
1980             }
1981 
1982             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1983 
1984             ret = get_errno(setsockopt(sockfd, level, optname,
1985                                        &ipv6mreq, sizeof(ipv6mreq)));
1986             break;
1987         }
1988         default:
1989             goto unimplemented;
1990         }
1991         break;
1992     case SOL_ICMPV6:
1993         switch (optname) {
1994         case ICMPV6_FILTER:
1995         {
1996             struct icmp6_filter icmp6f;
1997 
1998             if (optlen > sizeof(icmp6f)) {
1999                 optlen = sizeof(icmp6f);
2000             }
2001 
2002             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2003                 return -TARGET_EFAULT;
2004             }
2005 
2006             for (val = 0; val < 8; val++) {
2007                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2008             }
2009 
2010             ret = get_errno(setsockopt(sockfd, level, optname,
2011                                        &icmp6f, optlen));
2012             break;
2013         }
2014         default:
2015             goto unimplemented;
2016         }
2017         break;
2018     case SOL_RAW:
2019         switch (optname) {
2020         case ICMP_FILTER:
2021         case IPV6_CHECKSUM:
2022             /* These take a u32 value.  */
2023             if (optlen < sizeof(uint32_t)) {
2024                 return -TARGET_EINVAL;
2025             }
2026 
2027             if (get_user_u32(val, optval_addr)) {
2028                 return -TARGET_EFAULT;
2029             }
2030             ret = get_errno(setsockopt(sockfd, level, optname,
2031                                        &val, sizeof(val)));
2032             break;
2033 
2034         default:
2035             goto unimplemented;
2036         }
2037         break;
2038 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2039     case SOL_ALG:
2040         switch (optname) {
2041         case ALG_SET_KEY:
2042         {
2043             char *alg_key = g_malloc(optlen);
2044 
2045             if (!alg_key) {
2046                 return -TARGET_ENOMEM;
2047             }
2048             if (copy_from_user(alg_key, optval_addr, optlen)) {
2049                 g_free(alg_key);
2050                 return -TARGET_EFAULT;
2051             }
2052             ret = get_errno(setsockopt(sockfd, level, optname,
2053                                        alg_key, optlen));
2054             g_free(alg_key);
2055             break;
2056         }
2057         case ALG_SET_AEAD_AUTHSIZE:
2058         {
2059             ret = get_errno(setsockopt(sockfd, level, optname,
2060                                        NULL, optlen));
2061             break;
2062         }
2063         default:
2064             goto unimplemented;
2065         }
2066         break;
2067 #endif
2068     case TARGET_SOL_SOCKET:
2069         switch (optname) {
2070         case TARGET_SO_RCVTIMEO:
2071         {
2072                 struct timeval tv;
2073 
2074                 optname = SO_RCVTIMEO;
2075 
2076 set_timeout:
2077                 if (optlen != sizeof(struct target_timeval)) {
2078                     return -TARGET_EINVAL;
2079                 }
2080 
2081                 if (copy_from_user_timeval(&tv, optval_addr)) {
2082                     return -TARGET_EFAULT;
2083                 }
2084 
2085                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2086                                 &tv, sizeof(tv)));
2087                 return ret;
2088         }
2089         case TARGET_SO_SNDTIMEO:
2090                 optname = SO_SNDTIMEO;
2091                 goto set_timeout;
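        /* SO_ATTACH_FILTER takes a struct sock_fprog; the classic BPF
         * program it points to is copied instruction by instruction with
         * the 16-bit and 32-bit fields byteswapped into host order.
         */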
2092         case TARGET_SO_ATTACH_FILTER:
2093         {
2094                 struct target_sock_fprog *tfprog;
2095                 struct target_sock_filter *tfilter;
2096                 struct sock_fprog fprog;
2097                 struct sock_filter *filter;
2098                 int i;
2099 
2100                 if (optlen != sizeof(*tfprog)) {
2101                     return -TARGET_EINVAL;
2102                 }
2103                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2104                     return -TARGET_EFAULT;
2105                 }
2106                 if (!lock_user_struct(VERIFY_READ, tfilter,
2107                                       tswapal(tfprog->filter), 0)) {
2108                     unlock_user_struct(tfprog, optval_addr, 1);
2109                     return -TARGET_EFAULT;
2110                 }
2111 
2112                 fprog.len = tswap16(tfprog->len);
2113                 filter = g_try_new(struct sock_filter, fprog.len);
2114                 if (filter == NULL) {
2115                     unlock_user_struct(tfilter, tfprog->filter, 1);
2116                     unlock_user_struct(tfprog, optval_addr, 1);
2117                     return -TARGET_ENOMEM;
2118                 }
2119                 for (i = 0; i < fprog.len; i++) {
2120                     filter[i].code = tswap16(tfilter[i].code);
2121                     filter[i].jt = tfilter[i].jt;
2122                     filter[i].jf = tfilter[i].jf;
2123                     filter[i].k = tswap32(tfilter[i].k);
2124                 }
2125                 fprog.filter = filter;
2126 
2127                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2128                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2129                 g_free(filter);
2130 
2131                 unlock_user_struct(tfilter, tfprog->filter, 1);
2132                 unlock_user_struct(tfprog, optval_addr, 1);
2133                 return ret;
2134         }
2135         case TARGET_SO_BINDTODEVICE:
2136         {
2137                 char *dev_ifname, *addr_ifname;
2138 
2139                 if (optlen > IFNAMSIZ - 1) {
2140                     optlen = IFNAMSIZ - 1;
2141                 }
2142                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2143                 if (!dev_ifname) {
2144                     return -TARGET_EFAULT;
2145                 }
2146                 optname = SO_BINDTODEVICE;
2147                 addr_ifname = alloca(IFNAMSIZ);
2148                 memcpy(addr_ifname, dev_ifname, optlen);
2149                 addr_ifname[optlen] = 0;
2150                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2151                                            addr_ifname, optlen));
2152                 unlock_user(dev_ifname, optval_addr, 0);
2153                 return ret;
2154         }
2155         case TARGET_SO_LINGER:
2156         {
2157                 struct linger lg;
2158                 struct target_linger *tlg;
2159 
2160                 if (optlen != sizeof(struct target_linger)) {
2161                     return -TARGET_EINVAL;
2162                 }
2163                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2164                     return -TARGET_EFAULT;
2165                 }
2166                 __get_user(lg.l_onoff, &tlg->l_onoff);
2167                 __get_user(lg.l_linger, &tlg->l_linger);
2168                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2169                                 &lg, sizeof(lg)));
2170                 unlock_user_struct(tlg, optval_addr, 0);
2171                 return ret;
2172         }
2173         /* Options with 'int' argument.  */
2174         case TARGET_SO_DEBUG:
2175                 optname = SO_DEBUG;
2176                 break;
2177         case TARGET_SO_REUSEADDR:
2178                 optname = SO_REUSEADDR;
2179                 break;
2180 #ifdef SO_REUSEPORT
2181         case TARGET_SO_REUSEPORT:
2182                 optname = SO_REUSEPORT;
2183                 break;
2184 #endif
2185         case TARGET_SO_TYPE:
2186                 optname = SO_TYPE;
2187                 break;
2188         case TARGET_SO_ERROR:
2189                 optname = SO_ERROR;
2190                 break;
2191         case TARGET_SO_DONTROUTE:
2192                 optname = SO_DONTROUTE;
2193                 break;
2194         case TARGET_SO_BROADCAST:
2195                 optname = SO_BROADCAST;
2196                 break;
2197         case TARGET_SO_SNDBUF:
2198                 optname = SO_SNDBUF;
2199                 break;
2200         case TARGET_SO_SNDBUFFORCE:
2201                 optname = SO_SNDBUFFORCE;
2202                 break;
2203         case TARGET_SO_RCVBUF:
2204                 optname = SO_RCVBUF;
2205                 break;
2206         case TARGET_SO_RCVBUFFORCE:
2207                 optname = SO_RCVBUFFORCE;
2208                 break;
2209         case TARGET_SO_KEEPALIVE:
2210                 optname = SO_KEEPALIVE;
2211                 break;
2212         case TARGET_SO_OOBINLINE:
2213                 optname = SO_OOBINLINE;
2214                 break;
2215         case TARGET_SO_NO_CHECK:
2216                 optname = SO_NO_CHECK;
2217                 break;
2218         case TARGET_SO_PRIORITY:
2219                 optname = SO_PRIORITY;
2220                 break;
2221 #ifdef SO_BSDCOMPAT
2222         case TARGET_SO_BSDCOMPAT:
2223                 optname = SO_BSDCOMPAT;
2224                 break;
2225 #endif
2226         case TARGET_SO_PASSCRED:
2227                 optname = SO_PASSCRED;
2228                 break;
2229         case TARGET_SO_PASSSEC:
2230                 optname = SO_PASSSEC;
2231                 break;
2232         case TARGET_SO_TIMESTAMP:
2233                 optname = SO_TIMESTAMP;
2234                 break;
2235         case TARGET_SO_RCVLOWAT:
2236                 optname = SO_RCVLOWAT;
2237                 break;
2238         default:
2239             goto unimplemented;
2240         }
2241         if (optlen < sizeof(uint32_t))
2242             return -TARGET_EINVAL;
2243 
2244         if (get_user_u32(val, optval_addr))
2245             return -TARGET_EFAULT;
2246         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2247         break;
2248     default:
2249     unimplemented:
2250         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2251         ret = -TARGET_ENOPROTOOPT;
2252     }
2253     return ret;
2254 }
2255 
2256 /* do_getsockopt() Must return target values and target errnos. */
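/* Most options funnel into the int_case path below: the host value is
 * fetched as an int and written back to the guest as either four bytes
 * or a single byte, depending on the optlen the guest supplied.
 */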
2257 static abi_long do_getsockopt(int sockfd, int level, int optname,
2258                               abi_ulong optval_addr, abi_ulong optlen)
2259 {
2260     abi_long ret;
2261     int len, val;
2262     socklen_t lv;
2263 
2264     switch(level) {
2265     case TARGET_SOL_SOCKET:
2266         level = SOL_SOCKET;
2267         switch (optname) {
2268         /* These don't just return a single integer */
2269         case TARGET_SO_RCVTIMEO:
2270         case TARGET_SO_SNDTIMEO:
2271         case TARGET_SO_PEERNAME:
2272             goto unimplemented;
2273         case TARGET_SO_PEERCRED: {
2274             struct ucred cr;
2275             socklen_t crlen;
2276             struct target_ucred *tcr;
2277 
2278             if (get_user_u32(len, optlen)) {
2279                 return -TARGET_EFAULT;
2280             }
2281             if (len < 0) {
2282                 return -TARGET_EINVAL;
2283             }
2284 
2285             crlen = sizeof(cr);
2286             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2287                                        &cr, &crlen));
2288             if (ret < 0) {
2289                 return ret;
2290             }
2291             if (len > crlen) {
2292                 len = crlen;
2293             }
2294             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2295                 return -TARGET_EFAULT;
2296             }
2297             __put_user(cr.pid, &tcr->pid);
2298             __put_user(cr.uid, &tcr->uid);
2299             __put_user(cr.gid, &tcr->gid);
2300             unlock_user_struct(tcr, optval_addr, 1);
2301             if (put_user_u32(len, optlen)) {
2302                 return -TARGET_EFAULT;
2303             }
2304             break;
2305         }
2306         case TARGET_SO_LINGER:
2307         {
2308             struct linger lg;
2309             socklen_t lglen;
2310             struct target_linger *tlg;
2311 
2312             if (get_user_u32(len, optlen)) {
2313                 return -TARGET_EFAULT;
2314             }
2315             if (len < 0) {
2316                 return -TARGET_EINVAL;
2317             }
2318 
2319             lglen = sizeof(lg);
2320             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2321                                        &lg, &lglen));
2322             if (ret < 0) {
2323                 return ret;
2324             }
2325             if (len > lglen) {
2326                 len = lglen;
2327             }
2328             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2329                 return -TARGET_EFAULT;
2330             }
2331             __put_user(lg.l_onoff, &tlg->l_onoff);
2332             __put_user(lg.l_linger, &tlg->l_linger);
2333             unlock_user_struct(tlg, optval_addr, 1);
2334             if (put_user_u32(len, optlen)) {
2335                 return -TARGET_EFAULT;
2336             }
2337             break;
2338         }
2339         /* Options with 'int' argument.  */
2340         case TARGET_SO_DEBUG:
2341             optname = SO_DEBUG;
2342             goto int_case;
2343         case TARGET_SO_REUSEADDR:
2344             optname = SO_REUSEADDR;
2345             goto int_case;
2346 #ifdef SO_REUSEPORT
2347         case TARGET_SO_REUSEPORT:
2348             optname = SO_REUSEPORT;
2349             goto int_case;
2350 #endif
2351         case TARGET_SO_TYPE:
2352             optname = SO_TYPE;
2353             goto int_case;
2354         case TARGET_SO_ERROR:
2355             optname = SO_ERROR;
2356             goto int_case;
2357         case TARGET_SO_DONTROUTE:
2358             optname = SO_DONTROUTE;
2359             goto int_case;
2360         case TARGET_SO_BROADCAST:
2361             optname = SO_BROADCAST;
2362             goto int_case;
2363         case TARGET_SO_SNDBUF:
2364             optname = SO_SNDBUF;
2365             goto int_case;
2366         case TARGET_SO_RCVBUF:
2367             optname = SO_RCVBUF;
2368             goto int_case;
2369         case TARGET_SO_KEEPALIVE:
2370             optname = SO_KEEPALIVE;
2371             goto int_case;
2372         case TARGET_SO_OOBINLINE:
2373             optname = SO_OOBINLINE;
2374             goto int_case;
2375         case TARGET_SO_NO_CHECK:
2376             optname = SO_NO_CHECK;
2377             goto int_case;
2378         case TARGET_SO_PRIORITY:
2379             optname = SO_PRIORITY;
2380             goto int_case;
2381 #ifdef SO_BSDCOMPAT
2382         case TARGET_SO_BSDCOMPAT:
2383             optname = SO_BSDCOMPAT;
2384             goto int_case;
2385 #endif
2386         case TARGET_SO_PASSCRED:
2387             optname = SO_PASSCRED;
2388             goto int_case;
2389         case TARGET_SO_TIMESTAMP:
2390             optname = SO_TIMESTAMP;
2391             goto int_case;
2392         case TARGET_SO_RCVLOWAT:
2393             optname = SO_RCVLOWAT;
2394             goto int_case;
2395         case TARGET_SO_ACCEPTCONN:
2396             optname = SO_ACCEPTCONN;
2397             goto int_case;
2398         default:
2399             goto int_case;
2400         }
2401         break;
2402     case SOL_TCP:
2403         /* TCP options all take an 'int' value.  */
2404     int_case:
2405         if (get_user_u32(len, optlen))
2406             return -TARGET_EFAULT;
2407         if (len < 0)
2408             return -TARGET_EINVAL;
2409         lv = sizeof(lv);
2410         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2411         if (ret < 0)
2412             return ret;
2413         if (optname == SO_TYPE) {
2414             val = host_to_target_sock_type(val);
2415         }
2416         if (len > lv)
2417             len = lv;
2418         if (len == 4) {
2419             if (put_user_u32(val, optval_addr))
2420                 return -TARGET_EFAULT;
2421         } else {
2422             if (put_user_u8(val, optval_addr))
2423                 return -TARGET_EFAULT;
2424         }
2425         if (put_user_u32(len, optlen))
2426             return -TARGET_EFAULT;
2427         break;
2428     case SOL_IP:
2429         switch(optname) {
2430         case IP_TOS:
2431         case IP_TTL:
2432         case IP_HDRINCL:
2433         case IP_ROUTER_ALERT:
2434         case IP_RECVOPTS:
2435         case IP_RETOPTS:
2436         case IP_PKTINFO:
2437         case IP_MTU_DISCOVER:
2438         case IP_RECVERR:
2439         case IP_RECVTOS:
2440 #ifdef IP_FREEBIND
2441         case IP_FREEBIND:
2442 #endif
2443         case IP_MULTICAST_TTL:
2444         case IP_MULTICAST_LOOP:
2445             if (get_user_u32(len, optlen))
2446                 return -TARGET_EFAULT;
2447             if (len < 0)
2448                 return -TARGET_EINVAL;
2449             lv = sizeof(lv);
2450             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2451             if (ret < 0)
2452                 return ret;
2453             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2454                 len = 1;
2455                 if (put_user_u32(len, optlen)
2456                     || put_user_u8(val, optval_addr))
2457                     return -TARGET_EFAULT;
2458             } else {
2459                 if (len > sizeof(int))
2460                     len = sizeof(int);
2461                 if (put_user_u32(len, optlen)
2462                     || put_user_u32(val, optval_addr))
2463                     return -TARGET_EFAULT;
2464             }
2465             break;
2466         default:
2467             ret = -TARGET_ENOPROTOOPT;
2468             break;
2469         }
2470         break;
2471     case SOL_IPV6:
2472         switch (optname) {
2473         case IPV6_MTU_DISCOVER:
2474         case IPV6_MTU:
2475         case IPV6_V6ONLY:
2476         case IPV6_RECVPKTINFO:
2477         case IPV6_UNICAST_HOPS:
2478         case IPV6_MULTICAST_HOPS:
2479         case IPV6_MULTICAST_LOOP:
2480         case IPV6_RECVERR:
2481         case IPV6_RECVHOPLIMIT:
2482         case IPV6_2292HOPLIMIT:
2483         case IPV6_CHECKSUM:
2484         case IPV6_ADDRFORM:
2485         case IPV6_2292PKTINFO:
2486         case IPV6_RECVTCLASS:
2487         case IPV6_RECVRTHDR:
2488         case IPV6_2292RTHDR:
2489         case IPV6_RECVHOPOPTS:
2490         case IPV6_2292HOPOPTS:
2491         case IPV6_RECVDSTOPTS:
2492         case IPV6_2292DSTOPTS:
2493         case IPV6_TCLASS:
2494 #ifdef IPV6_RECVPATHMTU
2495         case IPV6_RECVPATHMTU:
2496 #endif
2497 #ifdef IPV6_TRANSPARENT
2498         case IPV6_TRANSPARENT:
2499 #endif
2500 #ifdef IPV6_FREEBIND
2501         case IPV6_FREEBIND:
2502 #endif
2503 #ifdef IPV6_RECVORIGDSTADDR
2504         case IPV6_RECVORIGDSTADDR:
2505 #endif
2506             if (get_user_u32(len, optlen))
2507                 return -TARGET_EFAULT;
2508             if (len < 0)
2509                 return -TARGET_EINVAL;
2510             lv = sizeof(lv);
2511             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2512             if (ret < 0)
2513                 return ret;
2514             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2515                 len = 1;
2516                 if (put_user_u32(len, optlen)
2517                     || put_user_u8(val, optval_addr))
2518                     return -TARGET_EFAULT;
2519             } else {
2520                 if (len > sizeof(int))
2521                     len = sizeof(int);
2522                 if (put_user_u32(len, optlen)
2523                     || put_user_u32(val, optval_addr))
2524                     return -TARGET_EFAULT;
2525             }
2526             break;
2527         default:
2528             ret = -TARGET_ENOPROTOOPT;
2529             break;
2530         }
2531         break;
2532     default:
2533     unimplemented:
2534         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2535                  level, optname);
2536         ret = -TARGET_EOPNOTSUPP;
2537         break;
2538     }
2539     return ret;
2540 }
2541 
2542 /* Convert target low/high pair representing file offset into the host
2543  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2544  * as the kernel doesn't handle them either.
2545  */
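/* For example, a 32-bit guest passing the 64-bit offset 0x100000000
 * supplies tlow = 0 and thigh = 1; on a 64-bit host the whole offset
 * ends up in *hlow and *hhigh becomes 0, while on a 32-bit host the two
 * halves pass straight through.
 */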
2546 static void target_to_host_low_high(abi_ulong tlow,
2547                                     abi_ulong thigh,
2548                                     unsigned long *hlow,
2549                                     unsigned long *hhigh)
2550 {
2551     uint64_t off = tlow |
2552         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2553         TARGET_LONG_BITS / 2;
2554 
2555     *hlow = off;
2556     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2557 }
2558 
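/* Translate a guest iovec array into a locked host iovec array.  The
 * entries are checked against IOV_MAX and an overall length cap; a bad
 * buffer address in the first entry is a fault, while later bad entries
 * become zero-length slots so the syscall performs a partial transfer.
 * On failure NULL is returned with errno set to a host errno (a zero
 * count also returns NULL, with errno cleared).
 */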
2559 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2560                                 abi_ulong count, int copy)
2561 {
2562     struct target_iovec *target_vec;
2563     struct iovec *vec;
2564     abi_ulong total_len, max_len;
2565     int i;
2566     int err = 0;
2567     bool bad_address = false;
2568 
2569     if (count == 0) {
2570         errno = 0;
2571         return NULL;
2572     }
2573     if (count > IOV_MAX) {
2574         errno = EINVAL;
2575         return NULL;
2576     }
2577 
2578     vec = g_try_new0(struct iovec, count);
2579     if (vec == NULL) {
2580         errno = ENOMEM;
2581         return NULL;
2582     }
2583 
2584     target_vec = lock_user(VERIFY_READ, target_addr,
2585                            count * sizeof(struct target_iovec), 1);
2586     if (target_vec == NULL) {
2587         err = EFAULT;
2588         goto fail2;
2589     }
2590 
2591     /* ??? If host page size > target page size, this will result in a
2592        value larger than what we can actually support.  */
2593     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2594     total_len = 0;
2595 
2596     for (i = 0; i < count; i++) {
2597         abi_ulong base = tswapal(target_vec[i].iov_base);
2598         abi_long len = tswapal(target_vec[i].iov_len);
2599 
2600         if (len < 0) {
2601             err = EINVAL;
2602             goto fail;
2603         } else if (len == 0) {
2604             /* The buffer pointer is ignored for zero-length entries.  */
2605             vec[i].iov_base = 0;
2606         } else {
2607             vec[i].iov_base = lock_user(type, base, len, copy);
2608             /* If the first buffer pointer is bad, this is a fault.  But
2609              * subsequent bad buffers will result in a partial write; this
2610              * is realized by filling the vector with null pointers and
2611              * zero lengths. */
2612             if (!vec[i].iov_base) {
2613                 if (i == 0) {
2614                     err = EFAULT;
2615                     goto fail;
2616                 } else {
2617                     bad_address = true;
2618                 }
2619             }
2620             if (bad_address) {
2621                 len = 0;
2622             }
2623             if (len > max_len - total_len) {
2624                 len = max_len - total_len;
2625             }
2626         }
2627         vec[i].iov_len = len;
2628         total_len += len;
2629     }
2630 
2631     unlock_user(target_vec, target_addr, 0);
2632     return vec;
2633 
2634  fail:
2635     while (--i >= 0) {
2636         if (tswapal(target_vec[i].iov_len) > 0) {
2637             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2638         }
2639     }
2640     unlock_user(target_vec, target_addr, 0);
2641  fail2:
2642     g_free(vec);
2643     errno = err;
2644     return NULL;
2645 }
2646 
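/* Release the guest buffers locked by lock_iovec(), copying data back to
 * the guest when 'copy' is set (i.e. after a read-style operation).
 */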
2647 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2648                          abi_ulong count, int copy)
2649 {
2650     struct target_iovec *target_vec;
2651     int i;
2652 
2653     target_vec = lock_user(VERIFY_READ, target_addr,
2654                            count * sizeof(struct target_iovec), 1);
2655     if (target_vec) {
2656         for (i = 0; i < count; i++) {
2657             abi_ulong base = tswapal(target_vec[i].iov_base);
2658             abi_long len = tswapal(target_vec[i].iov_len);
2659             if (len < 0) {
2660                 break;
2661             }
2662             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2663         }
2664         unlock_user(target_vec, target_addr, 0);
2665     }
2666 
2667     g_free(vec);
2668 }
2669 
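/* Convert a guest socket type, including the TARGET_SOCK_CLOEXEC and
 * TARGET_SOCK_NONBLOCK flag bits, into the host encoding.  Returns a
 * target errno if the host cannot express a requested flag.
 */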
2670 static inline int target_to_host_sock_type(int *type)
2671 {
2672     int host_type = 0;
2673     int target_type = *type;
2674 
2675     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2676     case TARGET_SOCK_DGRAM:
2677         host_type = SOCK_DGRAM;
2678         break;
2679     case TARGET_SOCK_STREAM:
2680         host_type = SOCK_STREAM;
2681         break;
2682     default:
2683         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2684         break;
2685     }
2686     if (target_type & TARGET_SOCK_CLOEXEC) {
2687 #if defined(SOCK_CLOEXEC)
2688         host_type |= SOCK_CLOEXEC;
2689 #else
2690         return -TARGET_EINVAL;
2691 #endif
2692     }
2693     if (target_type & TARGET_SOCK_NONBLOCK) {
2694 #if defined(SOCK_NONBLOCK)
2695         host_type |= SOCK_NONBLOCK;
2696 #elif !defined(O_NONBLOCK)
2697         return -TARGET_EINVAL;
2698 #endif
2699     }
2700     *type = host_type;
2701     return 0;
2702 }
2703 
2704 /* Try to emulate socket type flags after socket creation.  */
2705 static int sock_flags_fixup(int fd, int target_type)
2706 {
2707 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2708     if (target_type & TARGET_SOCK_NONBLOCK) {
2709         int flags = fcntl(fd, F_GETFL);
2710         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2711             close(fd);
2712             return -TARGET_EINVAL;
2713         }
2714     }
2715 #endif
2716     return fd;
2717 }
2718 
2719 /* do_socket() Must return target values and target errnos. */
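/* Only the netlink protocols we can handle (NETLINK_ROUTE when rtnetlink
 * support is built in, NETLINK_KOBJECT_UEVENT and NETLINK_AUDIT) are
 * accepted; other netlink protocols are rejected with EPFNOSUPPORT so
 * the guest never sees untranslated messages.
 */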
2720 static abi_long do_socket(int domain, int type, int protocol)
2721 {
2722     int target_type = type;
2723     int ret;
2724 
2725     ret = target_to_host_sock_type(&type);
2726     if (ret) {
2727         return ret;
2728     }
2729 
2730     if (domain == PF_NETLINK && !(
2731 #ifdef CONFIG_RTNETLINK
2732          protocol == NETLINK_ROUTE ||
2733 #endif
2734          protocol == NETLINK_KOBJECT_UEVENT ||
2735          protocol == NETLINK_AUDIT)) {
2736         return -EPFNOSUPPORT;
2737     }
2738 
2739     if (domain == AF_PACKET ||
2740         (domain == AF_INET && type == SOCK_PACKET)) {
2741         protocol = tswap16(protocol);
2742     }
2743 
2744     ret = get_errno(socket(domain, type, protocol));
2745     if (ret >= 0) {
2746         ret = sock_flags_fixup(ret, target_type);
2747         if (type == SOCK_PACKET) {
2748             /* Handle the obsolete case: SOCK_PACKET sockets are
2749              * bound by interface name rather than by sockaddr_ll.
2750              */
2751             fd_trans_register(ret, &target_packet_trans);
2752         } else if (domain == PF_NETLINK) {
2753             switch (protocol) {
2754 #ifdef CONFIG_RTNETLINK
2755             case NETLINK_ROUTE:
2756                 fd_trans_register(ret, &target_netlink_route_trans);
2757                 break;
2758 #endif
2759             case NETLINK_KOBJECT_UEVENT:
2760                 /* nothing to do: messages are strings */
2761                 break;
2762             case NETLINK_AUDIT:
2763                 fd_trans_register(ret, &target_netlink_audit_trans);
2764                 break;
2765             default:
2766                 g_assert_not_reached();
2767             }
2768         }
2769     }
2770     return ret;
2771 }
2772 
2773 /* do_bind() Must return target values and target errnos. */
2774 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2775                         socklen_t addrlen)
2776 {
2777     void *addr;
2778     abi_long ret;
2779 
2780     if ((int)addrlen < 0) {
2781         return -TARGET_EINVAL;
2782     }
2783 
2784     addr = alloca(addrlen+1);
2785 
2786     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2787     if (ret)
2788         return ret;
2789 
2790     return get_errno(bind(sockfd, addr, addrlen));
2791 }
2792 
2793 /* do_connect() Must return target values and target errnos. */
2794 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2795                            socklen_t addrlen)
2796 {
2797     void *addr;
2798     abi_long ret;
2799 
2800     if ((int)addrlen < 0) {
2801         return -TARGET_EINVAL;
2802     }
2803 
2804     addr = alloca(addrlen+1);
2805 
2806     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2807     if (ret)
2808         return ret;
2809 
2810     return get_errno(safe_connect(sockfd, addr, addrlen));
2811 }
2812 
2813 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
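/* Common implementation for sendmsg and recvmsg: the guest msghdr is
 * converted piecewise (name, control data, iovec), the corresponding
 * safe_sendmsg()/safe_recvmsg() host call is made, and for receives the
 * results are converted back into the guest's msghdr.
 */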
2814 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2815                                       int flags, int send)
2816 {
2817     abi_long ret, len;
2818     struct msghdr msg;
2819     abi_ulong count;
2820     struct iovec *vec;
2821     abi_ulong target_vec;
2822 
2823     if (msgp->msg_name) {
2824         msg.msg_namelen = tswap32(msgp->msg_namelen);
2825         msg.msg_name = alloca(msg.msg_namelen+1);
2826         ret = target_to_host_sockaddr(fd, msg.msg_name,
2827                                       tswapal(msgp->msg_name),
2828                                       msg.msg_namelen);
2829         if (ret == -TARGET_EFAULT) {
2830             /* For connected sockets msg_name and msg_namelen must
2831              * be ignored, so returning EFAULT immediately is wrong.
2832              * Instead, pass a bad msg_name to the host kernel, and
2833              * let it decide whether to return EFAULT or not.
2834              */
2835             msg.msg_name = (void *)-1;
2836         } else if (ret) {
2837             goto out2;
2838         }
2839     } else {
2840         msg.msg_name = NULL;
2841         msg.msg_namelen = 0;
2842     }
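    /* Allocate twice the guest's control buffer size: the host cmsg
     * layout can be larger than the target's (see the allocation note in
     * target_to_host_cmsg()).
     */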
2843     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2844     msg.msg_control = alloca(msg.msg_controllen);
2845     memset(msg.msg_control, 0, msg.msg_controllen);
2846 
2847     msg.msg_flags = tswap32(msgp->msg_flags);
2848 
2849     count = tswapal(msgp->msg_iovlen);
2850     target_vec = tswapal(msgp->msg_iov);
2851 
2852     if (count > IOV_MAX) {
2853         /* sendmsg/recvmsg return a different errno for this condition than
2854          * readv/writev do, so we must catch it here before lock_iovec() does.
2855          */
2856         ret = -TARGET_EMSGSIZE;
2857         goto out2;
2858     }
2859 
2860     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2861                      target_vec, count, send);
2862     if (vec == NULL) {
2863         ret = -host_to_target_errno(errno);
2864         goto out2;
2865     }
2866     msg.msg_iovlen = count;
2867     msg.msg_iov = vec;
2868 
2869     if (send) {
2870         if (fd_trans_target_to_host_data(fd)) {
2871             void *host_msg;
2872 
2873             host_msg = g_malloc(msg.msg_iov->iov_len);
2874             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2875             ret = fd_trans_target_to_host_data(fd)(host_msg,
2876                                                    msg.msg_iov->iov_len);
2877             if (ret >= 0) {
2878                 msg.msg_iov->iov_base = host_msg;
2879                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2880             }
2881             g_free(host_msg);
2882         } else {
2883             ret = target_to_host_cmsg(&msg, msgp);
2884             if (ret == 0) {
2885                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2886             }
2887         }
2888     } else {
2889         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2890         if (!is_error(ret)) {
2891             len = ret;
2892             if (fd_trans_host_to_target_data(fd)) {
2893                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2894                                                MIN(msg.msg_iov->iov_len, len));
2895             } else {
2896                 ret = host_to_target_cmsg(msgp, &msg);
2897             }
2898             if (!is_error(ret)) {
2899                 msgp->msg_namelen = tswap32(msg.msg_namelen);
2900                 msgp->msg_flags = tswap32(msg.msg_flags);
2901                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2902                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2903                                     msg.msg_name, msg.msg_namelen);
2904                     if (ret) {
2905                         goto out;
2906                     }
2907                 }
2908 
2909                 ret = len;
2910             }
2911         }
2912     }
2913 
2914 out:
2915     unlock_iovec(vec, target_vec, count, !send);
2916 out2:
2917     return ret;
2918 }
2919 
2920 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2921                                int flags, int send)
2922 {
2923     abi_long ret;
2924     struct target_msghdr *msgp;
2925 
2926     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2927                           msgp,
2928                           target_msg,
2929                           send ? 1 : 0)) {
2930         return -TARGET_EFAULT;
2931     }
2932     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2933     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2934     return ret;
2935 }
2936 
2937 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2938  * so it might not have this *mmsg-specific flag either.
2939  */
2940 #ifndef MSG_WAITFORONE
2941 #define MSG_WAITFORONE 0x10000
2942 #endif
2943 
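/* sendmmsg/recvmmsg are emulated as a loop of single
 * do_sendrecvmsg_locked() calls; MSG_WAITFORONE is honoured by turning
 * on MSG_DONTWAIT once the first datagram has been transferred.
 */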
2944 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2945                                 unsigned int vlen, unsigned int flags,
2946                                 int send)
2947 {
2948     struct target_mmsghdr *mmsgp;
2949     abi_long ret = 0;
2950     int i;
2951 
2952     if (vlen > UIO_MAXIOV) {
2953         vlen = UIO_MAXIOV;
2954     }
2955 
2956     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2957     if (!mmsgp) {
2958         return -TARGET_EFAULT;
2959     }
2960 
2961     for (i = 0; i < vlen; i++) {
2962         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2963         if (is_error(ret)) {
2964             break;
2965         }
2966         mmsgp[i].msg_len = tswap32(ret);
2967         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2968         if (flags & MSG_WAITFORONE) {
2969             flags |= MSG_DONTWAIT;
2970         }
2971     }
2972 
2973     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2974 
2975     /* Return number of datagrams sent if we sent any at all;
2976      * otherwise return the error.
2977      */
2978     if (i) {
2979         return i;
2980     }
2981     return ret;
2982 }
2983 
2984 /* do_accept4() Must return target values and target errnos. */
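/* The accept4() flag bits are translated via fcntl_flags_tbl, since
 * SOCK_NONBLOCK/SOCK_CLOEXEC share their values with O_NONBLOCK and
 * O_CLOEXEC; the peer address, if requested, is converted back with
 * host_to_target_sockaddr() and truncated to the length the guest gave.
 */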
2985 static abi_long do_accept4(int fd, abi_ulong target_addr,
2986                            abi_ulong target_addrlen_addr, int flags)
2987 {
2988     socklen_t addrlen, ret_addrlen;
2989     void *addr;
2990     abi_long ret;
2991     int host_flags;
2992 
2993     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2994 
2995     if (target_addr == 0) {
2996         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2997     }
2998 
2999     /* Linux returns EINVAL if the addrlen pointer is invalid.  */
3000     if (get_user_u32(addrlen, target_addrlen_addr))
3001         return -TARGET_EINVAL;
3002 
3003     if ((int)addrlen < 0) {
3004         return -TARGET_EINVAL;
3005     }
3006 
3007     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3008         return -TARGET_EINVAL;
3009 
3010     addr = alloca(addrlen);
3011 
3012     ret_addrlen = addrlen;
3013     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3014     if (!is_error(ret)) {
3015         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3016         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3017             ret = -TARGET_EFAULT;
3018         }
3019     }
3020     return ret;
3021 }
3022 
3023 /* do_getpeername() Must return target values and target errnos. */
3024 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3025                                abi_ulong target_addrlen_addr)
3026 {
3027     socklen_t addrlen, ret_addrlen;
3028     void *addr;
3029     abi_long ret;
3030 
3031     if (get_user_u32(addrlen, target_addrlen_addr))
3032         return -TARGET_EFAULT;
3033 
3034     if ((int)addrlen < 0) {
3035         return -TARGET_EINVAL;
3036     }
3037 
3038     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3039         return -TARGET_EFAULT;
3040 
3041     addr = alloca(addrlen);
3042 
3043     ret_addrlen = addrlen;
3044     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3045     if (!is_error(ret)) {
3046         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3047         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3048             ret = -TARGET_EFAULT;
3049         }
3050     }
3051     return ret;
3052 }
3053 
3054 /* do_getsockname() Must return target values and target errnos. */
3055 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3056                                abi_ulong target_addrlen_addr)
3057 {
3058     socklen_t addrlen, ret_addrlen;
3059     void *addr;
3060     abi_long ret;
3061 
3062     if (get_user_u32(addrlen, target_addrlen_addr))
3063         return -TARGET_EFAULT;
3064 
3065     if ((int)addrlen < 0) {
3066         return -TARGET_EINVAL;
3067     }
3068 
3069     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3070         return -TARGET_EFAULT;
3071 
3072     addr = alloca(addrlen);
3073 
3074     ret_addrlen = addrlen;
3075     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3076     if (!is_error(ret)) {
3077         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3078         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3079             ret = -TARGET_EFAULT;
3080         }
3081     }
3082     return ret;
3083 }
3084 
3085 /* do_socketpair() Must return target values and target errnos. */
3086 static abi_long do_socketpair(int domain, int type, int protocol,
3087                               abi_ulong target_tab_addr)
3088 {
3089     int tab[2];
3090     abi_long ret;
3091 
3092     target_to_host_sock_type(&type);
3093 
3094     ret = get_errno(socketpair(domain, type, protocol, tab));
3095     if (!is_error(ret)) {
3096         if (put_user_s32(tab[0], target_tab_addr)
3097             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3098             ret = -TARGET_EFAULT;
3099     }
3100     return ret;
3101 }
3102 
3103 /* do_sendto() Must return target values and target errnos. */
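/* If a data translator is registered for this descriptor
 * (fd_trans_target_to_host_data()), the payload is copied into a scratch
 * buffer and converted before being passed to the host sendto().
 */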
3104 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3105                           abi_ulong target_addr, socklen_t addrlen)
3106 {
3107     void *addr;
3108     void *host_msg;
3109     void *copy_msg = NULL;
3110     abi_long ret;
3111 
3112     if ((int)addrlen < 0) {
3113         return -TARGET_EINVAL;
3114     }
3115 
3116     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3117     if (!host_msg)
3118         return -TARGET_EFAULT;
3119     if (fd_trans_target_to_host_data(fd)) {
3120         copy_msg = host_msg;
3121         host_msg = g_malloc(len);
3122         memcpy(host_msg, copy_msg, len);
3123         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3124         if (ret < 0) {
3125             goto fail;
3126         }
3127     }
3128     if (target_addr) {
3129         addr = alloca(addrlen+1);
3130         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3131         if (ret) {
3132             goto fail;
3133         }
3134         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3135     } else {
3136         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3137     }
3138 fail:
3139     if (copy_msg) {
3140         g_free(host_msg);
3141         host_msg = copy_msg;
3142     }
3143     unlock_user(host_msg, msg, 0);
3144     return ret;
3145 }
3146 
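/*
 * do_recvfrom() mirrors do_sendto(): only the MIN(ret, len) bytes actually
 * received are run through the fd's host-to-target data hook, and the
 * source address, when requested, is converted and written back together
 * with its updated length.
 */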
3147 /* do_recvfrom() must return target values and target errnos. */
3148 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3149                             abi_ulong target_addr,
3150                             abi_ulong target_addrlen)
3151 {
3152     socklen_t addrlen, ret_addrlen;
3153     void *addr;
3154     void *host_msg;
3155     abi_long ret;
3156 
3157     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3158     if (!host_msg)
3159         return -TARGET_EFAULT;
3160     if (target_addr) {
3161         if (get_user_u32(addrlen, target_addrlen)) {
3162             ret = -TARGET_EFAULT;
3163             goto fail;
3164         }
3165         if ((int)addrlen < 0) {
3166             ret = -TARGET_EINVAL;
3167             goto fail;
3168         }
3169         addr = alloca(addrlen);
3170         ret_addrlen = addrlen;
3171         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3172                                       addr, &ret_addrlen));
3173     } else {
3174         addr = NULL; /* To keep compiler quiet.  */
3175         addrlen = 0; /* To keep compiler quiet.  */
3176         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3177     }
3178     if (!is_error(ret)) {
3179         if (fd_trans_host_to_target_data(fd)) {
3180             abi_long trans;
3181             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3182             if (is_error(trans)) {
3183                 ret = trans;
3184                 goto fail;
3185             }
3186         }
3187         if (target_addr) {
3188             host_to_target_sockaddr(target_addr, addr,
3189                                     MIN(addrlen, ret_addrlen));
3190             if (put_user_u32(ret_addrlen, target_addrlen)) {
3191                 ret = -TARGET_EFAULT;
3192                 goto fail;
3193             }
3194         }
3195         unlock_user(host_msg, msg, len);
3196     } else {
3197 fail:
3198         unlock_user(host_msg, msg, 0);
3199     }
3200     return ret;
3201 }
3202 
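/*
 * Older ABIs (32-bit x86, for example) multiplex every socket operation
 * through a single socketcall(num, args) syscall.  The nargs[] table in
 * do_socketcall() gives the argument count for each operation; the
 * arguments are read as abi_longs from the guest array at vptr and then
 * dispatched to the matching do_*() helper above.
 */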
3203 #ifdef TARGET_NR_socketcall
3204 /* do_socketcall() must return target values and target errnos. */
3205 static abi_long do_socketcall(int num, abi_ulong vptr)
3206 {
3207     static const unsigned nargs[] = { /* number of arguments per operation */
3208         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3209         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3210         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3211         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3212         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3213         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3214         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3215         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3216         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3217         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3218         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3219         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3220         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3221         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3222         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3223         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3224         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3225         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3226         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3227         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3228     };
3229     abi_long a[6]; /* max 6 args */
3230     unsigned i;
3231 
3232     /* check the range of the first argument num */
3233     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3234     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3235         return -TARGET_EINVAL;
3236     }
3237     /* ensure we have space for args */
3238     if (nargs[num] > ARRAY_SIZE(a)) {
3239         return -TARGET_EINVAL;
3240     }
3241     /* collect the arguments in a[] according to nargs[] */
3242     for (i = 0; i < nargs[num]; ++i) {
3243         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3244             return -TARGET_EFAULT;
3245         }
3246     }
3247     /* now when we have the args, invoke the appropriate underlying function */
3248     switch (num) {
3249     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3250         return do_socket(a[0], a[1], a[2]);
3251     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3252         return do_bind(a[0], a[1], a[2]);
3253     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3254         return do_connect(a[0], a[1], a[2]);
3255     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3256         return get_errno(listen(a[0], a[1]));
3257     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3258         return do_accept4(a[0], a[1], a[2], 0);
3259     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3260         return do_getsockname(a[0], a[1], a[2]);
3261     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3262         return do_getpeername(a[0], a[1], a[2]);
3263     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3264         return do_socketpair(a[0], a[1], a[2], a[3]);
3265     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3266         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3267     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3268         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3269     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3270         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3271     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3272         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3273     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3274         return get_errno(shutdown(a[0], a[1]));
3275     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3276         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3277     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3278         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3279     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3280         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3281     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3282         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3283     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3284         return do_accept4(a[0], a[1], a[2], a[3]);
3285     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3286         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3287     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3288         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3289     default:
3290         gemu_log("Unsupported socketcall: %d\n", num);
3291         return -TARGET_EINVAL;
3292     }
3293 }
3294 #endif
3295 
3296 #define N_SHM_REGIONS	32
3297 
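/*
 * Book-keeping for guest shmat() attachments: do_shmat() records the guest
 * address and size of each mapping in this fixed table so that do_shmdt()
 * can clear the corresponding page flags again.
 */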
3298 static struct shm_region {
3299     abi_ulong start;
3300     abi_ulong size;
3301     bool in_use;
3302 } shm_regions[N_SHM_REGIONS];
3303 
3304 #ifndef TARGET_SEMID64_DS
3305 /* asm-generic version of this struct */
3306 struct target_semid64_ds
3307 {
3308   struct target_ipc_perm sem_perm;
3309   abi_ulong sem_otime;
3310 #if TARGET_ABI_BITS == 32
3311   abi_ulong __unused1;
3312 #endif
3313   abi_ulong sem_ctime;
3314 #if TARGET_ABI_BITS == 32
3315   abi_ulong __unused2;
3316 #endif
3317   abi_ulong sem_nsems;
3318   abi_ulong __unused3;
3319   abi_ulong __unused4;
3320 };
3321 #endif
3322 
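/*
 * Conversion helpers for struct ipc_perm.  The width of the "mode" and
 * "__seq" fields is target dependent (32 bits for mode on Alpha, MIPS and
 * PPC, 32 bits for __seq on PPC, 16 bits elsewhere), hence the per-target
 * tswap variants below.
 */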
3323 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3324                                                abi_ulong target_addr)
3325 {
3326     struct target_ipc_perm *target_ip;
3327     struct target_semid64_ds *target_sd;
3328 
3329     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3330         return -TARGET_EFAULT;
3331     target_ip = &(target_sd->sem_perm);
3332     host_ip->__key = tswap32(target_ip->__key);
3333     host_ip->uid = tswap32(target_ip->uid);
3334     host_ip->gid = tswap32(target_ip->gid);
3335     host_ip->cuid = tswap32(target_ip->cuid);
3336     host_ip->cgid = tswap32(target_ip->cgid);
3337 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3338     host_ip->mode = tswap32(target_ip->mode);
3339 #else
3340     host_ip->mode = tswap16(target_ip->mode);
3341 #endif
3342 #if defined(TARGET_PPC)
3343     host_ip->__seq = tswap32(target_ip->__seq);
3344 #else
3345     host_ip->__seq = tswap16(target_ip->__seq);
3346 #endif
3347     unlock_user_struct(target_sd, target_addr, 0);
3348     return 0;
3349 }
3350 
3351 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3352                                                struct ipc_perm *host_ip)
3353 {
3354     struct target_ipc_perm *target_ip;
3355     struct target_semid64_ds *target_sd;
3356 
3357     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3358         return -TARGET_EFAULT;
3359     target_ip = &(target_sd->sem_perm);
3360     target_ip->__key = tswap32(host_ip->__key);
3361     target_ip->uid = tswap32(host_ip->uid);
3362     target_ip->gid = tswap32(host_ip->gid);
3363     target_ip->cuid = tswap32(host_ip->cuid);
3364     target_ip->cgid = tswap32(host_ip->cgid);
3365 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3366     target_ip->mode = tswap32(host_ip->mode);
3367 #else
3368     target_ip->mode = tswap16(host_ip->mode);
3369 #endif
3370 #if defined(TARGET_PPC)
3371     target_ip->__seq = tswap32(host_ip->__seq);
3372 #else
3373     target_ip->__seq = tswap16(host_ip->__seq);
3374 #endif
3375     unlock_user_struct(target_sd, target_addr, 1);
3376     return 0;
3377 }
3378 
3379 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3380                                                abi_ulong target_addr)
3381 {
3382     struct target_semid64_ds *target_sd;
3383 
3384     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3385         return -TARGET_EFAULT;
3386     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3387         return -TARGET_EFAULT;
3388     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3389     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3390     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3391     unlock_user_struct(target_sd, target_addr, 0);
3392     return 0;
3393 }
3394 
3395 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3396                                                struct semid_ds *host_sd)
3397 {
3398     struct target_semid64_ds *target_sd;
3399 
3400     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3401         return -TARGET_EFAULT;
3402     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3403         return -TARGET_EFAULT;
3404     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3405     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3406     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3407     unlock_user_struct(target_sd, target_addr, 1);
3408     return 0;
3409 }
3410 
3411 struct target_seminfo {
3412     int semmap;
3413     int semmni;
3414     int semmns;
3415     int semmnu;
3416     int semmsl;
3417     int semopm;
3418     int semume;
3419     int semusz;
3420     int semvmx;
3421     int semaem;
3422 };
3423 
3424 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3425                                               struct seminfo *host_seminfo)
3426 {
3427     struct target_seminfo *target_seminfo;
3428     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3429         return -TARGET_EFAULT;
3430     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3431     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3432     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3433     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3434     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3435     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3436     __put_user(host_seminfo->semume, &target_seminfo->semume);
3437     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3438     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3439     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3440     unlock_user_struct(target_seminfo, target_addr, 1);
3441     return 0;
3442 }
3443 
3444 union semun {
3445     int val;
3446     struct semid_ds *buf;
3447     unsigned short *array;
3448     struct seminfo *__buf;
3449 };
3450 
3451 union target_semun {
3452     int val;
3453     abi_ulong buf;
3454     abi_ulong array;
3455     abi_ulong __buf;
3456 };
3457 
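/*
 * The host semun union holds host pointers (or a plain int), whereas
 * target_semun holds the corresponding guest addresses as abi_ulong;
 * do_semctl() picks the member that matches the command and converts it
 * with the helpers that follow.
 */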
3458 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3459                                                abi_ulong target_addr)
3460 {
3461     int nsems;
3462     unsigned short *array;
3463     union semun semun;
3464     struct semid_ds semid_ds;
3465     int i, ret;
3466 
3467     semun.buf = &semid_ds;
3468 
3469     ret = semctl(semid, 0, IPC_STAT, semun);
3470     if (ret == -1)
3471         return get_errno(ret);
3472 
3473     nsems = semid_ds.sem_nsems;
3474 
3475     *host_array = g_try_new(unsigned short, nsems);
3476     if (!*host_array) {
3477         return -TARGET_ENOMEM;
3478     }
3479     array = lock_user(VERIFY_READ, target_addr,
3480                       nsems*sizeof(unsigned short), 1);
3481     if (!array) {
3482         g_free(*host_array);
3483         return -TARGET_EFAULT;
3484     }
3485 
3486     for (i = 0; i < nsems; i++) {
3487         __get_user((*host_array)[i], &array[i]);
3488     }
3489     unlock_user(array, target_addr, 0);
3490 
3491     return 0;
3492 }
3493 
3494 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3495                                                unsigned short **host_array)
3496 {
3497     int nsems;
3498     unsigned short *array;
3499     union semun semun;
3500     struct semid_ds semid_ds;
3501     int i, ret;
3502 
3503     semun.buf = &semid_ds;
3504 
3505     ret = semctl(semid, 0, IPC_STAT, semun);
3506     if (ret == -1)
3507         return get_errno(ret);
3508 
3509     nsems = semid_ds.sem_nsems;
3510 
3511     array = lock_user(VERIFY_WRITE, target_addr,
3512                       nsems*sizeof(unsigned short), 0);
3513     if (!array)
3514         return -TARGET_EFAULT;
3515 
3516     for (i = 0; i < nsems; i++) {
3517         __put_user((*host_array)[i], &array[i]);
3518     }
3519     g_free(*host_array);
3520     unlock_user(array, target_addr, 1);
3521 
3522     return 0;
3523 }
3524 
3525 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3526                                  abi_ulong target_arg)
3527 {
3528     union target_semun target_su = { .buf = target_arg };
3529     union semun arg;
3530     struct semid_ds dsarg;
3531     unsigned short *array = NULL;
3532     struct seminfo seminfo;
3533     abi_long ret = -TARGET_EINVAL;
3534     abi_long err;
3535     cmd &= 0xff;
3536 
3537     switch (cmd) {
3538     case GETVAL:
3539     case SETVAL:
3540         /* In 64 bit cross-endian situations, we will erroneously pick up
3541          * the wrong half of the union for the "val" element.  To rectify
3542          * this, the entire 8-byte structure is byteswapped, followed by
3543          * a swap of the 4 byte val field. In other cases, the data is
3544          * already in proper host byte order. */
3545         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3546             target_su.buf = tswapal(target_su.buf);
3547             arg.val = tswap32(target_su.val);
3548         } else {
3549             arg.val = target_su.val;
3550         }
3551         ret = get_errno(semctl(semid, semnum, cmd, arg));
3552         break;
3553     case GETALL:
3554     case SETALL:
3555         err = target_to_host_semarray(semid, &array, target_su.array);
3556         if (err)
3557             return err;
3558         arg.array = array;
3559         ret = get_errno(semctl(semid, semnum, cmd, arg));
3560         err = host_to_target_semarray(semid, target_su.array, &array);
3561         if (err)
3562             return err;
3563         break;
3564     case IPC_STAT:
3565     case IPC_SET:
3566     case SEM_STAT:
3567         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3568         if (err)
3569             return err;
3570         arg.buf = &dsarg;
3571         ret = get_errno(semctl(semid, semnum, cmd, arg));
3572         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3573         if (err)
3574             return err;
3575         break;
3576     case IPC_INFO:
3577     case SEM_INFO:
3578         arg.__buf = &seminfo;
3579         ret = get_errno(semctl(semid, semnum, cmd, arg));
3580         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3581         if (err)
3582             return err;
3583         break;
3584     case IPC_RMID:
3585     case GETPID:
3586     case GETNCNT:
3587     case GETZCNT:
3588         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3589         break;
3590     }
3591 
3592     return ret;
3593 }
3594 
3595 struct target_sembuf {
3596     unsigned short sem_num;
3597     short sem_op;
3598     short sem_flg;
3599 };
3600 
3601 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3602                                              abi_ulong target_addr,
3603                                              unsigned nsops)
3604 {
3605     struct target_sembuf *target_sembuf;
3606     int i;
3607 
3608     target_sembuf = lock_user(VERIFY_READ, target_addr,
3609                               nsops*sizeof(struct target_sembuf), 1);
3610     if (!target_sembuf)
3611         return -TARGET_EFAULT;
3612 
3613     for (i = 0; i < nsops; i++) {
3614         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3615         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3616         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3617     }
3618 
3619     unlock_user(target_sembuf, target_addr, 0);
3620 
3621     return 0;
3622 }
3623 
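/*
 * do_semop() prefers the dedicated semtimedop syscall (with a NULL
 * timeout) and falls back to the multiplexed ipc syscall on hosts that
 * only provide __NR_ipc.
 */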
3624 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3625 {
3626     struct sembuf sops[nsops];
3627     abi_long ret;
3628 
3629     if (target_to_host_sembuf(sops, ptr, nsops))
3630         return -TARGET_EFAULT;
3631 
3632     ret = -TARGET_ENOSYS;
3633 #ifdef __NR_semtimedop
3634     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3635 #endif
3636 #ifdef __NR_ipc
3637     if (ret == -TARGET_ENOSYS) {
3638         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3639     }
3640 #endif
3641     return ret;
3642 }
3643 
3644 struct target_msqid_ds
3645 {
3646     struct target_ipc_perm msg_perm;
3647     abi_ulong msg_stime;
3648 #if TARGET_ABI_BITS == 32
3649     abi_ulong __unused1;
3650 #endif
3651     abi_ulong msg_rtime;
3652 #if TARGET_ABI_BITS == 32
3653     abi_ulong __unused2;
3654 #endif
3655     abi_ulong msg_ctime;
3656 #if TARGET_ABI_BITS == 32
3657     abi_ulong __unused3;
3658 #endif
3659     abi_ulong __msg_cbytes;
3660     abi_ulong msg_qnum;
3661     abi_ulong msg_qbytes;
3662     abi_ulong msg_lspid;
3663     abi_ulong msg_lrpid;
3664     abi_ulong __unused4;
3665     abi_ulong __unused5;
3666 };
3667 
3668 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3669                                                abi_ulong target_addr)
3670 {
3671     struct target_msqid_ds *target_md;
3672 
3673     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3674         return -TARGET_EFAULT;
3675     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3676         return -TARGET_EFAULT;
3677     host_md->msg_stime = tswapal(target_md->msg_stime);
3678     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3679     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3680     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3681     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3682     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3683     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3684     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3685     unlock_user_struct(target_md, target_addr, 0);
3686     return 0;
3687 }
3688 
3689 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3690                                                struct msqid_ds *host_md)
3691 {
3692     struct target_msqid_ds *target_md;
3693 
3694     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3695         return -TARGET_EFAULT;
3696     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3697         return -TARGET_EFAULT;
3698     target_md->msg_stime = tswapal(host_md->msg_stime);
3699     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3700     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3701     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3702     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3703     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3704     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3705     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3706     unlock_user_struct(target_md, target_addr, 1);
3707     return 0;
3708 }
3709 
3710 struct target_msginfo {
3711     int msgpool;
3712     int msgmap;
3713     int msgmax;
3714     int msgmnb;
3715     int msgmni;
3716     int msgssz;
3717     int msgtql;
3718     unsigned short int msgseg;
3719 };
3720 
3721 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3722                                               struct msginfo *host_msginfo)
3723 {
3724     struct target_msginfo *target_msginfo;
3725     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3726         return -TARGET_EFAULT;
3727     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3728     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3729     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3730     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3731     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3732     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3733     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3734     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3735     unlock_user_struct(target_msginfo, target_addr, 1);
3736     return 0;
3737 }
3738 
3739 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3740 {
3741     struct msqid_ds dsarg;
3742     struct msginfo msginfo;
3743     abi_long ret = -TARGET_EINVAL;
3744 
3745     cmd &= 0xff;
3746 
3747     switch (cmd) {
3748     case IPC_STAT:
3749     case IPC_SET:
3750     case MSG_STAT:
3751         if (target_to_host_msqid_ds(&dsarg,ptr))
3752             return -TARGET_EFAULT;
3753         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3754         if (host_to_target_msqid_ds(ptr,&dsarg))
3755             return -TARGET_EFAULT;
3756         break;
3757     case IPC_RMID:
3758         ret = get_errno(msgctl(msgid, cmd, NULL));
3759         break;
3760     case IPC_INFO:
3761     case MSG_INFO:
3762         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3763         if (host_to_target_msginfo(ptr, &msginfo))
3764             return -TARGET_EFAULT;
3765         break;
3766     }
3767 
3768     return ret;
3769 }
3770 
3771 struct target_msgbuf {
3772     abi_long mtype;
3773     char mtext[1];
3774 };
3775 
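/*
 * msgsnd/msgrcv: the guest message starts with an abi_long mtype followed
 * by the message text, while the host msgbuf uses a native long, so the
 * host buffer is allocated with msgsz + sizeof(long) bytes and mtype is
 * byte-swapped separately from the text.
 */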
3776 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3777                                  ssize_t msgsz, int msgflg)
3778 {
3779     struct target_msgbuf *target_mb;
3780     struct msgbuf *host_mb;
3781     abi_long ret = 0;
3782 
3783     if (msgsz < 0) {
3784         return -TARGET_EINVAL;
3785     }
3786 
3787     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3788         return -TARGET_EFAULT;
3789     host_mb = g_try_malloc(msgsz + sizeof(long));
3790     if (!host_mb) {
3791         unlock_user_struct(target_mb, msgp, 0);
3792         return -TARGET_ENOMEM;
3793     }
3794     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3795     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3796     ret = -TARGET_ENOSYS;
3797 #ifdef __NR_msgsnd
3798     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3799 #endif
3800 #ifdef __NR_ipc
3801     if (ret == -TARGET_ENOSYS) {
3802         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3803                                  host_mb, 0));
3804     }
3805 #endif
3806     g_free(host_mb);
3807     unlock_user_struct(target_mb, msgp, 0);
3808 
3809     return ret;
3810 }
3811 
3812 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3813                                  ssize_t msgsz, abi_long msgtyp,
3814                                  int msgflg)
3815 {
3816     struct target_msgbuf *target_mb;
3817     char *target_mtext;
3818     struct msgbuf *host_mb;
3819     abi_long ret = 0;
3820 
3821     if (msgsz < 0) {
3822         return -TARGET_EINVAL;
3823     }
3824 
3825     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3826         return -TARGET_EFAULT;
3827 
3828     host_mb = g_try_malloc(msgsz + sizeof(long));
3829     if (!host_mb) {
3830         ret = -TARGET_ENOMEM;
3831         goto end;
3832     }
3833     ret = -TARGET_ENOSYS;
3834 #ifdef __NR_msgrcv
3835     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3836 #endif
3837 #ifdef __NR_ipc
3838     if (ret == -TARGET_ENOSYS) {
3839         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3840                         msgflg, host_mb, msgtyp));
3841     }
3842 #endif
3843 
3844     if (ret > 0) {
3845         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3846         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3847         if (!target_mtext) {
3848             ret = -TARGET_EFAULT;
3849             goto end;
3850         }
3851         memcpy(target_mb->mtext, host_mb->mtext, ret);
3852         unlock_user(target_mtext, target_mtext_addr, ret);
3853     }
3854 
3855     target_mb->mtype = tswapal(host_mb->mtype);
3856 
3857 end:
3858     if (target_mb)
3859         unlock_user_struct(target_mb, msgp, 1);
3860     g_free(host_mb);
3861     return ret;
3862 }
3863 
3864 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3865                                                abi_ulong target_addr)
3866 {
3867     struct target_shmid_ds *target_sd;
3868 
3869     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3870         return -TARGET_EFAULT;
3871     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3872         return -TARGET_EFAULT;
3873     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3874     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3875     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3876     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3877     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3878     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3879     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3880     unlock_user_struct(target_sd, target_addr, 0);
3881     return 0;
3882 }
3883 
3884 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3885                                                struct shmid_ds *host_sd)
3886 {
3887     struct target_shmid_ds *target_sd;
3888 
3889     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3890         return -TARGET_EFAULT;
3891     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3892         return -TARGET_EFAULT;
3893     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3894     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3895     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3896     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3897     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3898     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3899     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3900     unlock_user_struct(target_sd, target_addr, 1);
3901     return 0;
3902 }
3903 
3904 struct  target_shminfo {
3905     abi_ulong shmmax;
3906     abi_ulong shmmin;
3907     abi_ulong shmmni;
3908     abi_ulong shmseg;
3909     abi_ulong shmall;
3910 };
3911 
3912 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3913                                               struct shminfo *host_shminfo)
3914 {
3915     struct target_shminfo *target_shminfo;
3916     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3917         return -TARGET_EFAULT;
3918     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3919     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3920     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3921     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3922     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3923     unlock_user_struct(target_shminfo, target_addr, 1);
3924     return 0;
3925 }
3926 
3927 struct target_shm_info {
3928     int used_ids;
3929     abi_ulong shm_tot;
3930     abi_ulong shm_rss;
3931     abi_ulong shm_swp;
3932     abi_ulong swap_attempts;
3933     abi_ulong swap_successes;
3934 };
3935 
3936 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3937                                                struct shm_info *host_shm_info)
3938 {
3939     struct target_shm_info *target_shm_info;
3940     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3941         return -TARGET_EFAULT;
3942     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3943     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3944     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3945     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3946     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3947     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3948     unlock_user_struct(target_shm_info, target_addr, 1);
3949     return 0;
3950 }
3951 
3952 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3953 {
3954     struct shmid_ds dsarg;
3955     struct shminfo shminfo;
3956     struct shm_info shm_info;
3957     abi_long ret = -TARGET_EINVAL;
3958 
3959     cmd &= 0xff;
3960 
3961     switch (cmd) {
3962     case IPC_STAT:
3963     case IPC_SET:
3964     case SHM_STAT:
3965         if (target_to_host_shmid_ds(&dsarg, buf))
3966             return -TARGET_EFAULT;
3967         ret = get_errno(shmctl(shmid, cmd, &dsarg));
3968         if (host_to_target_shmid_ds(buf, &dsarg))
3969             return -TARGET_EFAULT;
3970         break;
3971     case IPC_INFO:
3972         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3973         if (host_to_target_shminfo(buf, &shminfo))
3974             return -TARGET_EFAULT;
3975         break;
3976     case SHM_INFO:
3977         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3978         if (host_to_target_shm_info(buf, &shm_info))
3979             return -TARGET_EFAULT;
3980         break;
3981     case IPC_RMID:
3982     case SHM_LOCK:
3983     case SHM_UNLOCK:
3984         ret = get_errno(shmctl(shmid, cmd, NULL));
3985         break;
3986     }
3987 
3988     return ret;
3989 }
3990 
3991 #ifndef TARGET_FORCE_SHMLBA
3992 /* For most architectures, SHMLBA is the same as the page size;
3993  * some architectures have larger values, in which case they should
3994  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3995  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3996  * and defining its own value for SHMLBA.
3997  *
3998  * The kernel also permits SHMLBA to be set by the architecture to a
3999  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4000  * this means that addresses are rounded to the large size if
4001  * SHM_RND is set but addresses not aligned to that size are not rejected
4002  * as long as they are at least page-aligned. Since the only architecture
4003  * which uses this is ia64 this code doesn't provide for that oddity.
4004  */
4005 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4006 {
4007     return TARGET_PAGE_SIZE;
4008 }
4009 #endif
4010 
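/*
 * do_shmat(): a guest-supplied address must be aligned to the target
 * SHMLBA (or is rounded down when SHM_RND is set); otherwise a free region
 * is found with mmap_find_vma(), aligned to the larger of the host and
 * target SHMLBA so the host shmat() will accept it.  Successful
 * attachments are recorded in shm_regions[] for do_shmdt().
 */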
4011 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4012                                  int shmid, abi_ulong shmaddr, int shmflg)
4013 {
4014     abi_long raddr;
4015     void *host_raddr;
4016     struct shmid_ds shm_info;
4017     int i,ret;
4018     abi_ulong shmlba;
4019 
4020     /* find out the length of the shared memory segment */
4021     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4022     if (is_error(ret)) {
4023         /* can't get length, bail out */
4024         return ret;
4025     }
4026 
4027     shmlba = target_shmlba(cpu_env);
4028 
4029     if (shmaddr & (shmlba - 1)) {
4030         if (shmflg & SHM_RND) {
4031             shmaddr &= ~(shmlba - 1);
4032         } else {
4033             return -TARGET_EINVAL;
4034         }
4035     }
4036     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4037         return -TARGET_EINVAL;
4038     }
4039 
4040     mmap_lock();
4041 
4042     if (shmaddr)
4043         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4044     else {
4045         abi_ulong mmap_start;
4046 
4047         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4048         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4049 
4050         if (mmap_start == -1) {
4051             errno = ENOMEM;
4052             host_raddr = (void *)-1;
4053         } else
4054             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4055     }
4056 
4057     if (host_raddr == (void *)-1) {
4058         mmap_unlock();
4059         return get_errno((long)host_raddr);
4060     }
4061     raddr=h2g((unsigned long)host_raddr);
4062 
4063     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4064                    PAGE_VALID | PAGE_READ |
4065                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4066 
4067     for (i = 0; i < N_SHM_REGIONS; i++) {
4068         if (!shm_regions[i].in_use) {
4069             shm_regions[i].in_use = true;
4070             shm_regions[i].start = raddr;
4071             shm_regions[i].size = shm_info.shm_segsz;
4072             break;
4073         }
4074     }
4075 
4076     mmap_unlock();
4077     return raddr;
4078 
4079 }
4080 
4081 static inline abi_long do_shmdt(abi_ulong shmaddr)
4082 {
4083     int i;
4084     abi_long rv;
4085 
4086     mmap_lock();
4087 
4088     for (i = 0; i < N_SHM_REGIONS; ++i) {
4089         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4090             shm_regions[i].in_use = false;
4091             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4092             break;
4093         }
4094     }
4095     rv = get_errno(shmdt(g2h(shmaddr)));
4096 
4097     mmap_unlock();
4098 
4099     return rv;
4100 }
4101 
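/*
 * Like socketcall, some targets funnel all SysV IPC operations through a
 * single ipc() syscall.  The upper 16 bits of "call" carry an ABI version;
 * version 0 of IPCOP_msgrcv passes its msgp/msgtyp arguments indirectly
 * through a small structure rather than in registers.
 */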
4102 #ifdef TARGET_NR_ipc
4103 /* ??? This only works with linear mappings.  */
4104 /* do_ipc() must return target values and target errnos. */
4105 static abi_long do_ipc(CPUArchState *cpu_env,
4106                        unsigned int call, abi_long first,
4107                        abi_long second, abi_long third,
4108                        abi_long ptr, abi_long fifth)
4109 {
4110     int version;
4111     abi_long ret = 0;
4112 
4113     version = call >> 16;
4114     call &= 0xffff;
4115 
4116     switch (call) {
4117     case IPCOP_semop:
4118         ret = do_semop(first, ptr, second);
4119         break;
4120 
4121     case IPCOP_semget:
4122         ret = get_errno(semget(first, second, third));
4123         break;
4124 
4125     case IPCOP_semctl: {
4126         /* The semun argument to semctl is passed by value, so dereference the
4127          * ptr argument. */
4128         abi_ulong atptr;
4129         get_user_ual(atptr, ptr);
4130         ret = do_semctl(first, second, third, atptr);
4131         break;
4132     }
4133 
4134     case IPCOP_msgget:
4135         ret = get_errno(msgget(first, second));
4136         break;
4137 
4138     case IPCOP_msgsnd:
4139         ret = do_msgsnd(first, ptr, second, third);
4140         break;
4141 
4142     case IPCOP_msgctl:
4143         ret = do_msgctl(first, second, ptr);
4144         break;
4145 
4146     case IPCOP_msgrcv:
4147         switch (version) {
4148         case 0:
4149             {
4150                 struct target_ipc_kludge {
4151                     abi_long msgp;
4152                     abi_long msgtyp;
4153                 } *tmp;
4154 
4155                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4156                     ret = -TARGET_EFAULT;
4157                     break;
4158                 }
4159 
4160                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4161 
4162                 unlock_user_struct(tmp, ptr, 0);
4163                 break;
4164             }
4165         default:
4166             ret = do_msgrcv(first, ptr, second, fifth, third);
4167         }
4168         break;
4169 
4170     case IPCOP_shmat:
4171         switch (version) {
4172         default:
4173         {
4174             abi_ulong raddr;
4175             raddr = do_shmat(cpu_env, first, ptr, second);
4176             if (is_error(raddr))
4177                 return get_errno(raddr);
4178             if (put_user_ual(raddr, third))
4179                 return -TARGET_EFAULT;
4180             break;
4181         }
4182         case 1:
4183             ret = -TARGET_EINVAL;
4184             break;
4185         }
4186         break;
4187     case IPCOP_shmdt:
4188         ret = do_shmdt(ptr);
4189         break;
4190 
4191     case IPCOP_shmget:
4192         /* IPC_* flag values are the same on all linux platforms */
4193         ret = get_errno(shmget(first, second, third));
4194         break;
4195 
4196     /* IPC_* and SHM_* command values are the same on all linux platforms */
4197     case IPCOP_shmctl:
4198         ret = do_shmctl(first, second, ptr);
4199         break;
4200     default:
4201         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4202         ret = -TARGET_ENOSYS;
4203         break;
4204     }
4205     return ret;
4206 }
4207 #endif
4208 
4209 /* kernel structure types definitions */
4210 
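/*
 * syscall_types.h is included twice with different definitions of the
 * STRUCT()/STRUCT_SPECIAL() macros: the first pass builds an enum of
 * STRUCT_* identifiers, the second emits the argtype descriptor arrays
 * (struct_*_def[]) that the thunk code uses to convert ioctl arguments.
 */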
4211 #define STRUCT(name, ...) STRUCT_ ## name,
4212 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4213 enum {
4214 #include "syscall_types.h"
4215 STRUCT_MAX
4216 };
4217 #undef STRUCT
4218 #undef STRUCT_SPECIAL
4219 
4220 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4221 #define STRUCT_SPECIAL(name)
4222 #include "syscall_types.h"
4223 #undef STRUCT
4224 #undef STRUCT_SPECIAL
4225 
4226 typedef struct IOCTLEntry IOCTLEntry;
4227 
4228 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4229                              int fd, int cmd, abi_long arg);
4230 
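/*
 * Each supported ioctl is described by an IOCTLEntry: the target and host
 * command numbers (which may differ in encoding), a human-readable name,
 * the data direction (IOC_R/IOC_W/IOC_RW), an optional custom handler,
 * and a thunk argtype description of the argument so that generic
 * conversion can be applied.
 */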
4231 struct IOCTLEntry {
4232     int target_cmd;
4233     unsigned int host_cmd;
4234     const char *name;
4235     int access;
4236     do_ioctl_fn *do_ioctl;
4237     const argtype arg_type[5];
4238 };
4239 
4240 #define IOC_R 0x0001
4241 #define IOC_W 0x0002
4242 #define IOC_RW (IOC_R | IOC_W)
4243 
4244 #define MAX_STRUCT_SIZE 4096
4245 
4246 #ifdef CONFIG_FIEMAP
4247 /* So fiemap access checks don't overflow on 32 bit systems.
4248  * This is very slightly smaller than the limit imposed by
4249  * the underlying kernel.
4250  */
4251 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4252                             / sizeof(struct fiemap_extent))
4253 
4254 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4255                                        int fd, int cmd, abi_long arg)
4256 {
4257     /* The parameter for this ioctl is a struct fiemap followed
4258      * by an array of struct fiemap_extent whose size is set
4259      * in fiemap->fm_extent_count. The array is filled in by the
4260      * ioctl.
4261      */
4262     int target_size_in, target_size_out;
4263     struct fiemap *fm;
4264     const argtype *arg_type = ie->arg_type;
4265     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4266     void *argptr, *p;
4267     abi_long ret;
4268     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4269     uint32_t outbufsz;
4270     int free_fm = 0;
4271 
4272     assert(arg_type[0] == TYPE_PTR);
4273     assert(ie->access == IOC_RW);
4274     arg_type++;
4275     target_size_in = thunk_type_size(arg_type, 0);
4276     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4277     if (!argptr) {
4278         return -TARGET_EFAULT;
4279     }
4280     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4281     unlock_user(argptr, arg, 0);
4282     fm = (struct fiemap *)buf_temp;
4283     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4284         return -TARGET_EINVAL;
4285     }
4286 
4287     outbufsz = sizeof (*fm) +
4288         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4289 
4290     if (outbufsz > MAX_STRUCT_SIZE) {
4291         /* We can't fit all the extents into the fixed size buffer.
4292          * Allocate one that is large enough and use it instead.
4293          */
4294         fm = g_try_malloc(outbufsz);
4295         if (!fm) {
4296             return -TARGET_ENOMEM;
4297         }
4298         memcpy(fm, buf_temp, sizeof(struct fiemap));
4299         free_fm = 1;
4300     }
4301     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4302     if (!is_error(ret)) {
4303         target_size_out = target_size_in;
4304         /* An extent_count of 0 means we were only counting the extents
4305          * so there are no structs to copy
4306          */
4307         if (fm->fm_extent_count != 0) {
4308             target_size_out += fm->fm_mapped_extents * extent_size;
4309         }
4310         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4311         if (!argptr) {
4312             ret = -TARGET_EFAULT;
4313         } else {
4314             /* Convert the struct fiemap */
4315             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4316             if (fm->fm_extent_count != 0) {
4317                 p = argptr + target_size_in;
4318                 /* ...and then all the struct fiemap_extents */
4319                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4320                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4321                                   THUNK_TARGET);
4322                     p += extent_size;
4323                 }
4324             }
4325             unlock_user(argptr, arg, target_size_out);
4326         }
4327     }
4328     if (free_fm) {
4329         g_free(fm);
4330     }
4331     return ret;
4332 }
4333 #endif
4334 
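/*
 * SIOCGIFCONF: the guest's struct ifconf points at an array of target
 * ifreq structures.  Target and host ifreq sizes can differ, so ifc_len is
 * converted between the two layouts and each returned ifreq entry is
 * converted individually; a NULL guest ifc_buf is simply forwarded as a
 * NULL host buffer.
 */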
4335 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4336                                 int fd, int cmd, abi_long arg)
4337 {
4338     const argtype *arg_type = ie->arg_type;
4339     int target_size;
4340     void *argptr;
4341     int ret;
4342     struct ifconf *host_ifconf;
4343     uint32_t outbufsz;
4344     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4345     int target_ifreq_size;
4346     int nb_ifreq;
4347     int free_buf = 0;
4348     int i;
4349     int target_ifc_len;
4350     abi_long target_ifc_buf;
4351     int host_ifc_len;
4352     char *host_ifc_buf;
4353 
4354     assert(arg_type[0] == TYPE_PTR);
4355     assert(ie->access == IOC_RW);
4356 
4357     arg_type++;
4358     target_size = thunk_type_size(arg_type, 0);
4359 
4360     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4361     if (!argptr)
4362         return -TARGET_EFAULT;
4363     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4364     unlock_user(argptr, arg, 0);
4365 
4366     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4367     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4368     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4369 
4370     if (target_ifc_buf != 0) {
4371         target_ifc_len = host_ifconf->ifc_len;
4372         nb_ifreq = target_ifc_len / target_ifreq_size;
4373         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4374 
4375         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4376         if (outbufsz > MAX_STRUCT_SIZE) {
4377             /*
4378              * We can't fit all the ifreq entries into the fixed size buffer.
4379              * Allocate one that is large enough and use it instead.
4380              */
4381             host_ifconf = malloc(outbufsz);
4382             if (!host_ifconf) {
4383                 return -TARGET_ENOMEM;
4384             }
4385             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4386             free_buf = 1;
4387         }
4388         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4389 
4390         host_ifconf->ifc_len = host_ifc_len;
4391     } else {
4392       host_ifc_buf = NULL;
4393     }
4394     host_ifconf->ifc_buf = host_ifc_buf;
4395 
4396     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4397     if (!is_error(ret)) {
4398         /* convert host ifc_len to target ifc_len */
4399 
4400         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4401         target_ifc_len = nb_ifreq * target_ifreq_size;
4402         host_ifconf->ifc_len = target_ifc_len;
4403 
4404         /* restore target ifc_buf */
4405 
4406         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4407 
4408         /* copy struct ifconf to target user */
4409 
4410         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4411         if (!argptr)
4412             return -TARGET_EFAULT;
4413         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4414         unlock_user(argptr, arg, target_size);
4415 
4416         if (target_ifc_buf != 0) {
4417             /* copy ifreq[] to target user */
4418             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4419             for (i = 0; i < nb_ifreq ; i++) {
4420                 thunk_convert(argptr + i * target_ifreq_size,
4421                               host_ifc_buf + i * sizeof(struct ifreq),
4422                               ifreq_arg_type, THUNK_TARGET);
4423             }
4424             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4425         }
4426     }
4427 
4428     if (free_buf) {
4429         free(host_ifconf);
4430     }
4431 
4432     return ret;
4433 }
4434 
4435 #if defined(CONFIG_USBFS)
4436 #if HOST_LONG_BITS > 64
4437 #error USBDEVFS thunks do not support >64 bit hosts yet.
4438 #endif
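/*
 * Every URB submitted by the guest gets a host-side live_urb wrapper that
 * keeps the guest URB address and locked data buffer alongside the host
 * usbdevfs_urb.  Wrappers live in a hash table keyed by the guest URB
 * address so DISCARDURB can find the host URB again; REAPURB instead
 * recovers the wrapper from the returned host pointer via offsetof().
 */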
4439 struct live_urb {
4440     uint64_t target_urb_adr;
4441     uint64_t target_buf_adr;
4442     char *target_buf_ptr;
4443     struct usbdevfs_urb host_urb;
4444 };
4445 
4446 static GHashTable *usbdevfs_urb_hashtable(void)
4447 {
4448     static GHashTable *urb_hashtable;
4449 
4450     if (!urb_hashtable) {
4451         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4452     }
4453     return urb_hashtable;
4454 }
4455 
4456 static void urb_hashtable_insert(struct live_urb *urb)
4457 {
4458     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4459     g_hash_table_insert(urb_hashtable, urb, urb);
4460 }
4461 
4462 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4463 {
4464     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4465     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4466 }
4467 
4468 static void urb_hashtable_remove(struct live_urb *urb)
4469 {
4470     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4471     g_hash_table_remove(urb_hashtable, urb);
4472 }
4473 
4474 static abi_long
4475 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4476                           int fd, int cmd, abi_long arg)
4477 {
4478     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4479     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4480     struct live_urb *lurb;
4481     void *argptr;
4482     uint64_t hurb;
4483     int target_size;
4484     uintptr_t target_urb_adr;
4485     abi_long ret;
4486 
4487     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4488 
4489     memset(buf_temp, 0, sizeof(uint64_t));
4490     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4491     if (is_error(ret)) {
4492         return ret;
4493     }
4494 
4495     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4496     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4497     if (!lurb->target_urb_adr) {
4498         return -TARGET_EFAULT;
4499     }
4500     urb_hashtable_remove(lurb);
4501     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4502         lurb->host_urb.buffer_length);
4503     lurb->target_buf_ptr = NULL;
4504 
4505     /* restore the guest buffer pointer */
4506     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4507 
4508     /* update the guest urb struct */
4509     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4510     if (!argptr) {
4511         g_free(lurb);
4512         return -TARGET_EFAULT;
4513     }
4514     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4515     unlock_user(argptr, lurb->target_urb_adr, target_size);
4516 
4517     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4518     /* write back the urb handle */
4519     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4520     if (!argptr) {
4521         g_free(lurb);
4522         return -TARGET_EFAULT;
4523     }
4524 
4525     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4526     target_urb_adr = lurb->target_urb_adr;
4527     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4528     unlock_user(argptr, arg, target_size);
4529 
4530     g_free(lurb);
4531     return ret;
4532 }
4533 
4534 static abi_long
4535 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4536                              uint8_t *buf_temp __attribute__((unused)),
4537                              int fd, int cmd, abi_long arg)
4538 {
4539     struct live_urb *lurb;
4540 
4541     /* map target address back to host URB with metadata. */
4542     lurb = urb_hashtable_lookup(arg);
4543     if (!lurb) {
4544         return -TARGET_EFAULT;
4545     }
4546     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4547 }
4548 
4549 static abi_long
4550 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4551                             int fd, int cmd, abi_long arg)
4552 {
4553     const argtype *arg_type = ie->arg_type;
4554     int target_size;
4555     abi_long ret;
4556     void *argptr;
4557     int rw_dir;
4558     struct live_urb *lurb;
4559 
4560     /*
4561      * each submitted URB needs to map to a unique ID for the
4562      * kernel, and that unique ID needs to be a pointer to
4563      * host memory.  hence, we need to malloc for each URB.
4564      * isochronous transfers have a variable length struct.
4565      */
4566     arg_type++;
4567     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4568 
4569     /* construct host copy of urb and metadata */
4570     lurb = g_try_malloc0(sizeof(struct live_urb));
4571     if (!lurb) {
4572         return -TARGET_ENOMEM;
4573     }
4574 
4575     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4576     if (!argptr) {
4577         g_free(lurb);
4578         return -TARGET_EFAULT;
4579     }
4580     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4581     unlock_user(argptr, arg, 0);
4582 
4583     lurb->target_urb_adr = arg;
4584     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4585 
4586     /* buffer space used depends on endpoint type so lock the entire buffer */
4587     /* control type urbs should check the buffer contents for true direction */
4588     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4589     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4590         lurb->host_urb.buffer_length, 1);
4591     if (lurb->target_buf_ptr == NULL) {
4592         g_free(lurb);
4593         return -TARGET_EFAULT;
4594     }
4595 
4596     /* update buffer pointer in host copy */
4597     lurb->host_urb.buffer = lurb->target_buf_ptr;
4598 
4599     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4600     if (is_error(ret)) {
4601         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4602         g_free(lurb);
4603     } else {
4604         urb_hashtable_insert(lurb);
4605     }
4606 
4607     return ret;
4608 }
4609 #endif /* CONFIG_USBFS */
4610 
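/*
 * Device-mapper ioctls carry a variable-sized payload after the fixed
 * struct dm_ioctl (located via data_start/data_size), so generic thunk
 * conversion is not enough: the handler below copies the header into a
 * larger temporary buffer and converts the per-command payload explicitly
 * in both directions.
 */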
4611 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4612                             int cmd, abi_long arg)
4613 {
4614     void *argptr;
4615     struct dm_ioctl *host_dm;
4616     abi_long guest_data;
4617     uint32_t guest_data_size;
4618     int target_size;
4619     const argtype *arg_type = ie->arg_type;
4620     abi_long ret;
4621     void *big_buf = NULL;
4622     char *host_data;
4623 
4624     arg_type++;
4625     target_size = thunk_type_size(arg_type, 0);
4626     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4627     if (!argptr) {
4628         ret = -TARGET_EFAULT;
4629         goto out;
4630     }
4631     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4632     unlock_user(argptr, arg, 0);
4633 
4634     /* buf_temp is too small, so fetch things into a bigger buffer */
4635     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4636     memcpy(big_buf, buf_temp, target_size);
4637     buf_temp = big_buf;
4638     host_dm = big_buf;
4639 
4640     guest_data = arg + host_dm->data_start;
4641     if ((guest_data - arg) < 0) {
4642         ret = -TARGET_EINVAL;
4643         goto out;
4644     }
4645     guest_data_size = host_dm->data_size - host_dm->data_start;
4646     host_data = (char*)host_dm + host_dm->data_start;
4647 
4648     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4649     if (!argptr) {
4650         ret = -TARGET_EFAULT;
4651         goto out;
4652     }
4653 
4654     switch (ie->host_cmd) {
4655     case DM_REMOVE_ALL:
4656     case DM_LIST_DEVICES:
4657     case DM_DEV_CREATE:
4658     case DM_DEV_REMOVE:
4659     case DM_DEV_SUSPEND:
4660     case DM_DEV_STATUS:
4661     case DM_DEV_WAIT:
4662     case DM_TABLE_STATUS:
4663     case DM_TABLE_CLEAR:
4664     case DM_TABLE_DEPS:
4665     case DM_LIST_VERSIONS:
4666         /* no input data */
4667         break;
4668     case DM_DEV_RENAME:
4669     case DM_DEV_SET_GEOMETRY:
4670         /* data contains only strings */
4671         memcpy(host_data, argptr, guest_data_size);
4672         break;
4673     case DM_TARGET_MSG:
4674         memcpy(host_data, argptr, guest_data_size);
4675         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4676         break;
4677     case DM_TABLE_LOAD:
4678     {
4679         void *gspec = argptr;
4680         void *cur_data = host_data;
4681         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4682         int spec_size = thunk_type_size(arg_type, 0);
4683         int i;
4684 
4685         for (i = 0; i < host_dm->target_count; i++) {
4686             struct dm_target_spec *spec = cur_data;
4687             uint32_t next;
4688             int slen;
4689 
4690             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4691             slen = strlen((char*)gspec + spec_size) + 1;
4692             next = spec->next;
4693             spec->next = sizeof(*spec) + slen;
4694             strcpy((char*)&spec[1], gspec + spec_size);
4695             gspec += next;
4696             cur_data += spec->next;
4697         }
4698         break;
4699     }
4700     default:
4701         ret = -TARGET_EINVAL;
4702         unlock_user(argptr, guest_data, 0);
4703         goto out;
4704     }
4705     unlock_user(argptr, guest_data, 0);
4706 
4707     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4708     if (!is_error(ret)) {
4709         guest_data = arg + host_dm->data_start;
4710         guest_data_size = host_dm->data_size - host_dm->data_start;
4711         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4712         switch (ie->host_cmd) {
4713         case DM_REMOVE_ALL:
4714         case DM_DEV_CREATE:
4715         case DM_DEV_REMOVE:
4716         case DM_DEV_RENAME:
4717         case DM_DEV_SUSPEND:
4718         case DM_DEV_STATUS:
4719         case DM_TABLE_LOAD:
4720         case DM_TABLE_CLEAR:
4721         case DM_TARGET_MSG:
4722         case DM_DEV_SET_GEOMETRY:
4723             /* no return data */
4724             break;
4725         case DM_LIST_DEVICES:
4726         {
4727             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4728             uint32_t remaining_data = guest_data_size;
4729             void *cur_data = argptr;
4730             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4731             int nl_size = 12; /* can't use thunk_size due to alignment */
4732 
4733             while (1) {
4734                 uint32_t next = nl->next;
4735                 if (next) {
4736                     nl->next = nl_size + (strlen(nl->name) + 1);
4737                 }
4738                 if (remaining_data < nl->next) {
4739                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4740                     break;
4741                 }
4742                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4743                 strcpy(cur_data + nl_size, nl->name);
4744                 cur_data += nl->next;
4745                 remaining_data -= nl->next;
4746                 if (!next) {
4747                     break;
4748                 }
4749                 nl = (void*)nl + next;
4750             }
4751             break;
4752         }
4753         case DM_DEV_WAIT:
4754         case DM_TABLE_STATUS:
4755         {
4756             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4757             void *cur_data = argptr;
4758             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4759             int spec_size = thunk_type_size(arg_type, 0);
4760             int i;
4761 
4762             for (i = 0; i < host_dm->target_count; i++) {
4763                 uint32_t next = spec->next;
4764                 int slen = strlen((char*)&spec[1]) + 1;
4765                 spec->next = (cur_data - argptr) + spec_size + slen;
4766                 if (guest_data_size < spec->next) {
4767                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4768                     break;
4769                 }
4770                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4771                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4772                 cur_data = argptr + spec->next;
4773                 spec = (void*)host_dm + host_dm->data_start + next;
4774             }
4775             break;
4776         }
4777         case DM_TABLE_DEPS:
4778         {
4779             void *hdata = (void*)host_dm + host_dm->data_start;
4780             int count = *(uint32_t*)hdata;
4781             uint64_t *hdev = hdata + 8;
4782             uint64_t *gdev = argptr + 8;
4783             int i;
4784 
4785             *(uint32_t*)argptr = tswap32(count);
4786             for (i = 0; i < count; i++) {
4787                 *gdev = tswap64(*hdev);
4788                 gdev++;
4789                 hdev++;
4790             }
4791             break;
4792         }
4793         case DM_LIST_VERSIONS:
4794         {
4795             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4796             uint32_t remaining_data = guest_data_size;
4797             void *cur_data = argptr;
4798             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4799             int vers_size = thunk_type_size(arg_type, 0);
4800 
4801             while (1) {
4802                 uint32_t next = vers->next;
4803                 if (next) {
4804                     vers->next = vers_size + (strlen(vers->name) + 1);
4805                 }
4806                 if (remaining_data < vers->next) {
4807                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4808                     break;
4809                 }
4810                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4811                 strcpy(cur_data + vers_size, vers->name);
4812                 cur_data += vers->next;
4813                 remaining_data -= vers->next;
4814                 if (!next) {
4815                     break;
4816                 }
4817                 vers = (void*)vers + next;
4818             }
4819             break;
4820         }
4821         default:
4822             unlock_user(argptr, guest_data, 0);
4823             ret = -TARGET_EINVAL;
4824             goto out;
4825         }
4826         unlock_user(argptr, guest_data, guest_data_size);
4827 
4828         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4829         if (!argptr) {
4830             ret = -TARGET_EFAULT;
4831             goto out;
4832         }
4833         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4834         unlock_user(argptr, arg, target_size);
4835     }
4836 out:
4837     g_free(big_buf);
4838     return ret;
4839 }
4840 
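/*
 * BLKPG passes a struct blkpg_ioctl_arg whose 'data' member points at a
 * struct blkpg_partition in guest memory: convert the outer struct into
 * buf_temp, then fetch the nested partition descriptor into a local copy
 * and point the host ioctl at that instead.
 */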
4841 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4842                                int cmd, abi_long arg)
4843 {
4844     void *argptr;
4845     int target_size;
4846     const argtype *arg_type = ie->arg_type;
4847     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4848     abi_long ret;
4849 
4850     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4851     struct blkpg_partition host_part;
4852 
4853     /* Read and convert blkpg */
4854     arg_type++;
4855     target_size = thunk_type_size(arg_type, 0);
4856     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4857     if (!argptr) {
4858         ret = -TARGET_EFAULT;
4859         goto out;
4860     }
4861     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4862     unlock_user(argptr, arg, 0);
4863 
4864     switch (host_blkpg->op) {
4865     case BLKPG_ADD_PARTITION:
4866     case BLKPG_DEL_PARTITION:
4867         /* payload is struct blkpg_partition */
4868         break;
4869     default:
4870         /* Unknown opcode */
4871         ret = -TARGET_EINVAL;
4872         goto out;
4873     }
4874 
4875     /* Read and convert blkpg->data */
4876     arg = (abi_long)(uintptr_t)host_blkpg->data;
4877     target_size = thunk_type_size(part_arg_type, 0);
4878     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4879     if (!argptr) {
4880         ret = -TARGET_EFAULT;
4881         goto out;
4882     }
4883     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4884     unlock_user(argptr, arg, 0);
4885 
4886     /* Swizzle the data pointer to our local copy and call! */
4887     host_blkpg->data = &host_part;
4888     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4889 
4890 out:
4891     return ret;
4892 }
4893 
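/*
 * Routing-table ioctls (e.g. SIOCADDRT/SIOCDELRT) pass a struct rtentry
 * whose rt_dev field is a pointer to a device-name string.  The generic
 * thunk code cannot follow that pointer, so the struct is converted field
 * by field and the rt_dev string is locked separately, then unlocked once
 * the host ioctl has completed.
 */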
4894 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4895                                 int fd, int cmd, abi_long arg)
4896 {
4897     const argtype *arg_type = ie->arg_type;
4898     const StructEntry *se;
4899     const argtype *field_types;
4900     const int *dst_offsets, *src_offsets;
4901     int target_size;
4902     void *argptr;
4903     abi_ulong *target_rt_dev_ptr = NULL;
4904     unsigned long *host_rt_dev_ptr = NULL;
4905     abi_long ret;
4906     int i;
4907 
4908     assert(ie->access == IOC_W);
4909     assert(*arg_type == TYPE_PTR);
4910     arg_type++;
4911     assert(*arg_type == TYPE_STRUCT);
4912     target_size = thunk_type_size(arg_type, 0);
4913     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4914     if (!argptr) {
4915         return -TARGET_EFAULT;
4916     }
4917     arg_type++;
4918     assert(*arg_type == (int)STRUCT_rtentry);
4919     se = struct_entries + *arg_type++;
4920     assert(se->convert[0] == NULL);
4921     /* convert struct here to be able to catch rt_dev string */
4922     field_types = se->field_types;
4923     dst_offsets = se->field_offsets[THUNK_HOST];
4924     src_offsets = se->field_offsets[THUNK_TARGET];
4925     for (i = 0; i < se->nb_fields; i++) {
4926         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4927             assert(*field_types == TYPE_PTRVOID);
4928             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4929             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4930             if (*target_rt_dev_ptr != 0) {
4931                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4932                                                   tswapal(*target_rt_dev_ptr));
4933                 if (!*host_rt_dev_ptr) {
4934                     unlock_user(argptr, arg, 0);
4935                     return -TARGET_EFAULT;
4936                 }
4937             } else {
4938                 *host_rt_dev_ptr = 0;
4939             }
4940             field_types++;
4941             continue;
4942         }
4943         field_types = thunk_convert(buf_temp + dst_offsets[i],
4944                                     argptr + src_offsets[i],
4945                                     field_types, THUNK_HOST);
4946     }
4947     unlock_user(argptr, arg, 0);
4948 
4949     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4950 
4951     assert(host_rt_dev_ptr != NULL);
4952     assert(target_rt_dev_ptr != NULL);
4953     if (*host_rt_dev_ptr != 0) {
4954         unlock_user((void *)*host_rt_dev_ptr,
4955                     *target_rt_dev_ptr, 0);
4956     }
4957     return ret;
4958 }
4959 
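/*
 * KDSIGACCEPT takes a signal number as its argument, which must be
 * translated from the target's numbering to the host's before the ioctl
 * is issued.
 */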
4960 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4961                                      int fd, int cmd, abi_long arg)
4962 {
4963     int sig = target_to_host_signal(arg);
4964     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4965 }
4966 
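/*
 * SIOCGSTAMP/SIOCGSTAMPNS are performed with the host's native
 * timeval/timespec; the _OLD command variants copy the result back in the
 * target's legacy layout, while the newer variants use the 64-bit time
 * formats.
 */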
4967 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
4968                                     int fd, int cmd, abi_long arg)
4969 {
4970     struct timeval tv;
4971     abi_long ret;
4972 
4973     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
4974     if (is_error(ret)) {
4975         return ret;
4976     }
4977 
4978     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
4979         if (copy_to_user_timeval(arg, &tv)) {
4980             return -TARGET_EFAULT;
4981         }
4982     } else {
4983         if (copy_to_user_timeval64(arg, &tv)) {
4984             return -TARGET_EFAULT;
4985         }
4986     }
4987 
4988     return ret;
4989 }
4990 
4991 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
4992                                       int fd, int cmd, abi_long arg)
4993 {
4994     struct timespec ts;
4995     abi_long ret;
4996 
4997     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
4998     if (is_error(ret)) {
4999         return ret;
5000     }
5001 
5002     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5003         if (host_to_target_timespec(arg, &ts)) {
5004             return -TARGET_EFAULT;
5005         }
5006     } else {
5007         if (host_to_target_timespec64(arg, &ts)) {
5008             return -TARGET_EFAULT;
5009         }
5010     }
5011 
5012     return ret;
5013 }
5014 
5015 #ifdef TIOCGPTPEER
5016 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5017                                      int fd, int cmd, abi_long arg)
5018 {
5019     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5020     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5021 }
5022 #endif
5023 
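/*
 * The ioctl table is generated from ioctls.h.  IOCTL() describes a command
 * whose argument can be converted generically from its argtype description,
 * IOCTL_SPECIAL() routes the command through a custom do_ioctl_*() helper,
 * and IOCTL_IGNORE() registers the name but leaves host_cmd at 0 so that
 * do_ioctl() rejects it with -TARGET_ENOSYS instead of logging it as
 * unsupported.  Purely for illustration (the real entries live in
 * ioctls.h), a generic entry such as
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands to
 *
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 */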
5024 static IOCTLEntry ioctl_entries[] = {
5025 #define IOCTL(cmd, access, ...) \
5026     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5027 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5028     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5029 #define IOCTL_IGNORE(cmd) \
5030     { TARGET_ ## cmd, 0, #cmd },
5031 #include "ioctls.h"
5032     { 0, 0, },
5033 };
5034 
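/*
 * Generic ioctl dispatch: look the target command up in ioctl_entries,
 * hand it to a custom handler if one is registered, and otherwise let the
 * thunk machinery convert the argument according to the entry's access
 * mode (IOC_R converts on return, IOC_W on entry, IOC_RW both ways).
 */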
5035 /* ??? Implement proper locking for ioctls.  */
5036 /* do_ioctl() must return target values and target errnos. */
5037 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5038 {
5039     const IOCTLEntry *ie;
5040     const argtype *arg_type;
5041     abi_long ret;
5042     uint8_t buf_temp[MAX_STRUCT_SIZE];
5043     int target_size;
5044     void *argptr;
5045 
5046     ie = ioctl_entries;
5047     for(;;) {
5048         if (ie->target_cmd == 0) {
5049             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5050             return -TARGET_ENOSYS;
5051         }
5052         if (ie->target_cmd == cmd)
5053             break;
5054         ie++;
5055     }
5056     arg_type = ie->arg_type;
5057     if (ie->do_ioctl) {
5058         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5059     } else if (!ie->host_cmd) {
5060         /* Some architectures define BSD ioctls in their headers
5061            that are not implemented in Linux.  */
5062         return -TARGET_ENOSYS;
5063     }
5064 
5065     switch(arg_type[0]) {
5066     case TYPE_NULL:
5067         /* no argument */
5068         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5069         break;
5070     case TYPE_PTRVOID:
5071     case TYPE_INT:
5072         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5073         break;
5074     case TYPE_PTR:
5075         arg_type++;
5076         target_size = thunk_type_size(arg_type, 0);
5077         switch(ie->access) {
5078         case IOC_R:
5079             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5080             if (!is_error(ret)) {
5081                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5082                 if (!argptr)
5083                     return -TARGET_EFAULT;
5084                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5085                 unlock_user(argptr, arg, target_size);
5086             }
5087             break;
5088         case IOC_W:
5089             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5090             if (!argptr)
5091                 return -TARGET_EFAULT;
5092             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5093             unlock_user(argptr, arg, 0);
5094             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5095             break;
5096         default:
5097         case IOC_RW:
5098             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5099             if (!argptr)
5100                 return -TARGET_EFAULT;
5101             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5102             unlock_user(argptr, arg, 0);
5103             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5104             if (!is_error(ret)) {
5105                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5106                 if (!argptr)
5107                     return -TARGET_EFAULT;
5108                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5109                 unlock_user(argptr, arg, target_size);
5110             }
5111             break;
5112         }
5113         break;
5114     default:
5115         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5116                  (long)cmd, arg_type[0]);
5117         ret = -TARGET_ENOSYS;
5118         break;
5119     }
5120     return ret;
5121 }
5122 
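/*
 * Each bitmask_transtbl row has the form { target_mask, target_bits,
 * host_mask, host_bits }: when the masked target value equals target_bits,
 * the corresponding host_bits are set in the host value (and vice versa in
 * the host-to-target direction).  Each table ends with an all-zero row.
 */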
5123 static const bitmask_transtbl iflag_tbl[] = {
5124         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5125         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5126         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5127         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5128         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5129         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5130         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5131         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5132         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5133         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5134         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5135         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5136         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5137         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5138         { 0, 0, 0, 0 }
5139 };
5140 
5141 static const bitmask_transtbl oflag_tbl[] = {
5142 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5143 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5144 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5145 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5146 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5147 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5148 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5149 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5150 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5151 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5152 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5153 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5154 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5155 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5156 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5157 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5158 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5159 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5160 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5161 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5162 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5163 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5164 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5165 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5166 	{ 0, 0, 0, 0 }
5167 };
5168 
5169 static const bitmask_transtbl cflag_tbl[] = {
5170 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5171 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5172 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5173 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5174 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5175 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5176 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5177 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5178 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5179 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5180 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5181 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5182 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5183 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5184 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5185 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5186 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5187 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5188 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5189 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5190 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5191 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5192 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5193 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5194 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5195 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5196 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5197 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5198 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5199 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5200 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5201 	{ 0, 0, 0, 0 }
5202 };
5203 
5204 static const bitmask_transtbl lflag_tbl[] = {
5205 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5206 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5207 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5208 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5209 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5210 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5211 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5212 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5213 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5214 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5215 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5216 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5217 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5218 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5219 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5220 	{ 0, 0, 0, 0 }
5221 };
5222 
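/*
 * The termios flag words are translated through the tables above, but the
 * c_cc control characters are indexed differently on target and host, so
 * each entry is copied across explicitly by name.
 */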
5223 static void target_to_host_termios (void *dst, const void *src)
5224 {
5225     struct host_termios *host = dst;
5226     const struct target_termios *target = src;
5227 
5228     host->c_iflag =
5229         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5230     host->c_oflag =
5231         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5232     host->c_cflag =
5233         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5234     host->c_lflag =
5235         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5236     host->c_line = target->c_line;
5237 
5238     memset(host->c_cc, 0, sizeof(host->c_cc));
5239     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5240     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5241     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5242     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5243     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5244     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5245     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5246     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5247     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5248     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5249     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5250     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5251     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5252     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5253     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5254     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5255     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5256 }
5257 
5258 static void host_to_target_termios (void *dst, const void *src)
5259 {
5260     struct target_termios *target = dst;
5261     const struct host_termios *host = src;
5262 
5263     target->c_iflag =
5264         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5265     target->c_oflag =
5266         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5267     target->c_cflag =
5268         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5269     target->c_lflag =
5270         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5271     target->c_line = host->c_line;
5272 
5273     memset(target->c_cc, 0, sizeof(target->c_cc));
5274     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5275     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5276     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5277     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5278     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5279     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5280     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5281     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5282     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5283     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5284     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5285     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5286     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5287     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5288     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5289     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5290     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5291 }
5292 
5293 static const StructEntry struct_termios_def = {
5294     .convert = { host_to_target_termios, target_to_host_termios },
5295     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5296     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5297 };
5298 
5299 static bitmask_transtbl mmap_flags_tbl[] = {
5300     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5301     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5302     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5303     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5304       MAP_ANONYMOUS, MAP_ANONYMOUS },
5305     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5306       MAP_GROWSDOWN, MAP_GROWSDOWN },
5307     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5308       MAP_DENYWRITE, MAP_DENYWRITE },
5309     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5310       MAP_EXECUTABLE, MAP_EXECUTABLE },
5311     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5312     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5313       MAP_NORESERVE, MAP_NORESERVE },
5314     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5315     /* MAP_STACK had been ignored by the kernel for quite some time.
5316        Recognize it for the target insofar as we do not want to pass
5317        it through to the host.  */
5318     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5319     { 0, 0, 0, 0 }
5320 };
5321 
5322 #if defined(TARGET_I386)
5323 
5324 /* NOTE: there is really only one LDT shared by all the threads */
5325 static uint8_t *ldt_table;
5326 
5327 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5328 {
5329     int size;
5330     void *p;
5331 
5332     if (!ldt_table)
5333         return 0;
5334     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5335     if (size > bytecount)
5336         size = bytecount;
5337     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5338     if (!p)
5339         return -TARGET_EFAULT;
5340     /* ??? Should this be byteswapped?  */
5341     memcpy(p, ldt_table, size);
5342     unlock_user(p, ptr, size);
5343     return size;
5344 }
5345 
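/*
 * write_ldt() mirrors the kernel's modify_ldt() handling: it validates the
 * target descriptor, lazily allocates the guest-visible LDT backing store
 * on first use, and packs the base/limit/flag bits into the two 32-bit
 * halves of an x86 segment descriptor before installing the entry.
 */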
5346 /* XXX: add locking support */
5347 static abi_long write_ldt(CPUX86State *env,
5348                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5349 {
5350     struct target_modify_ldt_ldt_s ldt_info;
5351     struct target_modify_ldt_ldt_s *target_ldt_info;
5352     int seg_32bit, contents, read_exec_only, limit_in_pages;
5353     int seg_not_present, useable, lm;
5354     uint32_t *lp, entry_1, entry_2;
5355 
5356     if (bytecount != sizeof(ldt_info))
5357         return -TARGET_EINVAL;
5358     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5359         return -TARGET_EFAULT;
5360     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5361     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5362     ldt_info.limit = tswap32(target_ldt_info->limit);
5363     ldt_info.flags = tswap32(target_ldt_info->flags);
5364     unlock_user_struct(target_ldt_info, ptr, 0);
5365 
5366     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5367         return -TARGET_EINVAL;
5368     seg_32bit = ldt_info.flags & 1;
5369     contents = (ldt_info.flags >> 1) & 3;
5370     read_exec_only = (ldt_info.flags >> 3) & 1;
5371     limit_in_pages = (ldt_info.flags >> 4) & 1;
5372     seg_not_present = (ldt_info.flags >> 5) & 1;
5373     useable = (ldt_info.flags >> 6) & 1;
5374 #ifdef TARGET_ABI32
5375     lm = 0;
5376 #else
5377     lm = (ldt_info.flags >> 7) & 1;
5378 #endif
5379     if (contents == 3) {
5380         if (oldmode)
5381             return -TARGET_EINVAL;
5382         if (seg_not_present == 0)
5383             return -TARGET_EINVAL;
5384     }
5385     /* allocate the LDT */
5386     if (!ldt_table) {
5387         env->ldt.base = target_mmap(0,
5388                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5389                                     PROT_READ|PROT_WRITE,
5390                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5391         if (env->ldt.base == -1)
5392             return -TARGET_ENOMEM;
5393         memset(g2h(env->ldt.base), 0,
5394                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5395         env->ldt.limit = 0xffff;
5396         ldt_table = g2h(env->ldt.base);
5397     }
5398 
5399     /* NOTE: same code as Linux kernel */
5400     /* Allow LDTs to be cleared by the user. */
5401     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5402         if (oldmode ||
5403             (contents == 0		&&
5404              read_exec_only == 1	&&
5405              seg_32bit == 0		&&
5406              limit_in_pages == 0	&&
5407              seg_not_present == 1	&&
5408              useable == 0 )) {
5409             entry_1 = 0;
5410             entry_2 = 0;
5411             goto install;
5412         }
5413     }
5414 
5415     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5416         (ldt_info.limit & 0x0ffff);
5417     entry_2 = (ldt_info.base_addr & 0xff000000) |
5418         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5419         (ldt_info.limit & 0xf0000) |
5420         ((read_exec_only ^ 1) << 9) |
5421         (contents << 10) |
5422         ((seg_not_present ^ 1) << 15) |
5423         (seg_32bit << 22) |
5424         (limit_in_pages << 23) |
5425         (lm << 21) |
5426         0x7000;
5427     if (!oldmode)
5428         entry_2 |= (useable << 20);
5429 
5430     /* Install the new entry ...  */
5431 install:
5432     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5433     lp[0] = tswap32(entry_1);
5434     lp[1] = tswap32(entry_2);
5435     return 0;
5436 }
5437 
5438 /* specific and weird i386 syscalls */
5439 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5440                               unsigned long bytecount)
5441 {
5442     abi_long ret;
5443 
5444     switch (func) {
5445     case 0:
5446         ret = read_ldt(ptr, bytecount);
5447         break;
5448     case 1:
5449         ret = write_ldt(env, ptr, bytecount, 1);
5450         break;
5451     case 0x11:
5452         ret = write_ldt(env, ptr, bytecount, 0);
5453         break;
5454     default:
5455         ret = -TARGET_ENOSYS;
5456         break;
5457     }
5458     return ret;
5459 }
5460 
5461 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5462 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5463 {
5464     uint64_t *gdt_table = g2h(env->gdt.base);
5465     struct target_modify_ldt_ldt_s ldt_info;
5466     struct target_modify_ldt_ldt_s *target_ldt_info;
5467     int seg_32bit, contents, read_exec_only, limit_in_pages;
5468     int seg_not_present, useable, lm;
5469     uint32_t *lp, entry_1, entry_2;
5470     int i;
5471 
5472     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5473     if (!target_ldt_info)
5474         return -TARGET_EFAULT;
5475     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5476     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5477     ldt_info.limit = tswap32(target_ldt_info->limit);
5478     ldt_info.flags = tswap32(target_ldt_info->flags);
5479     if (ldt_info.entry_number == -1) {
5480         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5481             if (gdt_table[i] == 0) {
5482                 ldt_info.entry_number = i;
5483                 target_ldt_info->entry_number = tswap32(i);
5484                 break;
5485             }
5486         }
5487     }
5488     unlock_user_struct(target_ldt_info, ptr, 1);
5489 
5490     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5491         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5492            return -TARGET_EINVAL;
5493     seg_32bit = ldt_info.flags & 1;
5494     contents = (ldt_info.flags >> 1) & 3;
5495     read_exec_only = (ldt_info.flags >> 3) & 1;
5496     limit_in_pages = (ldt_info.flags >> 4) & 1;
5497     seg_not_present = (ldt_info.flags >> 5) & 1;
5498     useable = (ldt_info.flags >> 6) & 1;
5499 #ifdef TARGET_ABI32
5500     lm = 0;
5501 #else
5502     lm = (ldt_info.flags >> 7) & 1;
5503 #endif
5504 
5505     if (contents == 3) {
5506         if (seg_not_present == 0)
5507             return -TARGET_EINVAL;
5508     }
5509 
5510     /* NOTE: same code as Linux kernel */
5511     /* Allow LDTs to be cleared by the user. */
5512     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5513         if ((contents == 0             &&
5514              read_exec_only == 1       &&
5515              seg_32bit == 0            &&
5516              limit_in_pages == 0       &&
5517              seg_not_present == 1      &&
5518              useable == 0 )) {
5519             entry_1 = 0;
5520             entry_2 = 0;
5521             goto install;
5522         }
5523     }
5524 
5525     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5526         (ldt_info.limit & 0x0ffff);
5527     entry_2 = (ldt_info.base_addr & 0xff000000) |
5528         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5529         (ldt_info.limit & 0xf0000) |
5530         ((read_exec_only ^ 1) << 9) |
5531         (contents << 10) |
5532         ((seg_not_present ^ 1) << 15) |
5533         (seg_32bit << 22) |
5534         (limit_in_pages << 23) |
5535         (useable << 20) |
5536         (lm << 21) |
5537         0x7000;
5538 
5539     /* Install the new entry ...  */
5540 install:
5541     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5542     lp[0] = tswap32(entry_1);
5543     lp[1] = tswap32(entry_2);
5544     return 0;
5545 }
5546 
5547 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5548 {
5549     struct target_modify_ldt_ldt_s *target_ldt_info;
5550     uint64_t *gdt_table = g2h(env->gdt.base);
5551     uint32_t base_addr, limit, flags;
5552     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5553     int seg_not_present, useable, lm;
5554     uint32_t *lp, entry_1, entry_2;
5555 
5556     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5557     if (!target_ldt_info)
5558         return -TARGET_EFAULT;
5559     idx = tswap32(target_ldt_info->entry_number);
5560     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5561         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5562         unlock_user_struct(target_ldt_info, ptr, 1);
5563         return -TARGET_EINVAL;
5564     }
5565     lp = (uint32_t *)(gdt_table + idx);
5566     entry_1 = tswap32(lp[0]);
5567     entry_2 = tswap32(lp[1]);
5568 
5569     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5570     contents = (entry_2 >> 10) & 3;
5571     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5572     seg_32bit = (entry_2 >> 22) & 1;
5573     limit_in_pages = (entry_2 >> 23) & 1;
5574     useable = (entry_2 >> 20) & 1;
5575 #ifdef TARGET_ABI32
5576     lm = 0;
5577 #else
5578     lm = (entry_2 >> 21) & 1;
5579 #endif
5580     flags = (seg_32bit << 0) | (contents << 1) |
5581         (read_exec_only << 3) | (limit_in_pages << 4) |
5582         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5583     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5584     base_addr = (entry_1 >> 16) |
5585         (entry_2 & 0xff000000) |
5586         ((entry_2 & 0xff) << 16);
5587     target_ldt_info->base_addr = tswapal(base_addr);
5588     target_ldt_info->limit = tswap32(limit);
5589     target_ldt_info->flags = tswap32(flags);
5590     unlock_user_struct(target_ldt_info, ptr, 1);
5591     return 0;
5592 }
5593 #endif /* TARGET_I386 && TARGET_ABI32 */
5594 
5595 #ifndef TARGET_ABI32
5596 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5597 {
5598     abi_long ret = 0;
5599     abi_ulong val;
5600     int idx;
5601 
5602     switch(code) {
5603     case TARGET_ARCH_SET_GS:
5604     case TARGET_ARCH_SET_FS:
5605         if (code == TARGET_ARCH_SET_GS)
5606             idx = R_GS;
5607         else
5608             idx = R_FS;
5609         cpu_x86_load_seg(env, idx, 0);
5610         env->segs[idx].base = addr;
5611         break;
5612     case TARGET_ARCH_GET_GS:
5613     case TARGET_ARCH_GET_FS:
5614         if (code == TARGET_ARCH_GET_GS)
5615             idx = R_GS;
5616         else
5617             idx = R_FS;
5618         val = env->segs[idx].base;
5619         if (put_user(val, addr, abi_ulong))
5620             ret = -TARGET_EFAULT;
5621         break;
5622     default:
5623         ret = -TARGET_EINVAL;
5624         break;
5625     }
5626     return ret;
5627 }
5628 #endif
5629 
5630 #endif /* defined(TARGET_I386) */
5631 
5632 #define NEW_STACK_SIZE 0x40000
5633 
5634 
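/*
 * Bookkeeping shared between do_fork() and a newly created guest thread:
 * the child publishes its TID and signals info.cond once it has registered
 * itself, while the parent holds clone_lock until the TLS state is fully
 * set up, so the child only enters cpu_loop() after both sides have
 * finished initializing.
 */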
5635 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5636 typedef struct {
5637     CPUArchState *env;
5638     pthread_mutex_t mutex;
5639     pthread_cond_t cond;
5640     pthread_t thread;
5641     uint32_t tid;
5642     abi_ulong child_tidptr;
5643     abi_ulong parent_tidptr;
5644     sigset_t sigmask;
5645 } new_thread_info;
5646 
5647 static void *clone_func(void *arg)
5648 {
5649     new_thread_info *info = arg;
5650     CPUArchState *env;
5651     CPUState *cpu;
5652     TaskState *ts;
5653 
5654     rcu_register_thread();
5655     tcg_register_thread();
5656     env = info->env;
5657     cpu = env_cpu(env);
5658     thread_cpu = cpu;
5659     ts = (TaskState *)cpu->opaque;
5660     info->tid = sys_gettid();
5661     task_settid(ts);
5662     if (info->child_tidptr)
5663         put_user_u32(info->tid, info->child_tidptr);
5664     if (info->parent_tidptr)
5665         put_user_u32(info->tid, info->parent_tidptr);
5666     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5667     /* Enable signals.  */
5668     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5669     /* Signal to the parent that we're ready.  */
5670     pthread_mutex_lock(&info->mutex);
5671     pthread_cond_broadcast(&info->cond);
5672     pthread_mutex_unlock(&info->mutex);
5673     /* Wait until the parent has finished initializing the tls state.  */
5674     pthread_mutex_lock(&clone_lock);
5675     pthread_mutex_unlock(&clone_lock);
5676     cpu_loop(env);
5677     /* never exits */
5678     return NULL;
5679 }
5680 
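/*
 * Guest clone()/fork() emulation: with CLONE_VM the new task becomes a host
 * pthread sharing this process's address space; without it the request is
 * treated as a fork and handled with a real host fork().
 */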
5681 /* do_fork() must return host values and target errnos (unlike most
5682    do_*() functions). */
5683 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5684                    abi_ulong parent_tidptr, target_ulong newtls,
5685                    abi_ulong child_tidptr)
5686 {
5687     CPUState *cpu = env_cpu(env);
5688     int ret;
5689     TaskState *ts;
5690     CPUState *new_cpu;
5691     CPUArchState *new_env;
5692     sigset_t sigmask;
5693 
5694     flags &= ~CLONE_IGNORED_FLAGS;
5695 
5696     /* Emulate vfork() with fork() */
5697     if (flags & CLONE_VFORK)
5698         flags &= ~(CLONE_VFORK | CLONE_VM);
5699 
5700     if (flags & CLONE_VM) {
5701         TaskState *parent_ts = (TaskState *)cpu->opaque;
5702         new_thread_info info;
5703         pthread_attr_t attr;
5704 
5705         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5706             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5707             return -TARGET_EINVAL;
5708         }
5709 
5710         ts = g_new0(TaskState, 1);
5711         init_task_state(ts);
5712 
5713         /* Grab a mutex so that thread setup appears atomic.  */
5714         pthread_mutex_lock(&clone_lock);
5715 
5716         /* we create a new CPU instance. */
5717         new_env = cpu_copy(env);
5718         /* Init regs that differ from the parent.  */
5719         cpu_clone_regs(new_env, newsp);
5720         new_cpu = env_cpu(new_env);
5721         new_cpu->opaque = ts;
5722         ts->bprm = parent_ts->bprm;
5723         ts->info = parent_ts->info;
5724         ts->signal_mask = parent_ts->signal_mask;
5725 
5726         if (flags & CLONE_CHILD_CLEARTID) {
5727             ts->child_tidptr = child_tidptr;
5728         }
5729 
5730         if (flags & CLONE_SETTLS) {
5731             cpu_set_tls (new_env, newtls);
5732         }
5733 
5734         memset(&info, 0, sizeof(info));
5735         pthread_mutex_init(&info.mutex, NULL);
5736         pthread_mutex_lock(&info.mutex);
5737         pthread_cond_init(&info.cond, NULL);
5738         info.env = new_env;
5739         if (flags & CLONE_CHILD_SETTID) {
5740             info.child_tidptr = child_tidptr;
5741         }
5742         if (flags & CLONE_PARENT_SETTID) {
5743             info.parent_tidptr = parent_tidptr;
5744         }
5745 
5746         ret = pthread_attr_init(&attr);
5747         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5748         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5749         /* It is not safe to deliver signals until the child has finished
5750            initializing, so temporarily block all signals.  */
5751         sigfillset(&sigmask);
5752         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5753         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5754 
5755         /* If this is our first additional thread, we need to ensure we
5756          * generate code for parallel execution and flush old translations.
5757          */
5758         if (!parallel_cpus) {
5759             parallel_cpus = true;
5760             tb_flush(cpu);
5761         }
5762 
5763         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5764         /* TODO: Free new CPU state if thread creation failed.  */
5765 
5766         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5767         pthread_attr_destroy(&attr);
5768         if (ret == 0) {
5769             /* Wait for the child to initialize.  */
5770             pthread_cond_wait(&info.cond, &info.mutex);
5771             ret = info.tid;
5772         } else {
5773             ret = -1;
5774         }
5775         pthread_mutex_unlock(&info.mutex);
5776         pthread_cond_destroy(&info.cond);
5777         pthread_mutex_destroy(&info.mutex);
5778         pthread_mutex_unlock(&clone_lock);
5779     } else {
5780         /* if there is no CLONE_VM, we consider it a fork */
5781         if (flags & CLONE_INVALID_FORK_FLAGS) {
5782             return -TARGET_EINVAL;
5783         }
5784 
5785         /* We can't support custom termination signals */
5786         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5787             return -TARGET_EINVAL;
5788         }
5789 
5790         if (block_signals()) {
5791             return -TARGET_ERESTARTSYS;
5792         }
5793 
5794         fork_start();
5795         ret = fork();
5796         if (ret == 0) {
5797             /* Child Process.  */
5798             cpu_clone_regs(env, newsp);
5799             fork_end(1);
5800             /* There is a race condition here.  The parent process could
5801                theoretically read the TID in the child process before the child
5802                tid is set.  This would require using either ptrace
5803                (not implemented) or having *_tidptr point at a shared memory
5804                mapping.  We can't repeat the spinlock hack used above because
5805                the child process gets its own copy of the lock.  */
5806             if (flags & CLONE_CHILD_SETTID)
5807                 put_user_u32(sys_gettid(), child_tidptr);
5808             if (flags & CLONE_PARENT_SETTID)
5809                 put_user_u32(sys_gettid(), parent_tidptr);
5810             ts = (TaskState *)cpu->opaque;
5811             if (flags & CLONE_SETTLS)
5812                 cpu_set_tls (env, newtls);
5813             if (flags & CLONE_CHILD_CLEARTID)
5814                 ts->child_tidptr = child_tidptr;
5815         } else {
5816             fork_end(0);
5817         }
5818     }
5819     return ret;
5820 }
5821 
5822 /* warning: doesn't handle Linux-specific flags... */
5823 static int target_to_host_fcntl_cmd(int cmd)
5824 {
5825     int ret;
5826 
5827     switch(cmd) {
5828     case TARGET_F_DUPFD:
5829     case TARGET_F_GETFD:
5830     case TARGET_F_SETFD:
5831     case TARGET_F_GETFL:
5832     case TARGET_F_SETFL:
5833         ret = cmd;
5834         break;
5835     case TARGET_F_GETLK:
5836         ret = F_GETLK64;
5837         break;
5838     case TARGET_F_SETLK:
5839         ret = F_SETLK64;
5840         break;
5841     case TARGET_F_SETLKW:
5842         ret = F_SETLKW64;
5843         break;
5844     case TARGET_F_GETOWN:
5845         ret = F_GETOWN;
5846         break;
5847     case TARGET_F_SETOWN:
5848         ret = F_SETOWN;
5849         break;
5850     case TARGET_F_GETSIG:
5851         ret = F_GETSIG;
5852         break;
5853     case TARGET_F_SETSIG:
5854         ret = F_SETSIG;
5855         break;
5856 #if TARGET_ABI_BITS == 32
5857     case TARGET_F_GETLK64:
5858         ret = F_GETLK64;
5859         break;
5860     case TARGET_F_SETLK64:
5861         ret = F_SETLK64;
5862         break;
5863     case TARGET_F_SETLKW64:
5864         ret = F_SETLKW64;
5865         break;
5866 #endif
5867     case TARGET_F_SETLEASE:
5868         ret = F_SETLEASE;
5869         break;
5870     case TARGET_F_GETLEASE:
5871         ret = F_GETLEASE;
5872         break;
5873 #ifdef F_DUPFD_CLOEXEC
5874     case TARGET_F_DUPFD_CLOEXEC:
5875         ret = F_DUPFD_CLOEXEC;
5876         break;
5877 #endif
5878     case TARGET_F_NOTIFY:
5879         ret = F_NOTIFY;
5880         break;
5881 #ifdef F_GETOWN_EX
5882     case TARGET_F_GETOWN_EX:
5883         ret = F_GETOWN_EX;
5884         break;
5885 #endif
5886 #ifdef F_SETOWN_EX
5887     case TARGET_F_SETOWN_EX:
5888         ret = F_SETOWN_EX;
5889         break;
5890 #endif
5891 #ifdef F_SETPIPE_SZ
5892     case TARGET_F_SETPIPE_SZ:
5893         ret = F_SETPIPE_SZ;
5894         break;
5895     case TARGET_F_GETPIPE_SZ:
5896         ret = F_GETPIPE_SZ;
5897         break;
5898 #endif
5899     default:
5900         ret = -TARGET_EINVAL;
5901         break;
5902     }
5903 
5904 #if defined(__powerpc64__)
5905     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
5906      * not supported by the kernel. The glibc fcntl call actually adjusts
5907      * them to 5, 6 and 7 before making the syscall(). Since we make the
5908      * syscall directly, adjust to what is supported by the kernel.
5909      */
5910     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5911         ret -= F_GETLK64 - 5;
5912     }
5913 #endif
5914 
5915     return ret;
5916 }
5917 
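/*
 * F_RDLCK and friends are converted with a small X-macro: FLOCK_TRANSTBL
 * expands to a switch whose cases are generated by TRANSTBL_CONVERT(), so
 * the same list serves both conversion directions below.
 */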
5918 #define FLOCK_TRANSTBL \
5919     switch (type) { \
5920     TRANSTBL_CONVERT(F_RDLCK); \
5921     TRANSTBL_CONVERT(F_WRLCK); \
5922     TRANSTBL_CONVERT(F_UNLCK); \
5923     TRANSTBL_CONVERT(F_EXLCK); \
5924     TRANSTBL_CONVERT(F_SHLCK); \
5925     }
5926 
5927 static int target_to_host_flock(int type)
5928 {
5929 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5930     FLOCK_TRANSTBL
5931 #undef  TRANSTBL_CONVERT
5932     return -TARGET_EINVAL;
5933 }
5934 
5935 static int host_to_target_flock(int type)
5936 {
5937 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5938     FLOCK_TRANSTBL
5939 #undef  TRANSTBL_CONVERT
5940     /* if we don't know how to convert the value coming
5941      * from the host, copy it to the target as-is
5942      */
5943     return type;
5944 }
5945 
5946 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5947                                             abi_ulong target_flock_addr)
5948 {
5949     struct target_flock *target_fl;
5950     int l_type;
5951 
5952     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5953         return -TARGET_EFAULT;
5954     }
5955 
5956     __get_user(l_type, &target_fl->l_type);
5957     l_type = target_to_host_flock(l_type);
5958     if (l_type < 0) {
5959         return l_type;
5960     }
5961     fl->l_type = l_type;
5962     __get_user(fl->l_whence, &target_fl->l_whence);
5963     __get_user(fl->l_start, &target_fl->l_start);
5964     __get_user(fl->l_len, &target_fl->l_len);
5965     __get_user(fl->l_pid, &target_fl->l_pid);
5966     unlock_user_struct(target_fl, target_flock_addr, 0);
5967     return 0;
5968 }
5969 
5970 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5971                                           const struct flock64 *fl)
5972 {
5973     struct target_flock *target_fl;
5974     short l_type;
5975 
5976     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5977         return -TARGET_EFAULT;
5978     }
5979 
5980     l_type = host_to_target_flock(fl->l_type);
5981     __put_user(l_type, &target_fl->l_type);
5982     __put_user(fl->l_whence, &target_fl->l_whence);
5983     __put_user(fl->l_start, &target_fl->l_start);
5984     __put_user(fl->l_len, &target_fl->l_len);
5985     __put_user(fl->l_pid, &target_fl->l_pid);
5986     unlock_user_struct(target_fl, target_flock_addr, 1);
5987     return 0;
5988 }
5989 
5990 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5991 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5992 
5993 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5994 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5995                                                    abi_ulong target_flock_addr)
5996 {
5997     struct target_oabi_flock64 *target_fl;
5998     int l_type;
5999 
6000     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6001         return -TARGET_EFAULT;
6002     }
6003 
6004     __get_user(l_type, &target_fl->l_type);
6005     l_type = target_to_host_flock(l_type);
6006     if (l_type < 0) {
6007         return l_type;
6008     }
6009     fl->l_type = l_type;
6010     __get_user(fl->l_whence, &target_fl->l_whence);
6011     __get_user(fl->l_start, &target_fl->l_start);
6012     __get_user(fl->l_len, &target_fl->l_len);
6013     __get_user(fl->l_pid, &target_fl->l_pid);
6014     unlock_user_struct(target_fl, target_flock_addr, 0);
6015     return 0;
6016 }
6017 
6018 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6019                                                  const struct flock64 *fl)
6020 {
6021     struct target_oabi_flock64 *target_fl;
6022     short l_type;
6023 
6024     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6025         return -TARGET_EFAULT;
6026     }
6027 
6028     l_type = host_to_target_flock(fl->l_type);
6029     __put_user(l_type, &target_fl->l_type);
6030     __put_user(fl->l_whence, &target_fl->l_whence);
6031     __put_user(fl->l_start, &target_fl->l_start);
6032     __put_user(fl->l_len, &target_fl->l_len);
6033     __put_user(fl->l_pid, &target_fl->l_pid);
6034     unlock_user_struct(target_fl, target_flock_addr, 1);
6035     return 0;
6036 }
6037 #endif
6038 
6039 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6040                                               abi_ulong target_flock_addr)
6041 {
6042     struct target_flock64 *target_fl;
6043     int l_type;
6044 
6045     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6046         return -TARGET_EFAULT;
6047     }
6048 
6049     __get_user(l_type, &target_fl->l_type);
6050     l_type = target_to_host_flock(l_type);
6051     if (l_type < 0) {
6052         return l_type;
6053     }
6054     fl->l_type = l_type;
6055     __get_user(fl->l_whence, &target_fl->l_whence);
6056     __get_user(fl->l_start, &target_fl->l_start);
6057     __get_user(fl->l_len, &target_fl->l_len);
6058     __get_user(fl->l_pid, &target_fl->l_pid);
6059     unlock_user_struct(target_fl, target_flock_addr, 0);
6060     return 0;
6061 }
6062 
6063 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6064                                             const struct flock64 *fl)
6065 {
6066     struct target_flock64 *target_fl;
6067     short l_type;
6068 
6069     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6070         return -TARGET_EFAULT;
6071     }
6072 
6073     l_type = host_to_target_flock(fl->l_type);
6074     __put_user(l_type, &target_fl->l_type);
6075     __put_user(fl->l_whence, &target_fl->l_whence);
6076     __put_user(fl->l_start, &target_fl->l_start);
6077     __put_user(fl->l_len, &target_fl->l_len);
6078     __put_user(fl->l_pid, &target_fl->l_pid);
6079     unlock_user_struct(target_fl, target_flock_addr, 1);
6080     return 0;
6081 }
6082 
6083 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6084 {
6085     struct flock64 fl64;
6086 #ifdef F_GETOWN_EX
6087     struct f_owner_ex fox;
6088     struct target_f_owner_ex *target_fox;
6089 #endif
6090     abi_long ret;
6091     int host_cmd = target_to_host_fcntl_cmd(cmd);
6092 
6093     if (host_cmd == -TARGET_EINVAL)
6094 	    return host_cmd;
6095 
6096     switch(cmd) {
6097     case TARGET_F_GETLK:
6098         ret = copy_from_user_flock(&fl64, arg);
6099         if (ret) {
6100             return ret;
6101         }
6102         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6103         if (ret == 0) {
6104             ret = copy_to_user_flock(arg, &fl64);
6105         }
6106         break;
6107 
6108     case TARGET_F_SETLK:
6109     case TARGET_F_SETLKW:
6110         ret = copy_from_user_flock(&fl64, arg);
6111         if (ret) {
6112             return ret;
6113         }
6114         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6115         break;
6116 
6117     case TARGET_F_GETLK64:
6118         ret = copy_from_user_flock64(&fl64, arg);
6119         if (ret) {
6120             return ret;
6121         }
6122         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6123         if (ret == 0) {
6124             ret = copy_to_user_flock64(arg, &fl64);
6125         }
6126         break;
6127     case TARGET_F_SETLK64:
6128     case TARGET_F_SETLKW64:
6129         ret = copy_from_user_flock64(&fl64, arg);
6130         if (ret) {
6131             return ret;
6132         }
6133         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6134         break;
6135 
6136     case TARGET_F_GETFL:
6137         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6138         if (ret >= 0) {
6139             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6140         }
6141         break;
6142 
6143     case TARGET_F_SETFL:
6144         ret = get_errno(safe_fcntl(fd, host_cmd,
6145                                    target_to_host_bitmask(arg,
6146                                                           fcntl_flags_tbl)));
6147         break;
6148 
6149 #ifdef F_GETOWN_EX
6150     case TARGET_F_GETOWN_EX:
6151         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6152         if (ret >= 0) {
6153             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6154                 return -TARGET_EFAULT;
6155             target_fox->type = tswap32(fox.type);
6156             target_fox->pid = tswap32(fox.pid);
6157             unlock_user_struct(target_fox, arg, 1);
6158         }
6159         break;
6160 #endif
6161 
6162 #ifdef F_SETOWN_EX
6163     case TARGET_F_SETOWN_EX:
6164         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6165             return -TARGET_EFAULT;
6166         fox.type = tswap32(target_fox->type);
6167         fox.pid = tswap32(target_fox->pid);
6168         unlock_user_struct(target_fox, arg, 0);
6169         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6170         break;
6171 #endif
6172 
6173     case TARGET_F_SETOWN:
6174     case TARGET_F_GETOWN:
6175     case TARGET_F_SETSIG:
6176     case TARGET_F_GETSIG:
6177     case TARGET_F_SETLEASE:
6178     case TARGET_F_GETLEASE:
6179     case TARGET_F_SETPIPE_SZ:
6180     case TARGET_F_GETPIPE_SZ:
6181         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6182         break;
6183 
6184     default:
6185         ret = get_errno(safe_fcntl(fd, cmd, arg));
6186         break;
6187     }
6188     return ret;
6189 }
6190 
6191 #ifdef USE_UID16
6192 
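/* With 16-bit UID/GID syscalls, IDs above 65535 cannot be represented:
 * clamp them to 65534 (the traditional overflow ID) when reporting to the
 * guest, and keep -1 ("unchanged") intact when widening guest-supplied
 * values for the host.
 */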
6193 static inline int high2lowuid(int uid)
6194 {
6195     if (uid > 65535)
6196         return 65534;
6197     else
6198         return uid;
6199 }
6200 
6201 static inline int high2lowgid(int gid)
6202 {
6203     if (gid > 65535)
6204         return 65534;
6205     else
6206         return gid;
6207 }
6208 
6209 static inline int low2highuid(int uid)
6210 {
6211     if ((int16_t)uid == -1)
6212         return -1;
6213     else
6214         return uid;
6215 }
6216 
6217 static inline int low2highgid(int gid)
6218 {
6219     if ((int16_t)gid == -1)
6220         return -1;
6221     else
6222         return gid;
6223 }
6224 static inline int tswapid(int id)
6225 {
6226     return tswap16(id);
6227 }
6228 
6229 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6230 
6231 #else /* !USE_UID16 */
6232 static inline int high2lowuid(int uid)
6233 {
6234     return uid;
6235 }
6236 static inline int high2lowgid(int gid)
6237 {
6238     return gid;
6239 }
6240 static inline int low2highuid(int uid)
6241 {
6242     return uid;
6243 }
6244 static inline int low2highgid(int gid)
6245 {
6246     return gid;
6247 }
6248 static inline int tswapid(int id)
6249 {
6250     return tswap32(id);
6251 }
6252 
6253 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6254 
6255 #endif /* USE_UID16 */
6256 
6257 /* We must do direct syscalls for setting UID/GID, because we want to
6258  * implement the Linux system call semantics of "change only for this thread",
6259  * not the libc/POSIX semantics of "change for all threads in process".
6260  * (See http://ewontfix.com/17/ for more details.)
6261  * We use the 32-bit version of the syscalls if present; if it is not
6262  * then either the host architecture supports 32-bit UIDs natively with
6263  * the standard syscall, or the 16-bit UID is the best we can do.
6264  */
6265 #ifdef __NR_setuid32
6266 #define __NR_sys_setuid __NR_setuid32
6267 #else
6268 #define __NR_sys_setuid __NR_setuid
6269 #endif
6270 #ifdef __NR_setgid32
6271 #define __NR_sys_setgid __NR_setgid32
6272 #else
6273 #define __NR_sys_setgid __NR_setgid
6274 #endif
6275 #ifdef __NR_setresuid32
6276 #define __NR_sys_setresuid __NR_setresuid32
6277 #else
6278 #define __NR_sys_setresuid __NR_setresuid
6279 #endif
6280 #ifdef __NR_setresgid32
6281 #define __NR_sys_setresgid __NR_setresgid32
6282 #else
6283 #define __NR_sys_setresgid __NR_setresgid
6284 #endif
6285 
6286 _syscall1(int, sys_setuid, uid_t, uid)
6287 _syscall1(int, sys_setgid, gid_t, gid)
6288 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6289 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6290 
6291 void syscall_init(void)
6292 {
6293     IOCTLEntry *ie;
6294     const argtype *arg_type;
6295     int size;
6296     int i;
6297 
6298     thunk_init(STRUCT_MAX);
6299 
6300 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6301 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6302 #include "syscall_types.h"
6303 #undef STRUCT
6304 #undef STRUCT_SPECIAL
6305 
6306     /* Build target_to_host_errno_table[] from
6307      * host_to_target_errno_table[]. */
6308     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6309         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6310     }
6311 
6312     /* We patch the ioctl size if necessary.  We rely on the fact that
6313        no ioctl has all bits of the size field set to '1'. */
6314     ie = ioctl_entries;
6315     while (ie->target_cmd != 0) {
6316         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6317             TARGET_IOC_SIZEMASK) {
6318             arg_type = ie->arg_type;
6319             if (arg_type[0] != TYPE_PTR) {
6320                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6321                         ie->target_cmd);
6322                 exit(1);
6323             }
6324             arg_type++;
6325             size = thunk_type_size(arg_type, 0);
6326             ie->target_cmd = (ie->target_cmd &
6327                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6328                 (size << TARGET_IOC_SIZESHIFT);
6329         }
6330 
6331         /* automatic consistency check if same arch */
6332 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6333     (defined(__x86_64__) && defined(TARGET_X86_64))
6334         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6335             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6336                     ie->name, ie->target_cmd, ie->host_cmd);
6337         }
6338 #endif
6339         ie++;
6340     }
6341 }
6342 
6343 #if TARGET_ABI_BITS == 32
6344 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6345 {
6346 #ifdef TARGET_WORDS_BIGENDIAN
6347     return ((uint64_t)word0 << 32) | word1;
6348 #else
6349     return ((uint64_t)word1 << 32) | word0;
6350 #endif
6351 }
6352 #else /* TARGET_ABI_BITS == 32 */
6353 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6354 {
6355     return word0;
6356 }
6357 #endif /* TARGET_ABI_BITS == 32 */
6358 
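/* On 32-bit ABIs, truncate64()/ftruncate64() receive the 64-bit length as a
 * pair of 32-bit arguments; targets that require aligned register pairs
 * (regpairs_aligned()) pass the pair shifted up by one slot.
 * target_offset64() then reassembles the two halves in target word order.
 */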
6359 #ifdef TARGET_NR_truncate64
6360 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6361                                          abi_long arg2,
6362                                          abi_long arg3,
6363                                          abi_long arg4)
6364 {
6365     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6366         arg2 = arg3;
6367         arg3 = arg4;
6368     }
6369     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6370 }
6371 #endif
6372 
6373 #ifdef TARGET_NR_ftruncate64
6374 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6375                                           abi_long arg2,
6376                                           abi_long arg3,
6377                                           abi_long arg4)
6378 {
6379     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6380         arg2 = arg3;
6381         arg3 = arg4;
6382     }
6383     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6384 }
6385 #endif
6386 
6387 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6388                                                  abi_ulong target_addr)
6389 {
6390     struct target_itimerspec *target_itspec;
6391 
6392     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6393         return -TARGET_EFAULT;
6394     }
6395 
6396     host_itspec->it_interval.tv_sec =
6397                             tswapal(target_itspec->it_interval.tv_sec);
6398     host_itspec->it_interval.tv_nsec =
6399                             tswapal(target_itspec->it_interval.tv_nsec);
6400     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6401     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6402 
6403     unlock_user_struct(target_itspec, target_addr, 1);
6404     return 0;
6405 }
6406 
6407 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6408                                                struct itimerspec *host_its)
6409 {
6410     struct target_itimerspec *target_itspec;
6411 
6412     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6413         return -TARGET_EFAULT;
6414     }
6415 
6416     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6417     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6418 
6419     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6420     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6421 
6422     unlock_user_struct(target_itspec, target_addr, 0);
6423     return 0;
6424 }
6425 
6426 static inline abi_long target_to_host_timex(struct timex *host_tx,
6427                                             abi_long target_addr)
6428 {
6429     struct target_timex *target_tx;
6430 
6431     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6432         return -TARGET_EFAULT;
6433     }
6434 
6435     __get_user(host_tx->modes, &target_tx->modes);
6436     __get_user(host_tx->offset, &target_tx->offset);
6437     __get_user(host_tx->freq, &target_tx->freq);
6438     __get_user(host_tx->maxerror, &target_tx->maxerror);
6439     __get_user(host_tx->esterror, &target_tx->esterror);
6440     __get_user(host_tx->status, &target_tx->status);
6441     __get_user(host_tx->constant, &target_tx->constant);
6442     __get_user(host_tx->precision, &target_tx->precision);
6443     __get_user(host_tx->tolerance, &target_tx->tolerance);
6444     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6445     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6446     __get_user(host_tx->tick, &target_tx->tick);
6447     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6448     __get_user(host_tx->jitter, &target_tx->jitter);
6449     __get_user(host_tx->shift, &target_tx->shift);
6450     __get_user(host_tx->stabil, &target_tx->stabil);
6451     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6452     __get_user(host_tx->calcnt, &target_tx->calcnt);
6453     __get_user(host_tx->errcnt, &target_tx->errcnt);
6454     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6455     __get_user(host_tx->tai, &target_tx->tai);
6456 
6457     unlock_user_struct(target_tx, target_addr, 0);
6458     return 0;
6459 }
6460 
6461 static inline abi_long host_to_target_timex(abi_long target_addr,
6462                                             struct timex *host_tx)
6463 {
6464     struct target_timex *target_tx;
6465 
6466     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6467         return -TARGET_EFAULT;
6468     }
6469 
6470     __put_user(host_tx->modes, &target_tx->modes);
6471     __put_user(host_tx->offset, &target_tx->offset);
6472     __put_user(host_tx->freq, &target_tx->freq);
6473     __put_user(host_tx->maxerror, &target_tx->maxerror);
6474     __put_user(host_tx->esterror, &target_tx->esterror);
6475     __put_user(host_tx->status, &target_tx->status);
6476     __put_user(host_tx->constant, &target_tx->constant);
6477     __put_user(host_tx->precision, &target_tx->precision);
6478     __put_user(host_tx->tolerance, &target_tx->tolerance);
6479     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6480     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6481     __put_user(host_tx->tick, &target_tx->tick);
6482     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6483     __put_user(host_tx->jitter, &target_tx->jitter);
6484     __put_user(host_tx->shift, &target_tx->shift);
6485     __put_user(host_tx->stabil, &target_tx->stabil);
6486     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6487     __put_user(host_tx->calcnt, &target_tx->calcnt);
6488     __put_user(host_tx->errcnt, &target_tx->errcnt);
6489     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6490     __put_user(host_tx->tai, &target_tx->tai);
6491 
6492     unlock_user_struct(target_tx, target_addr, 1);
6493     return 0;
6494 }
6495 
6496 
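/* Convert a guest struct sigevent to the host layout, mapping the signal
 * number and byteswapping the sigval payload, notification method and
 * thread id.
 */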
6497 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6498                                                abi_ulong target_addr)
6499 {
6500     struct target_sigevent *target_sevp;
6501 
6502     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6503         return -TARGET_EFAULT;
6504     }
6505 
6506     /* This union is awkward on 64 bit systems because it has a 32 bit
6507      * integer and a pointer in it; we follow the conversion approach
6508      * used for handling sigval types in signal.c so the guest should get
6509      * the correct value back even if we did a 64 bit byteswap and it's
6510      * using the 32 bit integer.
6511      */
6512     host_sevp->sigev_value.sival_ptr =
6513         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6514     host_sevp->sigev_signo =
6515         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6516     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6517     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6518 
6519     unlock_user_struct(target_sevp, target_addr, 1);
6520     return 0;
6521 }
6522 
6523 #if defined(TARGET_NR_mlockall)
6524 static inline int target_to_host_mlockall_arg(int arg)
6525 {
6526     int result = 0;
6527 
6528     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6529         result |= MCL_CURRENT;
6530     }
6531     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6532         result |= MCL_FUTURE;
6533     }
6534     return result;
6535 }
6536 #endif
6537 
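/* Write a host struct stat back to the guest in its stat64 layout.
 * 32-bit ARM EABI guests use the distinct target_eabi_stat64 layout;
 * everything else uses target_stat64 (or target_stat where no 64-bit
 * variant exists).
 */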
6538 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6539      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6540      defined(TARGET_NR_newfstatat))
6541 static inline abi_long host_to_target_stat64(void *cpu_env,
6542                                              abi_ulong target_addr,
6543                                              struct stat *host_st)
6544 {
6545 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6546     if (((CPUARMState *)cpu_env)->eabi) {
6547         struct target_eabi_stat64 *target_st;
6548 
6549         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6550             return -TARGET_EFAULT;
6551         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6552         __put_user(host_st->st_dev, &target_st->st_dev);
6553         __put_user(host_st->st_ino, &target_st->st_ino);
6554 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6555         __put_user(host_st->st_ino, &target_st->__st_ino);
6556 #endif
6557         __put_user(host_st->st_mode, &target_st->st_mode);
6558         __put_user(host_st->st_nlink, &target_st->st_nlink);
6559         __put_user(host_st->st_uid, &target_st->st_uid);
6560         __put_user(host_st->st_gid, &target_st->st_gid);
6561         __put_user(host_st->st_rdev, &target_st->st_rdev);
6562         __put_user(host_st->st_size, &target_st->st_size);
6563         __put_user(host_st->st_blksize, &target_st->st_blksize);
6564         __put_user(host_st->st_blocks, &target_st->st_blocks);
6565         __put_user(host_st->st_atime, &target_st->target_st_atime);
6566         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6567         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6568 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6569         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6570         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6571         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6572 #endif
6573         unlock_user_struct(target_st, target_addr, 1);
6574     } else
6575 #endif
6576     {
6577 #if defined(TARGET_HAS_STRUCT_STAT64)
6578         struct target_stat64 *target_st;
6579 #else
6580         struct target_stat *target_st;
6581 #endif
6582 
6583         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6584             return -TARGET_EFAULT;
6585         memset(target_st, 0, sizeof(*target_st));
6586         __put_user(host_st->st_dev, &target_st->st_dev);
6587         __put_user(host_st->st_ino, &target_st->st_ino);
6588 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6589         __put_user(host_st->st_ino, &target_st->__st_ino);
6590 #endif
6591         __put_user(host_st->st_mode, &target_st->st_mode);
6592         __put_user(host_st->st_nlink, &target_st->st_nlink);
6593         __put_user(host_st->st_uid, &target_st->st_uid);
6594         __put_user(host_st->st_gid, &target_st->st_gid);
6595         __put_user(host_st->st_rdev, &target_st->st_rdev);
6596         /* XXX: better use of kernel struct */
6597         __put_user(host_st->st_size, &target_st->st_size);
6598         __put_user(host_st->st_blksize, &target_st->st_blksize);
6599         __put_user(host_st->st_blocks, &target_st->st_blocks);
6600         __put_user(host_st->st_atime, &target_st->target_st_atime);
6601         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6602         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6603 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6604         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6605         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6606         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6607 #endif
6608         unlock_user_struct(target_st, target_addr, 1);
6609     }
6610 
6611     return 0;
6612 }
6613 #endif
6614 
6615 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6616 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6617                                             abi_ulong target_addr)
6618 {
6619     struct target_statx *target_stx;
6620 
6621     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6622         return -TARGET_EFAULT;
6623     }
6624     memset(target_stx, 0, sizeof(*target_stx));
6625 
6626     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6627     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6628     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6629     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6630     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6631     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6632     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6633     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6634     __put_user(host_stx->stx_size, &target_stx->stx_size);
6635     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6636     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6637     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6638     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6639     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6640     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6641     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6642     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6643     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6644     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6645     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6646     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6647     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6648     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6649 
6650     unlock_user_struct(target_stx, target_addr, 1);
6651 
6652     return 0;
6653 }
6654 #endif
6655 
6656 
6657 /* ??? Using host futex calls even when target atomic operations
6658    are not really atomic probably breaks things.  However, implementing
6659    futexes locally would make futexes shared between multiple processes
6660    tricky.  In any case they're probably useless, because guest atomic
6661    operations won't work either.  */
6662 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6663                     target_ulong uaddr2, int val3)
6664 {
6665     struct timespec ts, *pts;
6666     int base_op;
6667 
6668     /* ??? We assume FUTEX_* constants are the same on both host
6669        and target.  */
6670 #ifdef FUTEX_CMD_MASK
6671     base_op = op & FUTEX_CMD_MASK;
6672 #else
6673     base_op = op;
6674 #endif
6675     switch (base_op) {
6676     case FUTEX_WAIT:
6677     case FUTEX_WAIT_BITSET:
6678         if (timeout) {
6679             pts = &ts;
6680             target_to_host_timespec(pts, timeout);
6681         } else {
6682             pts = NULL;
6683         }
6684         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6685                          pts, NULL, val3));
6686     case FUTEX_WAKE:
6687         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6688     case FUTEX_FD:
6689         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6690     case FUTEX_REQUEUE:
6691     case FUTEX_CMP_REQUEUE:
6692     case FUTEX_WAKE_OP:
6693         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6694            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6695            But the prototype takes a `struct timespec *'; insert casts
6696            to satisfy the compiler.  We do not need to tswap TIMEOUT
6697            since it's not compared to guest memory.  */
6698         pts = (struct timespec *)(uintptr_t) timeout;
6699         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6700                                     g2h(uaddr2),
6701                                     (base_op == FUTEX_CMP_REQUEUE
6702                                      ? tswap32(val3)
6703                                      : val3)));
6704     default:
6705         return -TARGET_ENOSYS;
6706     }
6707 }
6708 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
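/* name_to_handle_at() emulation: read the guest-supplied handle_bytes,
 * let the host kernel fill a scratch file_handle, then copy the otherwise
 * opaque handle back with the handle_bytes/handle_type header byteswapped
 * and store the mount id.
 */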
6709 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6710                                      abi_long handle, abi_long mount_id,
6711                                      abi_long flags)
6712 {
6713     struct file_handle *target_fh;
6714     struct file_handle *fh;
6715     int mid = 0;
6716     abi_long ret;
6717     char *name;
6718     unsigned int size, total_size;
6719 
6720     if (get_user_s32(size, handle)) {
6721         return -TARGET_EFAULT;
6722     }
6723 
6724     name = lock_user_string(pathname);
6725     if (!name) {
6726         return -TARGET_EFAULT;
6727     }
6728 
6729     total_size = sizeof(struct file_handle) + size;
6730     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6731     if (!target_fh) {
6732         unlock_user(name, pathname, 0);
6733         return -TARGET_EFAULT;
6734     }
6735 
6736     fh = g_malloc0(total_size);
6737     fh->handle_bytes = size;
6738 
6739     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6740     unlock_user(name, pathname, 0);
6741 
6742     /* man name_to_handle_at(2):
6743      * Other than the use of the handle_bytes field, the caller should treat
6744      * the file_handle structure as an opaque data type
6745      */
6746 
6747     memcpy(target_fh, fh, total_size);
6748     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6749     target_fh->handle_type = tswap32(fh->handle_type);
6750     g_free(fh);
6751     unlock_user(target_fh, handle, total_size);
6752 
6753     if (put_user_s32(mid, mount_id)) {
6754         return -TARGET_EFAULT;
6755     }
6756 
6757     return ret;
6758 
6759 }
6760 #endif
6761 
6762 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6763 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6764                                      abi_long flags)
6765 {
6766     struct file_handle *target_fh;
6767     struct file_handle *fh;
6768     unsigned int size, total_size;
6769     abi_long ret;
6770 
6771     if (get_user_s32(size, handle)) {
6772         return -TARGET_EFAULT;
6773     }
6774 
6775     total_size = sizeof(struct file_handle) + size;
6776     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6777     if (!target_fh) {
6778         return -TARGET_EFAULT;
6779     }
6780 
6781     fh = g_memdup(target_fh, total_size);
6782     fh->handle_bytes = size;
6783     fh->handle_type = tswap32(target_fh->handle_type);
6784 
6785     ret = get_errno(open_by_handle_at(mount_fd, fh,
6786                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6787 
6788     g_free(fh);
6789 
6790     unlock_user(target_fh, handle, total_size);
6791 
6792     return ret;
6793 }
6794 #endif
6795 
6796 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6797 
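/* signalfd()/signalfd4() emulation: reject unsupported flags, convert the
 * guest signal mask and flag bits to host values, and register the
 * resulting fd so that data read from it is translated for the guest.
 */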
6798 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6799 {
6800     int host_flags;
6801     target_sigset_t *target_mask;
6802     sigset_t host_mask;
6803     abi_long ret;
6804 
6805     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6806         return -TARGET_EINVAL;
6807     }
6808     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6809         return -TARGET_EFAULT;
6810     }
6811 
6812     target_to_host_sigset(&host_mask, target_mask);
6813 
6814     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6815 
6816     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6817     if (ret >= 0) {
6818         fd_trans_register(ret, &target_signalfd_trans);
6819     }
6820 
6821     unlock_user_struct(target_mask, mask, 0);
6822 
6823     return ret;
6824 }
6825 #endif
6826 
6827 /* Map host to target signal numbers for the wait family of syscalls.
6828    Assume all other status bits are the same.  */
6829 int host_to_target_waitstatus(int status)
6830 {
6831     if (WIFSIGNALED(status)) {
6832         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6833     }
6834     if (WIFSTOPPED(status)) {
6835         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6836                | (status & 0xff);
6837     }
6838     return status;
6839 }
6840 
6841 static int open_self_cmdline(void *cpu_env, int fd)
6842 {
6843     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6844     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6845     int i;
6846 
6847     for (i = 0; i < bprm->argc; i++) {
6848         size_t len = strlen(bprm->argv[i]) + 1;
6849 
6850         if (write(fd, bprm->argv[i], len) != len) {
6851             return -1;
6852         }
6853     }
6854 
6855     return 0;
6856 }
6857 
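/* Synthesize /proc/self/maps for the guest: parse the host's maps, keep
 * only ranges that map to valid guest addresses, rewrite them with h2g()
 * and label the guest stack.
 */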
6858 static int open_self_maps(void *cpu_env, int fd)
6859 {
6860     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6861     TaskState *ts = cpu->opaque;
6862     FILE *fp;
6863     char *line = NULL;
6864     size_t len = 0;
6865     ssize_t read;
6866 
6867     fp = fopen("/proc/self/maps", "r");
6868     if (fp == NULL) {
6869         return -1;
6870     }
6871 
6872     while ((read = getline(&line, &len, fp)) != -1) {
6873         int fields, dev_maj, dev_min, inode;
6874         uint64_t min, max, offset;
6875         char flag_r, flag_w, flag_x, flag_p;
6876         char path[512] = "";
6877         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6878                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6879                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6880 
6881         if ((fields < 10) || (fields > 11)) {
6882             continue;
6883         }
6884         if (h2g_valid(min)) {
6885             int flags = page_get_flags(h2g(min));
6886             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6887             if (page_check_range(h2g(min), max - min, flags) == -1) {
6888                 continue;
6889             }
6890             if (h2g(min) == ts->info->stack_limit) {
6891                 pstrcpy(path, sizeof(path), "      [stack]");
6892             }
6893             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6894                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6895                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6896                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
6897                     path[0] ? "         " : "", path);
6898         }
6899     }
6900 
6901     free(line);
6902     fclose(fp);
6903 
6904     return 0;
6905 }
6906 
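/* Synthesize /proc/self/stat: only the pid, the command name and the
 * start-of-stack field (index 27) are filled in; all other fields read
 * as 0.
 */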
6907 static int open_self_stat(void *cpu_env, int fd)
6908 {
6909     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6910     TaskState *ts = cpu->opaque;
6911     abi_ulong start_stack = ts->info->start_stack;
6912     int i;
6913 
6914     for (i = 0; i < 44; i++) {
6915       char buf[128];
6916       int len;
6917       uint64_t val = 0;
6918 
6919       if (i == 0) {
6920         /* pid */
6921         val = getpid();
6922         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6923       } else if (i == 1) {
6924         /* app name */
6925         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6926       } else if (i == 27) {
6927         /* stack bottom */
6928         val = start_stack;
6929         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6930       } else {
6931         /* for the rest, there is MasterCard */
6932         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6933       }
6934 
6935       len = strlen(buf);
6936       if (write(fd, buf, len) != len) {
6937           return -1;
6938       }
6939     }
6940 
6941     return 0;
6942 }
6943 
6944 static int open_self_auxv(void *cpu_env, int fd)
6945 {
6946     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6947     TaskState *ts = cpu->opaque;
6948     abi_ulong auxv = ts->info->saved_auxv;
6949     abi_ulong len = ts->info->auxv_len;
6950     char *ptr;
6951 
6952     /*
6953      * The auxiliary vector is stored on the target process's stack;
6954      * read the whole vector and copy it out to the file.
6955      */
6956     ptr = lock_user(VERIFY_READ, auxv, len, 0);
6957     if (ptr != NULL) {
6958         while (len > 0) {
6959             ssize_t r;
6960             r = write(fd, ptr, len);
6961             if (r <= 0) {
6962                 break;
6963             }
6964             len -= r;
6965             ptr += r;
6966         }
6967         lseek(fd, 0, SEEK_SET);
6968         unlock_user(ptr, auxv, len);
6969     }
6970 
6971     return 0;
6972 }
6973 
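/* Return 1 if filename names the given entry under /proc/self/ or
 * /proc/<pid>/ for our own pid, 0 otherwise.
 */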
6974 static int is_proc_myself(const char *filename, const char *entry)
6975 {
6976     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6977         filename += strlen("/proc/");
6978         if (!strncmp(filename, "self/", strlen("self/"))) {
6979             filename += strlen("self/");
6980         } else if (*filename >= '1' && *filename <= '9') {
6981             char myself[80];
6982             snprintf(myself, sizeof(myself), "%d/", getpid());
6983             if (!strncmp(filename, myself, strlen(myself))) {
6984                 filename += strlen(myself);
6985             } else {
6986                 return 0;
6987             }
6988         } else {
6989             return 0;
6990         }
6991         if (!strcmp(filename, entry)) {
6992             return 1;
6993         }
6994     }
6995     return 0;
6996 }
6997 
6998 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6999     defined(TARGET_SPARC) || defined(TARGET_M68K)
7000 static int is_proc(const char *filename, const char *entry)
7001 {
7002     return strcmp(filename, entry) == 0;
7003 }
7004 #endif
7005 
7006 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7007 static int open_net_route(void *cpu_env, int fd)
7008 {
7009     FILE *fp;
7010     char *line = NULL;
7011     size_t len = 0;
7012     ssize_t read;
7013 
7014     fp = fopen("/proc/net/route", "r");
7015     if (fp == NULL) {
7016         return -1;
7017     }
7018 
7019     /* read header */
7020 
7021     read = getline(&line, &len, fp);
7022     dprintf(fd, "%s", line);
7023 
7024     /* read routes */
7025 
7026     while ((read = getline(&line, &len, fp)) != -1) {
7027         char iface[16];
7028         uint32_t dest, gw, mask;
7029         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7030         int fields;
7031 
7032         fields = sscanf(line,
7033                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7034                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7035                         &mask, &mtu, &window, &irtt);
7036         if (fields != 11) {
7037             continue;
7038         }
7039         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7040                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7041                 metric, tswap32(mask), mtu, window, irtt);
7042     }
7043 
7044     free(line);
7045     fclose(fp);
7046 
7047     return 0;
7048 }
7049 #endif
7050 
7051 #if defined(TARGET_SPARC)
7052 static int open_cpuinfo(void *cpu_env, int fd)
7053 {
7054     dprintf(fd, "type\t\t: sun4u\n");
7055     return 0;
7056 }
7057 #endif
7058 
7059 #if defined(TARGET_M68K)
7060 static int open_hardware(void *cpu_env, int fd)
7061 {
7062     dprintf(fd, "Model:\t\tqemu-m68k\n");
7063     return 0;
7064 }
7065 #endif
7066 
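/* openat() emulation: /proc/self/exe is redirected to the real executable,
 * and a small table of fake /proc entries (maps, stat, auxv, cmdline plus
 * some target-specific files) is materialised into an unlinked temporary
 * file whose descriptor is returned to the guest.  Everything else is
 * forwarded to the host via safe_openat().
 */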
7067 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7068 {
7069     struct fake_open {
7070         const char *filename;
7071         int (*fill)(void *cpu_env, int fd);
7072         int (*cmp)(const char *s1, const char *s2);
7073     };
7074     const struct fake_open *fake_open;
7075     static const struct fake_open fakes[] = {
7076         { "maps", open_self_maps, is_proc_myself },
7077         { "stat", open_self_stat, is_proc_myself },
7078         { "auxv", open_self_auxv, is_proc_myself },
7079         { "cmdline", open_self_cmdline, is_proc_myself },
7080 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7081         { "/proc/net/route", open_net_route, is_proc },
7082 #endif
7083 #if defined(TARGET_SPARC)
7084         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7085 #endif
7086 #if defined(TARGET_M68K)
7087         { "/proc/hardware", open_hardware, is_proc },
7088 #endif
7089         { NULL, NULL, NULL }
7090     };
7091 
7092     if (is_proc_myself(pathname, "exe")) {
7093         int execfd = qemu_getauxval(AT_EXECFD);
7094         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7095     }
7096 
7097     for (fake_open = fakes; fake_open->filename; fake_open++) {
7098         if (fake_open->cmp(pathname, fake_open->filename)) {
7099             break;
7100         }
7101     }
7102 
7103     if (fake_open->filename) {
7104         const char *tmpdir;
7105         char filename[PATH_MAX];
7106         int fd, r;
7107 
7108         /* create a temporary file to hold the fake entry's contents */
7109         tmpdir = getenv("TMPDIR");
7110         if (!tmpdir)
7111             tmpdir = "/tmp";
7112         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7113         fd = mkstemp(filename);
7114         if (fd < 0) {
7115             return fd;
7116         }
7117         unlink(filename);
7118 
7119         if ((r = fake_open->fill(cpu_env, fd))) {
7120             int e = errno;
7121             close(fd);
7122             errno = e;
7123             return r;
7124         }
7125         lseek(fd, 0, SEEK_SET);
7126 
7127         return fd;
7128     }
7129 
7130     return safe_openat(dirfd, path(pathname), flags, mode);
7131 }
7132 
7133 #define TIMER_MAGIC 0x0caf0000
7134 #define TIMER_MAGIC_MASK 0xffff0000
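/* Guest-visible timer IDs are the 16-bit index into g_posix_timers tagged
 * with TIMER_MAGIC in the upper half, e.g. index 3 is exposed as 0x0caf0003.
 */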
7135 
7136 /* Convert QEMU provided timer ID back to internal 16bit index format */
7137 static target_timer_t get_timer_id(abi_long arg)
7138 {
7139     target_timer_t timerid = arg;
7140 
7141     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7142         return -TARGET_EINVAL;
7143     }
7144 
7145     timerid &= 0xffff;
7146 
7147     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7148         return -TARGET_EINVAL;
7149     }
7150 
7151     return timerid;
7152 }
7153 
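/* CPU affinity masks are arrays of abi_ulong on the guest side and of
 * unsigned long on the host side; the two may differ in width and byte
 * order, so convert them bit by bit.
 */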
7154 static int target_to_host_cpu_mask(unsigned long *host_mask,
7155                                    size_t host_size,
7156                                    abi_ulong target_addr,
7157                                    size_t target_size)
7158 {
7159     unsigned target_bits = sizeof(abi_ulong) * 8;
7160     unsigned host_bits = sizeof(*host_mask) * 8;
7161     abi_ulong *target_mask;
7162     unsigned i, j;
7163 
7164     assert(host_size >= target_size);
7165 
7166     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7167     if (!target_mask) {
7168         return -TARGET_EFAULT;
7169     }
7170     memset(host_mask, 0, host_size);
7171 
7172     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7173         unsigned bit = i * target_bits;
7174         abi_ulong val;
7175 
7176         __get_user(val, &target_mask[i]);
7177         for (j = 0; j < target_bits; j++, bit++) {
7178             if (val & (1UL << j)) {
7179                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7180             }
7181         }
7182     }
7183 
7184     unlock_user(target_mask, target_addr, 0);
7185     return 0;
7186 }
7187 
7188 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7189                                    size_t host_size,
7190                                    abi_ulong target_addr,
7191                                    size_t target_size)
7192 {
7193     unsigned target_bits = sizeof(abi_ulong) * 8;
7194     unsigned host_bits = sizeof(*host_mask) * 8;
7195     abi_ulong *target_mask;
7196     unsigned i, j;
7197 
7198     assert(host_size >= target_size);
7199 
7200     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7201     if (!target_mask) {
7202         return -TARGET_EFAULT;
7203     }
7204 
7205     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7206         unsigned bit = i * target_bits;
7207         abi_ulong val = 0;
7208 
7209         for (j = 0; j < target_bits; j++, bit++) {
7210             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7211                 val |= 1UL << j;
7212             }
7213         }
7214         __put_user(val, &target_mask[i]);
7215     }
7216 
7217     unlock_user(target_mask, target_addr, target_size);
7218     return 0;
7219 }
7220 
7221 /* This is an internal helper for do_syscall(): it provides a single
7222  * return point, so that actions such as logging of syscall results
7223  * can be performed in one place.
7224  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7225  */
7226 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7227                             abi_long arg2, abi_long arg3, abi_long arg4,
7228                             abi_long arg5, abi_long arg6, abi_long arg7,
7229                             abi_long arg8)
7230 {
7231     CPUState *cpu = env_cpu(cpu_env);
7232     abi_long ret;
7233 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7234     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7235     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7236     || defined(TARGET_NR_statx)
7237     struct stat st;
7238 #endif
7239 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7240     || defined(TARGET_NR_fstatfs)
7241     struct statfs stfs;
7242 #endif
7243     void *p;
7244 
7245     switch(num) {
7246     case TARGET_NR_exit:
7247         /* In old applications this may be used to implement _exit(2).
7248            However, in threaded applications it is used for thread termination,
7249            and _exit_group is used for application termination.
7250            Do thread termination if we have more than one thread.  */
7251 
7252         if (block_signals()) {
7253             return -TARGET_ERESTARTSYS;
7254         }
7255 
7256         cpu_list_lock();
7257 
7258         if (CPU_NEXT(first_cpu)) {
7259             TaskState *ts;
7260 
7261             /* Remove the CPU from the list.  */
7262             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7263 
7264             cpu_list_unlock();
7265 
7266             ts = cpu->opaque;
7267             if (ts->child_tidptr) {
7268                 put_user_u32(0, ts->child_tidptr);
7269                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7270                           NULL, NULL, 0);
7271             }
7272             thread_cpu = NULL;
7273             object_unref(OBJECT(cpu));
7274             g_free(ts);
7275             rcu_unregister_thread();
7276             pthread_exit(NULL);
7277         }
7278 
7279         cpu_list_unlock();
7280         preexit_cleanup(cpu_env, arg1);
7281         _exit(arg1);
7282         return 0; /* avoid warning */
7283     case TARGET_NR_read:
7284         if (arg2 == 0 && arg3 == 0) {
7285             return get_errno(safe_read(arg1, 0, 0));
7286         } else {
7287             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7288                 return -TARGET_EFAULT;
7289             ret = get_errno(safe_read(arg1, p, arg3));
7290             if (ret >= 0 &&
7291                 fd_trans_host_to_target_data(arg1)) {
7292                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7293             }
7294             unlock_user(p, arg2, ret);
7295         }
7296         return ret;
7297     case TARGET_NR_write:
7298         if (arg2 == 0 && arg3 == 0) {
7299             return get_errno(safe_write(arg1, 0, 0));
7300         }
7301         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7302             return -TARGET_EFAULT;
7303         if (fd_trans_target_to_host_data(arg1)) {
7304             void *copy = g_malloc(arg3);
7305             memcpy(copy, p, arg3);
7306             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7307             if (ret >= 0) {
7308                 ret = get_errno(safe_write(arg1, copy, ret));
7309             }
7310             g_free(copy);
7311         } else {
7312             ret = get_errno(safe_write(arg1, p, arg3));
7313         }
7314         unlock_user(p, arg2, 0);
7315         return ret;
7316 
7317 #ifdef TARGET_NR_open
7318     case TARGET_NR_open:
7319         if (!(p = lock_user_string(arg1)))
7320             return -TARGET_EFAULT;
7321         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7322                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7323                                   arg3));
7324         fd_trans_unregister(ret);
7325         unlock_user(p, arg1, 0);
7326         return ret;
7327 #endif
7328     case TARGET_NR_openat:
7329         if (!(p = lock_user_string(arg2)))
7330             return -TARGET_EFAULT;
7331         ret = get_errno(do_openat(cpu_env, arg1, p,
7332                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7333                                   arg4));
7334         fd_trans_unregister(ret);
7335         unlock_user(p, arg2, 0);
7336         return ret;
7337 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7338     case TARGET_NR_name_to_handle_at:
7339         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7340         return ret;
7341 #endif
7342 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7343     case TARGET_NR_open_by_handle_at:
7344         ret = do_open_by_handle_at(arg1, arg2, arg3);
7345         fd_trans_unregister(ret);
7346         return ret;
7347 #endif
7348     case TARGET_NR_close:
7349         fd_trans_unregister(arg1);
7350         return get_errno(close(arg1));
7351 
7352     case TARGET_NR_brk:
7353         return do_brk(arg1);
7354 #ifdef TARGET_NR_fork
7355     case TARGET_NR_fork:
7356         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7357 #endif
7358 #ifdef TARGET_NR_waitpid
7359     case TARGET_NR_waitpid:
7360         {
7361             int status;
7362             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7363             if (!is_error(ret) && arg2 && ret
7364                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7365                 return -TARGET_EFAULT;
7366         }
7367         return ret;
7368 #endif
7369 #ifdef TARGET_NR_waitid
7370     case TARGET_NR_waitid:
7371         {
7372             siginfo_t info;
7373             info.si_pid = 0;
7374             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7375             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7376                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7377                     return -TARGET_EFAULT;
7378                 host_to_target_siginfo(p, &info);
7379                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7380             }
7381         }
7382         return ret;
7383 #endif
7384 #ifdef TARGET_NR_creat /* not on alpha */
7385     case TARGET_NR_creat:
7386         if (!(p = lock_user_string(arg1)))
7387             return -TARGET_EFAULT;
7388         ret = get_errno(creat(p, arg2));
7389         fd_trans_unregister(ret);
7390         unlock_user(p, arg1, 0);
7391         return ret;
7392 #endif
7393 #ifdef TARGET_NR_link
7394     case TARGET_NR_link:
7395         {
7396             void * p2;
7397             p = lock_user_string(arg1);
7398             p2 = lock_user_string(arg2);
7399             if (!p || !p2)
7400                 ret = -TARGET_EFAULT;
7401             else
7402                 ret = get_errno(link(p, p2));
7403             unlock_user(p2, arg2, 0);
7404             unlock_user(p, arg1, 0);
7405         }
7406         return ret;
7407 #endif
7408 #if defined(TARGET_NR_linkat)
7409     case TARGET_NR_linkat:
7410         {
7411             void * p2 = NULL;
7412             if (!arg2 || !arg4)
7413                 return -TARGET_EFAULT;
7414             p  = lock_user_string(arg2);
7415             p2 = lock_user_string(arg4);
7416             if (!p || !p2)
7417                 ret = -TARGET_EFAULT;
7418             else
7419                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7420             unlock_user(p, arg2, 0);
7421             unlock_user(p2, arg4, 0);
7422         }
7423         return ret;
7424 #endif
7425 #ifdef TARGET_NR_unlink
7426     case TARGET_NR_unlink:
7427         if (!(p = lock_user_string(arg1)))
7428             return -TARGET_EFAULT;
7429         ret = get_errno(unlink(p));
7430         unlock_user(p, arg1, 0);
7431         return ret;
7432 #endif
7433 #if defined(TARGET_NR_unlinkat)
7434     case TARGET_NR_unlinkat:
7435         if (!(p = lock_user_string(arg2)))
7436             return -TARGET_EFAULT;
7437         ret = get_errno(unlinkat(arg1, p, arg3));
7438         unlock_user(p, arg2, 0);
7439         return ret;
7440 #endif
7441     case TARGET_NR_execve:
7442         {
7443             char **argp, **envp;
7444             int argc, envc;
7445             abi_ulong gp;
7446             abi_ulong guest_argp;
7447             abi_ulong guest_envp;
7448             abi_ulong addr;
7449             char **q;
7450             int total_size = 0;
7451 
7452             argc = 0;
7453             guest_argp = arg2;
7454             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7455                 if (get_user_ual(addr, gp))
7456                     return -TARGET_EFAULT;
7457                 if (!addr)
7458                     break;
7459                 argc++;
7460             }
7461             envc = 0;
7462             guest_envp = arg3;
7463             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7464                 if (get_user_ual(addr, gp))
7465                     return -TARGET_EFAULT;
7466                 if (!addr)
7467                     break;
7468                 envc++;
7469             }
7470 
7471             argp = g_new0(char *, argc + 1);
7472             envp = g_new0(char *, envc + 1);
7473 
7474             for (gp = guest_argp, q = argp; gp;
7475                   gp += sizeof(abi_ulong), q++) {
7476                 if (get_user_ual(addr, gp))
7477                     goto execve_efault;
7478                 if (!addr)
7479                     break;
7480                 if (!(*q = lock_user_string(addr)))
7481                     goto execve_efault;
7482                 total_size += strlen(*q) + 1;
7483             }
7484             *q = NULL;
7485 
7486             for (gp = guest_envp, q = envp; gp;
7487                   gp += sizeof(abi_ulong), q++) {
7488                 if (get_user_ual(addr, gp))
7489                     goto execve_efault;
7490                 if (!addr)
7491                     break;
7492                 if (!(*q = lock_user_string(addr)))
7493                     goto execve_efault;
7494                 total_size += strlen(*q) + 1;
7495             }
7496             *q = NULL;
7497 
7498             if (!(p = lock_user_string(arg1)))
7499                 goto execve_efault;
7500             /* Although execve() is not an interruptible syscall it is
7501              * a special case where we must use the safe_syscall wrapper:
7502              * if we allow a signal to happen before we make the host
7503              * syscall then we will 'lose' it, because at the point of
7504              * execve the process leaves QEMU's control. So we use the
7505              * safe syscall wrapper to ensure that we either take the
7506              * signal as a guest signal, or else it does not happen
7507              * before the execve completes and makes it the other
7508              * program's problem.
7509              */
7510             ret = get_errno(safe_execve(p, argp, envp));
7511             unlock_user(p, arg1, 0);
7512 
7513             goto execve_end;
7514 
7515         execve_efault:
7516             ret = -TARGET_EFAULT;
7517 
7518         execve_end:
7519             for (gp = guest_argp, q = argp; *q;
7520                   gp += sizeof(abi_ulong), q++) {
7521                 if (get_user_ual(addr, gp)
7522                     || !addr)
7523                     break;
7524                 unlock_user(*q, addr, 0);
7525             }
7526             for (gp = guest_envp, q = envp; *q;
7527                   gp += sizeof(abi_ulong), q++) {
7528                 if (get_user_ual(addr, gp)
7529                     || !addr)
7530                     break;
7531                 unlock_user(*q, addr, 0);
7532             }
7533 
7534             g_free(argp);
7535             g_free(envp);
7536         }
7537         return ret;
7538     case TARGET_NR_chdir:
7539         if (!(p = lock_user_string(arg1)))
7540             return -TARGET_EFAULT;
7541         ret = get_errno(chdir(p));
7542         unlock_user(p, arg1, 0);
7543         return ret;
7544 #ifdef TARGET_NR_time
7545     case TARGET_NR_time:
7546         {
7547             time_t host_time;
7548             ret = get_errno(time(&host_time));
7549             if (!is_error(ret)
7550                 && arg1
7551                 && put_user_sal(host_time, arg1))
7552                 return -TARGET_EFAULT;
7553         }
7554         return ret;
7555 #endif
7556 #ifdef TARGET_NR_mknod
7557     case TARGET_NR_mknod:
7558         if (!(p = lock_user_string(arg1)))
7559             return -TARGET_EFAULT;
7560         ret = get_errno(mknod(p, arg2, arg3));
7561         unlock_user(p, arg1, 0);
7562         return ret;
7563 #endif
7564 #if defined(TARGET_NR_mknodat)
7565     case TARGET_NR_mknodat:
7566         if (!(p = lock_user_string(arg2)))
7567             return -TARGET_EFAULT;
7568         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7569         unlock_user(p, arg2, 0);
7570         return ret;
7571 #endif
7572 #ifdef TARGET_NR_chmod
7573     case TARGET_NR_chmod:
7574         if (!(p = lock_user_string(arg1)))
7575             return -TARGET_EFAULT;
7576         ret = get_errno(chmod(p, arg2));
7577         unlock_user(p, arg1, 0);
7578         return ret;
7579 #endif
7580 #ifdef TARGET_NR_lseek
7581     case TARGET_NR_lseek:
7582         return get_errno(lseek(arg1, arg2, arg3));
7583 #endif
7584 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7585     /* Alpha specific */
7586     case TARGET_NR_getxpid:
7587         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7588         return get_errno(getpid());
7589 #endif
7590 #ifdef TARGET_NR_getpid
7591     case TARGET_NR_getpid:
7592         return get_errno(getpid());
7593 #endif
7594     case TARGET_NR_mount:
7595         {
7596             /* need to look at the data field */
7597             void *p2, *p3;
7598 
7599             if (arg1) {
7600                 p = lock_user_string(arg1);
7601                 if (!p) {
7602                     return -TARGET_EFAULT;
7603                 }
7604             } else {
7605                 p = NULL;
7606             }
7607 
7608             p2 = lock_user_string(arg2);
7609             if (!p2) {
7610                 if (arg1) {
7611                     unlock_user(p, arg1, 0);
7612                 }
7613                 return -TARGET_EFAULT;
7614             }
7615 
7616             if (arg3) {
7617                 p3 = lock_user_string(arg3);
7618                 if (!p3) {
7619                     if (arg1) {
7620                         unlock_user(p, arg1, 0);
7621                     }
7622                     unlock_user(p2, arg2, 0);
7623                     return -TARGET_EFAULT;
7624                 }
7625             } else {
7626                 p3 = NULL;
7627             }
7628 
7629             /* FIXME - arg5 should be locked, but it isn't clear how to
7630              * do that since it's not guaranteed to be a NULL-terminated
7631              * string.
7632              */
7633             if (!arg5) {
7634                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7635             } else {
7636                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7637             }
7638             ret = get_errno(ret);
7639 
7640             if (arg1) {
7641                 unlock_user(p, arg1, 0);
7642             }
7643             unlock_user(p2, arg2, 0);
7644             if (arg3) {
7645                 unlock_user(p3, arg3, 0);
7646             }
7647         }
7648         return ret;
7649 #ifdef TARGET_NR_umount
7650     case TARGET_NR_umount:
7651         if (!(p = lock_user_string(arg1)))
7652             return -TARGET_EFAULT;
7653         ret = get_errno(umount(p));
7654         unlock_user(p, arg1, 0);
7655         return ret;
7656 #endif
7657 #ifdef TARGET_NR_stime /* not on alpha */
7658     case TARGET_NR_stime:
7659         {
7660             time_t host_time;
7661             if (get_user_sal(host_time, arg1))
7662                 return -TARGET_EFAULT;
7663             return get_errno(stime(&host_time));
7664         }
7665 #endif
7666 #ifdef TARGET_NR_alarm /* not on alpha */
7667     case TARGET_NR_alarm:
7668         return alarm(arg1);
7669 #endif
7670 #ifdef TARGET_NR_pause /* not on alpha */
7671     case TARGET_NR_pause:
7672         if (!block_signals()) {
7673             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7674         }
7675         return -TARGET_EINTR;
7676 #endif
7677 #ifdef TARGET_NR_utime
7678     case TARGET_NR_utime:
7679         {
7680             struct utimbuf tbuf, *host_tbuf;
7681             struct target_utimbuf *target_tbuf;
7682             if (arg2) {
7683                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7684                     return -TARGET_EFAULT;
7685                 tbuf.actime = tswapal(target_tbuf->actime);
7686                 tbuf.modtime = tswapal(target_tbuf->modtime);
7687                 unlock_user_struct(target_tbuf, arg2, 0);
7688                 host_tbuf = &tbuf;
7689             } else {
7690                 host_tbuf = NULL;
7691             }
7692             if (!(p = lock_user_string(arg1)))
7693                 return -TARGET_EFAULT;
7694             ret = get_errno(utime(p, host_tbuf));
7695             unlock_user(p, arg1, 0);
7696         }
7697         return ret;
7698 #endif
7699 #ifdef TARGET_NR_utimes
7700     case TARGET_NR_utimes:
7701         {
7702             struct timeval *tvp, tv[2];
7703             if (arg2) {
7704                 if (copy_from_user_timeval(&tv[0], arg2)
7705                     || copy_from_user_timeval(&tv[1],
7706                                               arg2 + sizeof(struct target_timeval)))
7707                     return -TARGET_EFAULT;
7708                 tvp = tv;
7709             } else {
7710                 tvp = NULL;
7711             }
7712             if (!(p = lock_user_string(arg1)))
7713                 return -TARGET_EFAULT;
7714             ret = get_errno(utimes(p, tvp));
7715             unlock_user(p, arg1, 0);
7716         }
7717         return ret;
7718 #endif
7719 #if defined(TARGET_NR_futimesat)
7720     case TARGET_NR_futimesat:
7721         {
7722             struct timeval *tvp, tv[2];
7723             if (arg3) {
7724                 if (copy_from_user_timeval(&tv[0], arg3)
7725                     || copy_from_user_timeval(&tv[1],
7726                                               arg3 + sizeof(struct target_timeval)))
7727                     return -TARGET_EFAULT;
7728                 tvp = tv;
7729             } else {
7730                 tvp = NULL;
7731             }
7732             if (!(p = lock_user_string(arg2))) {
7733                 return -TARGET_EFAULT;
7734             }
7735             ret = get_errno(futimesat(arg1, path(p), tvp));
7736             unlock_user(p, arg2, 0);
7737         }
7738         return ret;
7739 #endif
7740 #ifdef TARGET_NR_access
7741     case TARGET_NR_access:
7742         if (!(p = lock_user_string(arg1))) {
7743             return -TARGET_EFAULT;
7744         }
7745         ret = get_errno(access(path(p), arg2));
7746         unlock_user(p, arg1, 0);
7747         return ret;
7748 #endif
7749 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7750     case TARGET_NR_faccessat:
7751         if (!(p = lock_user_string(arg2))) {
7752             return -TARGET_EFAULT;
7753         }
7754         ret = get_errno(faccessat(arg1, p, arg3, 0));
7755         unlock_user(p, arg2, 0);
7756         return ret;
7757 #endif
7758 #ifdef TARGET_NR_nice /* not on alpha */
7759     case TARGET_NR_nice:
7760         return get_errno(nice(arg1));
7761 #endif
7762     case TARGET_NR_sync:
7763         sync();
7764         return 0;
7765 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7766     case TARGET_NR_syncfs:
7767         return get_errno(syncfs(arg1));
7768 #endif
7769     case TARGET_NR_kill:
7770         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7771 #ifdef TARGET_NR_rename
7772     case TARGET_NR_rename:
7773         {
7774             void *p2;
7775             p = lock_user_string(arg1);
7776             p2 = lock_user_string(arg2);
7777             if (!p || !p2)
7778                 ret = -TARGET_EFAULT;
7779             else
7780                 ret = get_errno(rename(p, p2));
7781             unlock_user(p2, arg2, 0);
7782             unlock_user(p, arg1, 0);
7783         }
7784         return ret;
7785 #endif
7786 #if defined(TARGET_NR_renameat)
7787     case TARGET_NR_renameat:
7788         {
7789             void *p2;
7790             p  = lock_user_string(arg2);
7791             p2 = lock_user_string(arg4);
7792             if (!p || !p2)
7793                 ret = -TARGET_EFAULT;
7794             else
7795                 ret = get_errno(renameat(arg1, p, arg3, p2));
7796             unlock_user(p2, arg4, 0);
7797             unlock_user(p, arg2, 0);
7798         }
7799         return ret;
7800 #endif
7801 #if defined(TARGET_NR_renameat2)
7802     case TARGET_NR_renameat2:
7803         {
7804             void *p2;
7805             p  = lock_user_string(arg2);
7806             p2 = lock_user_string(arg4);
7807             if (!p || !p2) {
7808                 ret = -TARGET_EFAULT;
7809             } else {
7810                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7811             }
7812             unlock_user(p2, arg4, 0);
7813             unlock_user(p, arg2, 0);
7814         }
7815         return ret;
7816 #endif
7817 #ifdef TARGET_NR_mkdir
7818     case TARGET_NR_mkdir:
7819         if (!(p = lock_user_string(arg1)))
7820             return -TARGET_EFAULT;
7821         ret = get_errno(mkdir(p, arg2));
7822         unlock_user(p, arg1, 0);
7823         return ret;
7824 #endif
7825 #if defined(TARGET_NR_mkdirat)
7826     case TARGET_NR_mkdirat:
7827         if (!(p = lock_user_string(arg2)))
7828             return -TARGET_EFAULT;
7829         ret = get_errno(mkdirat(arg1, p, arg3));
7830         unlock_user(p, arg2, 0);
7831         return ret;
7832 #endif
7833 #ifdef TARGET_NR_rmdir
7834     case TARGET_NR_rmdir:
7835         if (!(p = lock_user_string(arg1)))
7836             return -TARGET_EFAULT;
7837         ret = get_errno(rmdir(p));
7838         unlock_user(p, arg1, 0);
7839         return ret;
7840 #endif
7841     case TARGET_NR_dup:
7842         ret = get_errno(dup(arg1));
7843         if (ret >= 0) {
7844             fd_trans_dup(arg1, ret);
7845         }
7846         return ret;
7847 #ifdef TARGET_NR_pipe
7848     case TARGET_NR_pipe:
7849         return do_pipe(cpu_env, arg1, 0, 0);
7850 #endif
7851 #ifdef TARGET_NR_pipe2
7852     case TARGET_NR_pipe2:
7853         return do_pipe(cpu_env, arg1,
7854                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7855 #endif
7856     case TARGET_NR_times:
7857         {
7858             struct target_tms *tmsp;
7859             struct tms tms;
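                 /* Both the tms fields and the return value are clock ticks,
                  * so each is converted with host_to_target_clock_t(). */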
7860             ret = get_errno(times(&tms));
7861             if (arg1) {
7862                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7863                 if (!tmsp)
7864                     return -TARGET_EFAULT;
7865                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7866                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7867                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7868                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7869             }
7870             if (!is_error(ret))
7871                 ret = host_to_target_clock_t(ret);
7872         }
7873         return ret;
7874     case TARGET_NR_acct:
7875         if (arg1 == 0) {
7876             ret = get_errno(acct(NULL));
7877         } else {
7878             if (!(p = lock_user_string(arg1))) {
7879                 return -TARGET_EFAULT;
7880             }
7881             ret = get_errno(acct(path(p)));
7882             unlock_user(p, arg1, 0);
7883         }
7884         return ret;
7885 #ifdef TARGET_NR_umount2
7886     case TARGET_NR_umount2:
7887         if (!(p = lock_user_string(arg1)))
7888             return -TARGET_EFAULT;
7889         ret = get_errno(umount2(p, arg2));
7890         unlock_user(p, arg1, 0);
7891         return ret;
7892 #endif
7893     case TARGET_NR_ioctl:
7894         return do_ioctl(arg1, arg2, arg3);
7895 #ifdef TARGET_NR_fcntl
7896     case TARGET_NR_fcntl:
7897         return do_fcntl(arg1, arg2, arg3);
7898 #endif
7899     case TARGET_NR_setpgid:
7900         return get_errno(setpgid(arg1, arg2));
7901     case TARGET_NR_umask:
7902         return get_errno(umask(arg1));
7903     case TARGET_NR_chroot:
7904         if (!(p = lock_user_string(arg1)))
7905             return -TARGET_EFAULT;
7906         ret = get_errno(chroot(p));
7907         unlock_user(p, arg1, 0);
7908         return ret;
7909 #ifdef TARGET_NR_dup2
7910     case TARGET_NR_dup2:
7911         ret = get_errno(dup2(arg1, arg2));
7912         if (ret >= 0) {
7913             fd_trans_dup(arg1, arg2);
7914         }
7915         return ret;
7916 #endif
7917 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7918     case TARGET_NR_dup3:
7919     {
7920         int host_flags;
7921 
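             /* dup3() accepts only O_CLOEXEC; reject any other flag bits. */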
7922         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7923             return -TARGET_EINVAL;
7924         }
7925         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7926         ret = get_errno(dup3(arg1, arg2, host_flags));
7927         if (ret >= 0) {
7928             fd_trans_dup(arg1, arg2);
7929         }
7930         return ret;
7931     }
7932 #endif
7933 #ifdef TARGET_NR_getppid /* not on alpha */
7934     case TARGET_NR_getppid:
7935         return get_errno(getppid());
7936 #endif
7937 #ifdef TARGET_NR_getpgrp
7938     case TARGET_NR_getpgrp:
7939         return get_errno(getpgrp());
7940 #endif
7941     case TARGET_NR_setsid:
7942         return get_errno(setsid());
7943 #ifdef TARGET_NR_sigaction
7944     case TARGET_NR_sigaction:
7945         {
7946 #if defined(TARGET_ALPHA)
7947             struct target_sigaction act, oact, *pact = 0;
7948             struct target_old_sigaction *old_act;
7949             if (arg2) {
7950                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7951                     return -TARGET_EFAULT;
7952                 act._sa_handler = old_act->_sa_handler;
7953                 target_siginitset(&act.sa_mask, old_act->sa_mask);
7954                 act.sa_flags = old_act->sa_flags;
7955                 act.sa_restorer = 0;
7956                 unlock_user_struct(old_act, arg2, 0);
7957                 pact = &act;
7958             }
7959             ret = get_errno(do_sigaction(arg1, pact, &oact));
7960             if (!is_error(ret) && arg3) {
7961                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7962                     return -TARGET_EFAULT;
7963                 old_act->_sa_handler = oact._sa_handler;
7964                 old_act->sa_mask = oact.sa_mask.sig[0];
7965                 old_act->sa_flags = oact.sa_flags;
7966                 unlock_user_struct(old_act, arg3, 1);
7967             }
7968 #elif defined(TARGET_MIPS)
7969             struct target_sigaction act, oact, *pact, *old_act;
7970 
7971             if (arg2) {
7972                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7973                     return -TARGET_EFAULT;
7974                 act._sa_handler = old_act->_sa_handler;
7975                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7976                 act.sa_flags = old_act->sa_flags;
7977                 unlock_user_struct(old_act, arg2, 0);
7978                 pact = &act;
7979             } else {
7980                 pact = NULL;
7981             }
7982 
7983             ret = get_errno(do_sigaction(arg1, pact, &oact));
7984 
7985             if (!is_error(ret) && arg3) {
7986                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7987                     return -TARGET_EFAULT;
7988                 old_act->_sa_handler = oact._sa_handler;
7989                 old_act->sa_flags = oact.sa_flags;
7990                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7991                 old_act->sa_mask.sig[1] = 0;
7992                 old_act->sa_mask.sig[2] = 0;
7993                 old_act->sa_mask.sig[3] = 0;
7994                 unlock_user_struct(old_act, arg3, 1);
7995             }
7996 #else
7997             struct target_old_sigaction *old_act;
7998             struct target_sigaction act, oact, *pact;
7999             if (arg2) {
8000                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8001                     return -TARGET_EFAULT;
8002                 act._sa_handler = old_act->_sa_handler;
8003                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8004                 act.sa_flags = old_act->sa_flags;
8005                 act.sa_restorer = old_act->sa_restorer;
8006 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8007                 act.ka_restorer = 0;
8008 #endif
8009                 unlock_user_struct(old_act, arg2, 0);
8010                 pact = &act;
8011             } else {
8012                 pact = NULL;
8013             }
8014             ret = get_errno(do_sigaction(arg1, pact, &oact));
8015             if (!is_error(ret) && arg3) {
8016                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8017                     return -TARGET_EFAULT;
8018                 old_act->_sa_handler = oact._sa_handler;
8019                 old_act->sa_mask = oact.sa_mask.sig[0];
8020                 old_act->sa_flags = oact.sa_flags;
8021                 old_act->sa_restorer = oact.sa_restorer;
8022                 unlock_user_struct(old_act, arg3, 1);
8023             }
8024 #endif
8025         }
8026         return ret;
8027 #endif
8028     case TARGET_NR_rt_sigaction:
8029         {
8030 #if defined(TARGET_ALPHA)
8031             /* For Alpha and SPARC this is a 5 argument syscall, with
8032              * a 'restorer' parameter which must be copied into the
8033              * sa_restorer field of the sigaction struct.
8034              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8035              * and arg5 is the sigsetsize.
8036              * Alpha also has a separate rt_sigaction struct that it uses
8037              * here; SPARC uses the usual sigaction struct.
8038              */
8039             struct target_rt_sigaction *rt_act;
8040             struct target_sigaction act, oact, *pact = 0;
8041 
8042             if (arg4 != sizeof(target_sigset_t)) {
8043                 return -TARGET_EINVAL;
8044             }
8045             if (arg2) {
8046                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8047                     return -TARGET_EFAULT;
8048                 act._sa_handler = rt_act->_sa_handler;
8049                 act.sa_mask = rt_act->sa_mask;
8050                 act.sa_flags = rt_act->sa_flags;
8051                 act.sa_restorer = arg5;
8052                 unlock_user_struct(rt_act, arg2, 0);
8053                 pact = &act;
8054             }
8055             ret = get_errno(do_sigaction(arg1, pact, &oact));
8056             if (!is_error(ret) && arg3) {
8057                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8058                     return -TARGET_EFAULT;
8059                 rt_act->_sa_handler = oact._sa_handler;
8060                 rt_act->sa_mask = oact.sa_mask;
8061                 rt_act->sa_flags = oact.sa_flags;
8062                 unlock_user_struct(rt_act, arg3, 1);
8063             }
8064 #else
8065 #ifdef TARGET_SPARC
8066             target_ulong restorer = arg4;
8067             target_ulong sigsetsize = arg5;
8068 #else
8069             target_ulong sigsetsize = arg4;
8070 #endif
8071             struct target_sigaction *act;
8072             struct target_sigaction *oact;
8073 
8074             if (sigsetsize != sizeof(target_sigset_t)) {
8075                 return -TARGET_EINVAL;
8076             }
8077             if (arg2) {
8078                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8079                     return -TARGET_EFAULT;
8080                 }
8081 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8082                 act->ka_restorer = restorer;
8083 #endif
8084             } else {
8085                 act = NULL;
8086             }
8087             if (arg3) {
8088                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8089                     ret = -TARGET_EFAULT;
8090                     goto rt_sigaction_fail;
8091                 }
8092             } else
8093                 oact = NULL;
8094             ret = get_errno(do_sigaction(arg1, act, oact));
8095         rt_sigaction_fail:
8096             if (act)
8097                 unlock_user_struct(act, arg2, 0);
8098             if (oact)
8099                 unlock_user_struct(oact, arg3, 1);
8100 #endif
8101         }
8102         return ret;
8103 #ifdef TARGET_NR_sgetmask /* not on alpha */
8104     case TARGET_NR_sgetmask:
8105         {
8106             sigset_t cur_set;
8107             abi_ulong target_set;
8108             ret = do_sigprocmask(0, NULL, &cur_set);
8109             if (!ret) {
8110                 host_to_target_old_sigset(&target_set, &cur_set);
8111                 ret = target_set;
8112             }
8113         }
8114         return ret;
8115 #endif
8116 #ifdef TARGET_NR_ssetmask /* not on alpha */
8117     case TARGET_NR_ssetmask:
8118         {
8119             sigset_t set, oset;
8120             abi_ulong target_set = arg1;
8121             target_to_host_old_sigset(&set, &target_set);
8122             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8123             if (!ret) {
8124                 host_to_target_old_sigset(&target_set, &oset);
8125                 ret = target_set;
8126             }
8127         }
8128         return ret;
8129 #endif
8130 #ifdef TARGET_NR_sigprocmask
8131     case TARGET_NR_sigprocmask:
8132         {
8133 #if defined(TARGET_ALPHA)
8134             sigset_t set, oldset;
8135             abi_ulong mask;
8136             int how;
8137 
8138             switch (arg1) {
8139             case TARGET_SIG_BLOCK:
8140                 how = SIG_BLOCK;
8141                 break;
8142             case TARGET_SIG_UNBLOCK:
8143                 how = SIG_UNBLOCK;
8144                 break;
8145             case TARGET_SIG_SETMASK:
8146                 how = SIG_SETMASK;
8147                 break;
8148             default:
8149                 return -TARGET_EINVAL;
8150             }
8151             mask = arg2;
8152             target_to_host_old_sigset(&set, &mask);
8153 
8154             ret = do_sigprocmask(how, &set, &oldset);
8155             if (!is_error(ret)) {
8156                 host_to_target_old_sigset(&mask, &oldset);
8157                 ret = mask;
8158                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8159             }
8160 #else
8161             sigset_t set, oldset, *set_ptr;
8162             int how;
8163 
8164             if (arg2) {
8165                 switch (arg1) {
8166                 case TARGET_SIG_BLOCK:
8167                     how = SIG_BLOCK;
8168                     break;
8169                 case TARGET_SIG_UNBLOCK:
8170                     how = SIG_UNBLOCK;
8171                     break;
8172                 case TARGET_SIG_SETMASK:
8173                     how = SIG_SETMASK;
8174                     break;
8175                 default:
8176                     return -TARGET_EINVAL;
8177                 }
8178                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8179                     return -TARGET_EFAULT;
8180                 target_to_host_old_sigset(&set, p);
8181                 unlock_user(p, arg2, 0);
8182                 set_ptr = &set;
8183             } else {
8184                 how = 0;
8185                 set_ptr = NULL;
8186             }
8187             ret = do_sigprocmask(how, set_ptr, &oldset);
8188             if (!is_error(ret) && arg3) {
8189                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8190                     return -TARGET_EFAULT;
8191                 host_to_target_old_sigset(p, &oldset);
8192                 unlock_user(p, arg3, sizeof(target_sigset_t));
8193             }
8194 #endif
8195         }
8196         return ret;
8197 #endif
8198     case TARGET_NR_rt_sigprocmask:
8199         {
8200             int how = arg1;
8201             sigset_t set, oldset, *set_ptr;
8202 
8203             if (arg4 != sizeof(target_sigset_t)) {
8204                 return -TARGET_EINVAL;
8205             }
8206 
8207             if (arg2) {
8208                 switch(how) {
8209                 case TARGET_SIG_BLOCK:
8210                     how = SIG_BLOCK;
8211                     break;
8212                 case TARGET_SIG_UNBLOCK:
8213                     how = SIG_UNBLOCK;
8214                     break;
8215                 case TARGET_SIG_SETMASK:
8216                     how = SIG_SETMASK;
8217                     break;
8218                 default:
8219                     return -TARGET_EINVAL;
8220                 }
8221                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8222                     return -TARGET_EFAULT;
8223                 target_to_host_sigset(&set, p);
8224                 unlock_user(p, arg2, 0);
8225                 set_ptr = &set;
8226             } else {
8227                 how = 0;
8228                 set_ptr = NULL;
8229             }
8230             ret = do_sigprocmask(how, set_ptr, &oldset);
8231             if (!is_error(ret) && arg3) {
8232                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8233                     return -TARGET_EFAULT;
8234                 host_to_target_sigset(p, &oldset);
8235                 unlock_user(p, arg3, sizeof(target_sigset_t));
8236             }
8237         }
8238         return ret;
8239 #ifdef TARGET_NR_sigpending
8240     case TARGET_NR_sigpending:
8241         {
8242             sigset_t set;
8243             ret = get_errno(sigpending(&set));
8244             if (!is_error(ret)) {
8245                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8246                     return -TARGET_EFAULT;
8247                 host_to_target_old_sigset(p, &set);
8248                 unlock_user(p, arg1, sizeof(target_sigset_t));
8249             }
8250         }
8251         return ret;
8252 #endif
8253     case TARGET_NR_rt_sigpending:
8254         {
8255             sigset_t set;
8256 
8257             /* Yes, this check is >, not != like most. We follow the kernel's
8258              * logic and it does it like this because it implements
8259              * NR_sigpending through the same code path, and in that case
8260              * the old_sigset_t is smaller in size.
8261              */
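                 /* (The old-style sigset is a single abi_ulong, so on 32-bit
                  * ABIs it is smaller than the full target_sigset_t.) */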
8262             if (arg2 > sizeof(target_sigset_t)) {
8263                 return -TARGET_EINVAL;
8264             }
8265 
8266             ret = get_errno(sigpending(&set));
8267             if (!is_error(ret)) {
8268                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8269                     return -TARGET_EFAULT;
8270                 host_to_target_sigset(p, &set);
8271                 unlock_user(p, arg1, sizeof(target_sigset_t));
8272             }
8273         }
8274         return ret;
8275 #ifdef TARGET_NR_sigsuspend
8276     case TARGET_NR_sigsuspend:
8277         {
8278             TaskState *ts = cpu->opaque;
8279 #if defined(TARGET_ALPHA)
8280             abi_ulong mask = arg1;
8281             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8282 #else
8283             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8284                 return -TARGET_EFAULT;
8285             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8286             unlock_user(p, arg1, 0);
8287 #endif
8288             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8289                                                SIGSET_T_SIZE));
8290             if (ret != -TARGET_ERESTARTSYS) {
8291                 ts->in_sigsuspend = 1;
8292             }
8293         }
8294         return ret;
8295 #endif
8296     case TARGET_NR_rt_sigsuspend:
8297         {
8298             TaskState *ts = cpu->opaque;
8299 
8300             if (arg2 != sizeof(target_sigset_t)) {
8301                 return -TARGET_EINVAL;
8302             }
8303             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8304                 return -TARGET_EFAULT;
8305             target_to_host_sigset(&ts->sigsuspend_mask, p);
8306             unlock_user(p, arg1, 0);
8307             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8308                                                SIGSET_T_SIZE));
8309             if (ret != -TARGET_ERESTARTSYS) {
8310                 ts->in_sigsuspend = 1;
8311             }
8312         }
8313         return ret;
8314     case TARGET_NR_rt_sigtimedwait:
8315         {
8316             sigset_t set;
8317             struct timespec uts, *puts;
8318             siginfo_t uinfo;
8319 
8320             if (arg4 != sizeof(target_sigset_t)) {
8321                 return -TARGET_EINVAL;
8322             }
8323 
8324             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8325                 return -TARGET_EFAULT;
8326             target_to_host_sigset(&set, p);
8327             unlock_user(p, arg1, 0);
8328             if (arg3) {
8329                 puts = &uts;
8330                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8331             } else {
8332                 puts = NULL;
8333             }
8334             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8335                                                  SIGSET_T_SIZE));
8336             if (!is_error(ret)) {
8337                 if (arg2) {
8338                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8339                                   0);
8340                     if (!p) {
8341                         return -TARGET_EFAULT;
8342                     }
8343                     host_to_target_siginfo(p, &uinfo);
8344                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8345                 }
8346                 ret = host_to_target_signal(ret);
8347             }
8348         }
8349         return ret;
8350     case TARGET_NR_rt_sigqueueinfo:
8351         {
8352             siginfo_t uinfo;
8353 
8354             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8355             if (!p) {
8356                 return -TARGET_EFAULT;
8357             }
8358             target_to_host_siginfo(&uinfo, p);
8359             unlock_user(p, arg3, 0);
8360             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8361         }
8362         return ret;
8363     case TARGET_NR_rt_tgsigqueueinfo:
8364         {
8365             siginfo_t uinfo;
8366 
8367             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8368             if (!p) {
8369                 return -TARGET_EFAULT;
8370             }
8371             target_to_host_siginfo(&uinfo, p);
8372             unlock_user(p, arg4, 0);
8373             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8374         }
8375         return ret;
8376 #ifdef TARGET_NR_sigreturn
8377     case TARGET_NR_sigreturn:
8378         if (block_signals()) {
8379             return -TARGET_ERESTARTSYS;
8380         }
8381         return do_sigreturn(cpu_env);
8382 #endif
8383     case TARGET_NR_rt_sigreturn:
8384         if (block_signals()) {
8385             return -TARGET_ERESTARTSYS;
8386         }
8387         return do_rt_sigreturn(cpu_env);
8388     case TARGET_NR_sethostname:
8389         if (!(p = lock_user_string(arg1)))
8390             return -TARGET_EFAULT;
8391         ret = get_errno(sethostname(p, arg2));
8392         unlock_user(p, arg1, 0);
8393         return ret;
8394 #ifdef TARGET_NR_setrlimit
8395     case TARGET_NR_setrlimit:
8396         {
8397             int resource = target_to_host_resource(arg1);
8398             struct target_rlimit *target_rlim;
8399             struct rlimit rlim;
8400             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8401                 return -TARGET_EFAULT;
8402             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8403             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8404             unlock_user_struct(target_rlim, arg2, 0);
8405             /*
8406              * If we just passed through resource limit settings for memory then
8407              * they would also apply to QEMU's own allocations, and QEMU will
8408              * crash or hang or die if its allocations fail. Ideally we would
8409              * track the guest allocations in QEMU and apply the limits ourselves.
8410              * For now, just tell the guest the call succeeded but don't actually
8411              * limit anything.
8412              */
8413             if (resource != RLIMIT_AS &&
8414                 resource != RLIMIT_DATA &&
8415                 resource != RLIMIT_STACK) {
8416                 return get_errno(setrlimit(resource, &rlim));
8417             } else {
8418                 return 0;
8419             }
8420         }
8421 #endif
8422 #ifdef TARGET_NR_getrlimit
8423     case TARGET_NR_getrlimit:
8424         {
8425             int resource = target_to_host_resource(arg1);
8426             struct target_rlimit *target_rlim;
8427             struct rlimit rlim;
8428 
8429             ret = get_errno(getrlimit(resource, &rlim));
8430             if (!is_error(ret)) {
8431                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8432                     return -TARGET_EFAULT;
8433                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8434                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8435                 unlock_user_struct(target_rlim, arg2, 1);
8436             }
8437         }
8438         return ret;
8439 #endif
8440     case TARGET_NR_getrusage:
8441         {
8442             struct rusage rusage;
8443             ret = get_errno(getrusage(arg1, &rusage));
8444             if (!is_error(ret)) {
8445                 ret = host_to_target_rusage(arg2, &rusage);
8446             }
8447         }
8448         return ret;
8449     case TARGET_NR_gettimeofday:
8450         {
8451             struct timeval tv;
8452             ret = get_errno(gettimeofday(&tv, NULL));
8453             if (!is_error(ret)) {
8454                 if (copy_to_user_timeval(arg1, &tv))
8455                     return -TARGET_EFAULT;
8456             }
8457         }
8458         return ret;
8459     case TARGET_NR_settimeofday:
8460         {
8461             struct timeval tv, *ptv = NULL;
8462             struct timezone tz, *ptz = NULL;
8463 
8464             if (arg1) {
8465                 if (copy_from_user_timeval(&tv, arg1)) {
8466                     return -TARGET_EFAULT;
8467                 }
8468                 ptv = &tv;
8469             }
8470 
8471             if (arg2) {
8472                 if (copy_from_user_timezone(&tz, arg2)) {
8473                     return -TARGET_EFAULT;
8474                 }
8475                 ptz = &tz;
8476             }
8477 
8478             return get_errno(settimeofday(ptv, ptz));
8479         }
8480 #if defined(TARGET_NR_select)
8481     case TARGET_NR_select:
8482 #if defined(TARGET_WANT_NI_OLD_SELECT)
8483         /* some architectures used to have old_select here
8484          * but now return ENOSYS for it.
8485          */
8486         ret = -TARGET_ENOSYS;
8487 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8488         ret = do_old_select(arg1);
8489 #else
8490         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8491 #endif
8492         return ret;
8493 #endif
8494 #ifdef TARGET_NR_pselect6
8495     case TARGET_NR_pselect6:
8496         {
8497             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8498             fd_set rfds, wfds, efds;
8499             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8500             struct timespec ts, *ts_ptr;
8501 
8502             /*
8503              * The 6th arg is actually two args smashed together,
8504              * so we cannot use the C library.
8505              */
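                 /*
                  * In guest memory arg6 points at two consecutive abi_ulongs:
                  * a guest pointer to the sigset followed by the sigset size;
                  * they are unpacked below.
                  */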
8506             sigset_t set;
8507             struct {
8508                 sigset_t *set;
8509                 size_t size;
8510             } sig, *sig_ptr;
8511 
8512             abi_ulong arg_sigset, arg_sigsize, *arg7;
8513             target_sigset_t *target_sigset;
8514 
8515             n = arg1;
8516             rfd_addr = arg2;
8517             wfd_addr = arg3;
8518             efd_addr = arg4;
8519             ts_addr = arg5;
8520 
8521             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8522             if (ret) {
8523                 return ret;
8524             }
8525             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8526             if (ret) {
8527                 return ret;
8528             }
8529             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8530             if (ret) {
8531                 return ret;
8532             }
8533 
8534             /*
8535              * This takes a timespec, and not a timeval, so we cannot
8536              * use the do_select() helper ...
8537              */
8538             if (ts_addr) {
8539                 if (target_to_host_timespec(&ts, ts_addr)) {
8540                     return -TARGET_EFAULT;
8541                 }
8542                 ts_ptr = &ts;
8543             } else {
8544                 ts_ptr = NULL;
8545             }
8546 
8547             /* Extract the two packed args for the sigset */
8548             if (arg6) {
8549                 sig_ptr = &sig;
8550                 sig.size = SIGSET_T_SIZE;
8551 
8552                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8553                 if (!arg7) {
8554                     return -TARGET_EFAULT;
8555                 }
8556                 arg_sigset = tswapal(arg7[0]);
8557                 arg_sigsize = tswapal(arg7[1]);
8558                 unlock_user(arg7, arg6, 0);
8559 
8560                 if (arg_sigset) {
8561                     sig.set = &set;
8562                     if (arg_sigsize != sizeof(*target_sigset)) {
8563                         /* Like the kernel, we enforce correct size sigsets */
8564                         return -TARGET_EINVAL;
8565                     }
8566                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8567                                               sizeof(*target_sigset), 1);
8568                     if (!target_sigset) {
8569                         return -TARGET_EFAULT;
8570                     }
8571                     target_to_host_sigset(&set, target_sigset);
8572                     unlock_user(target_sigset, arg_sigset, 0);
8573                 } else {
8574                     sig.set = NULL;
8575                 }
8576             } else {
8577                 sig_ptr = NULL;
8578             }
8579 
8580             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8581                                           ts_ptr, sig_ptr));
8582 
8583             if (!is_error(ret)) {
8584                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8585                     return -TARGET_EFAULT;
8586                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8587                     return -TARGET_EFAULT;
8588                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8589                     return -TARGET_EFAULT;
8590 
8591                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8592                     return -TARGET_EFAULT;
8593             }
8594         }
8595         return ret;
8596 #endif
8597 #ifdef TARGET_NR_symlink
8598     case TARGET_NR_symlink:
8599         {
8600             void *p2;
8601             p = lock_user_string(arg1);
8602             p2 = lock_user_string(arg2);
8603             if (!p || !p2)
8604                 ret = -TARGET_EFAULT;
8605             else
8606                 ret = get_errno(symlink(p, p2));
8607             unlock_user(p2, arg2, 0);
8608             unlock_user(p, arg1, 0);
8609         }
8610         return ret;
8611 #endif
8612 #if defined(TARGET_NR_symlinkat)
8613     case TARGET_NR_symlinkat:
8614         {
8615             void *p2;
8616             p  = lock_user_string(arg1);
8617             p2 = lock_user_string(arg3);
8618             if (!p || !p2)
8619                 ret = -TARGET_EFAULT;
8620             else
8621                 ret = get_errno(symlinkat(p, arg2, p2));
8622             unlock_user(p2, arg3, 0);
8623             unlock_user(p, arg1, 0);
8624         }
8625         return ret;
8626 #endif
8627 #ifdef TARGET_NR_readlink
8628     case TARGET_NR_readlink:
8629         {
8630             void *p2;
8631             p = lock_user_string(arg1);
8632             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8633             if (!p || !p2) {
8634                 ret = -TARGET_EFAULT;
8635             } else if (!arg3) {
8636                 /* Zero-length buffer: reject it before the magic exe check below. */
8637                 ret = -TARGET_EINVAL;
8638             } else if (is_proc_myself((const char *)p, "exe")) {
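                     /*
                      * /proc/self/exe (and /proc/<pid>/exe for our own pid) is
                      * emulated so that the guest sees the path of the binary
                      * being run rather than the QEMU executable itself.
                      */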
8639                 char real[PATH_MAX], *temp;
8640                 temp = realpath(exec_path, real);
8641                 /* Return value is # of bytes that we wrote to the buffer. */
8642                 if (temp == NULL) {
8643                     ret = get_errno(-1);
8644                 } else {
8645                     /* Don't worry about sign mismatch as earlier mapping
8646                      * logic would have thrown a bad address error. */
8647                     ret = MIN(strlen(real), arg3);
8648                     /* We cannot NUL terminate the string. */
8649                     memcpy(p2, real, ret);
8650                 }
8651             } else {
8652                 ret = get_errno(readlink(path(p), p2, arg3));
8653             }
8654             unlock_user(p2, arg2, ret);
8655             unlock_user(p, arg1, 0);
8656         }
8657         return ret;
8658 #endif
8659 #if defined(TARGET_NR_readlinkat)
8660     case TARGET_NR_readlinkat:
8661         {
8662             void *p2;
8663             p  = lock_user_string(arg2);
8664             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8665             if (!p || !p2) {
8666                 ret = -TARGET_EFAULT;
8667             } else if (is_proc_myself((const char *)p, "exe")) {
8668                 char real[PATH_MAX], *temp;
8669                 temp = realpath(exec_path, real);
8670                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8671                 if (temp == NULL) {
                         ret = get_errno(-1);
                     } else {
                         /* As in the readlink case above: cap the length at
                          * the buffer size and do not NUL terminate. */
                         ret = MIN(strlen(real), arg4);
                         memcpy(p2, real, ret);
                     }
8673                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8674             }
8675             unlock_user(p2, arg3, ret);
8676             unlock_user(p, arg2, 0);
8677         }
8678         return ret;
8679 #endif
8680 #ifdef TARGET_NR_swapon
8681     case TARGET_NR_swapon:
8682         if (!(p = lock_user_string(arg1)))
8683             return -TARGET_EFAULT;
8684         ret = get_errno(swapon(p, arg2));
8685         unlock_user(p, arg1, 0);
8686         return ret;
8687 #endif
8688     case TARGET_NR_reboot:
8689         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8690             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; ignored otherwise */
8691             p = lock_user_string(arg4);
8692             if (!p) {
8693                 return -TARGET_EFAULT;
8694             }
8695             ret = get_errno(reboot(arg1, arg2, arg3, p));
8696             unlock_user(p, arg4, 0);
8697         } else {
8698             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8699         }
8700         return ret;
8701 #ifdef TARGET_NR_mmap
8702     case TARGET_NR_mmap:
8703 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8704     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8705     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8706     || defined(TARGET_S390X)
8707         {
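                 /*
                  * These targets use the old mmap calling convention: arg1 is
                  * a guest pointer to six abi_ulongs holding addr, len, prot,
                  * flags, fd and offset, unpacked here as v1..v6.
                  */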
8708             abi_ulong *v;
8709             abi_ulong v1, v2, v3, v4, v5, v6;
8710             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8711                 return -TARGET_EFAULT;
8712             v1 = tswapal(v[0]);
8713             v2 = tswapal(v[1]);
8714             v3 = tswapal(v[2]);
8715             v4 = tswapal(v[3]);
8716             v5 = tswapal(v[4]);
8717             v6 = tswapal(v[5]);
8718             unlock_user(v, arg1, 0);
8719             ret = get_errno(target_mmap(v1, v2, v3,
8720                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8721                                         v5, v6));
8722         }
8723 #else
8724         ret = get_errno(target_mmap(arg1, arg2, arg3,
8725                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8726                                     arg5,
8727                                     arg6));
8728 #endif
8729         return ret;
8730 #endif
8731 #ifdef TARGET_NR_mmap2
8732     case TARGET_NR_mmap2:
8733 #ifndef MMAP_SHIFT
8734 #define MMAP_SHIFT 12
8735 #endif
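             /*
              * mmap2 takes the file offset in units of 1 << MMAP_SHIFT
              * (normally 4096 bytes), so scale it back to a byte offset here.
              */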
8736         ret = target_mmap(arg1, arg2, arg3,
8737                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8738                           arg5, arg6 << MMAP_SHIFT);
8739         return get_errno(ret);
8740 #endif
8741     case TARGET_NR_munmap:
8742         return get_errno(target_munmap(arg1, arg2));
8743     case TARGET_NR_mprotect:
8744         {
8745             TaskState *ts = cpu->opaque;
8746             /* Special hack to detect libc making the stack executable.  */
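                 /*
                  * If so, clear PROT_GROWSDOWN and widen the range so that it
                  * reaches down to the recorded stack limit before the real
                  * mprotect below.
                  */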
8747             if ((arg3 & PROT_GROWSDOWN)
8748                 && arg1 >= ts->info->stack_limit
8749                 && arg1 <= ts->info->start_stack) {
8750                 arg3 &= ~PROT_GROWSDOWN;
8751                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8752                 arg1 = ts->info->stack_limit;
8753             }
8754         }
8755         return get_errno(target_mprotect(arg1, arg2, arg3));
8756 #ifdef TARGET_NR_mremap
8757     case TARGET_NR_mremap:
8758         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8759 #endif
8760         /* ??? msync/mlock/munlock are broken for softmmu.  */
8761 #ifdef TARGET_NR_msync
8762     case TARGET_NR_msync:
8763         return get_errno(msync(g2h(arg1), arg2, arg3));
8764 #endif
8765 #ifdef TARGET_NR_mlock
8766     case TARGET_NR_mlock:
8767         return get_errno(mlock(g2h(arg1), arg2));
8768 #endif
8769 #ifdef TARGET_NR_munlock
8770     case TARGET_NR_munlock:
8771         return get_errno(munlock(g2h(arg1), arg2));
8772 #endif
8773 #ifdef TARGET_NR_mlockall
8774     case TARGET_NR_mlockall:
8775         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8776 #endif
8777 #ifdef TARGET_NR_munlockall
8778     case TARGET_NR_munlockall:
8779         return get_errno(munlockall());
8780 #endif
8781 #ifdef TARGET_NR_truncate
8782     case TARGET_NR_truncate:
8783         if (!(p = lock_user_string(arg1)))
8784             return -TARGET_EFAULT;
8785         ret = get_errno(truncate(p, arg2));
8786         unlock_user(p, arg1, 0);
8787         return ret;
8788 #endif
8789 #ifdef TARGET_NR_ftruncate
8790     case TARGET_NR_ftruncate:
8791         return get_errno(ftruncate(arg1, arg2));
8792 #endif
8793     case TARGET_NR_fchmod:
8794         return get_errno(fchmod(arg1, arg2));
8795 #if defined(TARGET_NR_fchmodat)
8796     case TARGET_NR_fchmodat:
8797         if (!(p = lock_user_string(arg2)))
8798             return -TARGET_EFAULT;
8799         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8800         unlock_user(p, arg2, 0);
8801         return ret;
8802 #endif
8803     case TARGET_NR_getpriority:
8804         /* Note that negative values are valid for getpriority, so we must
8805            differentiate based on errno settings.  */
8806         errno = 0;
8807         ret = getpriority(arg1, arg2);
8808         if (ret == -1 && errno != 0) {
8809             return -host_to_target_errno(errno);
8810         }
8811 #ifdef TARGET_ALPHA
8812         /* Return value is the unbiased priority.  Signal no error.  */
8813         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8814 #else
8815         /* Return value is a biased priority to avoid negative numbers.  */
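             /* (This mirrors the kernel's sys_getpriority(), which likewise
              * returns 20 - nice so the result is never negative.) */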
8816         ret = 20 - ret;
8817 #endif
8818         return ret;
8819     case TARGET_NR_setpriority:
8820         return get_errno(setpriority(arg1, arg2, arg3));
8821 #ifdef TARGET_NR_statfs
8822     case TARGET_NR_statfs:
8823         if (!(p = lock_user_string(arg1))) {
8824             return -TARGET_EFAULT;
8825         }
8826         ret = get_errno(statfs(path(p), &stfs));
8827         unlock_user(p, arg1, 0);
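             /* fstatfs jumps to convert_statfs to reuse this conversion. */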
8828     convert_statfs:
8829         if (!is_error(ret)) {
8830             struct target_statfs *target_stfs;
8831 
8832             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8833                 return -TARGET_EFAULT;
8834             __put_user(stfs.f_type, &target_stfs->f_type);
8835             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8836             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8837             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8838             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8839             __put_user(stfs.f_files, &target_stfs->f_files);
8840             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8841             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8842             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8843             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8844             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8845 #ifdef _STATFS_F_FLAGS
8846             __put_user(stfs.f_flags, &target_stfs->f_flags);
8847 #else
8848             __put_user(0, &target_stfs->f_flags);
8849 #endif
8850             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8851             unlock_user_struct(target_stfs, arg2, 1);
8852         }
8853         return ret;
8854 #endif
8855 #ifdef TARGET_NR_fstatfs
8856     case TARGET_NR_fstatfs:
8857         ret = get_errno(fstatfs(arg1, &stfs));
8858         goto convert_statfs;
8859 #endif
8860 #ifdef TARGET_NR_statfs64
8861     case TARGET_NR_statfs64:
8862         if (!(p = lock_user_string(arg1))) {
8863             return -TARGET_EFAULT;
8864         }
8865         ret = get_errno(statfs(path(p), &stfs));
8866         unlock_user(p, arg1, 0);
8867     convert_statfs64:
8868         if (!is_error(ret)) {
8869             struct target_statfs64 *target_stfs;
8870 
8871             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8872                 return -TARGET_EFAULT;
8873             __put_user(stfs.f_type, &target_stfs->f_type);
8874             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8875             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8876             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8877             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8878             __put_user(stfs.f_files, &target_stfs->f_files);
8879             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8880             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8881             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8882             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8883             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8884             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8885             unlock_user_struct(target_stfs, arg3, 1);
8886         }
8887         return ret;
8888     case TARGET_NR_fstatfs64:
8889         ret = get_errno(fstatfs(arg1, &stfs));
8890         goto convert_statfs64;
8891 #endif
8892 #ifdef TARGET_NR_socketcall
8893     case TARGET_NR_socketcall:
8894         return do_socketcall(arg1, arg2);
8895 #endif
8896 #ifdef TARGET_NR_accept
8897     case TARGET_NR_accept:
8898         return do_accept4(arg1, arg2, arg3, 0);
8899 #endif
8900 #ifdef TARGET_NR_accept4
8901     case TARGET_NR_accept4:
8902         return do_accept4(arg1, arg2, arg3, arg4);
8903 #endif
8904 #ifdef TARGET_NR_bind
8905     case TARGET_NR_bind:
8906         return do_bind(arg1, arg2, arg3);
8907 #endif
8908 #ifdef TARGET_NR_connect
8909     case TARGET_NR_connect:
8910         return do_connect(arg1, arg2, arg3);
8911 #endif
8912 #ifdef TARGET_NR_getpeername
8913     case TARGET_NR_getpeername:
8914         return do_getpeername(arg1, arg2, arg3);
8915 #endif
8916 #ifdef TARGET_NR_getsockname
8917     case TARGET_NR_getsockname:
8918         return do_getsockname(arg1, arg2, arg3);
8919 #endif
8920 #ifdef TARGET_NR_getsockopt
8921     case TARGET_NR_getsockopt:
8922         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8923 #endif
8924 #ifdef TARGET_NR_listen
8925     case TARGET_NR_listen:
8926         return get_errno(listen(arg1, arg2));
8927 #endif
8928 #ifdef TARGET_NR_recv
8929     case TARGET_NR_recv:
8930         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8931 #endif
8932 #ifdef TARGET_NR_recvfrom
8933     case TARGET_NR_recvfrom:
8934         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8935 #endif
8936 #ifdef TARGET_NR_recvmsg
8937     case TARGET_NR_recvmsg:
8938         return do_sendrecvmsg(arg1, arg2, arg3, 0);
8939 #endif
8940 #ifdef TARGET_NR_send
8941     case TARGET_NR_send:
8942         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8943 #endif
8944 #ifdef TARGET_NR_sendmsg
8945     case TARGET_NR_sendmsg:
8946         return do_sendrecvmsg(arg1, arg2, arg3, 1);
8947 #endif
8948 #ifdef TARGET_NR_sendmmsg
8949     case TARGET_NR_sendmmsg:
8950         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8951     case TARGET_NR_recvmmsg:
8952         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8953 #endif
8954 #ifdef TARGET_NR_sendto
8955     case TARGET_NR_sendto:
8956         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8957 #endif
8958 #ifdef TARGET_NR_shutdown
8959     case TARGET_NR_shutdown:
8960         return get_errno(shutdown(arg1, arg2));
8961 #endif
8962 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8963     case TARGET_NR_getrandom:
8964         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8965         if (!p) {
8966             return -TARGET_EFAULT;
8967         }
8968         ret = get_errno(getrandom(p, arg2, arg3));
8969         unlock_user(p, arg1, ret);
8970         return ret;
8971 #endif
8972 #ifdef TARGET_NR_socket
8973     case TARGET_NR_socket:
8974         return do_socket(arg1, arg2, arg3);
8975 #endif
8976 #ifdef TARGET_NR_socketpair
8977     case TARGET_NR_socketpair:
8978         return do_socketpair(arg1, arg2, arg3, arg4);
8979 #endif
8980 #ifdef TARGET_NR_setsockopt
8981     case TARGET_NR_setsockopt:
8982         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8983 #endif
8984 #if defined(TARGET_NR_syslog)
8985     case TARGET_NR_syslog:
8986         {
8987             int len = arg3;
8988 
8989             switch (arg1) {
8990             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8991             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8992             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8993             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8994             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8995             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8996             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8997             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8998                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8999             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9000             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9001             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9002                 {
9003                     if (len < 0) {
9004                         return -TARGET_EINVAL;
9005                     }
9006                     if (len == 0) {
9007                         return 0;
9008                     }
9009                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9010                     if (!p) {
9011                         return -TARGET_EFAULT;
9012                     }
9013                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9014                     unlock_user(p, arg2, arg3);
9015                 }
9016                 return ret;
9017             default:
9018                 return -TARGET_EINVAL;
9019             }
9020         }
9021         break;
9022 #endif
9023     case TARGET_NR_setitimer:
9024         {
9025             struct itimerval value, ovalue, *pvalue;
9026 
9027             if (arg2) {
9028                 pvalue = &value;
9029                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9030                     || copy_from_user_timeval(&pvalue->it_value,
9031                                               arg2 + sizeof(struct target_timeval)))
9032                     return -TARGET_EFAULT;
9033             } else {
9034                 pvalue = NULL;
9035             }
9036             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9037             if (!is_error(ret) && arg3) {
9038                 if (copy_to_user_timeval(arg3,
9039                                          &ovalue.it_interval)
9040                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9041                                             &ovalue.it_value))
9042                     return -TARGET_EFAULT;
9043             }
9044         }
9045         return ret;
9046     case TARGET_NR_getitimer:
9047         {
9048             struct itimerval value;
9049 
9050             ret = get_errno(getitimer(arg1, &value));
9051             if (!is_error(ret) && arg2) {
9052                 if (copy_to_user_timeval(arg2,
9053                                          &value.it_interval)
9054                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9055                                             &value.it_value))
9056                     return -TARGET_EFAULT;
9057             }
9058         }
9059         return ret;
9060 #ifdef TARGET_NR_stat
9061     case TARGET_NR_stat:
9062         if (!(p = lock_user_string(arg1))) {
9063             return -TARGET_EFAULT;
9064         }
9065         ret = get_errno(stat(path(p), &st));
9066         unlock_user(p, arg1, 0);
9067         goto do_stat;
9068 #endif
9069 #ifdef TARGET_NR_lstat
9070     case TARGET_NR_lstat:
9071         if (!(p = lock_user_string(arg1))) {
9072             return -TARGET_EFAULT;
9073         }
9074         ret = get_errno(lstat(path(p), &st));
9075         unlock_user(p, arg1, 0);
9076         goto do_stat;
9077 #endif
9078 #ifdef TARGET_NR_fstat
9079     case TARGET_NR_fstat:
9080         {
9081             ret = get_errno(fstat(arg1, &st));
9082 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9083         do_stat:
9084 #endif
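                 /* stat and lstat jump to do_stat above so the conversion of
                  * the host struct stat into the guest target_stat is shared. */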
9085             if (!is_error(ret)) {
9086                 struct target_stat *target_st;
9087 
9088                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9089                     return -TARGET_EFAULT;
9090                 memset(target_st, 0, sizeof(*target_st));
9091                 __put_user(st.st_dev, &target_st->st_dev);
9092                 __put_user(st.st_ino, &target_st->st_ino);
9093                 __put_user(st.st_mode, &target_st->st_mode);
9094                 __put_user(st.st_uid, &target_st->st_uid);
9095                 __put_user(st.st_gid, &target_st->st_gid);
9096                 __put_user(st.st_nlink, &target_st->st_nlink);
9097                 __put_user(st.st_rdev, &target_st->st_rdev);
9098                 __put_user(st.st_size, &target_st->st_size);
9099                 __put_user(st.st_blksize, &target_st->st_blksize);
9100                 __put_user(st.st_blocks, &target_st->st_blocks);
9101                 __put_user(st.st_atime, &target_st->target_st_atime);
9102                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9103                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9104 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9105     defined(TARGET_STAT_HAVE_NSEC)
9106                 __put_user(st.st_atim.tv_nsec,
9107                            &target_st->target_st_atime_nsec);
9108                 __put_user(st.st_mtim.tv_nsec,
9109                            &target_st->target_st_mtime_nsec);
9110                 __put_user(st.st_ctim.tv_nsec,
9111                            &target_st->target_st_ctime_nsec);
9112 #endif
9113                 unlock_user_struct(target_st, arg2, 1);
9114             }
9115         }
9116         return ret;
9117 #endif
9118     case TARGET_NR_vhangup:
9119         return get_errno(vhangup());
9120 #ifdef TARGET_NR_syscall
9121     case TARGET_NR_syscall:
9122         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9123                           arg6, arg7, arg8, 0);
9124 #endif
9125     case TARGET_NR_wait4:
9126         {
9127             int status;
9128             abi_long status_ptr = arg2;
9129             struct rusage rusage, *rusage_ptr;
9130             abi_ulong target_rusage = arg4;
9131             abi_long rusage_err;
9132             if (target_rusage)
9133                 rusage_ptr = &rusage;
9134             else
9135                 rusage_ptr = NULL;
9136             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9137             if (!is_error(ret)) {
9138                 if (status_ptr && ret) {
9139                     status = host_to_target_waitstatus(status);
9140                     if (put_user_s32(status, status_ptr))
9141                         return -TARGET_EFAULT;
9142                 }
9143                 if (target_rusage) {
9144                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9145                     if (rusage_err) {
9146                         ret = rusage_err;
9147                     }
9148                 }
9149             }
9150         }
9151         return ret;
9152 #ifdef TARGET_NR_swapoff
9153     case TARGET_NR_swapoff:
9154         if (!(p = lock_user_string(arg1)))
9155             return -TARGET_EFAULT;
9156         ret = get_errno(swapoff(p));
9157         unlock_user(p, arg1, 0);
9158         return ret;
9159 #endif
9160     case TARGET_NR_sysinfo:
9161         {
9162             struct target_sysinfo *target_value;
9163             struct sysinfo value;
9164             ret = get_errno(sysinfo(&value));
9165             if (!is_error(ret) && arg1)
9166             {
9167                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9168                     return -TARGET_EFAULT;
9169                 __put_user(value.uptime, &target_value->uptime);
9170                 __put_user(value.loads[0], &target_value->loads[0]);
9171                 __put_user(value.loads[1], &target_value->loads[1]);
9172                 __put_user(value.loads[2], &target_value->loads[2]);
9173                 __put_user(value.totalram, &target_value->totalram);
9174                 __put_user(value.freeram, &target_value->freeram);
9175                 __put_user(value.sharedram, &target_value->sharedram);
9176                 __put_user(value.bufferram, &target_value->bufferram);
9177                 __put_user(value.totalswap, &target_value->totalswap);
9178                 __put_user(value.freeswap, &target_value->freeswap);
9179                 __put_user(value.procs, &target_value->procs);
9180                 __put_user(value.totalhigh, &target_value->totalhigh);
9181                 __put_user(value.freehigh, &target_value->freehigh);
9182                 __put_user(value.mem_unit, &target_value->mem_unit);
9183                 unlock_user_struct(target_value, arg1, 1);
9184             }
9185         }
9186         return ret;
9187 #ifdef TARGET_NR_ipc
9188     case TARGET_NR_ipc:
9189         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9190 #endif
9191 #ifdef TARGET_NR_semget
9192     case TARGET_NR_semget:
9193         return get_errno(semget(arg1, arg2, arg3));
9194 #endif
9195 #ifdef TARGET_NR_semop
9196     case TARGET_NR_semop:
9197         return do_semop(arg1, arg2, arg3);
9198 #endif
9199 #ifdef TARGET_NR_semctl
9200     case TARGET_NR_semctl:
9201         return do_semctl(arg1, arg2, arg3, arg4);
9202 #endif
9203 #ifdef TARGET_NR_msgctl
9204     case TARGET_NR_msgctl:
9205         return do_msgctl(arg1, arg2, arg3);
9206 #endif
9207 #ifdef TARGET_NR_msgget
9208     case TARGET_NR_msgget:
9209         return get_errno(msgget(arg1, arg2));
9210 #endif
9211 #ifdef TARGET_NR_msgrcv
9212     case TARGET_NR_msgrcv:
9213         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9214 #endif
9215 #ifdef TARGET_NR_msgsnd
9216     case TARGET_NR_msgsnd:
9217         return do_msgsnd(arg1, arg2, arg3, arg4);
9218 #endif
9219 #ifdef TARGET_NR_shmget
9220     case TARGET_NR_shmget:
9221         return get_errno(shmget(arg1, arg2, arg3));
9222 #endif
9223 #ifdef TARGET_NR_shmctl
9224     case TARGET_NR_shmctl:
9225         return do_shmctl(arg1, arg2, arg3);
9226 #endif
9227 #ifdef TARGET_NR_shmat
9228     case TARGET_NR_shmat:
9229         return do_shmat(cpu_env, arg1, arg2, arg3);
9230 #endif
9231 #ifdef TARGET_NR_shmdt
9232     case TARGET_NR_shmdt:
9233         return do_shmdt(arg1);
9234 #endif
9235     case TARGET_NR_fsync:
9236         return get_errno(fsync(arg1));
9237     case TARGET_NR_clone:
9238         /* Linux manages to have three different orderings for its
9239          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9240          * match the kernel's CONFIG_CLONE_* settings.
9241          * Microblaze is further special in that it uses a sixth
9242          * implicit argument to clone for the TLS pointer.
9243          */
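        /* Each branch below reorders the raw syscall arguments into the
         * order do_fork() expects, which follows the CLONE_BACKWARDS layout
         * (flags, newsp, parent_tidptr, tls, child_tidptr); e.g. the default
         * ordering differs only in swapping the TLS and child tid pointers.
         */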
9244 #if defined(TARGET_MICROBLAZE)
9245         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9246 #elif defined(TARGET_CLONE_BACKWARDS)
9247         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9248 #elif defined(TARGET_CLONE_BACKWARDS2)
9249         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9250 #else
9251         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9252 #endif
9253         return ret;
9254 #ifdef __NR_exit_group
9255         /* new thread calls */
9256     case TARGET_NR_exit_group:
9257         preexit_cleanup(cpu_env, arg1);
9258         return get_errno(exit_group(arg1));
9259 #endif
9260     case TARGET_NR_setdomainname:
9261         if (!(p = lock_user_string(arg1)))
9262             return -TARGET_EFAULT;
9263         ret = get_errno(setdomainname(p, arg2));
9264         unlock_user(p, arg1, 0);
9265         return ret;
9266     case TARGET_NR_uname:
9267         /* No need to transcode: new_utsname is plain char arrays, so the host Linux syscall fills it in directly. */
9268         {
9269             struct new_utsname * buf;
9270 
9271             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9272                 return -TARGET_EFAULT;
9273             ret = get_errno(sys_uname(buf));
9274             if (!is_error(ret)) {
9275                 /* Overwrite the native machine name with whatever is being
9276                    emulated. */
9277                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9278                           sizeof(buf->machine));
9279                 /* Allow the user to override the reported release.  */
9280                 if (qemu_uname_release && *qemu_uname_release) {
9281                     g_strlcpy(buf->release, qemu_uname_release,
9282                               sizeof(buf->release));
9283                 }
9284             }
9285             unlock_user_struct(buf, arg1, 1);
9286         }
9287         return ret;
9288 #ifdef TARGET_I386
9289     case TARGET_NR_modify_ldt:
9290         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9291 #if !defined(TARGET_X86_64)
9292     case TARGET_NR_vm86:
9293         return do_vm86(cpu_env, arg1, arg2);
9294 #endif
9295 #endif
9296     case TARGET_NR_adjtimex:
9297         {
9298             struct timex host_buf;
9299 
9300             if (target_to_host_timex(&host_buf, arg1) != 0) {
9301                 return -TARGET_EFAULT;
9302             }
9303             ret = get_errno(adjtimex(&host_buf));
9304             if (!is_error(ret)) {
9305                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9306                     return -TARGET_EFAULT;
9307                 }
9308             }
9309         }
9310         return ret;
9311 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9312     case TARGET_NR_clock_adjtime:
9313         {
9314             struct timex htx, *phtx = &htx;
9315 
9316             if (target_to_host_timex(phtx, arg2) != 0) {
9317                 return -TARGET_EFAULT;
9318             }
9319             ret = get_errno(clock_adjtime(arg1, phtx));
9320             if (!is_error(ret) && phtx) {
9321                 if (host_to_target_timex(arg2, phtx) != 0) {
9322                     return -TARGET_EFAULT;
9323                 }
9324             }
9325         }
9326         return ret;
9327 #endif
9328     case TARGET_NR_getpgid:
9329         return get_errno(getpgid(arg1));
9330     case TARGET_NR_fchdir:
9331         return get_errno(fchdir(arg1));
9332     case TARGET_NR_personality:
9333         return get_errno(personality(arg1));
9334 #ifdef TARGET_NR__llseek /* Not on alpha */
9335     case TARGET_NR__llseek:
9336         {
9337             int64_t res;
9338 #if !defined(__NR_llseek)
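            /* No host llseek (typically a 64-bit host): rebuild the 64-bit
               offset from the high (arg2) and low (arg3) halves and call
               lseek() directly. */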
9339             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9340             if (res == -1) {
9341                 ret = get_errno(res);
9342             } else {
9343                 ret = 0;
9344             }
9345 #else
9346             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9347 #endif
9348             if ((ret == 0) && put_user_s64(res, arg4)) {
9349                 return -TARGET_EFAULT;
9350             }
9351         }
9352         return ret;
9353 #endif
9354 #ifdef TARGET_NR_getdents
9355     case TARGET_NR_getdents:
9356 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9357 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9358         {
9359             struct target_dirent *target_dirp;
9360             struct linux_dirent *dirp;
9361             abi_long count = arg3;
9362 
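            /* The host linux_dirent layout (long d_ino/d_off) is wider than
               the 32-bit target layout in this configuration, so read into a
               bounce buffer and repack each record for the guest. */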
9363             dirp = g_try_malloc(count);
9364             if (!dirp) {
9365                 return -TARGET_ENOMEM;
9366             }
9367 
9368             ret = get_errno(sys_getdents(arg1, dirp, count));
9369             if (!is_error(ret)) {
9370                 struct linux_dirent *de;
9371                 struct target_dirent *tde;
9372                 int len = ret;
9373                 int reclen, treclen;
9374                 int count1, tnamelen;
9375 
9376                 count1 = 0;
9377                 de = dirp;
9378                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9379                     return -TARGET_EFAULT;
9380                 tde = target_dirp;
9381                 while (len > 0) {
9382                     reclen = de->d_reclen;
9383                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9384                     assert(tnamelen >= 0);
9385                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9386                     assert(count1 + treclen <= count);
9387                     tde->d_reclen = tswap16(treclen);
9388                     tde->d_ino = tswapal(de->d_ino);
9389                     tde->d_off = tswapal(de->d_off);
9390                     memcpy(tde->d_name, de->d_name, tnamelen);
9391                     de = (struct linux_dirent *)((char *)de + reclen);
9392                     len -= reclen;
9393                     tde = (struct target_dirent *)((char *)tde + treclen);
9394                     count1 += treclen;
9395                 }
9396                 ret = count1;
9397                 unlock_user(target_dirp, arg2, ret);
9398             }
9399             g_free(dirp);
9400         }
9401 #else
9402         {
9403             struct linux_dirent *dirp;
9404             abi_long count = arg3;
9405 
9406             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9407                 return -TARGET_EFAULT;
9408             ret = get_errno(sys_getdents(arg1, dirp, count));
9409             if (!is_error(ret)) {
9410                 struct linux_dirent *de;
9411                 int len = ret;
9412                 int reclen;
9413                 de = dirp;
9414                 while (len > 0) {
9415                     reclen = de->d_reclen;
9416                     if (reclen > len)
9417                         break;
9418                     de->d_reclen = tswap16(reclen);
9419                     tswapls(&de->d_ino);
9420                     tswapls(&de->d_off);
9421                     de = (struct linux_dirent *)((char *)de + reclen);
9422                     len -= reclen;
9423                 }
9424             }
9425             unlock_user(dirp, arg2, ret);
9426         }
9427 #endif
9428 #else
9429         /* Implement getdents in terms of getdents64 */
9430         {
9431             struct linux_dirent64 *dirp;
9432             abi_long count = arg3;
9433 
9434             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9435             if (!dirp) {
9436                 return -TARGET_EFAULT;
9437             }
9438             ret = get_errno(sys_getdents64(arg1, dirp, count));
9439             if (!is_error(ret)) {
9440                 /* Convert the dirent64 structs to target dirent.  We do this
9441                  * in-place, since we can guarantee that a target_dirent is no
9442                  * larger than a dirent64; however this means we have to be
9443                  * careful to read everything before writing in the new format.
9444                  */
9445                 struct linux_dirent64 *de;
9446                 struct target_dirent *tde;
9447                 int len = ret;
9448                 int tlen = 0;
9449 
9450                 de = dirp;
9451                 tde = (struct target_dirent *)dirp;
9452                 while (len > 0) {
9453                     int namelen, treclen;
9454                     int reclen = de->d_reclen;
9455                     uint64_t ino = de->d_ino;
9456                     int64_t off = de->d_off;
9457                     uint8_t type = de->d_type;
9458 
9459                     namelen = strlen(de->d_name);
9460                     treclen = offsetof(struct target_dirent, d_name)
9461                         + namelen + 2;
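                    /* The +2 covers d_name's trailing NUL plus the d_type
                       byte stored in the last byte of the record (see the
                       comment below). */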
9462                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9463 
9464                     memmove(tde->d_name, de->d_name, namelen + 1);
9465                     tde->d_ino = tswapal(ino);
9466                     tde->d_off = tswapal(off);
9467                     tde->d_reclen = tswap16(treclen);
9468                     /* The target_dirent type is in what was formerly a padding
9469                      * byte at the end of the structure:
9470                      */
9471                     *(((char *)tde) + treclen - 1) = type;
9472 
9473                     de = (struct linux_dirent64 *)((char *)de + reclen);
9474                     tde = (struct target_dirent *)((char *)tde + treclen);
9475                     len -= reclen;
9476                     tlen += treclen;
9477                 }
9478                 ret = tlen;
9479             }
9480             unlock_user(dirp, arg2, ret);
9481         }
9482 #endif
9483         return ret;
9484 #endif /* TARGET_NR_getdents */
9485 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9486     case TARGET_NR_getdents64:
9487         {
9488             struct linux_dirent64 *dirp;
9489             abi_long count = arg3;
9490             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9491                 return -TARGET_EFAULT;
9492             ret = get_errno(sys_getdents64(arg1, dirp, count));
9493             if (!is_error(ret)) {
9494                 struct linux_dirent64 *de;
9495                 int len = ret;
9496                 int reclen;
9497                 de = dirp;
9498                 while (len > 0) {
9499                     reclen = de->d_reclen;
9500                     if (reclen > len)
9501                         break;
9502                     de->d_reclen = tswap16(reclen);
9503                     tswap64s((uint64_t *)&de->d_ino);
9504                     tswap64s((uint64_t *)&de->d_off);
9505                     de = (struct linux_dirent64 *)((char *)de + reclen);
9506                     len -= reclen;
9507                 }
9508             }
9509             unlock_user(dirp, arg2, ret);
9510         }
9511         return ret;
9512 #endif /* TARGET_NR_getdents64 */
9513 #if defined(TARGET_NR__newselect)
9514     case TARGET_NR__newselect:
9515         return do_select(arg1, arg2, arg3, arg4, arg5);
9516 #endif
9517 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9518 # ifdef TARGET_NR_poll
9519     case TARGET_NR_poll:
9520 # endif
9521 # ifdef TARGET_NR_ppoll
9522     case TARGET_NR_ppoll:
9523 # endif
9524         {
9525             struct target_pollfd *target_pfd;
9526             unsigned int nfds = arg2;
9527             struct pollfd *pfd;
9528             unsigned int i;
9529 
9530             pfd = NULL;
9531             target_pfd = NULL;
9532             if (nfds) {
9533                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9534                     return -TARGET_EINVAL;
9535                 }
9536 
9537                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9538                                        sizeof(struct target_pollfd) * nfds, 1);
9539                 if (!target_pfd) {
9540                     return -TARGET_EFAULT;
9541                 }
9542 
9543                 pfd = alloca(sizeof(struct pollfd) * nfds);
9544                 for (i = 0; i < nfds; i++) {
9545                     pfd[i].fd = tswap32(target_pfd[i].fd);
9546                     pfd[i].events = tswap16(target_pfd[i].events);
9547                 }
9548             }
9549 
9550             switch (num) {
9551 # ifdef TARGET_NR_ppoll
9552             case TARGET_NR_ppoll:
9553             {
9554                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9555                 target_sigset_t *target_set;
9556                 sigset_t _set, *set = &_set;
9557 
9558                 if (arg3) {
9559                     if (target_to_host_timespec(timeout_ts, arg3)) {
9560                         unlock_user(target_pfd, arg1, 0);
9561                         return -TARGET_EFAULT;
9562                     }
9563                 } else {
9564                     timeout_ts = NULL;
9565                 }
9566 
9567                 if (arg4) {
9568                     if (arg5 != sizeof(target_sigset_t)) {
9569                         unlock_user(target_pfd, arg1, 0);
9570                         return -TARGET_EINVAL;
9571                     }
9572 
9573                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9574                     if (!target_set) {
9575                         unlock_user(target_pfd, arg1, 0);
9576                         return -TARGET_EFAULT;
9577                     }
9578                     target_to_host_sigset(set, target_set);
9579                 } else {
9580                     set = NULL;
9581                 }
9582 
9583                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9584                                            set, SIGSET_T_SIZE));
9585 
9586                 if (!is_error(ret) && arg3) {
9587                     host_to_target_timespec(arg3, timeout_ts);
9588                 }
9589                 if (arg4) {
9590                     unlock_user(target_set, arg4, 0);
9591                 }
9592                 break;
9593             }
9594 # endif
9595 # ifdef TARGET_NR_poll
9596             case TARGET_NR_poll:
9597             {
9598                 struct timespec ts, *pts;
9599 
9600                 if (arg3 >= 0) {
9601                     /* Convert ms to secs, ns */
9602                     ts.tv_sec = arg3 / 1000;
9603                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9604                     pts = &ts;
9605                 } else {
9606                     /* A negative poll() timeout means "wait forever" */
9607                     pts = NULL;
9608                 }
9609                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9610                 break;
9611             }
9612 # endif
9613             default:
9614                 g_assert_not_reached();
9615             }
9616 
9617             if (!is_error(ret)) {
9618                 for (i = 0; i < nfds; i++) {
9619                     target_pfd[i].revents = tswap16(pfd[i].revents);
9620                 }
9621             }
9622             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9623         }
9624         return ret;
9625 #endif
9626     case TARGET_NR_flock:
9627         /* NOTE: the flock constants are the same on every Linux
9628            platform, so no translation of arg2 is needed */
9629         return get_errno(safe_flock(arg1, arg2));
9630     case TARGET_NR_readv:
9631         {
9632             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9633             if (vec != NULL) {
9634                 ret = get_errno(safe_readv(arg1, vec, arg3));
9635                 unlock_iovec(vec, arg2, arg3, 1);
9636             } else {
9637                 ret = -host_to_target_errno(errno);
9638             }
9639         }
9640         return ret;
9641     case TARGET_NR_writev:
9642         {
9643             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9644             if (vec != NULL) {
9645                 ret = get_errno(safe_writev(arg1, vec, arg3));
9646                 unlock_iovec(vec, arg2, arg3, 0);
9647             } else {
9648                 ret = -host_to_target_errno(errno);
9649             }
9650         }
9651         return ret;
9652 #if defined(TARGET_NR_preadv)
9653     case TARGET_NR_preadv:
9654         {
9655             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9656             if (vec != NULL) {
9657                 unsigned long low, high;
9658 
9659                 target_to_host_low_high(arg4, arg5, &low, &high);
9660                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9661                 unlock_iovec(vec, arg2, arg3, 1);
9662             } else {
9663                 ret = -host_to_target_errno(errno);
9664            }
9665         }
9666         return ret;
9667 #endif
9668 #if defined(TARGET_NR_pwritev)
9669     case TARGET_NR_pwritev:
9670         {
9671             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9672             if (vec != NULL) {
9673                 unsigned long low, high;
9674 
9675                 target_to_host_low_high(arg4, arg5, &low, &high);
9676                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9677                 unlock_iovec(vec, arg2, arg3, 0);
9678             } else {
9679                 ret = -host_to_target_errno(errno);
9680            }
9681         }
9682         return ret;
9683 #endif
9684     case TARGET_NR_getsid:
9685         return get_errno(getsid(arg1));
9686 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9687     case TARGET_NR_fdatasync:
9688         return get_errno(fdatasync(arg1));
9689 #endif
9690 #ifdef TARGET_NR__sysctl
9691     case TARGET_NR__sysctl:
9692         /* We don't implement this, but ENOTDIR is always a safe
9693            return value. */
9694         return -TARGET_ENOTDIR;
9695 #endif
9696     case TARGET_NR_sched_getaffinity:
9697         {
9698             unsigned int mask_size;
9699             unsigned long *mask;
9700 
9701             /*
9702              * sched_getaffinity needs multiples of ulong, so need to take
9703              * care of mismatches between target ulong and host ulong sizes.
9704              */
9705             if (arg2 & (sizeof(abi_ulong) - 1)) {
9706                 return -TARGET_EINVAL;
9707             }
9708             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
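            /* Round the guest-supplied size up to a whole number of host
               longs; e.g. a 4-byte request from a 32-bit guest becomes a
               mask_size of 8 on a 64-bit host. */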
9709 
9710             mask = alloca(mask_size);
9711             memset(mask, 0, mask_size);
9712             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9713 
9714             if (!is_error(ret)) {
9715                 if (ret > arg2) {
9716                     /* More data returned than the caller's buffer will fit.
9717                      * This only happens if sizeof(abi_long) < sizeof(long)
9718                      * and the caller passed us a buffer holding an odd number
9719                      * of abi_longs. If the host kernel is actually using the
9720                      * extra 4 bytes then fail EINVAL; otherwise we can just
9721                      * ignore them and only copy the interesting part.
9722                      */
9723                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9724                     if (numcpus > arg2 * 8) {
9725                         return -TARGET_EINVAL;
9726                     }
9727                     ret = arg2;
9728                 }
9729 
9730                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9731                     return -TARGET_EFAULT;
9732                 }
9733             }
9734         }
9735         return ret;
9736     case TARGET_NR_sched_setaffinity:
9737         {
9738             unsigned int mask_size;
9739             unsigned long *mask;
9740 
9741             /*
9742              * sched_setaffinity needs multiples of ulong, so need to take
9743              * care of mismatches between target ulong and host ulong sizes.
9744              */
9745             if (arg2 & (sizeof(abi_ulong) - 1)) {
9746                 return -TARGET_EINVAL;
9747             }
9748             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9749             mask = alloca(mask_size);
9750 
9751             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9752             if (ret) {
9753                 return ret;
9754             }
9755 
9756             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9757         }
9758     case TARGET_NR_getcpu:
9759         {
9760             unsigned cpu, node;
9761             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9762                                        arg2 ? &node : NULL,
9763                                        NULL));
9764             if (is_error(ret)) {
9765                 return ret;
9766             }
9767             if (arg1 && put_user_u32(cpu, arg1)) {
9768                 return -TARGET_EFAULT;
9769             }
9770             if (arg2 && put_user_u32(node, arg2)) {
9771                 return -TARGET_EFAULT;
9772             }
9773         }
9774         return ret;
9775     case TARGET_NR_sched_setparam:
9776         {
9777             struct sched_param *target_schp;
9778             struct sched_param schp;
9779 
9780             if (arg2 == 0) {
9781                 return -TARGET_EINVAL;
9782             }
9783             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9784                 return -TARGET_EFAULT;
9785             schp.sched_priority = tswap32(target_schp->sched_priority);
9786             unlock_user_struct(target_schp, arg2, 0);
9787             return get_errno(sched_setparam(arg1, &schp));
9788         }
9789     case TARGET_NR_sched_getparam:
9790         {
9791             struct sched_param *target_schp;
9792             struct sched_param schp;
9793 
9794             if (arg2 == 0) {
9795                 return -TARGET_EINVAL;
9796             }
9797             ret = get_errno(sched_getparam(arg1, &schp));
9798             if (!is_error(ret)) {
9799                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9800                     return -TARGET_EFAULT;
9801                 target_schp->sched_priority = tswap32(schp.sched_priority);
9802                 unlock_user_struct(target_schp, arg2, 1);
9803             }
9804         }
9805         return ret;
9806     case TARGET_NR_sched_setscheduler:
9807         {
9808             struct sched_param *target_schp;
9809             struct sched_param schp;
9810             if (arg3 == 0) {
9811                 return -TARGET_EINVAL;
9812             }
9813             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9814                 return -TARGET_EFAULT;
9815             schp.sched_priority = tswap32(target_schp->sched_priority);
9816             unlock_user_struct(target_schp, arg3, 0);
9817             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9818         }
9819     case TARGET_NR_sched_getscheduler:
9820         return get_errno(sched_getscheduler(arg1));
9821     case TARGET_NR_sched_yield:
9822         return get_errno(sched_yield());
9823     case TARGET_NR_sched_get_priority_max:
9824         return get_errno(sched_get_priority_max(arg1));
9825     case TARGET_NR_sched_get_priority_min:
9826         return get_errno(sched_get_priority_min(arg1));
9827     case TARGET_NR_sched_rr_get_interval:
9828         {
9829             struct timespec ts;
9830             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9831             if (!is_error(ret)) {
9832                 ret = host_to_target_timespec(arg2, &ts);
9833             }
9834         }
9835         return ret;
9836     case TARGET_NR_nanosleep:
9837         {
9838             struct timespec req, rem;
9839             target_to_host_timespec(&req, arg1);
9840             ret = get_errno(safe_nanosleep(&req, &rem));
9841             if (is_error(ret) && arg2) {
9842                 host_to_target_timespec(arg2, &rem);
9843             }
9844         }
9845         return ret;
9846     case TARGET_NR_prctl:
9847         switch (arg1) {
9848         case PR_GET_PDEATHSIG:
9849         {
9850             int deathsig;
9851             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9852             if (!is_error(ret) && arg2
9853                 && put_user_ual(deathsig, arg2)) {
9854                 return -TARGET_EFAULT;
9855             }
9856             return ret;
9857         }
9858 #ifdef PR_GET_NAME
9859         case PR_GET_NAME:
9860         {
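            /* The kernel task name (TASK_COMM_LEN) is a 16-byte buffer,
               including its trailing NUL. */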
9861             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9862             if (!name) {
9863                 return -TARGET_EFAULT;
9864             }
9865             ret = get_errno(prctl(arg1, (unsigned long)name,
9866                                   arg3, arg4, arg5));
9867             unlock_user(name, arg2, 16);
9868             return ret;
9869         }
9870         case PR_SET_NAME:
9871         {
9872             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9873             if (!name) {
9874                 return -TARGET_EFAULT;
9875             }
9876             ret = get_errno(prctl(arg1, (unsigned long)name,
9877                                   arg3, arg4, arg5));
9878             unlock_user(name, arg2, 0);
9879             return ret;
9880         }
9881 #endif
9882 #ifdef TARGET_MIPS
9883         case TARGET_PR_GET_FP_MODE:
9884         {
9885             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9886             ret = 0;
9887             if (env->CP0_Status & (1 << CP0St_FR)) {
9888                 ret |= TARGET_PR_FP_MODE_FR;
9889             }
9890             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9891                 ret |= TARGET_PR_FP_MODE_FRE;
9892             }
9893             return ret;
9894         }
9895         case TARGET_PR_SET_FP_MODE:
9896         {
9897             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9898             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9899             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9900             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9901             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9902 
9903             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9904                                             TARGET_PR_FP_MODE_FRE;
9905 
9906             /* If nothing to change, return right away, successfully.  */
9907             if (old_fr == new_fr && old_fre == new_fre) {
9908                 return 0;
9909             }
9910             /* Check the value is valid */
9911             if (arg2 & ~known_bits) {
9912                 return -TARGET_EOPNOTSUPP;
9913             }
9914             /* Setting FRE without FR is not supported.  */
9915             if (new_fre && !new_fr) {
9916                 return -TARGET_EOPNOTSUPP;
9917             }
9918             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9919                 /* FR1 is not supported */
9920                 return -TARGET_EOPNOTSUPP;
9921             }
9922             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9923                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9924                 /* cannot set FR=0 */
9925                 return -TARGET_EOPNOTSUPP;
9926             }
9927             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9928                 /* Cannot set FRE=1 */
9929                 return -TARGET_EOPNOTSUPP;
9930             }
9931 
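            /* Switching FR changes how doubles map onto the FPU registers:
             * with FR=0 a 64-bit value is split across an even/odd register
             * pair, with FR=1 it occupies a single 64-bit register, so the
             * upper halves are migrated between the even register's high
             * word and the odd register's low word when the mode flips.
             */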
9932             int i;
9933             fpr_t *fpr = env->active_fpu.fpr;
9934             for (i = 0; i < 32 ; i += 2) {
9935                 if (!old_fr && new_fr) {
9936                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9937                 } else if (old_fr && !new_fr) {
9938                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9939                 }
9940             }
9941 
9942             if (new_fr) {
9943                 env->CP0_Status |= (1 << CP0St_FR);
9944                 env->hflags |= MIPS_HFLAG_F64;
9945             } else {
9946                 env->CP0_Status &= ~(1 << CP0St_FR);
9947                 env->hflags &= ~MIPS_HFLAG_F64;
9948             }
9949             if (new_fre) {
9950                 env->CP0_Config5 |= (1 << CP0C5_FRE);
9951                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9952                     env->hflags |= MIPS_HFLAG_FRE;
9953                 }
9954             } else {
9955                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9956                 env->hflags &= ~MIPS_HFLAG_FRE;
9957             }
9958 
9959             return 0;
9960         }
9961 #endif /* MIPS */
9962 #ifdef TARGET_AARCH64
9963         case TARGET_PR_SVE_SET_VL:
9964             /*
9965              * We cannot support either PR_SVE_SET_VL_ONEXEC or
9966              * PR_SVE_VL_INHERIT.  Note the kernel definition
9967              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9968              * even though the current architectural maximum is VQ=16.
9969              */
9970             ret = -TARGET_EINVAL;
9971             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
9972                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9973                 CPUARMState *env = cpu_env;
9974                 ARMCPU *cpu = env_archcpu(env);
9975                 uint32_t vq, old_vq;
9976 
9977                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9978                 vq = MAX(arg2 / 16, 1);
9979                 vq = MIN(vq, cpu->sve_max_vq);
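                /* ZCR_EL1.LEN holds vq - 1; the syscall's result is the new
                   vector length in bytes, i.e. vq * 16. */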
9980 
9981                 if (vq < old_vq) {
9982                     aarch64_sve_narrow_vq(env, vq);
9983                 }
9984                 env->vfp.zcr_el[1] = vq - 1;
9985                 ret = vq * 16;
9986             }
9987             return ret;
9988         case TARGET_PR_SVE_GET_VL:
9989             ret = -TARGET_EINVAL;
9990             {
9991                 ARMCPU *cpu = env_archcpu(cpu_env);
9992                 if (cpu_isar_feature(aa64_sve, cpu)) {
9993                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9994                 }
9995             }
9996             return ret;
9997         case TARGET_PR_PAC_RESET_KEYS:
9998             {
9999                 CPUARMState *env = cpu_env;
10000                 ARMCPU *cpu = env_archcpu(env);
10001 
10002                 if (arg3 || arg4 || arg5) {
10003                     return -TARGET_EINVAL;
10004                 }
10005                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10006                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10007                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10008                                TARGET_PR_PAC_APGAKEY);
10009                     int ret = 0;
10010                     Error *err = NULL;
10011 
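                    /* As in the kernel, arg2 == 0 selects all five keys;
                       any bit outside the known set is rejected. */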
10012                     if (arg2 == 0) {
10013                         arg2 = all;
10014                     } else if (arg2 & ~all) {
10015                         return -TARGET_EINVAL;
10016                     }
10017                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10018                         ret |= qemu_guest_getrandom(&env->keys.apia,
10019                                                     sizeof(ARMPACKey), &err);
10020                     }
10021                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10022                         ret |= qemu_guest_getrandom(&env->keys.apib,
10023                                                     sizeof(ARMPACKey), &err);
10024                     }
10025                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10026                         ret |= qemu_guest_getrandom(&env->keys.apda,
10027                                                     sizeof(ARMPACKey), &err);
10028                     }
10029                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10030                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10031                                                     sizeof(ARMPACKey), &err);
10032                     }
10033                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10034                         ret |= qemu_guest_getrandom(&env->keys.apga,
10035                                                     sizeof(ARMPACKey), &err);
10036                     }
10037                     if (ret != 0) {
10038                         /*
10039                          * Some unknown failure in the crypto.  The best
10040                          * we can do is log it and fail the syscall.
10041                          * The real syscall cannot fail this way.
10042                          */
10043                         qemu_log_mask(LOG_UNIMP,
10044                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10045                                       error_get_pretty(err));
10046                         error_free(err);
10047                         return -TARGET_EIO;
10048                     }
10049                     return 0;
10050                 }
10051             }
10052             return -TARGET_EINVAL;
10053 #endif /* AARCH64 */
10054         case PR_GET_SECCOMP:
10055         case PR_SET_SECCOMP:
10056             /* Disable seccomp to prevent the target disabling syscalls we
10057              * need. */
10058             return -TARGET_EINVAL;
10059         default:
10060             /* Most prctl options have no pointer arguments */
10061             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10062         }
10063         break;
10064 #ifdef TARGET_NR_arch_prctl
10065     case TARGET_NR_arch_prctl:
10066 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10067         return do_arch_prctl(cpu_env, arg1, arg2);
10068 #else
10069 #error unreachable
10070 #endif
10071 #endif
10072 #ifdef TARGET_NR_pread64
10073     case TARGET_NR_pread64:
10074         if (regpairs_aligned(cpu_env, num)) {
10075             arg4 = arg5;
10076             arg5 = arg6;
10077         }
10078         if (arg2 == 0 && arg3 == 0) {
10079             /* Special-case NULL buffer and zero length, which should succeed */
10080             p = 0;
10081         } else {
10082             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10083             if (!p) {
10084                 return -TARGET_EFAULT;
10085             }
10086         }
10087         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10088         unlock_user(p, arg2, ret);
10089         return ret;
10090     case TARGET_NR_pwrite64:
10091         if (regpairs_aligned(cpu_env, num)) {
10092             arg4 = arg5;
10093             arg5 = arg6;
10094         }
10095         if (arg2 == 0 && arg3 == 0) {
10096             /* Special-case NULL buffer and zero length, which should succeed */
10097             p = 0;
10098         } else {
10099             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10100             if (!p) {
10101                 return -TARGET_EFAULT;
10102             }
10103         }
10104         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10105         unlock_user(p, arg2, 0);
10106         return ret;
10107 #endif
10108     case TARGET_NR_getcwd:
10109         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10110             return -TARGET_EFAULT;
10111         ret = get_errno(sys_getcwd1(p, arg2));
10112         unlock_user(p, arg1, ret);
10113         return ret;
10114     case TARGET_NR_capget:
10115     case TARGET_NR_capset:
10116     {
10117         struct target_user_cap_header *target_header;
10118         struct target_user_cap_data *target_data = NULL;
10119         struct __user_cap_header_struct header;
10120         struct __user_cap_data_struct data[2];
10121         struct __user_cap_data_struct *dataptr = NULL;
10122         int i, target_datalen;
10123         int data_items = 1;
10124 
10125         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10126             return -TARGET_EFAULT;
10127         }
10128         header.version = tswap32(target_header->version);
10129         header.pid = tswap32(target_header->pid);
10130 
10131         if (header.version != _LINUX_CAPABILITY_VERSION) {
10132             /* Versions 2 and up take a pointer to two user_cap_data structs */
10133             data_items = 2;
10134         }
10135 
10136         target_datalen = sizeof(*target_data) * data_items;
10137 
10138         if (arg2) {
10139             if (num == TARGET_NR_capget) {
10140                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10141             } else {
10142                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10143             }
10144             if (!target_data) {
10145                 unlock_user_struct(target_header, arg1, 0);
10146                 return -TARGET_EFAULT;
10147             }
10148 
10149             if (num == TARGET_NR_capset) {
10150                 for (i = 0; i < data_items; i++) {
10151                     data[i].effective = tswap32(target_data[i].effective);
10152                     data[i].permitted = tswap32(target_data[i].permitted);
10153                     data[i].inheritable = tswap32(target_data[i].inheritable);
10154                 }
10155             }
10156 
10157             dataptr = data;
10158         }
10159 
10160         if (num == TARGET_NR_capget) {
10161             ret = get_errno(capget(&header, dataptr));
10162         } else {
10163             ret = get_errno(capset(&header, dataptr));
10164         }
10165 
10166         /* The kernel always updates version for both capget and capset */
10167         target_header->version = tswap32(header.version);
10168         unlock_user_struct(target_header, arg1, 1);
10169 
10170         if (arg2) {
10171             if (num == TARGET_NR_capget) {
10172                 for (i = 0; i < data_items; i++) {
10173                     target_data[i].effective = tswap32(data[i].effective);
10174                     target_data[i].permitted = tswap32(data[i].permitted);
10175                     target_data[i].inheritable = tswap32(data[i].inheritable);
10176                 }
10177                 unlock_user(target_data, arg2, target_datalen);
10178             } else {
10179                 unlock_user(target_data, arg2, 0);
10180             }
10181         }
10182         return ret;
10183     }
10184     case TARGET_NR_sigaltstack:
10185         return do_sigaltstack(arg1, arg2,
10186                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10187 
10188 #ifdef CONFIG_SENDFILE
10189 #ifdef TARGET_NR_sendfile
10190     case TARGET_NR_sendfile:
10191     {
10192         off_t *offp = NULL;
10193         off_t off;
10194         if (arg3) {
10195             ret = get_user_sal(off, arg3);
10196             if (is_error(ret)) {
10197                 return ret;
10198             }
10199             offp = &off;
10200         }
10201         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10202         if (!is_error(ret) && arg3) {
10203             abi_long ret2 = put_user_sal(off, arg3);
10204             if (is_error(ret2)) {
10205                 ret = ret2;
10206             }
10207         }
10208         return ret;
10209     }
10210 #endif
10211 #ifdef TARGET_NR_sendfile64
10212     case TARGET_NR_sendfile64:
10213     {
10214         off_t *offp = NULL;
10215         off_t off;
10216         if (arg3) {
10217             ret = get_user_s64(off, arg3);
10218             if (is_error(ret)) {
10219                 return ret;
10220             }
10221             offp = &off;
10222         }
10223         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10224         if (!is_error(ret) && arg3) {
10225             abi_long ret2 = put_user_s64(off, arg3);
10226             if (is_error(ret2)) {
10227                 ret = ret2;
10228             }
10229         }
10230         return ret;
10231     }
10232 #endif
10233 #endif
10234 #ifdef TARGET_NR_vfork
10235     case TARGET_NR_vfork:
10236         return get_errno(do_fork(cpu_env,
10237                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10238                          0, 0, 0, 0));
10239 #endif
10240 #ifdef TARGET_NR_ugetrlimit
10241     case TARGET_NR_ugetrlimit:
10242     {
10243         struct rlimit rlim;
10244         int resource = target_to_host_resource(arg1);
10245         ret = get_errno(getrlimit(resource, &rlim));
10246         if (!is_error(ret)) {
10247             struct target_rlimit *target_rlim;
10248             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10249                 return -TARGET_EFAULT;
10250             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10251             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10252             unlock_user_struct(target_rlim, arg2, 1);
10253         }
10254         return ret;
10255     }
10256 #endif
10257 #ifdef TARGET_NR_truncate64
10258     case TARGET_NR_truncate64:
10259         if (!(p = lock_user_string(arg1)))
10260             return -TARGET_EFAULT;
10261         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10262         unlock_user(p, arg1, 0);
10263         return ret;
10264 #endif
10265 #ifdef TARGET_NR_ftruncate64
10266     case TARGET_NR_ftruncate64:
10267         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10268 #endif
10269 #ifdef TARGET_NR_stat64
10270     case TARGET_NR_stat64:
10271         if (!(p = lock_user_string(arg1))) {
10272             return -TARGET_EFAULT;
10273         }
10274         ret = get_errno(stat(path(p), &st));
10275         unlock_user(p, arg1, 0);
10276         if (!is_error(ret))
10277             ret = host_to_target_stat64(cpu_env, arg2, &st);
10278         return ret;
10279 #endif
10280 #ifdef TARGET_NR_lstat64
10281     case TARGET_NR_lstat64:
10282         if (!(p = lock_user_string(arg1))) {
10283             return -TARGET_EFAULT;
10284         }
10285         ret = get_errno(lstat(path(p), &st));
10286         unlock_user(p, arg1, 0);
10287         if (!is_error(ret))
10288             ret = host_to_target_stat64(cpu_env, arg2, &st);
10289         return ret;
10290 #endif
10291 #ifdef TARGET_NR_fstat64
10292     case TARGET_NR_fstat64:
10293         ret = get_errno(fstat(arg1, &st));
10294         if (!is_error(ret))
10295             ret = host_to_target_stat64(cpu_env, arg2, &st);
10296         return ret;
10297 #endif
10298 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10299 #ifdef TARGET_NR_fstatat64
10300     case TARGET_NR_fstatat64:
10301 #endif
10302 #ifdef TARGET_NR_newfstatat
10303     case TARGET_NR_newfstatat:
10304 #endif
10305         if (!(p = lock_user_string(arg2))) {
10306             return -TARGET_EFAULT;
10307         }
10308         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10309         unlock_user(p, arg2, 0);
10310         if (!is_error(ret))
10311             ret = host_to_target_stat64(cpu_env, arg3, &st);
10312         return ret;
10313 #endif
10314 #if defined(TARGET_NR_statx)
10315     case TARGET_NR_statx:
10316         {
10317             struct target_statx *target_stx;
10318             int dirfd = arg1;
10319             int flags = arg3;
10320 
10321             p = lock_user_string(arg2);
10322             if (p == NULL) {
10323                 return -TARGET_EFAULT;
10324             }
10325 #if defined(__NR_statx)
10326             {
10327                 /*
10328                  * It is assumed that struct statx is architecture independent.
10329                  */
10330                 struct target_statx host_stx;
10331                 int mask = arg4;
10332 
10333                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10334                 if (!is_error(ret)) {
10335                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10336                         unlock_user(p, arg2, 0);
10337                         return -TARGET_EFAULT;
10338                     }
10339                 }
10340 
10341                 if (ret != -TARGET_ENOSYS) {
10342                     unlock_user(p, arg2, 0);
10343                     return ret;
10344                 }
10345             }
10346 #endif
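            /* Fallback when the host has no statx() or it returns ENOSYS:
               use fstatat() and fill in only the statx fields that a plain
               struct stat can provide. */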
10347             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10348             unlock_user(p, arg2, 0);
10349 
10350             if (!is_error(ret)) {
10351                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10352                     return -TARGET_EFAULT;
10353                 }
10354                 memset(target_stx, 0, sizeof(*target_stx));
10355                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10356                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10357                 __put_user(st.st_ino, &target_stx->stx_ino);
10358                 __put_user(st.st_mode, &target_stx->stx_mode);
10359                 __put_user(st.st_uid, &target_stx->stx_uid);
10360                 __put_user(st.st_gid, &target_stx->stx_gid);
10361                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10362                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10363                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10364                 __put_user(st.st_size, &target_stx->stx_size);
10365                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10366                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10367                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10368                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10369                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10370                 unlock_user_struct(target_stx, arg5, 1);
10371             }
10372         }
10373         return ret;
10374 #endif
10375 #ifdef TARGET_NR_lchown
10376     case TARGET_NR_lchown:
10377         if (!(p = lock_user_string(arg1)))
10378             return -TARGET_EFAULT;
10379         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10380         unlock_user(p, arg1, 0);
10381         return ret;
10382 #endif
10383 #ifdef TARGET_NR_getuid
10384     case TARGET_NR_getuid:
10385         return get_errno(high2lowuid(getuid()));
10386 #endif
10387 #ifdef TARGET_NR_getgid
10388     case TARGET_NR_getgid:
10389         return get_errno(high2lowgid(getgid()));
10390 #endif
10391 #ifdef TARGET_NR_geteuid
10392     case TARGET_NR_geteuid:
10393         return get_errno(high2lowuid(geteuid()));
10394 #endif
10395 #ifdef TARGET_NR_getegid
10396     case TARGET_NR_getegid:
10397         return get_errno(high2lowgid(getegid()));
10398 #endif
10399     case TARGET_NR_setreuid:
10400         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10401     case TARGET_NR_setregid:
10402         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10403     case TARGET_NR_getgroups:
10404         {
10405             int gidsetsize = arg1;
10406             target_id *target_grouplist;
10407             gid_t *grouplist;
10408             int i;
10409 
10410             grouplist = alloca(gidsetsize * sizeof(gid_t));
10411             ret = get_errno(getgroups(gidsetsize, grouplist));
10412             if (gidsetsize == 0)
10413                 return ret;
10414             if (!is_error(ret)) {
10415                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10416                 if (!target_grouplist)
10417                     return -TARGET_EFAULT;
10418                 for (i = 0; i < ret; i++)
10419                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10420                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10421             }
10422         }
10423         return ret;
10424     case TARGET_NR_setgroups:
10425         {
10426             int gidsetsize = arg1;
10427             target_id *target_grouplist;
10428             gid_t *grouplist = NULL;
10429             int i;
10430             if (gidsetsize) {
10431                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10432                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10433                 if (!target_grouplist) {
10434                     return -TARGET_EFAULT;
10435                 }
10436                 for (i = 0; i < gidsetsize; i++) {
10437                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10438                 }
10439                 unlock_user(target_grouplist, arg2, 0);
10440             }
10441             return get_errno(setgroups(gidsetsize, grouplist));
10442         }
10443     case TARGET_NR_fchown:
10444         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10445 #if defined(TARGET_NR_fchownat)
10446     case TARGET_NR_fchownat:
10447         if (!(p = lock_user_string(arg2)))
10448             return -TARGET_EFAULT;
10449         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10450                                  low2highgid(arg4), arg5));
10451         unlock_user(p, arg2, 0);
10452         return ret;
10453 #endif
10454 #ifdef TARGET_NR_setresuid
10455     case TARGET_NR_setresuid:
10456         return get_errno(sys_setresuid(low2highuid(arg1),
10457                                        low2highuid(arg2),
10458                                        low2highuid(arg3)));
10459 #endif
10460 #ifdef TARGET_NR_getresuid
10461     case TARGET_NR_getresuid:
10462         {
10463             uid_t ruid, euid, suid;
10464             ret = get_errno(getresuid(&ruid, &euid, &suid));
10465             if (!is_error(ret)) {
10466                 if (put_user_id(high2lowuid(ruid), arg1)
10467                     || put_user_id(high2lowuid(euid), arg2)
10468                     || put_user_id(high2lowuid(suid), arg3))
10469                     return -TARGET_EFAULT;
10470             }
10471         }
10472         return ret;
10473 #endif
10474 #ifdef TARGET_NR_getresgid
10475     case TARGET_NR_setresgid:
10476         return get_errno(sys_setresgid(low2highgid(arg1),
10477                                        low2highgid(arg2),
10478                                        low2highgid(arg3)));
10479 #endif
10480 #ifdef TARGET_NR_getresgid
10481     case TARGET_NR_getresgid:
10482         {
10483             gid_t rgid, egid, sgid;
10484             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10485             if (!is_error(ret)) {
10486                 if (put_user_id(high2lowgid(rgid), arg1)
10487                     || put_user_id(high2lowgid(egid), arg2)
10488                     || put_user_id(high2lowgid(sgid), arg3))
10489                     return -TARGET_EFAULT;
10490             }
10491         }
10492         return ret;
10493 #endif
10494 #ifdef TARGET_NR_chown
10495     case TARGET_NR_chown:
10496         if (!(p = lock_user_string(arg1)))
10497             return -TARGET_EFAULT;
10498         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10499         unlock_user(p, arg1, 0);
10500         return ret;
10501 #endif
10502     case TARGET_NR_setuid:
10503         return get_errno(sys_setuid(low2highuid(arg1)));
10504     case TARGET_NR_setgid:
10505         return get_errno(sys_setgid(low2highgid(arg1)));
10506     case TARGET_NR_setfsuid:
10507         return get_errno(setfsuid(arg1));
10508     case TARGET_NR_setfsgid:
10509         return get_errno(setfsgid(arg1));
10510 
10511 #ifdef TARGET_NR_lchown32
10512     case TARGET_NR_lchown32:
10513         if (!(p = lock_user_string(arg1)))
10514             return -TARGET_EFAULT;
10515         ret = get_errno(lchown(p, arg2, arg3));
10516         unlock_user(p, arg1, 0);
10517         return ret;
10518 #endif
10519 #ifdef TARGET_NR_getuid32
10520     case TARGET_NR_getuid32:
10521         return get_errno(getuid());
10522 #endif
10523 
10524 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10525     /* Alpha specific */
10526     case TARGET_NR_getxuid:
10527         {
10528             uid_t euid;
10529             euid = geteuid();
10530             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10531         }
10532         return get_errno(getuid());
10533 #endif
10534 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10535     /* Alpha specific */
10536     case TARGET_NR_getxgid:
10537         {
10538             gid_t egid;
10539             egid = getegid();
10540             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10541         }
10542         return get_errno(getgid());
10543 #endif
10544 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10545     /* Alpha specific */
10546     case TARGET_NR_osf_getsysinfo:
10547         ret = -TARGET_EOPNOTSUPP;
10548         switch (arg1) {
10549           case TARGET_GSI_IEEE_FP_CONTROL:
10550             {
10551                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10552                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10553 
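                /* The exception status bits are kept only in the hardware
                   FPCR (see osf_setsysinfo below), so fold them into the
                   software control word before returning it. */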
10554                 swcr &= ~SWCR_STATUS_MASK;
10555                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10556 
10557                 if (put_user_u64(swcr, arg2))
10558                     return -TARGET_EFAULT;
10559                 ret = 0;
10560             }
10561             break;
10562 
10563           /* case GSI_IEEE_STATE_AT_SIGNAL:
10564              -- Not implemented in linux kernel.
10565              case GSI_UACPROC:
10566              -- Retrieves current unaligned access state; not much used.
10567              case GSI_PROC_TYPE:
10568              -- Retrieves implver information; surely not used.
10569              case GSI_GET_HWRPB:
10570              -- Grabs a copy of the HWRPB; surely not used.
10571           */
10572         }
10573         return ret;
10574 #endif
10575 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10576     /* Alpha specific */
10577     case TARGET_NR_osf_setsysinfo:
10578         ret = -TARGET_EOPNOTSUPP;
10579         switch (arg1) {
10580           case TARGET_SSI_IEEE_FP_CONTROL:
10581             {
10582                 uint64_t swcr, fpcr;
10583 
10584                 if (get_user_u64(swcr, arg2)) {
10585                     return -TARGET_EFAULT;
10586                 }
10587 
10588                 /*
10589                  * The kernel calls swcr_update_status to update the
10590                  * status bits from the fpcr at every point that it
10591                  * could be queried.  Therefore, we store the status
10592                  * bits only in FPCR.
10593                  */
10594                 ((CPUAlphaState *)cpu_env)->swcr
10595                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10596 
10597                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10598                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10599                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10600                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10601                 ret = 0;
10602             }
10603             break;
10604 
10605           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10606             {
10607                 uint64_t exc, fpcr, fex;
10608 
10609                 if (get_user_u64(exc, arg2)) {
10610                     return -TARGET_EFAULT;
10611                 }
10612                 exc &= SWCR_STATUS_MASK;
10613                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10614 
10615                 /* Old exceptions are not signaled.  */
10616                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10617                 fex = exc & ~fex;
10618                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10619                 fex &= ((CPUArchState *)cpu_env)->swcr;
10620 
10621                 /* Update the hardware fpcr.  */
10622                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10623                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10624 
10625                 if (fex) {
10626                     int si_code = TARGET_FPE_FLTUNK;
10627                     target_siginfo_t info;
10628 
10629                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10630                         si_code = TARGET_FPE_FLTUND;
10631                     }
10632                     if (fex & SWCR_TRAP_ENABLE_INE) {
10633                         si_code = TARGET_FPE_FLTRES;
10634                     }
10635                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10636                         si_code = TARGET_FPE_FLTUND;
10637                     }
10638                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10639                         si_code = TARGET_FPE_FLTOVF;
10640                     }
10641                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10642                         si_code = TARGET_FPE_FLTDIV;
10643                     }
10644                     if (fex & SWCR_TRAP_ENABLE_INV) {
10645                         si_code = TARGET_FPE_FLTINV;
10646                     }
10647 
10648                     info.si_signo = SIGFPE;
10649                     info.si_errno = 0;
10650                     info.si_code = si_code;
10651                     info._sifields._sigfault._addr
10652                         = ((CPUArchState *)cpu_env)->pc;
10653                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10654                                  QEMU_SI_FAULT, &info);
10655                 }
10656                 ret = 0;
10657             }
10658             break;
10659 
10660           /* case SSI_NVPAIRS:
10661              -- Used with SSIN_UACPROC to enable unaligned accesses.
10662              case SSI_IEEE_STATE_AT_SIGNAL:
10663              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10664              -- Not implemented in linux kernel
10665           */
10666         }
10667         return ret;
10668 #endif
10669 #ifdef TARGET_NR_osf_sigprocmask
10670     /* Alpha specific: the old signal mask is returned as the syscall result.  */
10671     case TARGET_NR_osf_sigprocmask:
10672         {
10673             abi_ulong mask;
10674             int how;
10675             sigset_t set, oldset;
10676 
10677             switch(arg1) {
10678             case TARGET_SIG_BLOCK:
10679                 how = SIG_BLOCK;
10680                 break;
10681             case TARGET_SIG_UNBLOCK:
10682                 how = SIG_UNBLOCK;
10683                 break;
10684             case TARGET_SIG_SETMASK:
10685                 how = SIG_SETMASK;
10686                 break;
10687             default:
10688                 return -TARGET_EINVAL;
10689             }
10690             mask = arg2;
10691             target_to_host_old_sigset(&set, &mask);
10692             ret = do_sigprocmask(how, &set, &oldset);
10693             if (!ret) {
10694                 host_to_target_old_sigset(&mask, &oldset);
10695                 ret = mask;
10696             }
10697         }
10698         return ret;
10699 #endif
10700 
10701 #ifdef TARGET_NR_getgid32
10702     case TARGET_NR_getgid32:
10703         return get_errno(getgid());
10704 #endif
10705 #ifdef TARGET_NR_geteuid32
10706     case TARGET_NR_geteuid32:
10707         return get_errno(geteuid());
10708 #endif
10709 #ifdef TARGET_NR_getegid32
10710     case TARGET_NR_getegid32:
10711         return get_errno(getegid());
10712 #endif
10713 #ifdef TARGET_NR_setreuid32
10714     case TARGET_NR_setreuid32:
10715         return get_errno(setreuid(arg1, arg2));
10716 #endif
10717 #ifdef TARGET_NR_setregid32
10718     case TARGET_NR_setregid32:
10719         return get_errno(setregid(arg1, arg2));
10720 #endif
10721 #ifdef TARGET_NR_getgroups32
10722     case TARGET_NR_getgroups32:
10723         {
10724             int gidsetsize = arg1;
10725             uint32_t *target_grouplist;
10726             gid_t *grouplist;
10727             int i;
10728 
10729             grouplist = alloca(gidsetsize * sizeof(gid_t));
10730             ret = get_errno(getgroups(gidsetsize, grouplist));
10731             if (gidsetsize == 0)
10732                 return ret;
10733             if (!is_error(ret)) {
10734                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10735                 if (!target_grouplist) {
10736                     return -TARGET_EFAULT;
10737                 }
10738                 for (i = 0; i < ret; i++)
10739                     target_grouplist[i] = tswap32(grouplist[i]);
10740                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10741             }
10742         }
10743         return ret;
10744 #endif
10745 #ifdef TARGET_NR_setgroups32
10746     case TARGET_NR_setgroups32:
10747         {
10748             int gidsetsize = arg1;
10749             uint32_t *target_grouplist;
10750             gid_t *grouplist;
10751             int i;
10752 
10753             grouplist = alloca(gidsetsize * sizeof(gid_t));
10754             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10755             if (!target_grouplist) {
10756                 return -TARGET_EFAULT;
10757             }
10758             for (i = 0; i < gidsetsize; i++)
10759                 grouplist[i] = tswap32(target_grouplist[i]);
10760             unlock_user(target_grouplist, arg2, 0);
10761             return get_errno(setgroups(gidsetsize, grouplist));
10762         }
10763 #endif
10764 #ifdef TARGET_NR_fchown32
10765     case TARGET_NR_fchown32:
10766         return get_errno(fchown(arg1, arg2, arg3));
10767 #endif
10768 #ifdef TARGET_NR_setresuid32
10769     case TARGET_NR_setresuid32:
10770         return get_errno(sys_setresuid(arg1, arg2, arg3));
10771 #endif
10772 #ifdef TARGET_NR_getresuid32
10773     case TARGET_NR_getresuid32:
10774         {
10775             uid_t ruid, euid, suid;
10776             ret = get_errno(getresuid(&ruid, &euid, &suid));
10777             if (!is_error(ret)) {
10778                 if (put_user_u32(ruid, arg1)
10779                     || put_user_u32(euid, arg2)
10780                     || put_user_u32(suid, arg3))
10781                     return -TARGET_EFAULT;
10782             }
10783         }
10784         return ret;
10785 #endif
10786 #ifdef TARGET_NR_setresgid32
10787     case TARGET_NR_setresgid32:
10788         return get_errno(sys_setresgid(arg1, arg2, arg3));
10789 #endif
10790 #ifdef TARGET_NR_getresgid32
10791     case TARGET_NR_getresgid32:
10792         {
10793             gid_t rgid, egid, sgid;
10794             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10795             if (!is_error(ret)) {
10796                 if (put_user_u32(rgid, arg1)
10797                     || put_user_u32(egid, arg2)
10798                     || put_user_u32(sgid, arg3))
10799                     return -TARGET_EFAULT;
10800             }
10801         }
10802         return ret;
10803 #endif
10804 #ifdef TARGET_NR_chown32
10805     case TARGET_NR_chown32:
10806         if (!(p = lock_user_string(arg1)))
10807             return -TARGET_EFAULT;
10808         ret = get_errno(chown(p, arg2, arg3));
10809         unlock_user(p, arg1, 0);
10810         return ret;
10811 #endif
10812 #ifdef TARGET_NR_setuid32
10813     case TARGET_NR_setuid32:
10814         return get_errno(sys_setuid(arg1));
10815 #endif
10816 #ifdef TARGET_NR_setgid32
10817     case TARGET_NR_setgid32:
10818         return get_errno(sys_setgid(arg1));
10819 #endif
10820 #ifdef TARGET_NR_setfsuid32
10821     case TARGET_NR_setfsuid32:
10822         return get_errno(setfsuid(arg1));
10823 #endif
10824 #ifdef TARGET_NR_setfsgid32
10825     case TARGET_NR_setfsgid32:
10826         return get_errno(setfsgid(arg1));
10827 #endif
10828 #ifdef TARGET_NR_mincore
10829     case TARGET_NR_mincore:
10830         {
10831             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10832             if (!a) {
10833                 return -TARGET_ENOMEM;
10834             }
10835             p = lock_user_string(arg3);
10836             if (!p) {
10837                 ret = -TARGET_EFAULT;
10838             } else {
10839                 ret = get_errno(mincore(a, arg2, p));
10840                 unlock_user(p, arg3, ret);
10841             }
10842             unlock_user(a, arg1, 0);
10843         }
10844         return ret;
10845 #endif
10846 #ifdef TARGET_NR_arm_fadvise64_64
10847     case TARGET_NR_arm_fadvise64_64:
10848         /* arm_fadvise64_64 looks like fadvise64_64 but
10849          * with different argument order: fd, advice, offset, len
10850          * rather than the usual fd, offset, len, advice.
10851          * Note that offset and len are both 64-bit so appear as
10852          * pairs of 32-bit registers.
10853          */
10854         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10855                             target_offset64(arg5, arg6), arg2);
10856         return -host_to_target_errno(ret);
10857 #endif
10858 
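    /*
     * On 32-bit ABIs the 64-bit offset/length arguments of the fadvise
     * family arrive as pairs of 32-bit registers, and some ABIs require
     * such a pair to start in an even-numbered register (see
     * regpairs_aligned()), hence the argument shuffling below before the
     * halves are recombined with target_offset64().
     */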
10859 #if TARGET_ABI_BITS == 32
10860 
10861 #ifdef TARGET_NR_fadvise64_64
10862     case TARGET_NR_fadvise64_64:
10863 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10864         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10865         ret = arg2;
10866         arg2 = arg3;
10867         arg3 = arg4;
10868         arg4 = arg5;
10869         arg5 = arg6;
10870         arg6 = ret;
10871 #else
10872         /* 6 args: fd, offset (high, low), len (high, low), advice */
10873         if (regpairs_aligned(cpu_env, num)) {
10874             /* offset is in (3,4), len in (5,6) and advice in 7 */
10875             arg2 = arg3;
10876             arg3 = arg4;
10877             arg4 = arg5;
10878             arg5 = arg6;
10879             arg6 = arg7;
10880         }
10881 #endif
10882         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10883                             target_offset64(arg4, arg5), arg6);
10884         return -host_to_target_errno(ret);
10885 #endif
10886 
10887 #ifdef TARGET_NR_fadvise64
10888     case TARGET_NR_fadvise64:
10889         /* 5 args: fd, offset (high, low), len, advice */
10890         if (regpairs_aligned(cpu_env, num)) {
10891             /* offset is in (3,4), len in 5 and advice in 6 */
10892             arg2 = arg3;
10893             arg3 = arg4;
10894             arg4 = arg5;
10895             arg5 = arg6;
10896         }
10897         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10898         return -host_to_target_errno(ret);
10899 #endif
10900 
10901 #else /* not a 32-bit ABI */
10902 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10903 #ifdef TARGET_NR_fadvise64_64
10904     case TARGET_NR_fadvise64_64:
10905 #endif
10906 #ifdef TARGET_NR_fadvise64
10907     case TARGET_NR_fadvise64:
10908 #endif
10909 #ifdef TARGET_S390X
10910         switch (arg4) {
10911         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10912         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10913         case 6: arg4 = POSIX_FADV_DONTNEED; break;
10914         case 7: arg4 = POSIX_FADV_NOREUSE; break;
10915         default: break;
10916         }
10917 #endif
10918         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10919 #endif
10920 #endif /* end of 64-bit ABI fadvise handling */
10921 
10922 #ifdef TARGET_NR_madvise
10923     case TARGET_NR_madvise:
10924         /* A straight passthrough may not be safe because qemu sometimes
10925            turns private file-backed mappings into anonymous mappings.
10926            This will break MADV_DONTNEED.
10927            This is a hint, so ignoring and returning success is ok.  */
10928         return 0;
10929 #endif
10930 #if TARGET_ABI_BITS == 32
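    /*
     * fcntl64 gives 32-bit guests 64-bit file locking: the guest flock64 is
     * converted to the host structure and back around the fcntl call, with
     * ARM OABI guests using their differently padded flock64 layout.
     */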
10931     case TARGET_NR_fcntl64:
10932     {
10933         int cmd;
10934         struct flock64 fl;
10935         from_flock64_fn *copyfrom = copy_from_user_flock64;
10936         to_flock64_fn *copyto = copy_to_user_flock64;
10937 
10938 #ifdef TARGET_ARM
10939         if (!((CPUARMState *)cpu_env)->eabi) {
10940             copyfrom = copy_from_user_oabi_flock64;
10941             copyto = copy_to_user_oabi_flock64;
10942         }
10943 #endif
10944 
10945         cmd = target_to_host_fcntl_cmd(arg2);
10946         if (cmd == -TARGET_EINVAL) {
10947             return cmd;
10948         }
10949 
10950         switch(arg2) {
10951         case TARGET_F_GETLK64:
10952             ret = copyfrom(&fl, arg3);
10953             if (ret) {
10954                 break;
10955             }
10956             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10957             if (ret == 0) {
10958                 ret = copyto(arg3, &fl);
10959             }
10960             break;
10961 
10962         case TARGET_F_SETLK64:
10963         case TARGET_F_SETLKW64:
10964             ret = copyfrom(&fl, arg3);
10965             if (ret) {
10966                 break;
10967             }
10968             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10969             break;
10970         default:
10971             ret = do_fcntl(arg1, arg2, arg3);
10972             break;
10973         }
10974         return ret;
10975     }
10976 #endif
10977 #ifdef TARGET_NR_cacheflush
10978     case TARGET_NR_cacheflush:
10979         /* self-modifying code is handled automatically, so nothing needed */
10980         return 0;
10981 #endif
10982 #ifdef TARGET_NR_getpagesize
10983     case TARGET_NR_getpagesize:
10984         return TARGET_PAGE_SIZE;
10985 #endif
10986     case TARGET_NR_gettid:
10987         return get_errno(sys_gettid());
10988 #ifdef TARGET_NR_readahead
10989     case TARGET_NR_readahead:
10990 #if TARGET_ABI_BITS == 32
10991         if (regpairs_aligned(cpu_env, num)) {
10992             arg2 = arg3;
10993             arg3 = arg4;
10994             arg4 = arg5;
10995         }
10996         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10997 #else
10998         ret = get_errno(readahead(arg1, arg2, arg3));
10999 #endif
11000         return ret;
11001 #endif
11002 #ifdef CONFIG_ATTR
11003 #ifdef TARGET_NR_setxattr
11004     case TARGET_NR_listxattr:
11005     case TARGET_NR_llistxattr:
11006     {
11007         void *p, *b = 0;
11008         if (arg2) {
11009             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11010             if (!b) {
11011                 return -TARGET_EFAULT;
11012             }
11013         }
11014         p = lock_user_string(arg1);
11015         if (p) {
11016             if (num == TARGET_NR_listxattr) {
11017                 ret = get_errno(listxattr(p, b, arg3));
11018             } else {
11019                 ret = get_errno(llistxattr(p, b, arg3));
11020             }
11021         } else {
11022             ret = -TARGET_EFAULT;
11023         }
11024         unlock_user(p, arg1, 0);
11025         unlock_user(b, arg2, arg3);
11026         return ret;
11027     }
11028     case TARGET_NR_flistxattr:
11029     {
11030         void *b = 0;
11031         if (arg2) {
11032             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11033             if (!b) {
11034                 return -TARGET_EFAULT;
11035             }
11036         }
11037         ret = get_errno(flistxattr(arg1, b, arg3));
11038         unlock_user(b, arg2, arg3);
11039         return ret;
11040     }
11041     case TARGET_NR_setxattr:
11042     case TARGET_NR_lsetxattr:
11043         {
11044             void *p, *n, *v = 0;
11045             if (arg3) {
11046                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11047                 if (!v) {
11048                     return -TARGET_EFAULT;
11049                 }
11050             }
11051             p = lock_user_string(arg1);
11052             n = lock_user_string(arg2);
11053             if (p && n) {
11054                 if (num == TARGET_NR_setxattr) {
11055                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11056                 } else {
11057                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11058                 }
11059             } else {
11060                 ret = -TARGET_EFAULT;
11061             }
11062             unlock_user(p, arg1, 0);
11063             unlock_user(n, arg2, 0);
11064             unlock_user(v, arg3, 0);
11065         }
11066         return ret;
11067     case TARGET_NR_fsetxattr:
11068         {
11069             void *n, *v = 0;
11070             if (arg3) {
11071                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11072                 if (!v) {
11073                     return -TARGET_EFAULT;
11074                 }
11075             }
11076             n = lock_user_string(arg2);
11077             if (n) {
11078                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11079             } else {
11080                 ret = -TARGET_EFAULT;
11081             }
11082             unlock_user(n, arg2, 0);
11083             unlock_user(v, arg3, 0);
11084         }
11085         return ret;
11086     case TARGET_NR_getxattr:
11087     case TARGET_NR_lgetxattr:
11088         {
11089             void *p, *n, *v = 0;
11090             if (arg3) {
11091                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11092                 if (!v) {
11093                     return -TARGET_EFAULT;
11094                 }
11095             }
11096             p = lock_user_string(arg1);
11097             n = lock_user_string(arg2);
11098             if (p && n) {
11099                 if (num == TARGET_NR_getxattr) {
11100                     ret = get_errno(getxattr(p, n, v, arg4));
11101                 } else {
11102                     ret = get_errno(lgetxattr(p, n, v, arg4));
11103                 }
11104             } else {
11105                 ret = -TARGET_EFAULT;
11106             }
11107             unlock_user(p, arg1, 0);
11108             unlock_user(n, arg2, 0);
11109             unlock_user(v, arg3, arg4);
11110         }
11111         return ret;
11112     case TARGET_NR_fgetxattr:
11113         {
11114             void *n, *v = 0;
11115             if (arg3) {
11116                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11117                 if (!v) {
11118                     return -TARGET_EFAULT;
11119                 }
11120             }
11121             n = lock_user_string(arg2);
11122             if (n) {
11123                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11124             } else {
11125                 ret = -TARGET_EFAULT;
11126             }
11127             unlock_user(n, arg2, 0);
11128             unlock_user(v, arg3, arg4);
11129         }
11130         return ret;
11131     case TARGET_NR_removexattr:
11132     case TARGET_NR_lremovexattr:
11133         {
11134             void *p, *n;
11135             p = lock_user_string(arg1);
11136             n = lock_user_string(arg2);
11137             if (p && n) {
11138                 if (num == TARGET_NR_removexattr) {
11139                     ret = get_errno(removexattr(p, n));
11140                 } else {
11141                     ret = get_errno(lremovexattr(p, n));
11142                 }
11143             } else {
11144                 ret = -TARGET_EFAULT;
11145             }
11146             unlock_user(p, arg1, 0);
11147             unlock_user(n, arg2, 0);
11148         }
11149         return ret;
11150     case TARGET_NR_fremovexattr:
11151         {
11152             void *n;
11153             n = lock_user_string(arg2);
11154             if (n) {
11155                 ret = get_errno(fremovexattr(arg1, n));
11156             } else {
11157                 ret = -TARGET_EFAULT;
11158             }
11159             unlock_user(n, arg2, 0);
11160         }
11161         return ret;
11162 #endif
11163 #endif /* CONFIG_ATTR */
11164 #ifdef TARGET_NR_set_thread_area
11165     case TARGET_NR_set_thread_area:
11166 #if defined(TARGET_MIPS)
11167       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11168       return 0;
11169 #elif defined(TARGET_CRIS)
11170       if (arg1 & 0xff) {
11171           ret = -TARGET_EINVAL;
11172       } else {
11173           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11174           ret = 0;
11175       }
11176       return ret;
11177 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11178       return do_set_thread_area(cpu_env, arg1);
11179 #elif defined(TARGET_M68K)
11180       {
11181           TaskState *ts = cpu->opaque;
11182           ts->tp_value = arg1;
11183           return 0;
11184       }
11185 #else
11186       return -TARGET_ENOSYS;
11187 #endif
11188 #endif
11189 #ifdef TARGET_NR_get_thread_area
11190     case TARGET_NR_get_thread_area:
11191 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11192         return do_get_thread_area(cpu_env, arg1);
11193 #elif defined(TARGET_M68K)
11194         {
11195             TaskState *ts = cpu->opaque;
11196             return ts->tp_value;
11197         }
11198 #else
11199         return -TARGET_ENOSYS;
11200 #endif
11201 #endif
11202 #ifdef TARGET_NR_getdomainname
11203     case TARGET_NR_getdomainname:
11204         return -TARGET_ENOSYS;
11205 #endif
11206 
11207 #ifdef TARGET_NR_clock_settime
11208     case TARGET_NR_clock_settime:
11209     {
11210         struct timespec ts;
11211 
11212         ret = target_to_host_timespec(&ts, arg2);
11213         if (!is_error(ret)) {
11214             ret = get_errno(clock_settime(arg1, &ts));
11215         }
11216         return ret;
11217     }
11218 #endif
11219 #ifdef TARGET_NR_clock_gettime
11220     case TARGET_NR_clock_gettime:
11221     {
11222         struct timespec ts;
11223         ret = get_errno(clock_gettime(arg1, &ts));
11224         if (!is_error(ret)) {
11225             ret = host_to_target_timespec(arg2, &ts);
11226         }
11227         return ret;
11228     }
11229 #endif
11230 #ifdef TARGET_NR_clock_getres
11231     case TARGET_NR_clock_getres:
11232     {
11233         struct timespec ts;
11234         ret = get_errno(clock_getres(arg1, &ts));
11235         if (!is_error(ret)) {
11236             host_to_target_timespec(arg2, &ts);
11237         }
11238         return ret;
11239     }
11240 #endif
11241 #ifdef TARGET_NR_clock_nanosleep
11242     case TARGET_NR_clock_nanosleep:
11243     {
11244         struct timespec ts;
11245         target_to_host_timespec(&ts, arg3);
11246         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11247                                              &ts, arg4 ? &ts : NULL));
11248         if (arg4)
11249             host_to_target_timespec(arg4, &ts);
11250 
11251 #if defined(TARGET_PPC)
11252         /* clock_nanosleep is odd in that it returns positive errno values.
11253          * On PPC, CR0 bit 3 should be set in such a situation. */
11254         if (ret && ret != -TARGET_ERESTARTSYS) {
11255             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11256         }
11257 #endif
11258         return ret;
11259     }
11260 #endif
11261 
11262 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11263     case TARGET_NR_set_tid_address:
11264         return get_errno(set_tid_address((int *)g2h(arg1)));
11265 #endif
11266 
11267     case TARGET_NR_tkill:
11268         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11269 
11270     case TARGET_NR_tgkill:
11271         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11272                          target_to_host_signal(arg3)));
11273 
11274 #ifdef TARGET_NR_set_robust_list
11275     case TARGET_NR_set_robust_list:
11276     case TARGET_NR_get_robust_list:
11277         /* The ABI for supporting robust futexes has userspace pass
11278          * the kernel a pointer to a linked list which is updated by
11279          * userspace after the syscall; the list is walked by the kernel
11280          * when the thread exits. Since the linked list in QEMU guest
11281          * memory isn't a valid linked list for the host and we have
11282          * no way to reliably intercept the thread-death event, we can't
11283          * support these. Silently return ENOSYS so that guest userspace
11284          * falls back to a non-robust futex implementation (which should
11285          * be OK except in the corner case of the guest crashing while
11286          * holding a mutex that is shared with another process via
11287          * shared memory).
11288          */
11289         return -TARGET_ENOSYS;
11290 #endif
11291 
11292 #if defined(TARGET_NR_utimensat)
11293     case TARGET_NR_utimensat:
11294         {
11295             struct timespec *tsp, ts[2];
11296             if (!arg3) {
11297                 tsp = NULL;
11298             } else {
11299                 target_to_host_timespec(ts, arg3);
11300                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11301                 tsp = ts;
11302             }
11303             if (!arg2)
11304                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11305             else {
11306                 if (!(p = lock_user_string(arg2))) {
11307                     return -TARGET_EFAULT;
11308                 }
11309                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11310                 unlock_user(p, arg2, 0);
11311             }
11312         }
11313         return ret;
11314 #endif
11315     case TARGET_NR_futex:
11316         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
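    /*
     * The returned inotify descriptors are registered with an fd translator
     * (fd_trans_register() with target_inotify_trans) so that the
     * struct inotify_event data read from them is converted to the guest's
     * layout.
     */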
11317 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11318     case TARGET_NR_inotify_init:
11319         ret = get_errno(sys_inotify_init());
11320         if (ret >= 0) {
11321             fd_trans_register(ret, &target_inotify_trans);
11322         }
11323         return ret;
11324 #endif
11325 #ifdef CONFIG_INOTIFY1
11326 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11327     case TARGET_NR_inotify_init1:
11328         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11329                                           fcntl_flags_tbl)));
11330         if (ret >= 0) {
11331             fd_trans_register(ret, &target_inotify_trans);
11332         }
11333         return ret;
11334 #endif
11335 #endif
11336 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11337     case TARGET_NR_inotify_add_watch:
11338         p = lock_user_string(arg2);
11339         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11340         unlock_user(p, arg2, 0);
11341         return ret;
11342 #endif
11343 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11344     case TARGET_NR_inotify_rm_watch:
11345         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11346 #endif
11347 
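    /*
     * POSIX message queue syscalls are forwarded to the host's mqueue
     * support; struct mq_attr and the timespec timeouts are converted
     * between guest and host layouts on the way through.
     */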
11348 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11349     case TARGET_NR_mq_open:
11350         {
11351             struct mq_attr posix_mq_attr;
11352             struct mq_attr *pposix_mq_attr;
11353             int host_flags;
11354 
11355             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11356             pposix_mq_attr = NULL;
11357             if (arg4) {
11358                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11359                     return -TARGET_EFAULT;
11360                 }
11361                 pposix_mq_attr = &posix_mq_attr;
11362             }
11363             p = lock_user_string(arg1 - 1);
11364             if (!p) {
11365                 return -TARGET_EFAULT;
11366             }
11367             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11368             unlock_user(p, arg1, 0);
11369         }
11370         return ret;
11371 
11372     case TARGET_NR_mq_unlink:
11373         p = lock_user_string(arg1 - 1);
11374         if (!p) {
11375             return -TARGET_EFAULT;
11376         }
11377         ret = get_errno(mq_unlink(p));
11378         unlock_user(p, arg1, 0);
11379         return ret;
11380 
11381     case TARGET_NR_mq_timedsend:
11382         {
11383             struct timespec ts;
11384 
11385             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11386             if (arg5 != 0) {
11387                 target_to_host_timespec(&ts, arg5);
11388                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11389                 host_to_target_timespec(arg5, &ts);
11390             } else {
11391                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11392             }
11393             unlock_user(p, arg2, arg3);
11394         }
11395         return ret;
11396 
11397     case TARGET_NR_mq_timedreceive:
11398         {
11399             struct timespec ts;
11400             unsigned int prio;
11401 
11402             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11403             if (arg5 != 0) {
11404                 target_to_host_timespec(&ts, arg5);
11405                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11406                                                      &prio, &ts));
11407                 host_to_target_timespec(arg5, &ts);
11408             } else {
11409                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11410                                                      &prio, NULL));
11411             }
11412             unlock_user(p, arg2, arg3);
11413             if (arg4 != 0)
11414                 put_user_u32(prio, arg4);
11415         }
11416         return ret;
11417 
11418     /* Not implemented for now... */
11419 /*     case TARGET_NR_mq_notify: */
11420 /*         break; */
11421 
11422     case TARGET_NR_mq_getsetattr:
11423         {
11424             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11425             ret = 0;
11426             if (arg2 != 0) {
11427                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11428                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11429                                            &posix_mq_attr_out));
11430             } else if (arg3 != 0) {
11431                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11432             }
11433             if (ret == 0 && arg3 != 0) {
11434                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11435             }
11436         }
11437         return ret;
11438 #endif
11439 
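    /*
     * tee/splice/vmsplice act directly on host file descriptors; only the
     * optional loff_t offsets (splice) and the iovec array (vmsplice) need
     * copying between guest and host memory.
     */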
11440 #ifdef CONFIG_SPLICE
11441 #ifdef TARGET_NR_tee
11442     case TARGET_NR_tee:
11443         {
11444             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11445         }
11446         return ret;
11447 #endif
11448 #ifdef TARGET_NR_splice
11449     case TARGET_NR_splice:
11450         {
11451             loff_t loff_in, loff_out;
11452             loff_t *ploff_in = NULL, *ploff_out = NULL;
11453             if (arg2) {
11454                 if (get_user_u64(loff_in, arg2)) {
11455                     return -TARGET_EFAULT;
11456                 }
11457                 ploff_in = &loff_in;
11458             }
11459             if (arg4) {
11460                 if (get_user_u64(loff_out, arg4)) {
11461                     return -TARGET_EFAULT;
11462                 }
11463                 ploff_out = &loff_out;
11464             }
11465             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11466             if (arg2) {
11467                 if (put_user_u64(loff_in, arg2)) {
11468                     return -TARGET_EFAULT;
11469                 }
11470             }
11471             if (arg4) {
11472                 if (put_user_u64(loff_out, arg4)) {
11473                     return -TARGET_EFAULT;
11474                 }
11475             }
11476         }
11477         return ret;
11478 #endif
11479 #ifdef TARGET_NR_vmsplice
11480     case TARGET_NR_vmsplice:
11481         {
11482             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11483             if (vec != NULL) {
11484                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11485                 unlock_iovec(vec, arg2, arg3, 0);
11486             } else {
11487                 ret = -host_to_target_errno(errno);
11488             }
11489         }
11490         return ret;
11491 #endif
11492 #endif /* CONFIG_SPLICE */
11493 #ifdef CONFIG_EVENTFD
11494 #if defined(TARGET_NR_eventfd)
11495     case TARGET_NR_eventfd:
11496         ret = get_errno(eventfd(arg1, 0));
11497         if (ret >= 0) {
11498             fd_trans_register(ret, &target_eventfd_trans);
11499         }
11500         return ret;
11501 #endif
11502 #if defined(TARGET_NR_eventfd2)
11503     case TARGET_NR_eventfd2:
11504     {
11505         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11506         if (arg2 & TARGET_O_NONBLOCK) {
11507             host_flags |= O_NONBLOCK;
11508         }
11509         if (arg2 & TARGET_O_CLOEXEC) {
11510             host_flags |= O_CLOEXEC;
11511         }
11512         ret = get_errno(eventfd(arg1, host_flags));
11513         if (ret >= 0) {
11514             fd_trans_register(ret, &target_eventfd_trans);
11515         }
11516         return ret;
11517     }
11518 #endif
11519 #endif /* CONFIG_EVENTFD  */
11520 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11521     case TARGET_NR_fallocate:
11522 #if TARGET_ABI_BITS == 32
11523         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11524                                   target_offset64(arg5, arg6)));
11525 #else
11526         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11527 #endif
11528         return ret;
11529 #endif
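    /*
     * As with fadvise, 32-bit ABIs pass sync_file_range's 64-bit offset and
     * nbytes as register pairs; MIPS pads the first pair to an even register
     * slot, so its values start at arg3/arg5 with the flags in arg7.
     */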
11530 #if defined(CONFIG_SYNC_FILE_RANGE)
11531 #if defined(TARGET_NR_sync_file_range)
11532     case TARGET_NR_sync_file_range:
11533 #if TARGET_ABI_BITS == 32
11534 #if defined(TARGET_MIPS)
11535         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11536                                         target_offset64(arg5, arg6), arg7));
11537 #else
11538         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11539                                         target_offset64(arg4, arg5), arg6));
11540 #endif /* !TARGET_MIPS */
11541 #else
11542         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11543 #endif
11544         return ret;
11545 #endif
11546 #if defined(TARGET_NR_sync_file_range2)
11547     case TARGET_NR_sync_file_range2:
11548         /* This is like sync_file_range but the arguments are reordered */
11549 #if TARGET_ABI_BITS == 32
11550         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11551                                         target_offset64(arg5, arg6), arg2));
11552 #else
11553         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11554 #endif
11555         return ret;
11556 #endif
11557 #endif
11558 #if defined(TARGET_NR_signalfd4)
11559     case TARGET_NR_signalfd4:
11560         return do_signalfd4(arg1, arg2, arg4);
11561 #endif
11562 #if defined(TARGET_NR_signalfd)
11563     case TARGET_NR_signalfd:
11564         return do_signalfd4(arg1, arg2, 0);
11565 #endif
11566 #if defined(CONFIG_EPOLL)
11567 #if defined(TARGET_NR_epoll_create)
11568     case TARGET_NR_epoll_create:
11569         return get_errno(epoll_create(arg1));
11570 #endif
11571 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11572     case TARGET_NR_epoll_create1:
11573         return get_errno(epoll_create1(arg1));
11574 #endif
11575 #if defined(TARGET_NR_epoll_ctl)
11576     case TARGET_NR_epoll_ctl:
11577     {
11578         struct epoll_event ep;
11579         struct epoll_event *epp = 0;
11580         if (arg4) {
11581             struct target_epoll_event *target_ep;
11582             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11583                 return -TARGET_EFAULT;
11584             }
11585             ep.events = tswap32(target_ep->events);
11586             /* The epoll_data_t union is just opaque data to the kernel,
11587              * so we transfer all 64 bits across and need not worry what
11588              * actual data type it is.
11589              */
11590             ep.data.u64 = tswap64(target_ep->data.u64);
11591             unlock_user_struct(target_ep, arg4, 0);
11592             epp = &ep;
11593         }
11594         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11595     }
11596 #endif
11597 
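    /*
     * epoll_wait is implemented via safe_epoll_pwait with a NULL signal
     * mask; events are gathered into a host-side array and byte-swapped
     * back into the guest's epoll_event buffer on success.
     */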
11598 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11599 #if defined(TARGET_NR_epoll_wait)
11600     case TARGET_NR_epoll_wait:
11601 #endif
11602 #if defined(TARGET_NR_epoll_pwait)
11603     case TARGET_NR_epoll_pwait:
11604 #endif
11605     {
11606         struct target_epoll_event *target_ep;
11607         struct epoll_event *ep;
11608         int epfd = arg1;
11609         int maxevents = arg3;
11610         int timeout = arg4;
11611 
11612         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11613             return -TARGET_EINVAL;
11614         }
11615 
11616         target_ep = lock_user(VERIFY_WRITE, arg2,
11617                               maxevents * sizeof(struct target_epoll_event), 1);
11618         if (!target_ep) {
11619             return -TARGET_EFAULT;
11620         }
11621 
11622         ep = g_try_new(struct epoll_event, maxevents);
11623         if (!ep) {
11624             unlock_user(target_ep, arg2, 0);
11625             return -TARGET_ENOMEM;
11626         }
11627 
11628         switch (num) {
11629 #if defined(TARGET_NR_epoll_pwait)
11630         case TARGET_NR_epoll_pwait:
11631         {
11632             target_sigset_t *target_set;
11633             sigset_t _set, *set = &_set;
11634 
11635             if (arg5) {
11636                 if (arg6 != sizeof(target_sigset_t)) {
11637                     ret = -TARGET_EINVAL;
11638                     break;
11639                 }
11640 
11641                 target_set = lock_user(VERIFY_READ, arg5,
11642                                        sizeof(target_sigset_t), 1);
11643                 if (!target_set) {
11644                     ret = -TARGET_EFAULT;
11645                     break;
11646                 }
11647                 target_to_host_sigset(set, target_set);
11648                 unlock_user(target_set, arg5, 0);
11649             } else {
11650                 set = NULL;
11651             }
11652 
11653             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11654                                              set, SIGSET_T_SIZE));
11655             break;
11656         }
11657 #endif
11658 #if defined(TARGET_NR_epoll_wait)
11659         case TARGET_NR_epoll_wait:
11660             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11661                                              NULL, 0));
11662             break;
11663 #endif
11664         default:
11665             ret = -TARGET_ENOSYS;
11666         }
11667         if (!is_error(ret)) {
11668             int i;
11669             for (i = 0; i < ret; i++) {
11670                 target_ep[i].events = tswap32(ep[i].events);
11671                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11672             }
11673             unlock_user(target_ep, arg2,
11674                         ret * sizeof(struct target_epoll_event));
11675         } else {
11676             unlock_user(target_ep, arg2, 0);
11677         }
11678         g_free(ep);
11679         return ret;
11680     }
11681 #endif
11682 #endif
11683 #ifdef TARGET_NR_prlimit64
11684     case TARGET_NR_prlimit64:
11685     {
11686         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11687         struct target_rlimit64 *target_rnew, *target_rold;
11688         struct host_rlimit64 rnew, rold, *rnewp = 0;
11689         int resource = target_to_host_resource(arg2);
11690         if (arg3) {
11691             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11692                 return -TARGET_EFAULT;
11693             }
11694             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11695             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11696             unlock_user_struct(target_rnew, arg3, 0);
11697             rnewp = &rnew;
11698         }
11699 
11700         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11701         if (!is_error(ret) && arg4) {
11702             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11703                 return -TARGET_EFAULT;
11704             }
11705             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11706             target_rold->rlim_max = tswap64(rold.rlim_max);
11707             unlock_user_struct(target_rold, arg4, 1);
11708         }
11709         return ret;
11710     }
11711 #endif
11712 #ifdef TARGET_NR_gethostname
11713     case TARGET_NR_gethostname:
11714     {
11715         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11716         if (name) {
11717             ret = get_errno(gethostname(name, arg2));
11718             unlock_user(name, arg1, arg2);
11719         } else {
11720             ret = -TARGET_EFAULT;
11721         }
11722         return ret;
11723     }
11724 #endif
11725 #ifdef TARGET_NR_atomic_cmpxchg_32
11726     case TARGET_NR_atomic_cmpxchg_32:
11727     {
11728         /* should use start_exclusive from main.c */
11729         abi_ulong mem_value;
11730         if (get_user_u32(mem_value, arg6)) {
11731             target_siginfo_t info;
11732             info.si_signo = SIGSEGV;
11733             info.si_errno = 0;
11734             info.si_code = TARGET_SEGV_MAPERR;
11735             info._sifields._sigfault._addr = arg6;
11736             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11737                          QEMU_SI_FAULT, &info);
11738             ret = 0xdeadbeef;
11739 
11740         }
11741         if (mem_value == arg2)
11742             put_user_u32(arg1, arg6);
11743         return mem_value;
11744     }
11745 #endif
11746 #ifdef TARGET_NR_atomic_barrier
11747     case TARGET_NR_atomic_barrier:
11748         /* Like the kernel implementation and the
11749            qemu arm barrier, no-op this? */
11750         return 0;
11751 #endif
11752 
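    /*
     * Guest POSIX timer ids encode an index into the host-side
     * g_posix_timers[] table, tagged with TIMER_MAGIC; get_timer_id()
     * validates the tag and recovers the index for the handlers below.
     */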
11753 #ifdef TARGET_NR_timer_create
11754     case TARGET_NR_timer_create:
11755     {
11756         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11757 
11758         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11759 
11760         int clkid = arg1;
11761         int timer_index = next_free_host_timer();
11762 
11763         if (timer_index < 0) {
11764             ret = -TARGET_EAGAIN;
11765         } else {
11766             timer_t *phtimer = g_posix_timers + timer_index;
11767 
11768             if (arg2) {
11769                 phost_sevp = &host_sevp;
11770                 ret = target_to_host_sigevent(phost_sevp, arg2);
11771                 if (ret != 0) {
11772                     return ret;
11773                 }
11774             }
11775 
11776             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11777             if (ret) {
11778                 phtimer = NULL;
11779             } else {
11780                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11781                     return -TARGET_EFAULT;
11782                 }
11783             }
11784         }
11785         return ret;
11786     }
11787 #endif
11788 
11789 #ifdef TARGET_NR_timer_settime
11790     case TARGET_NR_timer_settime:
11791     {
11792         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11793          * struct itimerspec * old_value */
11794         target_timer_t timerid = get_timer_id(arg1);
11795 
11796         if (timerid < 0) {
11797             ret = timerid;
11798         } else if (arg3 == 0) {
11799             ret = -TARGET_EINVAL;
11800         } else {
11801             timer_t htimer = g_posix_timers[timerid];
11802             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11803 
11804             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11805                 return -TARGET_EFAULT;
11806             }
11807             ret = get_errno(
11808                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11809             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11810                 return -TARGET_EFAULT;
11811             }
11812         }
11813         return ret;
11814     }
11815 #endif
11816 
11817 #ifdef TARGET_NR_timer_gettime
11818     case TARGET_NR_timer_gettime:
11819     {
11820         /* args: timer_t timerid, struct itimerspec *curr_value */
11821         target_timer_t timerid = get_timer_id(arg1);
11822 
11823         if (timerid < 0) {
11824             ret = timerid;
11825         } else if (!arg2) {
11826             ret = -TARGET_EFAULT;
11827         } else {
11828             timer_t htimer = g_posix_timers[timerid];
11829             struct itimerspec hspec;
11830             ret = get_errno(timer_gettime(htimer, &hspec));
11831 
11832             if (host_to_target_itimerspec(arg2, &hspec)) {
11833                 ret = -TARGET_EFAULT;
11834             }
11835         }
11836         return ret;
11837     }
11838 #endif
11839 
11840 #ifdef TARGET_NR_timer_getoverrun
11841     case TARGET_NR_timer_getoverrun:
11842     {
11843         /* args: timer_t timerid */
11844         target_timer_t timerid = get_timer_id(arg1);
11845 
11846         if (timerid < 0) {
11847             ret = timerid;
11848         } else {
11849             timer_t htimer = g_posix_timers[timerid];
11850             ret = get_errno(timer_getoverrun(htimer));
11851         }
11852         return ret;
11853     }
11854 #endif
11855 
11856 #ifdef TARGET_NR_timer_delete
11857     case TARGET_NR_timer_delete:
11858     {
11859         /* args: timer_t timerid */
11860         target_timer_t timerid = get_timer_id(arg1);
11861 
11862         if (timerid < 0) {
11863             ret = timerid;
11864         } else {
11865             timer_t htimer = g_posix_timers[timerid];
11866             ret = get_errno(timer_delete(htimer));
11867             g_posix_timers[timerid] = 0;
11868         }
11869         return ret;
11870     }
11871 #endif
11872 
11873 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11874     case TARGET_NR_timerfd_create:
11875         return get_errno(timerfd_create(arg1,
11876                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11877 #endif
11878 
11879 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11880     case TARGET_NR_timerfd_gettime:
11881         {
11882             struct itimerspec its_curr;
11883 
11884             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11885 
11886             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11887                 return -TARGET_EFAULT;
11888             }
11889         }
11890         return ret;
11891 #endif
11892 
11893 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11894     case TARGET_NR_timerfd_settime:
11895         {
11896             struct itimerspec its_new, its_old, *p_new;
11897 
11898             if (arg3) {
11899                 if (target_to_host_itimerspec(&its_new, arg3)) {
11900                     return -TARGET_EFAULT;
11901                 }
11902                 p_new = &its_new;
11903             } else {
11904                 p_new = NULL;
11905             }
11906 
11907             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11908 
11909             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11910                 return -TARGET_EFAULT;
11911             }
11912         }
11913         return ret;
11914 #endif
11915 
11916 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11917     case TARGET_NR_ioprio_get:
11918         return get_errno(ioprio_get(arg1, arg2));
11919 #endif
11920 
11921 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11922     case TARGET_NR_ioprio_set:
11923         return get_errno(ioprio_set(arg1, arg2, arg3));
11924 #endif
11925 
11926 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11927     case TARGET_NR_setns:
11928         return get_errno(setns(arg1, arg2));
11929 #endif
11930 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11931     case TARGET_NR_unshare:
11932         return get_errno(unshare(arg1));
11933 #endif
11934 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11935     case TARGET_NR_kcmp:
11936         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11937 #endif
11938 #ifdef TARGET_NR_swapcontext
11939     case TARGET_NR_swapcontext:
11940         /* PowerPC specific.  */
11941         return do_swapcontext(cpu_env, arg1, arg2, arg3);
11942 #endif
11943 #ifdef TARGET_NR_memfd_create
11944     case TARGET_NR_memfd_create:
11945         p = lock_user_string(arg1);
11946         if (!p) {
11947             return -TARGET_EFAULT;
11948         }
11949         ret = get_errno(memfd_create(p, arg2));
11950         fd_trans_unregister(ret);
11951         unlock_user(p, arg1, 0);
11952         return ret;
11953 #endif
11954 
11955     default:
11956         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11957         return -TARGET_ENOSYS;
11958     }
11959     return ret;
11960 }
11961 
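/*
 * Entry point called from the per-architecture cpu loops: wraps
 * do_syscall1() with tracepoints and optional -strace style logging of the
 * syscall arguments and result.
 */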
11962 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11963                     abi_long arg2, abi_long arg3, abi_long arg4,
11964                     abi_long arg5, abi_long arg6, abi_long arg7,
11965                     abi_long arg8)
11966 {
11967     CPUState *cpu = env_cpu(cpu_env);
11968     abi_long ret;
11969 
11970 #ifdef DEBUG_ERESTARTSYS
11971     /* Debug-only code for exercising the syscall-restart code paths
11972      * in the per-architecture cpu main loops: restart every syscall
11973      * the guest makes once before letting it through.
11974      */
11975     {
11976         static bool flag;
11977         flag = !flag;
11978         if (flag) {
11979             return -TARGET_ERESTARTSYS;
11980         }
11981     }
11982 #endif
11983 
11984     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11985                              arg5, arg6, arg7, arg8);
11986 
11987     if (unlikely(do_strace)) {
11988         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11989         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11990                           arg5, arg6, arg7, arg8);
11991         print_syscall_ret(num, ret);
11992     } else {
11993         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11994                           arg5, arg6, arg7, arg8);
11995     }
11996 
11997     trace_guest_user_syscall_ret(cpu, num, ret);
11998     return ret;
11999 }
12000