xref: /openbmc/qemu/linux-user/syscall.c (revision a2d86682)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
118 
119 #ifndef CLONE_IO
120 #define CLONE_IO                0x80000000      /* Clone io context */
121 #endif
122 
123 /* We can't directly call the host clone syscall, because this will
124  * badly confuse libc (breaking mutexes, for example). So we must
125  * divide clone flags into:
126  *  * flag combinations that look like pthread_create()
127  *  * flag combinations that look like fork()
128  *  * flags we can implement within QEMU itself
129  *  * flags we can't support and will return an error for
130  */
131 /* For thread creation, all these flags must be present; for
132  * fork, none must be present.
133  */
134 #define CLONE_THREAD_FLAGS                              \
135     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
136      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137 
138 /* These flags are ignored:
139  * CLONE_DETACHED is now ignored by the kernel;
140  * CLONE_IO is just an optimisation hint to the I/O scheduler
141  */
142 #define CLONE_IGNORED_FLAGS                     \
143     (CLONE_DETACHED | CLONE_IO)
144 
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS               \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149 
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
154 
155 #define CLONE_INVALID_FORK_FLAGS                                        \
156     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157 
158 #define CLONE_INVALID_THREAD_FLAGS                                      \
159     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
160        CLONE_IGNORED_FLAGS))
161 
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163  * have almost all been allocated. We cannot support any of
164  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166  * The checks against the invalid thread masks above will catch these.
167  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168  */
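/* Illustrative classification (the exact flag set is chosen by the guest
 * libc, so this is only an example): an NPTL pthread_create() typically
 * passes CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * which contains all of CLONE_THREAD_FLAGS and nothing outside the optional
 * or ignored sets, so it is treated as thread creation. A plain fork()
 * typically passes little more than an exit signal in CSIGNAL plus flags
 * within CLONE_OPTIONAL_FORK_FLAGS; anything matching the
 * CLONE_INVALID_*_FLAGS masks is rejected.
 */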
169 
170 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
171  * once. This exercises the codepaths for restart.
172  */
173 //#define DEBUG_ERESTARTSYS
174 
175 //#include <linux/msdos_fs.h>
176 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
177 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
178 
179 #undef _syscall0
180 #undef _syscall1
181 #undef _syscall2
182 #undef _syscall3
183 #undef _syscall4
184 #undef _syscall5
185 #undef _syscall6
186 
187 #define _syscall0(type,name)		\
188 static type name (void)			\
189 {					\
190 	return syscall(__NR_##name);	\
191 }
192 
193 #define _syscall1(type,name,type1,arg1)		\
194 static type name (type1 arg1)			\
195 {						\
196 	return syscall(__NR_##name, arg1);	\
197 }
198 
199 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
200 static type name (type1 arg1,type2 arg2)		\
201 {							\
202 	return syscall(__NR_##name, arg1, arg2);	\
203 }
204 
205 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3)		\
207 {								\
208 	return syscall(__NR_##name, arg1, arg2, arg3);		\
209 }
210 
211 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
215 }
216 
217 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5)							\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
220 {										\
221 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
222 }
223 
224 
225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
226 		  type5,arg5,type6,arg6)					\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
228                   type6 arg6)							\
229 {										\
230 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
231 }
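/* For example, the instantiation "_syscall0(int, sys_gettid)" further down
 * expands (roughly) to:
 *   static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * i.e. a thin wrapper that invokes the host syscall directly rather than
 * any libc wrapper; the __NR_sys_* aliases defined next map these wrapper
 * names back onto the real host syscall numbers.
 */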
232 
233 
234 #define __NR_sys_uname __NR_uname
235 #define __NR_sys_getcwd1 __NR_getcwd
236 #define __NR_sys_getdents __NR_getdents
237 #define __NR_sys_getdents64 __NR_getdents64
238 #define __NR_sys_getpriority __NR_getpriority
239 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
240 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
246 #define __NR_sys_statx __NR_statx
247 
248 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
249 #define __NR__llseek __NR_lseek
250 #endif
251 
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
255 #endif
256 
257 #define __NR_sys_gettid __NR_gettid
258 _syscall0(int, sys_gettid)
259 
260 /* For the 64-bit guest on 32-bit host case we must emulate
261  * getdents using getdents64, because otherwise the host
262  * might hand us back more dirent records than we can fit
263  * into the guest buffer after structure format conversion.
264  * Otherwise we implement the guest getdents via the host getdents, if the host has it.
265  */
266 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
267 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #endif
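/* Rough illustration of the problem above (sizes are indicative, not exact):
 * a 32-bit host linux_dirent carries 32-bit d_ino/d_off fields, so each
 * record grows by several bytes once converted to the 64-bit guest layout,
 * and a full host buffer's worth of entries may no longer fit in the guest
 * buffer. Host getdents64 records are already at least as large as the
 * converted guest records, which avoids the overflow.
 */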
269 
270 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
271 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
272 #endif
273 #if (defined(TARGET_NR_getdents) && \
274       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
275     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
276 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
277 #endif
278 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
279 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
280           loff_t *, res, uint, wh);
281 #endif
282 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
283 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
284           siginfo_t *, uinfo)
285 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
286 #ifdef __NR_exit_group
287 _syscall1(int,exit_group,int,error_code)
288 #endif
289 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
290 _syscall1(int,set_tid_address,int *,tidptr)
291 #endif
292 #if defined(TARGET_NR_futex) && defined(__NR_futex)
293 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
294           const struct timespec *,timeout,int *,uaddr2,int,val3)
295 #endif
296 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
297 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
298           unsigned long *, user_mask_ptr);
299 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
300 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_getcpu __NR_getcpu
303 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
304 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
305           void *, arg);
306 _syscall2(int, capget, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 _syscall2(int, capset, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
311 _syscall2(int, ioprio_get, int, which, int, who)
312 #endif
313 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
314 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
315 #endif
316 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
317 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
318 #endif
319 
320 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
321 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
322           unsigned long, idx1, unsigned long, idx2)
323 #endif
324 
325 /*
326  * It is assumed that struct statx is architecture independent.
327  */
328 #if defined(TARGET_NR_statx) && defined(__NR_statx)
329 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
330           unsigned int, mask, struct target_statx *, statxbuf)
331 #endif
332 
333 static bitmask_transtbl fcntl_flags_tbl[] = {
334   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
335   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
336   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
337   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
338   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
339   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
340   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
341   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
342   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
343   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
344   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
345   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
346   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
347 #if defined(O_DIRECT)
348   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
349 #endif
350 #if defined(O_NOATIME)
351   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
352 #endif
353 #if defined(O_CLOEXEC)
354   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
355 #endif
356 #if defined(O_PATH)
357   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
358 #endif
359 #if defined(O_TMPFILE)
360   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
361 #endif
362   /* Don't terminate the list prematurely on 64-bit host+guest.  */
363 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
364   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
365 #endif
366   { 0, 0, 0, 0 }
367 };
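/* Each row above is { target_mask, target_bits, host_mask, host_bits }.
 * Sketch of how the table is consumed (the generic helpers live elsewhere
 * in the tree): target_to_host_bitmask() checks each row for
 * (flags & target_mask) == target_bits and, on a match, ORs host_bits into
 * the result, so e.g. a guest O_WRONLY | O_CREAT maps to the host's
 * O_WRONLY | O_CREAT even where the numeric flag values differ between ABIs.
 */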
368 
369 static int sys_getcwd1(char *buf, size_t size)
370 {
371   if (getcwd(buf, size) == NULL) {
372       /* getcwd() sets errno */
373       return (-1);
374   }
375   return strlen(buf)+1;
376 }
377 
378 #ifdef TARGET_NR_utimensat
379 #if defined(__NR_utimensat)
380 #define __NR_sys_utimensat __NR_utimensat
381 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
382           const struct timespec *,tsp,int,flags)
383 #else
384 static int sys_utimensat(int dirfd, const char *pathname,
385                          const struct timespec times[2], int flags)
386 {
387     errno = ENOSYS;
388     return -1;
389 }
390 #endif
391 #endif /* TARGET_NR_utimensat */
392 
393 #ifdef TARGET_NR_renameat2
394 #if defined(__NR_renameat2)
395 #define __NR_sys_renameat2 __NR_renameat2
396 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
397           const char *, new, unsigned int, flags)
398 #else
399 static int sys_renameat2(int oldfd, const char *old,
400                          int newfd, const char *new, int flags)
401 {
402     if (flags == 0) {
403         return renameat(oldfd, old, newfd, new);
404     }
405     errno = ENOSYS;
406     return -1;
407 }
408 #endif
409 #endif /* TARGET_NR_renameat2 */
410 
411 #ifdef CONFIG_INOTIFY
412 #include <sys/inotify.h>
413 
414 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
415 static int sys_inotify_init(void)
416 {
417   return (inotify_init());
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
421 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
422 {
423   return (inotify_add_watch(fd, pathname, mask));
424 }
425 #endif
426 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
427 static int sys_inotify_rm_watch(int fd, int32_t wd)
428 {
429   return (inotify_rm_watch(fd, wd));
430 }
431 #endif
432 #ifdef CONFIG_INOTIFY1
433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
434 static int sys_inotify_init1(int flags)
435 {
436   return (inotify_init1(flags));
437 }
438 #endif
439 #endif
440 #else
441 /* Userspace can usually survive runtime without inotify */
442 #undef TARGET_NR_inotify_init
443 #undef TARGET_NR_inotify_init1
444 #undef TARGET_NR_inotify_add_watch
445 #undef TARGET_NR_inotify_rm_watch
446 #endif /* CONFIG_INOTIFY  */
447 
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
451 #endif
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not match the one used by the underlying syscall */
454 struct host_rlimit64 {
455     uint64_t rlim_cur;
456     uint64_t rlim_max;
457 };
458 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
459           const struct host_rlimit64 *, new_limit,
460           struct host_rlimit64 *, old_limit)
461 #endif
462 
463 
464 #if defined(TARGET_NR_timer_create)
465 /* Maximum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers[32] = { 0, } ;
467 
468 static inline int next_free_host_timer(void)
469 {
470     int k ;
471     /* FIXME: Does finding the next free slot require a lock? */
472     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
473         if (g_posix_timers[k] == 0) {
474             g_posix_timers[k] = (timer_t) 1;
475             return k;
476         }
477     }
478     return -1;
479 }
480 #endif
481 
482 /* ARM EABI and MIPS expect 64bit types to be aligned on even pairs of registers */
483 #ifdef TARGET_ARM
484 static inline int regpairs_aligned(void *cpu_env, int num)
485 {
486     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
487 }
488 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
489 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
490 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
491 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
492  * of registers which translates to the same as ARM/MIPS, because we start with
493  * r3 as arg1 */
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_SH4)
496 /* SH4 doesn't align register pairs, except for p{read,write}64 */
497 static inline int regpairs_aligned(void *cpu_env, int num)
498 {
499     switch (num) {
500     case TARGET_NR_pread64:
501     case TARGET_NR_pwrite64:
502         return 1;
503 
504     default:
505         return 0;
506     }
507 }
508 #elif defined(TARGET_XTENSA)
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 #else
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
512 #endif
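/* Sketch of what the predicate above controls: when regpairs_aligned()
 * returns 1, a 64-bit syscall argument split across two ABI registers is
 * assumed to start on an even-numbered register, so the syscall dispatch
 * code skips one padding argument before reassembling the low/high halves
 * (e.g. the file offset of pread64/pwrite64).
 */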
513 
514 #define ERRNO_TABLE_SIZE 1200
515 
516 /* target_to_host_errno_table[] is initialized from
517  * host_to_target_errno_table[] in syscall_init(). */
518 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
519 };
520 
521 /*
522  * This list is the union of errno values overridden in asm-<arch>/errno.h
523  * minus the errnos that are not actually generic to all archs.
524  */
525 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
526     [EAGAIN]		= TARGET_EAGAIN,
527     [EIDRM]		= TARGET_EIDRM,
528     [ECHRNG]		= TARGET_ECHRNG,
529     [EL2NSYNC]		= TARGET_EL2NSYNC,
530     [EL3HLT]		= TARGET_EL3HLT,
531     [EL3RST]		= TARGET_EL3RST,
532     [ELNRNG]		= TARGET_ELNRNG,
533     [EUNATCH]		= TARGET_EUNATCH,
534     [ENOCSI]		= TARGET_ENOCSI,
535     [EL2HLT]		= TARGET_EL2HLT,
536     [EDEADLK]		= TARGET_EDEADLK,
537     [ENOLCK]		= TARGET_ENOLCK,
538     [EBADE]		= TARGET_EBADE,
539     [EBADR]		= TARGET_EBADR,
540     [EXFULL]		= TARGET_EXFULL,
541     [ENOANO]		= TARGET_ENOANO,
542     [EBADRQC]		= TARGET_EBADRQC,
543     [EBADSLT]		= TARGET_EBADSLT,
544     [EBFONT]		= TARGET_EBFONT,
545     [ENOSTR]		= TARGET_ENOSTR,
546     [ENODATA]		= TARGET_ENODATA,
547     [ETIME]		= TARGET_ETIME,
548     [ENOSR]		= TARGET_ENOSR,
549     [ENONET]		= TARGET_ENONET,
550     [ENOPKG]		= TARGET_ENOPKG,
551     [EREMOTE]		= TARGET_EREMOTE,
552     [ENOLINK]		= TARGET_ENOLINK,
553     [EADV]		= TARGET_EADV,
554     [ESRMNT]		= TARGET_ESRMNT,
555     [ECOMM]		= TARGET_ECOMM,
556     [EPROTO]		= TARGET_EPROTO,
557     [EDOTDOT]		= TARGET_EDOTDOT,
558     [EMULTIHOP]		= TARGET_EMULTIHOP,
559     [EBADMSG]		= TARGET_EBADMSG,
560     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
561     [EOVERFLOW]		= TARGET_EOVERFLOW,
562     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
563     [EBADFD]		= TARGET_EBADFD,
564     [EREMCHG]		= TARGET_EREMCHG,
565     [ELIBACC]		= TARGET_ELIBACC,
566     [ELIBBAD]		= TARGET_ELIBBAD,
567     [ELIBSCN]		= TARGET_ELIBSCN,
568     [ELIBMAX]		= TARGET_ELIBMAX,
569     [ELIBEXEC]		= TARGET_ELIBEXEC,
570     [EILSEQ]		= TARGET_EILSEQ,
571     [ENOSYS]		= TARGET_ENOSYS,
572     [ELOOP]		= TARGET_ELOOP,
573     [ERESTART]		= TARGET_ERESTART,
574     [ESTRPIPE]		= TARGET_ESTRPIPE,
575     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
576     [EUSERS]		= TARGET_EUSERS,
577     [ENOTSOCK]		= TARGET_ENOTSOCK,
578     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
579     [EMSGSIZE]		= TARGET_EMSGSIZE,
580     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
581     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
582     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
583     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
584     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
585     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
586     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
587     [EADDRINUSE]	= TARGET_EADDRINUSE,
588     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
589     [ENETDOWN]		= TARGET_ENETDOWN,
590     [ENETUNREACH]	= TARGET_ENETUNREACH,
591     [ENETRESET]		= TARGET_ENETRESET,
592     [ECONNABORTED]	= TARGET_ECONNABORTED,
593     [ECONNRESET]	= TARGET_ECONNRESET,
594     [ENOBUFS]		= TARGET_ENOBUFS,
595     [EISCONN]		= TARGET_EISCONN,
596     [ENOTCONN]		= TARGET_ENOTCONN,
597     [EUCLEAN]		= TARGET_EUCLEAN,
598     [ENOTNAM]		= TARGET_ENOTNAM,
599     [ENAVAIL]		= TARGET_ENAVAIL,
600     [EISNAM]		= TARGET_EISNAM,
601     [EREMOTEIO]		= TARGET_EREMOTEIO,
602     [EDQUOT]            = TARGET_EDQUOT,
603     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
604     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
605     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
606     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
607     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
608     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
609     [EALREADY]		= TARGET_EALREADY,
610     [EINPROGRESS]	= TARGET_EINPROGRESS,
611     [ESTALE]		= TARGET_ESTALE,
612     [ECANCELED]		= TARGET_ECANCELED,
613     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
614     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
615 #ifdef ENOKEY
616     [ENOKEY]		= TARGET_ENOKEY,
617 #endif
618 #ifdef EKEYEXPIRED
619     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
620 #endif
621 #ifdef EKEYREVOKED
622     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
623 #endif
624 #ifdef EKEYREJECTED
625     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
626 #endif
627 #ifdef EOWNERDEAD
628     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
629 #endif
630 #ifdef ENOTRECOVERABLE
631     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
632 #endif
633 #ifdef ENOMSG
634     [ENOMSG]            = TARGET_ENOMSG,
635 #endif
636 #ifdef ERFKILL
637     [ERFKILL]           = TARGET_ERFKILL,
638 #endif
639 #ifdef EHWPOISON
640     [EHWPOISON]         = TARGET_EHWPOISON,
641 #endif
642 };
643 
644 static inline int host_to_target_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         host_to_target_errno_table[err]) {
648         return host_to_target_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline int target_to_host_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         target_to_host_errno_table[err]) {
657         return target_to_host_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline abi_long get_errno(abi_long ret)
663 {
664     if (ret == -1)
665         return -host_to_target_errno(errno);
666     else
667         return ret;
668 }
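/* Worked example (numeric values are target-dependent): if a host openat()
 * fails with ENOSYS, get_errno() returns -host_to_target_errno(ENOSYS),
 * i.e. -TARGET_ENOSYS, whose value may differ from the host's (MIPS and
 * Alpha, for instance, renumber many of these). Errnos absent from the
 * table are assumed to be identical on host and target and pass through
 * unchanged.
 */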
669 
670 const char *target_strerror(int err)
671 {
672     if (err == TARGET_ERESTARTSYS) {
673         return "To be restarted";
674     }
675     if (err == TARGET_QEMU_ESIGRETURN) {
676         return "Successful exit from sigreturn";
677     }
678 
679     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
680         return NULL;
681     }
682     return strerror(target_to_host_errno(err));
683 }
684 
685 #define safe_syscall0(type, name) \
686 static type safe_##name(void) \
687 { \
688     return safe_syscall(__NR_##name); \
689 }
690 
691 #define safe_syscall1(type, name, type1, arg1) \
692 static type safe_##name(type1 arg1) \
693 { \
694     return safe_syscall(__NR_##name, arg1); \
695 }
696 
697 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
698 static type safe_##name(type1 arg1, type2 arg2) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2); \
701 }
702 
703 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
707 }
708 
709 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
714 }
715 
716 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
717     type4, arg4, type5, arg5) \
718 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
719     type5 arg5) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
722 }
723 
724 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4, type5, arg5, type6, arg6) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727     type5 arg5, type6 arg6) \
728 { \
729     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
730 }
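/* These expand just like the _syscallN wrappers above, except that they go
 * through safe_syscall(), the helper that lets a blocking host syscall be
 * interrupted and restarted when a guest signal arrives (see the
 * DEBUG_ERESTARTSYS note earlier). The instantiations below cover the
 * potentially-blocking calls used in this file.
 */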
731 
732 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
733 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
734 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
735               int, flags, mode_t, mode)
736 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
737               struct rusage *, rusage)
738 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
739               int, options, struct rusage *, rusage)
740 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
744               struct timespec *, tsp, const sigset_t *, sigmask,
745               size_t, sigsetsize)
746 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
747               int, maxevents, int, timeout, const sigset_t *, sigmask,
748               size_t, sigsetsize)
749 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
771               const struct timespec *, uts, size_t, sigsetsize)
772 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
773               int, flags)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #ifdef TARGET_NR_clock_nanosleep
777 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
778               const struct timespec *, req, struct timespec *, rem)
779 #endif
780 #ifdef __NR_ipc
781 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
782               void *, ptr, long, fifth)
783 #endif
784 #ifdef __NR_msgsnd
785 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
786               int, flags)
787 #endif
788 #ifdef __NR_msgrcv
789 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
790               long, msgtype, int, flags)
791 #endif
792 #ifdef __NR_semtimedop
793 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
794               unsigned, nsops, const struct timespec *, timeout)
795 #endif
796 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
797 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
798               size_t, len, unsigned, prio, const struct timespec *, timeout)
799 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
800               size_t, len, unsigned *, prio, const struct timespec *, timeout)
801 #endif
802 /* We do ioctl like this rather than via safe_syscall3 to preserve the
803  * "third argument might be integer or pointer or not present" behaviour of
804  * the libc function.
805  */
806 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
807 /* Similarly for fcntl. Note that callers must always:
808  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
809  *  use the flock64 struct rather than unsuffixed flock
810  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
811  */
812 #ifdef __NR_fcntl64
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
814 #else
815 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
816 #endif
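/* Example of the convention above (hypothetical caller): record locking
 * should be done as safe_fcntl(fd, F_SETLKW64, &fl) with a struct flock64,
 * never F_SETLKW with a plain struct flock, so that the same call uses a
 * 64-bit offset on both 32-bit and 64-bit hosts.
 */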
817 
818 static inline int host_to_target_sock_type(int host_type)
819 {
820     int target_type;
821 
822     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
823     case SOCK_DGRAM:
824         target_type = TARGET_SOCK_DGRAM;
825         break;
826     case SOCK_STREAM:
827         target_type = TARGET_SOCK_STREAM;
828         break;
829     default:
830         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
831         break;
832     }
833 
834 #if defined(SOCK_CLOEXEC)
835     if (host_type & SOCK_CLOEXEC) {
836         target_type |= TARGET_SOCK_CLOEXEC;
837     }
838 #endif
839 
840 #if defined(SOCK_NONBLOCK)
841     if (host_type & SOCK_NONBLOCK) {
842         target_type |= TARGET_SOCK_NONBLOCK;
843     }
844 #endif
845 
846     return target_type;
847 }
848 
849 static abi_ulong target_brk;
850 static abi_ulong target_original_brk;
851 static abi_ulong brk_page;
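/* Bookkeeping sketch: target_original_brk is the lowest address the guest
 * may set its break to, target_brk is the current break, and brk_page is
 * the host-page-aligned top of the memory already reserved for the heap;
 * do_brk() below only has to mmap more memory when the requested break
 * rises above brk_page.
 */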
852 
853 void target_set_brk(abi_ulong new_brk)
854 {
855     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
856     brk_page = HOST_PAGE_ALIGN(target_brk);
857 }
858 
859 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
860 #define DEBUGF_BRK(message, args...)
861 
862 /* do_brk() must return target values and target errnos. */
863 abi_long do_brk(abi_ulong new_brk)
864 {
865     abi_long mapped_addr;
866     abi_ulong new_alloc_size;
867 
868     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
869 
870     if (!new_brk) {
871         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
872         return target_brk;
873     }
874     if (new_brk < target_original_brk) {
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
876                    target_brk);
877         return target_brk;
878     }
879 
880     /* If the new brk is less than the highest page reserved to the
881      * target heap allocation, set it and we're almost done...  */
882     if (new_brk <= brk_page) {
883         /* Heap contents are initialized to zero, as for anonymous
884          * mapped pages.  */
885         if (new_brk > target_brk) {
886             memset(g2h(target_brk), 0, new_brk - target_brk);
887         }
888 	target_brk = new_brk;
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
890 	return target_brk;
891     }
892 
893     /* We need to allocate more memory after the brk... Note that
894      * we don't use MAP_FIXED because that will map over the top of
895      * any existing mapping (like the one with the host libc or qemu
896      * itself); instead we treat "mapped but at wrong address" as
897      * a failure and unmap again.
898      */
899     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
900     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
901                                         PROT_READ|PROT_WRITE,
902                                         MAP_ANON|MAP_PRIVATE, 0, 0));
903 
904     if (mapped_addr == brk_page) {
905         /* Heap contents are initialized to zero, as for anonymous
906          * mapped pages.  Technically the new pages are already
907          * initialized to zero since they *are* anonymous mapped
908          * pages, however we have to take care with the contents that
909          * come from the remaining part of the previous page: it may
910          * contain garbage data due to previous heap usage (grown
911          * then shrunk).  */
912         memset(g2h(target_brk), 0, brk_page - target_brk);
913 
914         target_brk = new_brk;
915         brk_page = HOST_PAGE_ALIGN(target_brk);
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
917             target_brk);
918         return target_brk;
919     } else if (mapped_addr != -1) {
920         /* Mapped but at wrong address, meaning there wasn't actually
921          * enough space for this brk.
922          */
923         target_munmap(mapped_addr, new_alloc_size);
924         mapped_addr = -1;
925         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
926     }
927     else {
928         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
929     }
930 
931 #if defined(TARGET_ALPHA)
932     /* We (partially) emulate OSF/1 on Alpha, which requires we
933        return a proper errno, not an unchanged brk value.  */
934     return -TARGET_ENOMEM;
935 #endif
936     /* For everything else, return the previous break. */
937     return target_brk;
938 }
939 
940 static inline abi_long copy_from_user_fdset(fd_set *fds,
941                                             abi_ulong target_fds_addr,
942                                             int n)
943 {
944     int i, nw, j, k;
945     abi_ulong b, *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_READ,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  1)))
952         return -TARGET_EFAULT;
953 
954     FD_ZERO(fds);
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         /* grab the abi_ulong */
958         __get_user(b, &target_fds[i]);
959         for (j = 0; j < TARGET_ABI_BITS; j++) {
960             /* check the bit inside the abi_ulong */
961             if ((b >> j) & 1)
962                 FD_SET(k, fds);
963             k++;
964         }
965     }
966 
967     unlock_user(target_fds, target_fds_addr, 0);
968 
969     return 0;
970 }
971 
972 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
973                                                  abi_ulong target_fds_addr,
974                                                  int n)
975 {
976     if (target_fds_addr) {
977         if (copy_from_user_fdset(fds, target_fds_addr, n))
978             return -TARGET_EFAULT;
979         *fds_ptr = fds;
980     } else {
981         *fds_ptr = NULL;
982     }
983     return 0;
984 }
985 
986 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
987                                           const fd_set *fds,
988                                           int n)
989 {
990     int i, nw, j, k;
991     abi_long v;
992     abi_ulong *target_fds;
993 
994     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
995     if (!(target_fds = lock_user(VERIFY_WRITE,
996                                  target_fds_addr,
997                                  sizeof(abi_ulong) * nw,
998                                  0)))
999         return -TARGET_EFAULT;
1000 
1001     k = 0;
1002     for (i = 0; i < nw; i++) {
1003         v = 0;
1004         for (j = 0; j < TARGET_ABI_BITS; j++) {
1005             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1006             k++;
1007         }
1008         __put_user(v, &target_fds[i]);
1009     }
1010 
1011     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1012 
1013     return 0;
1014 }
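/* Layout note (illustrative): the guest fd_set is handled as an array of
 * abi_ulong words, with bit j of word i standing for fd (i * TARGET_ABI_BITS + j).
 * For a 32-bit guest and n = 100, nw = DIV_ROUND_UP(100, 32) = 4 words are
 * copied each way, and bits beyond fd 99 are simply zero.
 */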
1015 
1016 #if defined(__alpha__)
1017 #define HOST_HZ 1024
1018 #else
1019 #define HOST_HZ 100
1020 #endif
1021 
1022 static inline abi_long host_to_target_clock_t(long ticks)
1023 {
1024 #if HOST_HZ == TARGET_HZ
1025     return ticks;
1026 #else
1027     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1028 #endif
1029 }
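/* Worked example: on an Alpha host (HOST_HZ = 1024) with a target whose
 * TARGET_HZ is 100, a host value of 2048 ticks is reported to the guest as
 * 2048 * 100 / 1024 = 200 ticks; when the two rates match, the value is
 * passed through unchanged.
 */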
1030 
1031 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1032                                              const struct rusage *rusage)
1033 {
1034     struct target_rusage *target_rusage;
1035 
1036     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1037         return -TARGET_EFAULT;
1038     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1039     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1040     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1041     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1042     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1043     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1044     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1045     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1046     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1047     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1048     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1049     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1050     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1051     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1052     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1053     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1054     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1055     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1056     unlock_user_struct(target_rusage, target_addr, 1);
1057 
1058     return 0;
1059 }
1060 
1061 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1062 {
1063     abi_ulong target_rlim_swap;
1064     rlim_t result;
1065 
1066     target_rlim_swap = tswapal(target_rlim);
1067     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1068         return RLIM_INFINITY;
1069 
1070     result = target_rlim_swap;
1071     if (target_rlim_swap != (rlim_t)result)
1072         return RLIM_INFINITY;
1073 
1074     return result;
1075 }
1076 
1077 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1078 {
1079     abi_ulong target_rlim_swap;
1080     abi_ulong result;
1081 
1082     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1083         target_rlim_swap = TARGET_RLIM_INFINITY;
1084     else
1085         target_rlim_swap = rlim;
1086     result = tswapal(target_rlim_swap);
1087 
1088     return result;
1089 }
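/* Conversion notes (sketch): values are byte-swapped with tswapal(),
 * TARGET_RLIM_INFINITY maps to the host RLIM_INFINITY and back, and a host
 * limit too large to represent in an abi_ulong is reported to the guest as
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */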
1090 
1091 static inline int target_to_host_resource(int code)
1092 {
1093     switch (code) {
1094     case TARGET_RLIMIT_AS:
1095         return RLIMIT_AS;
1096     case TARGET_RLIMIT_CORE:
1097         return RLIMIT_CORE;
1098     case TARGET_RLIMIT_CPU:
1099         return RLIMIT_CPU;
1100     case TARGET_RLIMIT_DATA:
1101         return RLIMIT_DATA;
1102     case TARGET_RLIMIT_FSIZE:
1103         return RLIMIT_FSIZE;
1104     case TARGET_RLIMIT_LOCKS:
1105         return RLIMIT_LOCKS;
1106     case TARGET_RLIMIT_MEMLOCK:
1107         return RLIMIT_MEMLOCK;
1108     case TARGET_RLIMIT_MSGQUEUE:
1109         return RLIMIT_MSGQUEUE;
1110     case TARGET_RLIMIT_NICE:
1111         return RLIMIT_NICE;
1112     case TARGET_RLIMIT_NOFILE:
1113         return RLIMIT_NOFILE;
1114     case TARGET_RLIMIT_NPROC:
1115         return RLIMIT_NPROC;
1116     case TARGET_RLIMIT_RSS:
1117         return RLIMIT_RSS;
1118     case TARGET_RLIMIT_RTPRIO:
1119         return RLIMIT_RTPRIO;
1120     case TARGET_RLIMIT_SIGPENDING:
1121         return RLIMIT_SIGPENDING;
1122     case TARGET_RLIMIT_STACK:
1123         return RLIMIT_STACK;
1124     default:
1125         return code;
1126     }
1127 }
1128 
1129 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1130                                               abi_ulong target_tv_addr)
1131 {
1132     struct target_timeval *target_tv;
1133 
1134     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1135         return -TARGET_EFAULT;
1136     }
1137 
1138     __get_user(tv->tv_sec, &target_tv->tv_sec);
1139     __get_user(tv->tv_usec, &target_tv->tv_usec);
1140 
1141     unlock_user_struct(target_tv, target_tv_addr, 0);
1142 
1143     return 0;
1144 }
1145 
1146 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1147                                             const struct timeval *tv)
1148 {
1149     struct target_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __put_user(tv->tv_sec, &target_tv->tv_sec);
1156     __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 1);
1159 
1160     return 0;
1161 }
1162 
1163 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1164                                              const struct timeval *tv)
1165 {
1166     struct target__kernel_sock_timeval *target_tv;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171 
1172     __put_user(tv->tv_sec, &target_tv->tv_sec);
1173     __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 
1175     unlock_user_struct(target_tv, target_tv_addr, 1);
1176 
1177     return 0;
1178 }
1179 
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 
1194 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1195                                                struct timespec *host_ts)
1196 {
1197     struct target_timespec *target_ts;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1203     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1204     unlock_user_struct(target_ts, target_addr, 1);
1205     return 0;
1206 }
1207 
1208 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1209                                                  struct timespec *host_ts)
1210 {
1211     struct target__kernel_timespec *target_ts;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1214         return -TARGET_EFAULT;
1215     }
1216     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1217     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1218     unlock_user_struct(target_ts, target_addr, 1);
1219     return 0;
1220 }
1221 
1222 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1223                                                abi_ulong target_tz_addr)
1224 {
1225     struct target_timezone *target_tz;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230 
1231     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1232     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1233 
1234     unlock_user_struct(target_tz, target_tz_addr, 0);
1235 
1236     return 0;
1237 }
1238 
1239 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1240 #include <mqueue.h>
1241 
1242 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1243                                               abi_ulong target_mq_attr_addr)
1244 {
1245     struct target_mq_attr *target_mq_attr;
1246 
1247     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1248                           target_mq_attr_addr, 1))
1249         return -TARGET_EFAULT;
1250 
1251     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1252     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1253     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1254     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1255 
1256     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1257 
1258     return 0;
1259 }
1260 
1261 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1262                                             const struct mq_attr *attr)
1263 {
1264     struct target_mq_attr *target_mq_attr;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1267                           target_mq_attr_addr, 0))
1268         return -TARGET_EFAULT;
1269 
1270     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1271     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1272     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1273     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1274 
1275     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1276 
1277     return 0;
1278 }
1279 #endif
1280 
1281 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1282 /* do_select() must return target values and target errnos. */
1283 static abi_long do_select(int n,
1284                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1285                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1286 {
1287     fd_set rfds, wfds, efds;
1288     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1289     struct timeval tv;
1290     struct timespec ts, *ts_ptr;
1291     abi_long ret;
1292 
1293     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1302     if (ret) {
1303         return ret;
1304     }
1305 
1306     if (target_tv_addr) {
1307         if (copy_from_user_timeval(&tv, target_tv_addr))
1308             return -TARGET_EFAULT;
1309         ts.tv_sec = tv.tv_sec;
1310         ts.tv_nsec = tv.tv_usec * 1000;
1311         ts_ptr = &ts;
1312     } else {
1313         ts_ptr = NULL;
1314     }
1315 
1316     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1317                                   ts_ptr, NULL));
1318 
1319     if (!is_error(ret)) {
1320         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1321             return -TARGET_EFAULT;
1322         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1323             return -TARGET_EFAULT;
1324         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1325             return -TARGET_EFAULT;
1326 
1327         if (target_tv_addr) {
1328             tv.tv_sec = ts.tv_sec;
1329             tv.tv_usec = ts.tv_nsec / 1000;
1330             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1331                 return -TARGET_EFAULT;
1332             }
1333         }
1334     }
1335 
1336     return ret;
1337 }
1338 
1339 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1340 static abi_long do_old_select(abi_ulong arg1)
1341 {
1342     struct target_sel_arg_struct *sel;
1343     abi_ulong inp, outp, exp, tvp;
1344     long nsel;
1345 
1346     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1347         return -TARGET_EFAULT;
1348     }
1349 
1350     nsel = tswapal(sel->n);
1351     inp = tswapal(sel->inp);
1352     outp = tswapal(sel->outp);
1353     exp = tswapal(sel->exp);
1354     tvp = tswapal(sel->tvp);
1355 
1356     unlock_user_struct(sel, arg1, 0);
1357 
1358     return do_select(nsel, inp, outp, exp, tvp);
1359 }
1360 #endif
1361 #endif
1362 
1363 static abi_long do_pipe2(int host_pipe[], int flags)
1364 {
1365 #ifdef CONFIG_PIPE2
1366     return pipe2(host_pipe, flags);
1367 #else
1368     return -ENOSYS;
1369 #endif
1370 }
1371 
1372 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1373                         int flags, int is_pipe2)
1374 {
1375     int host_pipe[2];
1376     abi_long ret;
1377     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1378 
1379     if (is_error(ret))
1380         return get_errno(ret);
1381 
1382     /* Several targets have special calling conventions for the original
1383        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1384     if (!is_pipe2) {
1385 #if defined(TARGET_ALPHA)
1386         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1387         return host_pipe[0];
1388 #elif defined(TARGET_MIPS)
1389         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1390         return host_pipe[0];
1391 #elif defined(TARGET_SH4)
1392         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1393         return host_pipe[0];
1394 #elif defined(TARGET_SPARC)
1395         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1396         return host_pipe[0];
1397 #endif
1398     }
1399 
1400     if (put_user_s32(host_pipe[0], pipedes)
1401         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1402         return -TARGET_EFAULT;
1403     return get_errno(ret);
1404 }
1405 
1406 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1407                                               abi_ulong target_addr,
1408                                               socklen_t len)
1409 {
1410     struct target_ip_mreqn *target_smreqn;
1411 
1412     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1413     if (!target_smreqn)
1414         return -TARGET_EFAULT;
1415     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1416     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1417     if (len == sizeof(struct target_ip_mreqn))
1418         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1419     unlock_user(target_smreqn, target_addr, 0);
1420 
1421     return 0;
1422 }
1423 
1424 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1425                                                abi_ulong target_addr,
1426                                                socklen_t len)
1427 {
1428     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1429     sa_family_t sa_family;
1430     struct target_sockaddr *target_saddr;
1431 
1432     if (fd_trans_target_to_host_addr(fd)) {
1433         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1434     }
1435 
1436     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439 
1440     sa_family = tswap16(target_saddr->sa_family);
1441 
1442     /* Oops. The caller might send an incomplete sun_path; sun_path
1443      * must be terminated by \0 (see the manual page), but
1444      * unfortunately it is quite common to specify sockaddr_un
1445      * length as "strlen(x->sun_path)" while it should be
1446      * "strlen(...) + 1". We'll fix that here if needed.
1447      * Linux kernel has a similar feature.
1448      */
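    /* Illustrative example: a guest passing
     * len = offsetof(struct sockaddr_un, sun_path) + strlen(path) omits the
     * trailing NUL; the code below sees that the last byte within len is
     * non-zero while the following byte is zero, extends len by one, and
     * caps it at sizeof(struct sockaddr_un).
     */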
1449 
1450     if (sa_family == AF_UNIX) {
1451         if (len < unix_maxlen && len > 0) {
1452             char *cp = (char*)target_saddr;
1453 
1454             if ( cp[len-1] && !cp[len] )
1455                 len++;
1456         }
1457         if (len > unix_maxlen)
1458             len = unix_maxlen;
1459     }
1460 
1461     memcpy(addr, target_saddr, len);
1462     addr->sa_family = sa_family;
1463     if (sa_family == AF_NETLINK) {
1464         struct sockaddr_nl *nladdr;
1465 
1466         nladdr = (struct sockaddr_nl *)addr;
1467         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1468         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1469     } else if (sa_family == AF_PACKET) {
1470 	struct target_sockaddr_ll *lladdr;
1471 
1472 	lladdr = (struct target_sockaddr_ll *)addr;
1473 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1474 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1475     }
1476     unlock_user(target_saddr, target_addr, 0);
1477 
1478     return 0;
1479 }
1480 
1481 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1482                                                struct sockaddr *addr,
1483                                                socklen_t len)
1484 {
1485     struct target_sockaddr *target_saddr;
1486 
1487     if (len == 0) {
1488         return 0;
1489     }
1490     assert(addr);
1491 
1492     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495     memcpy(target_saddr, addr, len);
1496     if (len >= offsetof(struct target_sockaddr, sa_family) +
1497         sizeof(target_saddr->sa_family)) {
1498         target_saddr->sa_family = tswap16(addr->sa_family);
1499     }
1500     if (addr->sa_family == AF_NETLINK &&
1501         len >= sizeof(struct target_sockaddr_nl)) {
1502         struct target_sockaddr_nl *target_nl =
1503                (struct target_sockaddr_nl *)target_saddr;
1504         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1505         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1506     } else if (addr->sa_family == AF_PACKET) {
1507         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1508         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1509         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1510     } else if (addr->sa_family == AF_INET6 &&
1511                len >= sizeof(struct target_sockaddr_in6)) {
1512         struct target_sockaddr_in6 *target_in6 =
1513                (struct target_sockaddr_in6 *)target_saddr;
1514         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1515     }
1516     unlock_user(target_saddr, target_addr, len);
1517 
1518     return 0;
1519 }
1520 
1521 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1522                                            struct target_msghdr *target_msgh)
1523 {
1524     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1525     abi_long msg_controllen;
1526     abi_ulong target_cmsg_addr;
1527     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1528     socklen_t space = 0;
1529 
1530     msg_controllen = tswapal(target_msgh->msg_controllen);
1531     if (msg_controllen < sizeof (struct target_cmsghdr))
1532         goto the_end;
1533     target_cmsg_addr = tswapal(target_msgh->msg_control);
1534     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1535     target_cmsg_start = target_cmsg;
1536     if (!target_cmsg)
1537         return -TARGET_EFAULT;
1538 
1539     while (cmsg && target_cmsg) {
1540         void *data = CMSG_DATA(cmsg);
1541         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1542 
1543         int len = tswapal(target_cmsg->cmsg_len)
1544             - sizeof(struct target_cmsghdr);
1545 
1546         space += CMSG_SPACE(len);
1547         if (space > msgh->msg_controllen) {
1548             space -= CMSG_SPACE(len);
1549             /* This is a QEMU bug, since we allocated the payload
1550              * area ourselves (unlike overflow in host-to-target
1551              * conversion, which is just the guest giving us a buffer
1552              * that's too small). It can't happen for the payload types
1553              * we currently support; if it becomes an issue in future
1554              * we would need to improve our allocation strategy to
1555              * something more intelligent than "twice the size of the
1556              * target buffer we're reading from".
1557              */
1558             gemu_log("Host cmsg overflow\n");
1559             break;
1560         }
1561 
1562         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1563             cmsg->cmsg_level = SOL_SOCKET;
1564         } else {
1565             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1566         }
1567         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1568         cmsg->cmsg_len = CMSG_LEN(len);
1569 
1570         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1571             int *fd = (int *)data;
1572             int *target_fd = (int *)target_data;
1573             int i, numfds = len / sizeof(int);
1574 
1575             for (i = 0; i < numfds; i++) {
1576                 __get_user(fd[i], target_fd + i);
1577             }
1578         } else if (cmsg->cmsg_level == SOL_SOCKET
1579                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1580             struct ucred *cred = (struct ucred *)data;
1581             struct target_ucred *target_cred =
1582                 (struct target_ucred *)target_data;
1583 
1584             __get_user(cred->pid, &target_cred->pid);
1585             __get_user(cred->uid, &target_cred->uid);
1586             __get_user(cred->gid, &target_cred->gid);
1587         } else {
1588             gemu_log("Unsupported ancillary data: %d/%d\n",
1589                                         cmsg->cmsg_level, cmsg->cmsg_type);
1590             memcpy(data, target_data, len);
1591         }
1592 
1593         cmsg = CMSG_NXTHDR(msgh, cmsg);
1594         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1595                                          target_cmsg_start);
1596     }
1597     unlock_user(target_cmsg, target_cmsg_addr, 0);
1598  the_end:
1599     msgh->msg_controllen = space;
1600     return 0;
1601 }
1602 
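/*
 * host_to_target_cmsg() performs the reverse conversion for ancillary data
 * received from the host, e.g. when a guest has enabled timestamping
 * (illustrative sketch only):
 *
 *     int on = 1;
 *     setsockopt(sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 *     ...
 *     recvmsg(sock, &msg, 0);
 *
 * The SO_TIMESTAMP case below then resizes the payload from the host
 * struct timeval to struct target_timeval while copying it out.
 */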
1603 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1604                                            struct msghdr *msgh)
1605 {
1606     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1607     abi_long msg_controllen;
1608     abi_ulong target_cmsg_addr;
1609     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1610     socklen_t space = 0;
1611 
1612     msg_controllen = tswapal(target_msgh->msg_controllen);
1613     if (msg_controllen < sizeof (struct target_cmsghdr))
1614         goto the_end;
1615     target_cmsg_addr = tswapal(target_msgh->msg_control);
1616     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1617     target_cmsg_start = target_cmsg;
1618     if (!target_cmsg)
1619         return -TARGET_EFAULT;
1620 
1621     while (cmsg && target_cmsg) {
1622         void *data = CMSG_DATA(cmsg);
1623         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1624 
1625         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1626         int tgt_len, tgt_space;
1627 
1628         /* We never copy a half-header but may copy half-data;
1629          * this is Linux's behaviour in put_cmsg(). Note that
1630          * truncation here is a guest problem (which we report
1631          * to the guest via the CTRUNC bit), unlike truncation
1632          * in target_to_host_cmsg, which is a QEMU bug.
1633          */
1634         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1635             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1636             break;
1637         }
1638 
1639         if (cmsg->cmsg_level == SOL_SOCKET) {
1640             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1641         } else {
1642             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1643         }
1644         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1645 
1646         /* Payload types which need a different size of payload on
1647          * the target must adjust tgt_len here.
1648          */
1649         tgt_len = len;
1650         switch (cmsg->cmsg_level) {
1651         case SOL_SOCKET:
1652             switch (cmsg->cmsg_type) {
1653             case SO_TIMESTAMP:
1654                 tgt_len = sizeof(struct target_timeval);
1655                 break;
1656             default:
1657                 break;
1658             }
1659             break;
1660         default:
1661             break;
1662         }
1663 
1664         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1665             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1666             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1667         }
1668 
1669         /* We must now copy-and-convert len bytes of payload
1670          * into tgt_len bytes of destination space. Bear in mind
1671          * that in both source and destination we may be dealing
1672          * with a truncated value!
1673          */
1674         switch (cmsg->cmsg_level) {
1675         case SOL_SOCKET:
1676             switch (cmsg->cmsg_type) {
1677             case SCM_RIGHTS:
1678             {
1679                 int *fd = (int *)data;
1680                 int *target_fd = (int *)target_data;
1681                 int i, numfds = tgt_len / sizeof(int);
1682 
1683                 for (i = 0; i < numfds; i++) {
1684                     __put_user(fd[i], target_fd + i);
1685                 }
1686                 break;
1687             }
1688             case SO_TIMESTAMP:
1689             {
1690                 struct timeval *tv = (struct timeval *)data;
1691                 struct target_timeval *target_tv =
1692                     (struct target_timeval *)target_data;
1693 
1694                 if (len != sizeof(struct timeval) ||
1695                     tgt_len != sizeof(struct target_timeval)) {
1696                     goto unimplemented;
1697                 }
1698 
1699                 /* copy struct timeval to target */
1700                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1701                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1702                 break;
1703             }
1704             case SCM_CREDENTIALS:
1705             {
1706                 struct ucred *cred = (struct ucred *)data;
1707                 struct target_ucred *target_cred =
1708                     (struct target_ucred *)target_data;
1709 
1710                 __put_user(cred->pid, &target_cred->pid);
1711                 __put_user(cred->uid, &target_cred->uid);
1712                 __put_user(cred->gid, &target_cred->gid);
1713                 break;
1714             }
1715             default:
1716                 goto unimplemented;
1717             }
1718             break;
1719 
1720         case SOL_IP:
1721             switch (cmsg->cmsg_type) {
1722             case IP_TTL:
1723             {
1724                 uint32_t *v = (uint32_t *)data;
1725                 uint32_t *t_int = (uint32_t *)target_data;
1726 
1727                 if (len != sizeof(uint32_t) ||
1728                     tgt_len != sizeof(uint32_t)) {
1729                     goto unimplemented;
1730                 }
1731                 __put_user(*v, t_int);
1732                 break;
1733             }
1734             case IP_RECVERR:
1735             {
1736                 struct errhdr_t {
1737                    struct sock_extended_err ee;
1738                    struct sockaddr_in offender;
1739                 };
1740                 struct errhdr_t *errh = (struct errhdr_t *)data;
1741                 struct errhdr_t *target_errh =
1742                     (struct errhdr_t *)target_data;
1743 
1744                 if (len != sizeof(struct errhdr_t) ||
1745                     tgt_len != sizeof(struct errhdr_t)) {
1746                     goto unimplemented;
1747                 }
1748                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1749                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1750                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1751                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1752                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1753                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1754                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1755                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1756                     (void *) &errh->offender, sizeof(errh->offender));
1757                 break;
1758             }
1759             default:
1760                 goto unimplemented;
1761             }
1762             break;
1763 
1764         case SOL_IPV6:
1765             switch (cmsg->cmsg_type) {
1766             case IPV6_HOPLIMIT:
1767             {
1768                 uint32_t *v = (uint32_t *)data;
1769                 uint32_t *t_int = (uint32_t *)target_data;
1770 
1771                 if (len != sizeof(uint32_t) ||
1772                     tgt_len != sizeof(uint32_t)) {
1773                     goto unimplemented;
1774                 }
1775                 __put_user(*v, t_int);
1776                 break;
1777             }
1778             case IPV6_RECVERR:
1779             {
1780                 struct errhdr6_t {
1781                    struct sock_extended_err ee;
1782                    struct sockaddr_in6 offender;
1783                 };
1784                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1785                 struct errhdr6_t *target_errh =
1786                     (struct errhdr6_t *)target_data;
1787 
1788                 if (len != sizeof(struct errhdr6_t) ||
1789                     tgt_len != sizeof(struct errhdr6_t)) {
1790                     goto unimplemented;
1791                 }
1792                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1793                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1794                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1795                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1796                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1797                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1798                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1799                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1800                     (void *) &errh->offender, sizeof(errh->offender));
1801                 break;
1802             }
1803             default:
1804                 goto unimplemented;
1805             }
1806             break;
1807 
1808         default:
1809         unimplemented:
1810             gemu_log("Unsupported ancillary data: %d/%d\n",
1811                                         cmsg->cmsg_level, cmsg->cmsg_type);
1812             memcpy(target_data, data, MIN(len, tgt_len));
1813             if (tgt_len > len) {
1814                 memset(target_data + len, 0, tgt_len - len);
1815             }
1816         }
1817 
1818         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1819         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1820         if (msg_controllen < tgt_space) {
1821             tgt_space = msg_controllen;
1822         }
1823         msg_controllen -= tgt_space;
1824         space += tgt_space;
1825         cmsg = CMSG_NXTHDR(msgh, cmsg);
1826         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1827                                          target_cmsg_start);
1828     }
1829     unlock_user(target_cmsg, target_cmsg_addr, space);
1830  the_end:
1831     target_msgh->msg_controllen = tswapal(space);
1832     return 0;
1833 }
1834 
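/*
 * Guest-side view of the conversions done below (illustrative sketch only):
 * a guest setting a receive timeout,
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * passes a *target* struct timeval; the TARGET_SO_RCVTIMEO case converts it
 * with copy_from_user_timeval() before calling the host setsockopt().
 */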
1835 /* do_setsockopt() Must return target values and target errnos. */
1836 static abi_long do_setsockopt(int sockfd, int level, int optname,
1837                               abi_ulong optval_addr, socklen_t optlen)
1838 {
1839     abi_long ret;
1840     int val;
1841     struct ip_mreqn *ip_mreq;
1842     struct ip_mreq_source *ip_mreq_source;
1843 
1844     switch(level) {
1845     case SOL_TCP:
1846         /* TCP options all take an 'int' value.  */
1847         if (optlen < sizeof(uint32_t))
1848             return -TARGET_EINVAL;
1849 
1850         if (get_user_u32(val, optval_addr))
1851             return -TARGET_EFAULT;
1852         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1853         break;
1854     case SOL_IP:
1855         switch(optname) {
1856         case IP_TOS:
1857         case IP_TTL:
1858         case IP_HDRINCL:
1859         case IP_ROUTER_ALERT:
1860         case IP_RECVOPTS:
1861         case IP_RETOPTS:
1862         case IP_PKTINFO:
1863         case IP_MTU_DISCOVER:
1864         case IP_RECVERR:
1865         case IP_RECVTTL:
1866         case IP_RECVTOS:
1867 #ifdef IP_FREEBIND
1868         case IP_FREEBIND:
1869 #endif
1870         case IP_MULTICAST_TTL:
1871         case IP_MULTICAST_LOOP:
1872             val = 0;
1873             if (optlen >= sizeof(uint32_t)) {
1874                 if (get_user_u32(val, optval_addr))
1875                     return -TARGET_EFAULT;
1876             } else if (optlen >= 1) {
1877                 if (get_user_u8(val, optval_addr))
1878                     return -TARGET_EFAULT;
1879             }
1880             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1881             break;
1882         case IP_ADD_MEMBERSHIP:
1883         case IP_DROP_MEMBERSHIP:
1884             if (optlen < sizeof (struct target_ip_mreq) ||
1885                 optlen > sizeof (struct target_ip_mreqn))
1886                 return -TARGET_EINVAL;
1887 
1888             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1889             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1890             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1891             break;
1892 
1893         case IP_BLOCK_SOURCE:
1894         case IP_UNBLOCK_SOURCE:
1895         case IP_ADD_SOURCE_MEMBERSHIP:
1896         case IP_DROP_SOURCE_MEMBERSHIP:
1897             if (optlen != sizeof (struct target_ip_mreq_source))
1898                 return -TARGET_EINVAL;
1899 
1900             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
1901             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1902             unlock_user(ip_mreq_source, optval_addr, 0);
1903             break;
1904 
1905         default:
1906             goto unimplemented;
1907         }
1908         break;
1909     case SOL_IPV6:
1910         switch (optname) {
1911         case IPV6_MTU_DISCOVER:
1912         case IPV6_MTU:
1913         case IPV6_V6ONLY:
1914         case IPV6_RECVPKTINFO:
1915         case IPV6_UNICAST_HOPS:
1916         case IPV6_MULTICAST_HOPS:
1917         case IPV6_MULTICAST_LOOP:
1918         case IPV6_RECVERR:
1919         case IPV6_RECVHOPLIMIT:
1920         case IPV6_2292HOPLIMIT:
1921         case IPV6_CHECKSUM:
1922         case IPV6_ADDRFORM:
1923         case IPV6_2292PKTINFO:
1924         case IPV6_RECVTCLASS:
1925         case IPV6_RECVRTHDR:
1926         case IPV6_2292RTHDR:
1927         case IPV6_RECVHOPOPTS:
1928         case IPV6_2292HOPOPTS:
1929         case IPV6_RECVDSTOPTS:
1930         case IPV6_2292DSTOPTS:
1931         case IPV6_TCLASS:
1932 #ifdef IPV6_RECVPATHMTU
1933         case IPV6_RECVPATHMTU:
1934 #endif
1935 #ifdef IPV6_TRANSPARENT
1936         case IPV6_TRANSPARENT:
1937 #endif
1938 #ifdef IPV6_FREEBIND
1939         case IPV6_FREEBIND:
1940 #endif
1941 #ifdef IPV6_RECVORIGDSTADDR
1942         case IPV6_RECVORIGDSTADDR:
1943 #endif
1944             val = 0;
1945             if (optlen < sizeof(uint32_t)) {
1946                 return -TARGET_EINVAL;
1947             }
1948             if (get_user_u32(val, optval_addr)) {
1949                 return -TARGET_EFAULT;
1950             }
1951             ret = get_errno(setsockopt(sockfd, level, optname,
1952                                        &val, sizeof(val)));
1953             break;
1954         case IPV6_PKTINFO:
1955         {
1956             struct in6_pktinfo pki;
1957 
1958             if (optlen < sizeof(pki)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1963                 return -TARGET_EFAULT;
1964             }
1965 
1966             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1967 
1968             ret = get_errno(setsockopt(sockfd, level, optname,
1969                                        &pki, sizeof(pki)));
1970             break;
1971         }
1972         case IPV6_ADD_MEMBERSHIP:
1973         case IPV6_DROP_MEMBERSHIP:
1974         {
1975             struct ipv6_mreq ipv6mreq;
1976 
1977             if (optlen < sizeof(ipv6mreq)) {
1978                 return -TARGET_EINVAL;
1979             }
1980 
1981             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1982                 return -TARGET_EFAULT;
1983             }
1984 
1985             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1986 
1987             ret = get_errno(setsockopt(sockfd, level, optname,
1988                                        &ipv6mreq, sizeof(ipv6mreq)));
1989             break;
1990         }
1991         default:
1992             goto unimplemented;
1993         }
1994         break;
1995     case SOL_ICMPV6:
1996         switch (optname) {
1997         case ICMPV6_FILTER:
1998         {
1999             struct icmp6_filter icmp6f;
2000 
2001             if (optlen > sizeof(icmp6f)) {
2002                 optlen = sizeof(icmp6f);
2003             }
2004 
2005             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2006                 return -TARGET_EFAULT;
2007             }
2008 
2009             for (val = 0; val < 8; val++) {
2010                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2011             }
2012 
2013             ret = get_errno(setsockopt(sockfd, level, optname,
2014                                        &icmp6f, optlen));
2015             break;
2016         }
2017         default:
2018             goto unimplemented;
2019         }
2020         break;
2021     case SOL_RAW:
2022         switch (optname) {
2023         case ICMP_FILTER:
2024         case IPV6_CHECKSUM:
2025             /* those take an u32 value */
2026             if (optlen < sizeof(uint32_t)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (get_user_u32(val, optval_addr)) {
2031                 return -TARGET_EFAULT;
2032             }
2033             ret = get_errno(setsockopt(sockfd, level, optname,
2034                                        &val, sizeof(val)));
2035             break;
2036 
2037         default:
2038             goto unimplemented;
2039         }
2040         break;
2041 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2042     case SOL_ALG:
2043         switch (optname) {
2044         case ALG_SET_KEY:
2045         {
2046             char *alg_key = g_try_malloc(optlen);
2047 
2048             if (!alg_key) {
2049                 return -TARGET_ENOMEM;
2050             }
2051             if (copy_from_user(alg_key, optval_addr, optlen)) {
2052                 g_free(alg_key);
2053                 return -TARGET_EFAULT;
2054             }
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        alg_key, optlen));
2057             g_free(alg_key);
2058             break;
2059         }
2060         case ALG_SET_AEAD_AUTHSIZE:
2061         {
2062             ret = get_errno(setsockopt(sockfd, level, optname,
2063                                        NULL, optlen));
2064             break;
2065         }
2066         default:
2067             goto unimplemented;
2068         }
2069         break;
2070 #endif
2071     case TARGET_SOL_SOCKET:
2072         switch (optname) {
2073         case TARGET_SO_RCVTIMEO:
2074         {
2075                 struct timeval tv;
2076 
2077                 optname = SO_RCVTIMEO;
2078 
2079 set_timeout:
2080                 if (optlen != sizeof(struct target_timeval)) {
2081                     return -TARGET_EINVAL;
2082                 }
2083 
2084                 if (copy_from_user_timeval(&tv, optval_addr)) {
2085                     return -TARGET_EFAULT;
2086                 }
2087 
2088                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2089                                 &tv, sizeof(tv)));
2090                 return ret;
2091         }
2092         case TARGET_SO_SNDTIMEO:
2093                 optname = SO_SNDTIMEO;
2094                 goto set_timeout;
2095         case TARGET_SO_ATTACH_FILTER:
2096         {
2097                 struct target_sock_fprog *tfprog;
2098                 struct target_sock_filter *tfilter;
2099                 struct sock_fprog fprog;
2100                 struct sock_filter *filter;
2101                 int i;
2102 
2103                 if (optlen != sizeof(*tfprog)) {
2104                     return -TARGET_EINVAL;
2105                 }
2106                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2107                     return -TARGET_EFAULT;
2108                 }
2109                 if (!lock_user_struct(VERIFY_READ, tfilter,
2110                                       tswapal(tfprog->filter), 0)) {
2111                     unlock_user_struct(tfprog, optval_addr, 1);
2112                     return -TARGET_EFAULT;
2113                 }
2114 
2115                 fprog.len = tswap16(tfprog->len);
2116                 filter = g_try_new(struct sock_filter, fprog.len);
2117                 if (filter == NULL) {
2118                     unlock_user_struct(tfilter, tfprog->filter, 1);
2119                     unlock_user_struct(tfprog, optval_addr, 1);
2120                     return -TARGET_ENOMEM;
2121                 }
2122                 for (i = 0; i < fprog.len; i++) {
2123                     filter[i].code = tswap16(tfilter[i].code);
2124                     filter[i].jt = tfilter[i].jt;
2125                     filter[i].jf = tfilter[i].jf;
2126                     filter[i].k = tswap32(tfilter[i].k);
2127                 }
2128                 fprog.filter = filter;
2129 
2130                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2131                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2132                 g_free(filter);
2133 
2134                 unlock_user_struct(tfilter, tfprog->filter, 1);
2135                 unlock_user_struct(tfprog, optval_addr, 1);
2136                 return ret;
2137         }
2138         case TARGET_SO_BINDTODEVICE:
2139         {
2140                 char *dev_ifname, *addr_ifname;
2141 
2142                 if (optlen > IFNAMSIZ - 1) {
2143                     optlen = IFNAMSIZ - 1;
2144                 }
2145                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2146                 if (!dev_ifname) {
2147                     return -TARGET_EFAULT;
2148                 }
2149                 optname = SO_BINDTODEVICE;
2150                 addr_ifname = alloca(IFNAMSIZ);
2151                 memcpy(addr_ifname, dev_ifname, optlen);
2152                 addr_ifname[optlen] = 0;
2153                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2154                                            addr_ifname, optlen));
2155                 unlock_user(dev_ifname, optval_addr, 0);
2156                 return ret;
2157         }
2158         case TARGET_SO_LINGER:
2159         {
2160                 struct linger lg;
2161                 struct target_linger *tlg;
2162 
2163                 if (optlen != sizeof(struct target_linger)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 __get_user(lg.l_onoff, &tlg->l_onoff);
2170                 __get_user(lg.l_linger, &tlg->l_linger);
2171                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2172                                 &lg, sizeof(lg)));
2173                 unlock_user_struct(tlg, optval_addr, 0);
2174                 return ret;
2175         }
2176             /* Options with 'int' argument.  */
2177         case TARGET_SO_DEBUG:
2178                 optname = SO_DEBUG;
2179                 break;
2180         case TARGET_SO_REUSEADDR:
2181                 optname = SO_REUSEADDR;
2182                 break;
2183 #ifdef SO_REUSEPORT
2184         case TARGET_SO_REUSEPORT:
2185                 optname = SO_REUSEPORT;
2186                 break;
2187 #endif
2188         case TARGET_SO_TYPE:
2189                 optname = SO_TYPE;
2190                 break;
2191         case TARGET_SO_ERROR:
2192                 optname = SO_ERROR;
2193                 break;
2194         case TARGET_SO_DONTROUTE:
2195                 optname = SO_DONTROUTE;
2196                 break;
2197         case TARGET_SO_BROADCAST:
2198                 optname = SO_BROADCAST;
2199                 break;
2200         case TARGET_SO_SNDBUF:
2201                 optname = SO_SNDBUF;
2202                 break;
2203         case TARGET_SO_SNDBUFFORCE:
2204                 optname = SO_SNDBUFFORCE;
2205                 break;
2206         case TARGET_SO_RCVBUF:
2207                 optname = SO_RCVBUF;
2208                 break;
2209         case TARGET_SO_RCVBUFFORCE:
2210                 optname = SO_RCVBUFFORCE;
2211                 break;
2212         case TARGET_SO_KEEPALIVE:
2213                 optname = SO_KEEPALIVE;
2214                 break;
2215         case TARGET_SO_OOBINLINE:
2216                 optname = SO_OOBINLINE;
2217                 break;
2218         case TARGET_SO_NO_CHECK:
2219                 optname = SO_NO_CHECK;
2220                 break;
2221         case TARGET_SO_PRIORITY:
2222                 optname = SO_PRIORITY;
2223                 break;
2224 #ifdef SO_BSDCOMPAT
2225         case TARGET_SO_BSDCOMPAT:
2226                 optname = SO_BSDCOMPAT;
2227                 break;
2228 #endif
2229         case TARGET_SO_PASSCRED:
2230                 optname = SO_PASSCRED;
2231                 break;
2232         case TARGET_SO_PASSSEC:
2233                 optname = SO_PASSSEC;
2234                 break;
2235         case TARGET_SO_TIMESTAMP:
2236                 optname = SO_TIMESTAMP;
2237                 break;
2238         case TARGET_SO_RCVLOWAT:
2239                 optname = SO_RCVLOWAT;
2240                 break;
2241         default:
2242             goto unimplemented;
2243         }
2244         if (optlen < sizeof(uint32_t))
2245             return -TARGET_EINVAL;
2246 
2247         if (get_user_u32(val, optval_addr))
2248             return -TARGET_EFAULT;
2249         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2250         break;
2251 #ifdef SOL_NETLINK
2252     case SOL_NETLINK:
2253         switch (optname) {
2254         case NETLINK_PKTINFO:
2255         case NETLINK_ADD_MEMBERSHIP:
2256         case NETLINK_DROP_MEMBERSHIP:
2257         case NETLINK_BROADCAST_ERROR:
2258         case NETLINK_NO_ENOBUFS:
2259 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2260         case NETLINK_LISTEN_ALL_NSID:
2261         case NETLINK_CAP_ACK:
2262 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2264         case NETLINK_EXT_ACK:
2265 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2266 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2267         case NETLINK_GET_STRICT_CHK:
2268 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2269             break;
2270         default:
2271             goto unimplemented;
2272         }
2273         val = 0;
2274         if (optlen < sizeof(uint32_t)) {
2275             return -TARGET_EINVAL;
2276         }
2277         if (get_user_u32(val, optval_addr)) {
2278             return -TARGET_EFAULT;
2279         }
2280         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2281                                    sizeof(val)));
2282         break;
2283 #endif /* SOL_NETLINK */
2284     default:
2285     unimplemented:
2286         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2287         ret = -TARGET_ENOPROTOOPT;
2288     }
2289     return ret;
2290 }
2291 
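/*
 * Illustrative guest-side counterpart of the code below: reading back an
 * integer option,
 *
 *     int err;
 *     socklen_t len = sizeof(err);
 *     getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * Both the option value and the updated length live in guest memory, so
 * they are accessed with get_user_u32()/put_user_u32() rather than plain
 * pointers.
 */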
2292 /* do_getsockopt() Must return target values and target errnos. */
2293 static abi_long do_getsockopt(int sockfd, int level, int optname,
2294                               abi_ulong optval_addr, abi_ulong optlen)
2295 {
2296     abi_long ret;
2297     int len, val;
2298     socklen_t lv;
2299 
2300     switch(level) {
2301     case TARGET_SOL_SOCKET:
2302         level = SOL_SOCKET;
2303         switch (optname) {
2304         /* These don't just return a single integer */
2305         case TARGET_SO_RCVTIMEO:
2306         case TARGET_SO_SNDTIMEO:
2307         case TARGET_SO_PEERNAME:
2308             goto unimplemented;
2309         case TARGET_SO_PEERCRED: {
2310             struct ucred cr;
2311             socklen_t crlen;
2312             struct target_ucred *tcr;
2313 
2314             if (get_user_u32(len, optlen)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             if (len < 0) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             crlen = sizeof(cr);
2322             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2323                                        &cr, &crlen));
2324             if (ret < 0) {
2325                 return ret;
2326             }
2327             if (len > crlen) {
2328                 len = crlen;
2329             }
2330             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2331                 return -TARGET_EFAULT;
2332             }
2333             __put_user(cr.pid, &tcr->pid);
2334             __put_user(cr.uid, &tcr->uid);
2335             __put_user(cr.gid, &tcr->gid);
2336             unlock_user_struct(tcr, optval_addr, 1);
2337             if (put_user_u32(len, optlen)) {
2338                 return -TARGET_EFAULT;
2339             }
2340             break;
2341         }
2342         case TARGET_SO_LINGER:
2343         {
2344             struct linger lg;
2345             socklen_t lglen;
2346             struct target_linger *tlg;
2347 
2348             if (get_user_u32(len, optlen)) {
2349                 return -TARGET_EFAULT;
2350             }
2351             if (len < 0) {
2352                 return -TARGET_EINVAL;
2353             }
2354 
2355             lglen = sizeof(lg);
2356             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2357                                        &lg, &lglen));
2358             if (ret < 0) {
2359                 return ret;
2360             }
2361             if (len > lglen) {
2362                 len = lglen;
2363             }
2364             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2365                 return -TARGET_EFAULT;
2366             }
2367             __put_user(lg.l_onoff, &tlg->l_onoff);
2368             __put_user(lg.l_linger, &tlg->l_linger);
2369             unlock_user_struct(tlg, optval_addr, 1);
2370             if (put_user_u32(len, optlen)) {
2371                 return -TARGET_EFAULT;
2372             }
2373             break;
2374         }
2375         /* Options with 'int' argument.  */
2376         case TARGET_SO_DEBUG:
2377             optname = SO_DEBUG;
2378             goto int_case;
2379         case TARGET_SO_REUSEADDR:
2380             optname = SO_REUSEADDR;
2381             goto int_case;
2382 #ifdef SO_REUSEPORT
2383         case TARGET_SO_REUSEPORT:
2384             optname = SO_REUSEPORT;
2385             goto int_case;
2386 #endif
2387         case TARGET_SO_TYPE:
2388             optname = SO_TYPE;
2389             goto int_case;
2390         case TARGET_SO_ERROR:
2391             optname = SO_ERROR;
2392             goto int_case;
2393         case TARGET_SO_DONTROUTE:
2394             optname = SO_DONTROUTE;
2395             goto int_case;
2396         case TARGET_SO_BROADCAST:
2397             optname = SO_BROADCAST;
2398             goto int_case;
2399         case TARGET_SO_SNDBUF:
2400             optname = SO_SNDBUF;
2401             goto int_case;
2402         case TARGET_SO_RCVBUF:
2403             optname = SO_RCVBUF;
2404             goto int_case;
2405         case TARGET_SO_KEEPALIVE:
2406             optname = SO_KEEPALIVE;
2407             goto int_case;
2408         case TARGET_SO_OOBINLINE:
2409             optname = SO_OOBINLINE;
2410             goto int_case;
2411         case TARGET_SO_NO_CHECK:
2412             optname = SO_NO_CHECK;
2413             goto int_case;
2414         case TARGET_SO_PRIORITY:
2415             optname = SO_PRIORITY;
2416             goto int_case;
2417 #ifdef SO_BSDCOMPAT
2418         case TARGET_SO_BSDCOMPAT:
2419             optname = SO_BSDCOMPAT;
2420             goto int_case;
2421 #endif
2422         case TARGET_SO_PASSCRED:
2423             optname = SO_PASSCRED;
2424             goto int_case;
2425         case TARGET_SO_TIMESTAMP:
2426             optname = SO_TIMESTAMP;
2427             goto int_case;
2428         case TARGET_SO_RCVLOWAT:
2429             optname = SO_RCVLOWAT;
2430             goto int_case;
2431         case TARGET_SO_ACCEPTCONN:
2432             optname = SO_ACCEPTCONN;
2433             goto int_case;
2434         default:
2435             goto int_case;
2436         }
2437         break;
2438     case SOL_TCP:
2439         /* TCP options all take an 'int' value.  */
2440     int_case:
2441         if (get_user_u32(len, optlen))
2442             return -TARGET_EFAULT;
2443         if (len < 0)
2444             return -TARGET_EINVAL;
2445         lv = sizeof(lv);
2446         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2447         if (ret < 0)
2448             return ret;
2449         if (optname == SO_TYPE) {
2450             val = host_to_target_sock_type(val);
2451         }
2452         if (len > lv)
2453             len = lv;
2454         if (len == 4) {
2455             if (put_user_u32(val, optval_addr))
2456                 return -TARGET_EFAULT;
2457         } else {
2458             if (put_user_u8(val, optval_addr))
2459                 return -TARGET_EFAULT;
2460         }
2461         if (put_user_u32(len, optlen))
2462             return -TARGET_EFAULT;
2463         break;
2464     case SOL_IP:
2465         switch(optname) {
2466         case IP_TOS:
2467         case IP_TTL:
2468         case IP_HDRINCL:
2469         case IP_ROUTER_ALERT:
2470         case IP_RECVOPTS:
2471         case IP_RETOPTS:
2472         case IP_PKTINFO:
2473         case IP_MTU_DISCOVER:
2474         case IP_RECVERR:
2475         case IP_RECVTOS:
2476 #ifdef IP_FREEBIND
2477         case IP_FREEBIND:
2478 #endif
2479         case IP_MULTICAST_TTL:
2480         case IP_MULTICAST_LOOP:
2481             if (get_user_u32(len, optlen))
2482                 return -TARGET_EFAULT;
2483             if (len < 0)
2484                 return -TARGET_EINVAL;
2485             lv = sizeof(lv);
2486             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2487             if (ret < 0)
2488                 return ret;
2489             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2490                 len = 1;
2491                 if (put_user_u32(len, optlen)
2492                     || put_user_u8(val, optval_addr))
2493                     return -TARGET_EFAULT;
2494             } else {
2495                 if (len > sizeof(int))
2496                     len = sizeof(int);
2497                 if (put_user_u32(len, optlen)
2498                     || put_user_u32(val, optval_addr))
2499                     return -TARGET_EFAULT;
2500             }
2501             break;
2502         default:
2503             ret = -TARGET_ENOPROTOOPT;
2504             break;
2505         }
2506         break;
2507     case SOL_IPV6:
2508         switch (optname) {
2509         case IPV6_MTU_DISCOVER:
2510         case IPV6_MTU:
2511         case IPV6_V6ONLY:
2512         case IPV6_RECVPKTINFO:
2513         case IPV6_UNICAST_HOPS:
2514         case IPV6_MULTICAST_HOPS:
2515         case IPV6_MULTICAST_LOOP:
2516         case IPV6_RECVERR:
2517         case IPV6_RECVHOPLIMIT:
2518         case IPV6_2292HOPLIMIT:
2519         case IPV6_CHECKSUM:
2520         case IPV6_ADDRFORM:
2521         case IPV6_2292PKTINFO:
2522         case IPV6_RECVTCLASS:
2523         case IPV6_RECVRTHDR:
2524         case IPV6_2292RTHDR:
2525         case IPV6_RECVHOPOPTS:
2526         case IPV6_2292HOPOPTS:
2527         case IPV6_RECVDSTOPTS:
2528         case IPV6_2292DSTOPTS:
2529         case IPV6_TCLASS:
2530 #ifdef IPV6_RECVPATHMTU
2531         case IPV6_RECVPATHMTU:
2532 #endif
2533 #ifdef IPV6_TRANSPARENT
2534         case IPV6_TRANSPARENT:
2535 #endif
2536 #ifdef IPV6_FREEBIND
2537         case IPV6_FREEBIND:
2538 #endif
2539 #ifdef IPV6_RECVORIGDSTADDR
2540         case IPV6_RECVORIGDSTADDR:
2541 #endif
2542             if (get_user_u32(len, optlen))
2543                 return -TARGET_EFAULT;
2544             if (len < 0)
2545                 return -TARGET_EINVAL;
2546             lv = sizeof(lv);
2547             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2548             if (ret < 0)
2549                 return ret;
2550             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2551                 len = 1;
2552                 if (put_user_u32(len, optlen)
2553                     || put_user_u8(val, optval_addr))
2554                     return -TARGET_EFAULT;
2555             } else {
2556                 if (len > sizeof(int))
2557                     len = sizeof(int);
2558                 if (put_user_u32(len, optlen)
2559                     || put_user_u32(val, optval_addr))
2560                     return -TARGET_EFAULT;
2561             }
2562             break;
2563         default:
2564             ret = -TARGET_ENOPROTOOPT;
2565             break;
2566         }
2567         break;
2568 #ifdef SOL_NETLINK
2569     case SOL_NETLINK:
2570         switch (optname) {
2571         case NETLINK_PKTINFO:
2572         case NETLINK_BROADCAST_ERROR:
2573         case NETLINK_NO_ENOBUFS:
2574 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2575         case NETLINK_LISTEN_ALL_NSID:
2576         case NETLINK_CAP_ACK:
2577 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2579         case NETLINK_EXT_ACK:
2580 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2581 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2582         case NETLINK_GET_STRICT_CHK:
2583 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2584             if (get_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             if (len != sizeof(val)) {
2588                 return -TARGET_EINVAL;
2589             }
2590             lv = len;
2591             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (put_user_u32(lv, optlen)
2596                 || put_user_u32(val, optval_addr)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601         case NETLINK_LIST_MEMBERSHIPS:
2602         {
2603             uint32_t *results;
2604             int i;
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2612             if (!results) {
2613                 return -TARGET_EFAULT;
2614             }
2615             lv = len;
2616             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2617             if (ret < 0) {
2618                 unlock_user(results, optval_addr, 0);
2619                 return ret;
2620             }
2621             /* swap host endianess to target endianess. */
2622             /* swap host endianness to target endianness. */
2623                 results[i] = tswap32(results[i]);
2624             }
2625             unlock_user(results, optval_addr, 0);
2626             if (put_user_u32(lv, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2632         default:
2633             goto unimplemented;
2634         }
             break;
2635 #endif /* SOL_NETLINK */
2636     default:
2637     unimplemented:
2638         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2639                  level, optname);
2640         ret = -TARGET_EOPNOTSUPP;
2641         break;
2642     }
2643     return ret;
2644 }
2645 
2646 /* Convert target low/high pair representing file offset into the host
2647  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2648  * as the kernel doesn't handle them either.
2649  */
2650 static void target_to_host_low_high(abi_ulong tlow,
2651                                     abi_ulong thigh,
2652                                     unsigned long *hlow,
2653                                     unsigned long *hhigh)
2654 {
2655     uint64_t off = tlow |
2656         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2657         TARGET_LONG_BITS / 2;
2658 
2659     *hlow = off;
2660     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2661 }
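/*
 * Worked example (assuming a 32-bit target on a 64-bit host): for
 * tlow = 0x00001000 and thigh = 0x2, off = 0x200001000, so
 * *hlow = 0x200001000 and *hhigh = 0.  On a 32-bit host the same input
 * yields *hlow = 0x1000 and *hhigh = 0x2.
 */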
2662 
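/*
 * lock_iovec() translates a guest iovec array, e.g. one built by a guest
 * calling (illustrative sketch only):
 *
 *     struct iovec iov[2] = { { buf1, len1 }, { buf2, len2 } };
 *     writev(fd, iov, 2);
 *
 * Each target_iovec entry is byte-swapped and its base pointer is locked
 * into host memory individually; bad pointers after the first entry turn
 * into zero-length entries so a partial write can still happen.
 */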
2663 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2664                                 abi_ulong count, int copy)
2665 {
2666     struct target_iovec *target_vec;
2667     struct iovec *vec;
2668     abi_ulong total_len, max_len;
2669     int i;
2670     int err = 0;
2671     bool bad_address = false;
2672 
2673     if (count == 0) {
2674         errno = 0;
2675         return NULL;
2676     }
2677     if (count > IOV_MAX) {
2678         errno = EINVAL;
2679         return NULL;
2680     }
2681 
2682     vec = g_try_new0(struct iovec, count);
2683     if (vec == NULL) {
2684         errno = ENOMEM;
2685         return NULL;
2686     }
2687 
2688     target_vec = lock_user(VERIFY_READ, target_addr,
2689                            count * sizeof(struct target_iovec), 1);
2690     if (target_vec == NULL) {
2691         err = EFAULT;
2692         goto fail2;
2693     }
2694 
2695     /* ??? If host page size > target page size, this will result in a
2696        value larger than what we can actually support.  */
2697     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2698     total_len = 0;
2699 
2700     for (i = 0; i < count; i++) {
2701         abi_ulong base = tswapal(target_vec[i].iov_base);
2702         abi_long len = tswapal(target_vec[i].iov_len);
2703 
2704         if (len < 0) {
2705             err = EINVAL;
2706             goto fail;
2707         } else if (len == 0) {
2708             /* Zero length pointer is ignored.  */
2709             vec[i].iov_base = 0;
2710         } else {
2711             vec[i].iov_base = lock_user(type, base, len, copy);
2712             /* If the first buffer pointer is bad, this is a fault.  But
2713              * subsequent bad buffers will result in a partial write; this
2714              * is realized by filling the vector with null pointers and
2715              * zero lengths. */
2716             if (!vec[i].iov_base) {
2717                 if (i == 0) {
2718                     err = EFAULT;
2719                     goto fail;
2720                 } else {
2721                     bad_address = true;
2722                 }
2723             }
2724             if (bad_address) {
2725                 len = 0;
2726             }
2727             if (len > max_len - total_len) {
2728                 len = max_len - total_len;
2729             }
2730         }
2731         vec[i].iov_len = len;
2732         total_len += len;
2733     }
2734 
2735     unlock_user(target_vec, target_addr, 0);
2736     return vec;
2737 
2738  fail:
2739     while (--i >= 0) {
2740         if (tswapal(target_vec[i].iov_len) > 0) {
2741             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2742         }
2743     }
2744     unlock_user(target_vec, target_addr, 0);
2745  fail2:
2746     g_free(vec);
2747     errno = err;
2748     return NULL;
2749 }
2750 
2751 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2752                          abi_ulong count, int copy)
2753 {
2754     struct target_iovec *target_vec;
2755     int i;
2756 
2757     target_vec = lock_user(VERIFY_READ, target_addr,
2758                            count * sizeof(struct target_iovec), 1);
2759     if (target_vec) {
2760         for (i = 0; i < count; i++) {
2761             abi_ulong base = tswapal(target_vec[i].iov_base);
2762             abi_long len = tswapal(target_vec[i].iov_len);
2763             if (len < 0) {
2764                 break;
2765             }
2766             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2767         }
2768         unlock_user(target_vec, target_addr, 0);
2769     }
2770 
2771     g_free(vec);
2772 }
2773 
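/*
 * The numeric values of the SOCK_* type and flag constants differ between
 * some targets and the host, so a guest request such as (illustrative):
 *
 *     int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
 *
 * has its type argument remapped here before the host socket() call.
 */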
2774 static inline int target_to_host_sock_type(int *type)
2775 {
2776     int host_type = 0;
2777     int target_type = *type;
2778 
2779     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2780     case TARGET_SOCK_DGRAM:
2781         host_type = SOCK_DGRAM;
2782         break;
2783     case TARGET_SOCK_STREAM:
2784         host_type = SOCK_STREAM;
2785         break;
2786     default:
2787         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2788         break;
2789     }
2790     if (target_type & TARGET_SOCK_CLOEXEC) {
2791 #if defined(SOCK_CLOEXEC)
2792         host_type |= SOCK_CLOEXEC;
2793 #else
2794         return -TARGET_EINVAL;
2795 #endif
2796     }
2797     if (target_type & TARGET_SOCK_NONBLOCK) {
2798 #if defined(SOCK_NONBLOCK)
2799         host_type |= SOCK_NONBLOCK;
2800 #elif !defined(O_NONBLOCK)
2801         return -TARGET_EINVAL;
2802 #endif
2803     }
2804     *type = host_type;
2805     return 0;
2806 }
2807 
2808 /* Try to emulate socket type flags after socket creation.  */
2809 static int sock_flags_fixup(int fd, int target_type)
2810 {
2811 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2812     if (target_type & TARGET_SOCK_NONBLOCK) {
2813         int flags = fcntl(fd, F_GETFL);
2814         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2815             close(fd);
2816             return -TARGET_EINVAL;
2817         }
2818     }
2819 #endif
2820     return fd;
2821 }
2822 
2823 /* do_socket() Must return target values and target errnos. */
2824 static abi_long do_socket(int domain, int type, int protocol)
2825 {
2826     int target_type = type;
2827     int ret;
2828 
2829     ret = target_to_host_sock_type(&type);
2830     if (ret) {
2831         return ret;
2832     }
2833 
2834     if (domain == PF_NETLINK && !(
2835 #ifdef CONFIG_RTNETLINK
2836          protocol == NETLINK_ROUTE ||
2837 #endif
2838          protocol == NETLINK_KOBJECT_UEVENT ||
2839          protocol == NETLINK_AUDIT)) {
2840         return -EPFNOSUPPORT;
2841     }
2842 
2843     if (domain == AF_PACKET ||
2844         (domain == AF_INET && type == SOCK_PACKET)) {
2845         protocol = tswap16(protocol);
2846     }
2847 
2848     ret = get_errno(socket(domain, type, protocol));
2849     if (ret >= 0) {
2850         ret = sock_flags_fixup(ret, target_type);
2851         if (type == SOCK_PACKET) {
2852             /* Handle an obsolete case: if the socket type is
2853              * SOCK_PACKET, the socket is bound by device name.
2854              */
2855             fd_trans_register(ret, &target_packet_trans);
2856         } else if (domain == PF_NETLINK) {
2857             switch (protocol) {
2858 #ifdef CONFIG_RTNETLINK
2859             case NETLINK_ROUTE:
2860                 fd_trans_register(ret, &target_netlink_route_trans);
2861                 break;
2862 #endif
2863             case NETLINK_KOBJECT_UEVENT:
2864                 /* nothing to do: messages are strings */
2865                 break;
2866             case NETLINK_AUDIT:
2867                 fd_trans_register(ret, &target_netlink_audit_trans);
2868                 break;
2869             default:
2870                 g_assert_not_reached();
2871             }
2872         }
2873     }
2874     return ret;
2875 }
2876 
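/*
 * Illustrative guest-side call handled below:
 *
 *     struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *     strcpy(sun.sun_path, "/tmp/qemu.sock");
 *     bind(fd, (struct sockaddr *)&sun, sizeof(sun));
 *
 * The addrlen + 1 allocation leaves room for target_to_host_sockaddr() to
 * NUL-terminate an AF_UNIX path if the guest did not include the '\0'.
 */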
2877 /* do_bind() Must return target values and target errnos. */
2878 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2879                         socklen_t addrlen)
2880 {
2881     void *addr;
2882     abi_long ret;
2883 
2884     if ((int)addrlen < 0) {
2885         return -TARGET_EINVAL;
2886     }
2887 
2888     addr = alloca(addrlen+1);
2889 
2890     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2891     if (ret)
2892         return ret;
2893 
2894     return get_errno(bind(sockfd, addr, addrlen));
2895 }
2896 
2897 /* do_connect() Must return target values and target errnos. */
2898 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2899                            socklen_t addrlen)
2900 {
2901     void *addr;
2902     abi_long ret;
2903 
2904     if ((int)addrlen < 0) {
2905         return -TARGET_EINVAL;
2906     }
2907 
2908     addr = alloca(addrlen+1);
2909 
2910     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2911     if (ret)
2912         return ret;
2913 
2914     return get_errno(safe_connect(sockfd, addr, addrlen));
2915 }
2916 
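/*
 * do_sendrecvmsg_locked() backs both sendmsg() and recvmsg().  A guest
 * receive with an address buffer (illustrative sketch only),
 *
 *     struct sockaddr_storage ss;
 *     struct msghdr msg = { .msg_name = &ss, .msg_namelen = sizeof(ss) };
 *     recvmsg(fd, &msg, 0);
 *
 * needs the name, iovec and control areas converted in both directions.
 * The host control buffer is sized at twice the guest's msg_controllen
 * because host cmsg headers and alignment can be larger than the target's.
 */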
2917 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2918 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2919                                       int flags, int send)
2920 {
2921     abi_long ret, len;
2922     struct msghdr msg;
2923     abi_ulong count;
2924     struct iovec *vec;
2925     abi_ulong target_vec;
2926 
2927     if (msgp->msg_name) {
2928         msg.msg_namelen = tswap32(msgp->msg_namelen);
2929         msg.msg_name = alloca(msg.msg_namelen+1);
2930         ret = target_to_host_sockaddr(fd, msg.msg_name,
2931                                       tswapal(msgp->msg_name),
2932                                       msg.msg_namelen);
2933         if (ret == -TARGET_EFAULT) {
2934             /* For connected sockets msg_name and msg_namelen must
2935              * be ignored, so returning EFAULT immediately is wrong.
2936              * Instead, pass a bad msg_name to the host kernel, and
2937              * let it decide whether to return EFAULT or not.
2938              */
2939             msg.msg_name = (void *)-1;
2940         } else if (ret) {
2941             goto out2;
2942         }
2943     } else {
2944         msg.msg_name = NULL;
2945         msg.msg_namelen = 0;
2946     }
2947     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2948     msg.msg_control = alloca(msg.msg_controllen);
2949     memset(msg.msg_control, 0, msg.msg_controllen);
2950 
2951     msg.msg_flags = tswap32(msgp->msg_flags);
2952 
2953     count = tswapal(msgp->msg_iovlen);
2954     target_vec = tswapal(msgp->msg_iov);
2955 
2956     if (count > IOV_MAX) {
2957         /* sendmsg/recvmsg return a different errno for this condition than
2958          * readv/writev, so we must catch it here before lock_iovec() does.
2959          */
2960         ret = -TARGET_EMSGSIZE;
2961         goto out2;
2962     }
2963 
2964     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2965                      target_vec, count, send);
2966     if (vec == NULL) {
2967         ret = -host_to_target_errno(errno);
2968         goto out2;
2969     }
2970     msg.msg_iovlen = count;
2971     msg.msg_iov = vec;
2972 
2973     if (send) {
2974         if (fd_trans_target_to_host_data(fd)) {
2975             void *host_msg;
2976 
2977             host_msg = g_malloc(msg.msg_iov->iov_len);
2978             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2979             ret = fd_trans_target_to_host_data(fd)(host_msg,
2980                                                    msg.msg_iov->iov_len);
2981             if (ret >= 0) {
2982                 msg.msg_iov->iov_base = host_msg;
2983                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2984             }
2985             g_free(host_msg);
2986         } else {
2987             ret = target_to_host_cmsg(&msg, msgp);
2988             if (ret == 0) {
2989                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2990             }
2991         }
2992     } else {
2993         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2994         if (!is_error(ret)) {
2995             len = ret;
2996             if (fd_trans_host_to_target_data(fd)) {
2997                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2998                                                MIN(msg.msg_iov->iov_len, len));
2999             } else {
3000                 ret = host_to_target_cmsg(msgp, &msg);
3001             }
3002             if (!is_error(ret)) {
3003                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3004                 msgp->msg_flags = tswap32(msg.msg_flags);
3005                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3006                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3007                                     msg.msg_name, msg.msg_namelen);
3008                     if (ret) {
3009                         goto out;
3010                     }
3011                 }
3012 
3013                 ret = len;
3014             }
3015         }
3016     }
3017 
3018 out:
3019     unlock_iovec(vec, target_vec, count, !send);
3020 out2:
3021     return ret;
3022 }
3023 
3024 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3025                                int flags, int send)
3026 {
3027     abi_long ret;
3028     struct target_msghdr *msgp;
3029 
3030     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3031                           msgp,
3032                           target_msg,
3033                           send ? 1 : 0)) {
3034         return -TARGET_EFAULT;
3035     }
3036     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3037     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3038     return ret;
3039 }
3040 
3041 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3042  * so it might not have this *mmsg-specific flag either.
3043  */
3044 #ifndef MSG_WAITFORONE
3045 #define MSG_WAITFORONE 0x10000
3046 #endif
3047 
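/*
 * do_sendrecvmmsg() loops the single-message helper over a guest vector,
 * e.g. (illustrative sketch only):
 *
 *     struct mmsghdr msgs[8];
 *     recvmmsg(fd, msgs, 8, MSG_WAITFORONE, NULL);
 *
 * After the first datagram MSG_DONTWAIT is added, matching the kernel's
 * MSG_WAITFORONE semantics, and msg_len is written back for each entry.
 */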
3048 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3049                                 unsigned int vlen, unsigned int flags,
3050                                 int send)
3051 {
3052     struct target_mmsghdr *mmsgp;
3053     abi_long ret = 0;
3054     int i;
3055 
3056     if (vlen > UIO_MAXIOV) {
3057         vlen = UIO_MAXIOV;
3058     }
3059 
3060     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3061     if (!mmsgp) {
3062         return -TARGET_EFAULT;
3063     }
3064 
3065     for (i = 0; i < vlen; i++) {
3066         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3067         if (is_error(ret)) {
3068             break;
3069         }
3070         mmsgp[i].msg_len = tswap32(ret);
3071         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3072         if (flags & MSG_WAITFORONE) {
3073             flags |= MSG_DONTWAIT;
3074         }
3075     }
3076 
3077     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3078 
3079     /* Return number of datagrams sent if we sent any at all;
3080      * otherwise return the error.
3081      */
3082     if (i) {
3083         return i;
3084     }
3085     return ret;
3086 }
3087 
3088 /* do_accept4() must return target values and target errnos. */
3089 static abi_long do_accept4(int fd, abi_ulong target_addr,
3090                            abi_ulong target_addrlen_addr, int flags)
3091 {
3092     socklen_t addrlen, ret_addrlen;
3093     void *addr;
3094     abi_long ret;
3095     int host_flags;
3096 
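    /* SOCK_NONBLOCK and SOCK_CLOEXEC share their values with O_NONBLOCK and
     * O_CLOEXEC, so the fcntl flag table can translate them. */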
3097     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3098 
3099     if (target_addr == 0) {
3100         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3101     }
3102 
3103     /* Linux returns EINVAL if the addrlen pointer is invalid */
3104     if (get_user_u32(addrlen, target_addrlen_addr))
3105         return -TARGET_EINVAL;
3106 
3107     if ((int)addrlen < 0) {
3108         return -TARGET_EINVAL;
3109     }
3110 
3111     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3112         return -TARGET_EINVAL;
3113 
3114     addr = alloca(addrlen);
3115 
3116     ret_addrlen = addrlen;
3117     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3118     if (!is_error(ret)) {
3119         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3120         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3121             ret = -TARGET_EFAULT;
3122         }
3123     }
3124     return ret;
3125 }
3126 
3127 /* do_getpeername() must return target values and target errnos. */
3128 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3129                                abi_ulong target_addrlen_addr)
3130 {
3131     socklen_t addrlen, ret_addrlen;
3132     void *addr;
3133     abi_long ret;
3134 
3135     if (get_user_u32(addrlen, target_addrlen_addr))
3136         return -TARGET_EFAULT;
3137 
3138     if ((int)addrlen < 0) {
3139         return -TARGET_EINVAL;
3140     }
3141 
3142     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3143         return -TARGET_EFAULT;
3144 
3145     addr = alloca(addrlen);
3146 
3147     ret_addrlen = addrlen;
3148     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3149     if (!is_error(ret)) {
3150         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3151         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3152             ret = -TARGET_EFAULT;
3153         }
3154     }
3155     return ret;
3156 }
3157 
3158 /* do_getsockname() must return target values and target errnos. */
3159 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3160                                abi_ulong target_addrlen_addr)
3161 {
3162     socklen_t addrlen, ret_addrlen;
3163     void *addr;
3164     abi_long ret;
3165 
3166     if (get_user_u32(addrlen, target_addrlen_addr))
3167         return -TARGET_EFAULT;
3168 
3169     if ((int)addrlen < 0) {
3170         return -TARGET_EINVAL;
3171     }
3172 
3173     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3174         return -TARGET_EFAULT;
3175 
3176     addr = alloca(addrlen);
3177 
3178     ret_addrlen = addrlen;
3179     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3180     if (!is_error(ret)) {
3181         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3182         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3183             ret = -TARGET_EFAULT;
3184         }
3185     }
3186     return ret;
3187 }
3188 
3189 /* do_socketpair() must return target values and target errnos. */
3190 static abi_long do_socketpair(int domain, int type, int protocol,
3191                               abi_ulong target_tab_addr)
3192 {
3193     int tab[2];
3194     abi_long ret;
3195 
3196     target_to_host_sock_type(&type);
3197 
3198     ret = get_errno(socketpair(domain, type, protocol, tab));
3199     if (!is_error(ret)) {
3200         if (put_user_s32(tab[0], target_tab_addr)
3201             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3202             ret = -TARGET_EFAULT;
3203     }
3204     return ret;
3205 }
3206 
3207 /* do_sendto() must return target values and target errnos. */
3208 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3209                           abi_ulong target_addr, socklen_t addrlen)
3210 {
3211     void *addr;
3212     void *host_msg;
3213     void *copy_msg = NULL;
3214     abi_long ret;
3215 
3216     if ((int)addrlen < 0) {
3217         return -TARGET_EINVAL;
3218     }
3219 
3220     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3221     if (!host_msg)
3222         return -TARGET_EFAULT;
3223     if (fd_trans_target_to_host_data(fd)) {
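        /* The guest buffer is locked read-only, so translate in a private
         * copy that the translator is free to modify. */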
3224         copy_msg = host_msg;
3225         host_msg = g_malloc(len);
3226         memcpy(host_msg, copy_msg, len);
3227         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3228         if (ret < 0) {
3229             goto fail;
3230         }
3231     }
3232     if (target_addr) {
3233         addr = alloca(addrlen+1);
3234         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3235         if (ret) {
3236             goto fail;
3237         }
3238         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3239     } else {
3240         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3241     }
3242 fail:
3243     if (copy_msg) {
3244         g_free(host_msg);
3245         host_msg = copy_msg;
3246     }
3247     unlock_user(host_msg, msg, 0);
3248     return ret;
3249 }
3250 
3251 /* do_recvfrom() must return target values and target errnos. */
3252 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3253                             abi_ulong target_addr,
3254                             abi_ulong target_addrlen)
3255 {
3256     socklen_t addrlen, ret_addrlen;
3257     void *addr;
3258     void *host_msg;
3259     abi_long ret;
3260 
3261     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3262     if (!host_msg)
3263         return -TARGET_EFAULT;
3264     if (target_addr) {
3265         if (get_user_u32(addrlen, target_addrlen)) {
3266             ret = -TARGET_EFAULT;
3267             goto fail;
3268         }
3269         if ((int)addrlen < 0) {
3270             ret = -TARGET_EINVAL;
3271             goto fail;
3272         }
3273         addr = alloca(addrlen);
3274         ret_addrlen = addrlen;
3275         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3276                                       addr, &ret_addrlen));
3277     } else {
3278         addr = NULL; /* To keep compiler quiet.  */
3279         addrlen = 0; /* To keep compiler quiet.  */
3280         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3281     }
3282     if (!is_error(ret)) {
3283         if (fd_trans_host_to_target_data(fd)) {
3284             abi_long trans;
3285             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3286             if (is_error(trans)) {
3287                 ret = trans;
3288                 goto fail;
3289             }
3290         }
3291         if (target_addr) {
3292             host_to_target_sockaddr(target_addr, addr,
3293                                     MIN(addrlen, ret_addrlen));
3294             if (put_user_u32(ret_addrlen, target_addrlen)) {
3295                 ret = -TARGET_EFAULT;
3296                 goto fail;
3297             }
3298         }
3299         unlock_user(host_msg, msg, len);
3300     } else {
3301 fail:
3302         unlock_user(host_msg, msg, 0);
3303     }
3304     return ret;
3305 }
3306 
3307 #ifdef TARGET_NR_socketcall
3308 /* do_socketcall() must return target values and target errnos. */
3309 static abi_long do_socketcall(int num, abi_ulong vptr)
3310 {
3311     static const unsigned nargs[] = { /* number of arguments per operation */
3312         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3313         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3314         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3315         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3316         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3317         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3318         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3319         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3320         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3321         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3322         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3323         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3324         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3325         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3326         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3327         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3328         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3329         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3330         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3331         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3332     };
3333     abi_long a[6]; /* max 6 args */
3334     unsigned i;
3335 
3336     /* check the range of the first argument num */
3337     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3338     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3339         return -TARGET_EINVAL;
3340     }
3341     /* ensure we have space for args */
3342     if (nargs[num] > ARRAY_SIZE(a)) {
3343         return -TARGET_EINVAL;
3344     }
3345     /* collect the arguments in a[] according to nargs[] */
3346     for (i = 0; i < nargs[num]; ++i) {
3347         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3348             return -TARGET_EFAULT;
3349         }
3350     }
3351     /* now that we have the args, invoke the appropriate underlying function */
3352     switch (num) {
3353     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3354         return do_socket(a[0], a[1], a[2]);
3355     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3356         return do_bind(a[0], a[1], a[2]);
3357     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3358         return do_connect(a[0], a[1], a[2]);
3359     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3360         return get_errno(listen(a[0], a[1]));
3361     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3362         return do_accept4(a[0], a[1], a[2], 0);
3363     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3364         return do_getsockname(a[0], a[1], a[2]);
3365     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3366         return do_getpeername(a[0], a[1], a[2]);
3367     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3368         return do_socketpair(a[0], a[1], a[2], a[3]);
3369     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3370         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3371     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3372         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3373     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3374         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3375     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3376         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3377     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3378         return get_errno(shutdown(a[0], a[1]));
3379     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3380         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3381     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3382         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3383     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3384         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3385     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3386         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3387     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3388         return do_accept4(a[0], a[1], a[2], a[3]);
3389     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3390         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3391     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3392         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3393     default:
3394         gemu_log("Unsupported socketcall: %d\n", num);
3395         return -TARGET_EINVAL;
3396     }
3397 }
3398 #endif
3399 
3400 #define N_SHM_REGIONS	32
3401 
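/* Book-keeping for guest shmat() attaches; do_shmdt() uses it to clear the
 * page flags of the detached range. */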
3402 static struct shm_region {
3403     abi_ulong start;
3404     abi_ulong size;
3405     bool in_use;
3406 } shm_regions[N_SHM_REGIONS];
3407 
3408 #ifndef TARGET_SEMID64_DS
3409 /* asm-generic version of this struct */
3410 struct target_semid64_ds
3411 {
3412   struct target_ipc_perm sem_perm;
3413   abi_ulong sem_otime;
3414 #if TARGET_ABI_BITS == 32
3415   abi_ulong __unused1;
3416 #endif
3417   abi_ulong sem_ctime;
3418 #if TARGET_ABI_BITS == 32
3419   abi_ulong __unused2;
3420 #endif
3421   abi_ulong sem_nsems;
3422   abi_ulong __unused3;
3423   abi_ulong __unused4;
3424 };
3425 #endif
3426 
3427 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3428                                                abi_ulong target_addr)
3429 {
3430     struct target_ipc_perm *target_ip;
3431     struct target_semid64_ds *target_sd;
3432 
3433     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3434         return -TARGET_EFAULT;
3435     target_ip = &(target_sd->sem_perm);
3436     host_ip->__key = tswap32(target_ip->__key);
3437     host_ip->uid = tswap32(target_ip->uid);
3438     host_ip->gid = tswap32(target_ip->gid);
3439     host_ip->cuid = tswap32(target_ip->cuid);
3440     host_ip->cgid = tswap32(target_ip->cgid);
3441 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3442     host_ip->mode = tswap32(target_ip->mode);
3443 #else
3444     host_ip->mode = tswap16(target_ip->mode);
3445 #endif
3446 #if defined(TARGET_PPC)
3447     host_ip->__seq = tswap32(target_ip->__seq);
3448 #else
3449     host_ip->__seq = tswap16(target_ip->__seq);
3450 #endif
3451     unlock_user_struct(target_sd, target_addr, 0);
3452     return 0;
3453 }
3454 
3455 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3456                                                struct ipc_perm *host_ip)
3457 {
3458     struct target_ipc_perm *target_ip;
3459     struct target_semid64_ds *target_sd;
3460 
3461     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3462         return -TARGET_EFAULT;
3463     target_ip = &(target_sd->sem_perm);
3464     target_ip->__key = tswap32(host_ip->__key);
3465     target_ip->uid = tswap32(host_ip->uid);
3466     target_ip->gid = tswap32(host_ip->gid);
3467     target_ip->cuid = tswap32(host_ip->cuid);
3468     target_ip->cgid = tswap32(host_ip->cgid);
3469 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3470     target_ip->mode = tswap32(host_ip->mode);
3471 #else
3472     target_ip->mode = tswap16(host_ip->mode);
3473 #endif
3474 #if defined(TARGET_PPC)
3475     target_ip->__seq = tswap32(host_ip->__seq);
3476 #else
3477     target_ip->__seq = tswap16(host_ip->__seq);
3478 #endif
3479     unlock_user_struct(target_sd, target_addr, 1);
3480     return 0;
3481 }
3482 
3483 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3484                                                abi_ulong target_addr)
3485 {
3486     struct target_semid64_ds *target_sd;
3487 
3488     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3489         return -TARGET_EFAULT;
3490     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3491         return -TARGET_EFAULT;
3492     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3493     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3494     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3495     unlock_user_struct(target_sd, target_addr, 0);
3496     return 0;
3497 }
3498 
3499 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3500                                                struct semid_ds *host_sd)
3501 {
3502     struct target_semid64_ds *target_sd;
3503 
3504     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3505         return -TARGET_EFAULT;
3506     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3507         return -TARGET_EFAULT;
3508     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3509     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3510     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3511     unlock_user_struct(target_sd, target_addr, 1);
3512     return 0;
3513 }
3514 
3515 struct target_seminfo {
3516     int semmap;
3517     int semmni;
3518     int semmns;
3519     int semmnu;
3520     int semmsl;
3521     int semopm;
3522     int semume;
3523     int semusz;
3524     int semvmx;
3525     int semaem;
3526 };
3527 
3528 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3529                                               struct seminfo *host_seminfo)
3530 {
3531     struct target_seminfo *target_seminfo;
3532     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3533         return -TARGET_EFAULT;
3534     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3535     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3536     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3537     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3538     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3539     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3540     __put_user(host_seminfo->semume, &target_seminfo->semume);
3541     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3542     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3543     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3544     unlock_user_struct(target_seminfo, target_addr, 1);
3545     return 0;
3546 }
3547 
3548 union semun {
3549     int val;
3550     struct semid_ds *buf;
3551     unsigned short *array;
3552     struct seminfo *__buf;
3553 };
3554 
3555 union target_semun {
3556     int val;
3557     abi_ulong buf;
3558     abi_ulong array;
3559     abi_ulong __buf;
3560 };
3561 
3562 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3563                                                abi_ulong target_addr)
3564 {
3565     int nsems;
3566     unsigned short *array;
3567     union semun semun;
3568     struct semid_ds semid_ds;
3569     int i, ret;
3570 
3571     semun.buf = &semid_ds;
3572 
3573     ret = semctl(semid, 0, IPC_STAT, semun);
3574     if (ret == -1)
3575         return get_errno(ret);
3576 
3577     nsems = semid_ds.sem_nsems;
3578 
3579     *host_array = g_try_new(unsigned short, nsems);
3580     if (!*host_array) {
3581         return -TARGET_ENOMEM;
3582     }
3583     array = lock_user(VERIFY_READ, target_addr,
3584                       nsems*sizeof(unsigned short), 1);
3585     if (!array) {
3586         g_free(*host_array);
3587         return -TARGET_EFAULT;
3588     }
3589 
3590     for (i = 0; i < nsems; i++) {
3591         __get_user((*host_array)[i], &array[i]);
3592     }
3593     unlock_user(array, target_addr, 0);
3594 
3595     return 0;
3596 }
3597 
3598 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3599                                                unsigned short **host_array)
3600 {
3601     int nsems;
3602     unsigned short *array;
3603     union semun semun;
3604     struct semid_ds semid_ds;
3605     int i, ret;
3606 
3607     semun.buf = &semid_ds;
3608 
3609     ret = semctl(semid, 0, IPC_STAT, semun);
3610     if (ret == -1)
3611         return get_errno(ret);
3612 
3613     nsems = semid_ds.sem_nsems;
3614 
3615     array = lock_user(VERIFY_WRITE, target_addr,
3616                       nsems*sizeof(unsigned short), 0);
3617     if (!array)
3618         return -TARGET_EFAULT;
3619 
3620     for (i = 0; i < nsems; i++) {
3621         __put_user((*host_array)[i], &array[i]);
3622     }
3623     g_free(*host_array);
3624     unlock_user(array, target_addr, 1);
3625 
3626     return 0;
3627 }
3628 
3629 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3630                                  abi_ulong target_arg)
3631 {
3632     union target_semun target_su = { .buf = target_arg };
3633     union semun arg;
3634     struct semid_ds dsarg;
3635     unsigned short *array = NULL;
3636     struct seminfo seminfo;
3637     abi_long ret = -TARGET_EINVAL;
3638     abi_long err;
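    /* Mask off the IPC_64 flag a guest may pass; the conversions below
     * always use the 64-bit semid_ds layout. */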
3639     cmd &= 0xff;
3640 
3641     switch (cmd) {
3642     case GETVAL:
3643     case SETVAL:
3644             /* In 64-bit cross-endian situations, we will erroneously pick up
3645              * the wrong half of the union for the "val" element.  To rectify
3646              * this, the entire 8-byte structure is byteswapped, followed by
3647              * a swap of the 4-byte val field. In other cases, the data is
3648              * already in proper host byte order. */
3649             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3650                 target_su.buf = tswapal(target_su.buf);
3651                 arg.val = tswap32(target_su.val);
3652             } else {
3653                 arg.val = target_su.val;
3654             }
3655             ret = get_errno(semctl(semid, semnum, cmd, arg));
3656             break;
3657     case GETALL:
3658     case SETALL:
3659             err = target_to_host_semarray(semid, &array, target_su.array);
3660             if (err)
3661                 return err;
3662             arg.array = array;
3663             ret = get_errno(semctl(semid, semnum, cmd, arg));
3664             err = host_to_target_semarray(semid, target_su.array, &array);
3665             if (err)
3666                 return err;
3667             break;
3668     case IPC_STAT:
3669     case IPC_SET:
3670     case SEM_STAT:
3671             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3672             if (err)
3673                 return err;
3674             arg.buf = &dsarg;
3675             ret = get_errno(semctl(semid, semnum, cmd, arg));
3676             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3677             if (err)
3678                 return err;
3679             break;
3680     case IPC_INFO:
3681     case SEM_INFO:
3682             arg.__buf = &seminfo;
3683             ret = get_errno(semctl(semid, semnum, cmd, arg));
3684             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3685             if (err)
3686                 return err;
3687             break;
3688     case IPC_RMID:
3689     case GETPID:
3690     case GETNCNT:
3691     case GETZCNT:
3692             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3693             break;
3694     }
3695 
3696     return ret;
3697 }
3698 
3699 struct target_sembuf {
3700     unsigned short sem_num;
3701     short sem_op;
3702     short sem_flg;
3703 };
3704 
3705 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3706                                              abi_ulong target_addr,
3707                                              unsigned nsops)
3708 {
3709     struct target_sembuf *target_sembuf;
3710     int i;
3711 
3712     target_sembuf = lock_user(VERIFY_READ, target_addr,
3713                               nsops*sizeof(struct target_sembuf), 1);
3714     if (!target_sembuf)
3715         return -TARGET_EFAULT;
3716 
3717     for (i = 0; i < nsops; i++) {
3718         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3719         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3720         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3721     }
3722 
3723     unlock_user(target_sembuf, target_addr, 0);
3724 
3725     return 0;
3726 }
3727 
3728 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3729 {
3730     struct sembuf sops[nsops];
3731     abi_long ret;
3732 
3733     if (target_to_host_sembuf(sops, ptr, nsops))
3734         return -TARGET_EFAULT;
3735 
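    /* Prefer semtimedop with a NULL timeout (equivalent to semop); fall back
     * to the multiplexed ipc syscall if it is unavailable. */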
3736     ret = -TARGET_ENOSYS;
3737 #ifdef __NR_semtimedop
3738     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3739 #endif
3740 #ifdef __NR_ipc
3741     if (ret == -TARGET_ENOSYS) {
3742         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3743     }
3744 #endif
3745     return ret;
3746 }
3747 
3748 struct target_msqid_ds
3749 {
3750     struct target_ipc_perm msg_perm;
3751     abi_ulong msg_stime;
3752 #if TARGET_ABI_BITS == 32
3753     abi_ulong __unused1;
3754 #endif
3755     abi_ulong msg_rtime;
3756 #if TARGET_ABI_BITS == 32
3757     abi_ulong __unused2;
3758 #endif
3759     abi_ulong msg_ctime;
3760 #if TARGET_ABI_BITS == 32
3761     abi_ulong __unused3;
3762 #endif
3763     abi_ulong __msg_cbytes;
3764     abi_ulong msg_qnum;
3765     abi_ulong msg_qbytes;
3766     abi_ulong msg_lspid;
3767     abi_ulong msg_lrpid;
3768     abi_ulong __unused4;
3769     abi_ulong __unused5;
3770 };
3771 
3772 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3773                                                abi_ulong target_addr)
3774 {
3775     struct target_msqid_ds *target_md;
3776 
3777     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3778         return -TARGET_EFAULT;
3779     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3780         return -TARGET_EFAULT;
3781     host_md->msg_stime = tswapal(target_md->msg_stime);
3782     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3783     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3784     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3785     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3786     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3787     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3788     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3789     unlock_user_struct(target_md, target_addr, 0);
3790     return 0;
3791 }
3792 
3793 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3794                                                struct msqid_ds *host_md)
3795 {
3796     struct target_msqid_ds *target_md;
3797 
3798     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3799         return -TARGET_EFAULT;
3800     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3801         return -TARGET_EFAULT;
3802     target_md->msg_stime = tswapal(host_md->msg_stime);
3803     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3804     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3805     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3806     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3807     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3808     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3809     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3810     unlock_user_struct(target_md, target_addr, 1);
3811     return 0;
3812 }
3813 
3814 struct target_msginfo {
3815     int msgpool;
3816     int msgmap;
3817     int msgmax;
3818     int msgmnb;
3819     int msgmni;
3820     int msgssz;
3821     int msgtql;
3822     unsigned short int msgseg;
3823 };
3824 
3825 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3826                                               struct msginfo *host_msginfo)
3827 {
3828     struct target_msginfo *target_msginfo;
3829     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3830         return -TARGET_EFAULT;
3831     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3832     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3833     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3834     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3835     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3836     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3837     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3838     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3839     unlock_user_struct(target_msginfo, target_addr, 1);
3840     return 0;
3841 }
3842 
3843 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3844 {
3845     struct msqid_ds dsarg;
3846     struct msginfo msginfo;
3847     abi_long ret = -TARGET_EINVAL;
3848 
3849     cmd &= 0xff;
3850 
3851     switch (cmd) {
3852     case IPC_STAT:
3853     case IPC_SET:
3854     case MSG_STAT:
3855         if (target_to_host_msqid_ds(&dsarg,ptr))
3856             return -TARGET_EFAULT;
3857         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3858         if (host_to_target_msqid_ds(ptr,&dsarg))
3859             return -TARGET_EFAULT;
3860         break;
3861     case IPC_RMID:
3862         ret = get_errno(msgctl(msgid, cmd, NULL));
3863         break;
3864     case IPC_INFO:
3865     case MSG_INFO:
3866         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3867         if (host_to_target_msginfo(ptr, &msginfo))
3868             return -TARGET_EFAULT;
3869         break;
3870     }
3871 
3872     return ret;
3873 }
3874 
3875 struct target_msgbuf {
3876     abi_long mtype;
3877     char	mtext[1];
3878 };
3879 
3880 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3881                                  ssize_t msgsz, int msgflg)
3882 {
3883     struct target_msgbuf *target_mb;
3884     struct msgbuf *host_mb;
3885     abi_long ret = 0;
3886 
3887     if (msgsz < 0) {
3888         return -TARGET_EINVAL;
3889     }
3890 
3891     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3892         return -TARGET_EFAULT;
3893     host_mb = g_try_malloc(msgsz + sizeof(long));
3894     if (!host_mb) {
3895         unlock_user_struct(target_mb, msgp, 0);
3896         return -TARGET_ENOMEM;
3897     }
3898     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3899     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3900     ret = -TARGET_ENOSYS;
3901 #ifdef __NR_msgsnd
3902     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3903 #endif
3904 #ifdef __NR_ipc
3905     if (ret == -TARGET_ENOSYS) {
3906         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3907                                  host_mb, 0));
3908     }
3909 #endif
3910     g_free(host_mb);
3911     unlock_user_struct(target_mb, msgp, 0);
3912 
3913     return ret;
3914 }
3915 
3916 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3917                                  ssize_t msgsz, abi_long msgtyp,
3918                                  int msgflg)
3919 {
3920     struct target_msgbuf *target_mb;
3921     char *target_mtext;
3922     struct msgbuf *host_mb;
3923     abi_long ret = 0;
3924 
3925     if (msgsz < 0) {
3926         return -TARGET_EINVAL;
3927     }
3928 
3929     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3930         return -TARGET_EFAULT;
3931 
3932     host_mb = g_try_malloc(msgsz + sizeof(long));
3933     if (!host_mb) {
3934         ret = -TARGET_ENOMEM;
3935         goto end;
3936     }
3937     ret = -TARGET_ENOSYS;
3938 #ifdef __NR_msgrcv
3939     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3940 #endif
3941 #ifdef __NR_ipc
3942     if (ret == -TARGET_ENOSYS) {
3943         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3944                         msgflg, host_mb, msgtyp));
3945     }
3946 #endif
3947 
3948     if (ret > 0) {
3949         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3950         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3951         if (!target_mtext) {
3952             ret = -TARGET_EFAULT;
3953             goto end;
3954         }
3955         memcpy(target_mb->mtext, host_mb->mtext, ret);
3956         unlock_user(target_mtext, target_mtext_addr, ret);
3957     }
3958 
3959     target_mb->mtype = tswapal(host_mb->mtype);
3960 
3961 end:
3962     if (target_mb)
3963         unlock_user_struct(target_mb, msgp, 1);
3964     g_free(host_mb);
3965     return ret;
3966 }
3967 
3968 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3969                                                abi_ulong target_addr)
3970 {
3971     struct target_shmid_ds *target_sd;
3972 
3973     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3974         return -TARGET_EFAULT;
3975     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3976         return -TARGET_EFAULT;
3977     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3978     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3979     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3980     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3981     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3982     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3983     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3984     unlock_user_struct(target_sd, target_addr, 0);
3985     return 0;
3986 }
3987 
3988 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3989                                                struct shmid_ds *host_sd)
3990 {
3991     struct target_shmid_ds *target_sd;
3992 
3993     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3994         return -TARGET_EFAULT;
3995     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3996         return -TARGET_EFAULT;
3997     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3998     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3999     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4000     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4001     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4002     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4003     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4004     unlock_user_struct(target_sd, target_addr, 1);
4005     return 0;
4006 }
4007 
4008 struct  target_shminfo {
4009     abi_ulong shmmax;
4010     abi_ulong shmmin;
4011     abi_ulong shmmni;
4012     abi_ulong shmseg;
4013     abi_ulong shmall;
4014 };
4015 
4016 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4017                                               struct shminfo *host_shminfo)
4018 {
4019     struct target_shminfo *target_shminfo;
4020     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4021         return -TARGET_EFAULT;
4022     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4023     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4024     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4025     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4026     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4027     unlock_user_struct(target_shminfo, target_addr, 1);
4028     return 0;
4029 }
4030 
4031 struct target_shm_info {
4032     int used_ids;
4033     abi_ulong shm_tot;
4034     abi_ulong shm_rss;
4035     abi_ulong shm_swp;
4036     abi_ulong swap_attempts;
4037     abi_ulong swap_successes;
4038 };
4039 
4040 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4041                                                struct shm_info *host_shm_info)
4042 {
4043     struct target_shm_info *target_shm_info;
4044     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4045         return -TARGET_EFAULT;
4046     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4047     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4048     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4049     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4050     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4051     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4052     unlock_user_struct(target_shm_info, target_addr, 1);
4053     return 0;
4054 }
4055 
4056 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4057 {
4058     struct shmid_ds dsarg;
4059     struct shminfo shminfo;
4060     struct shm_info shm_info;
4061     abi_long ret = -TARGET_EINVAL;
4062 
4063     cmd &= 0xff;
4064 
4065     switch (cmd) {
4066     case IPC_STAT:
4067     case IPC_SET:
4068     case SHM_STAT:
4069         if (target_to_host_shmid_ds(&dsarg, buf))
4070             return -TARGET_EFAULT;
4071         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4072         if (host_to_target_shmid_ds(buf, &dsarg))
4073             return -TARGET_EFAULT;
4074         break;
4075     case IPC_INFO:
4076         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4077         if (host_to_target_shminfo(buf, &shminfo))
4078             return -TARGET_EFAULT;
4079         break;
4080     case SHM_INFO:
4081         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4082         if (host_to_target_shm_info(buf, &shm_info))
4083             return -TARGET_EFAULT;
4084         break;
4085     case IPC_RMID:
4086     case SHM_LOCK:
4087     case SHM_UNLOCK:
4088         ret = get_errno(shmctl(shmid, cmd, NULL));
4089         break;
4090     }
4091 
4092     return ret;
4093 }
4094 
4095 #ifndef TARGET_FORCE_SHMLBA
4096 /* For most architectures, SHMLBA is the same as the page size;
4097  * some architectures have larger values, in which case they should
4098  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4099  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4100  * and defining its own value for SHMLBA.
4101  *
4102  * The kernel also permits SHMLBA to be set by the architecture to a
4103  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4104  * this means that addresses are rounded to the large size if
4105  * SHM_RND is set but addresses not aligned to that size are not rejected
4106  * as long as they are at least page-aligned. Since the only architecture
4107  * which uses this is ia64, this code doesn't provide for that oddity.
4108  */
4109 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4110 {
4111     return TARGET_PAGE_SIZE;
4112 }
4113 #endif
4114 
4115 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4116                                  int shmid, abi_ulong shmaddr, int shmflg)
4117 {
4118     abi_long raddr;
4119     void *host_raddr;
4120     struct shmid_ds shm_info;
4121     int i, ret;
4122     abi_ulong shmlba;
4123 
4124     /* find out the length of the shared memory segment */
4125     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4126     if (is_error(ret)) {
4127         /* can't get length, bail out */
4128         return ret;
4129     }
4130 
4131     shmlba = target_shmlba(cpu_env);
4132 
4133     if (shmaddr & (shmlba - 1)) {
4134         if (shmflg & SHM_RND) {
4135             shmaddr &= ~(shmlba - 1);
4136         } else {
4137             return -TARGET_EINVAL;
4138         }
4139     }
4140     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4141         return -TARGET_EINVAL;
4142     }
4143 
4144     mmap_lock();
4145 
4146     if (shmaddr)
4147         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4148     else {
4149         abi_ulong mmap_start;
4150 
4151         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4152         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4153 
4154         if (mmap_start == -1) {
4155             errno = ENOMEM;
4156             host_raddr = (void *)-1;
4157         } else
4158             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4159     }
4160 
4161     if (host_raddr == (void *)-1) {
4162         mmap_unlock();
4163         return get_errno((long)host_raddr);
4164     }
4165     raddr = h2g((unsigned long)host_raddr);
4166 
4167     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4168                    PAGE_VALID | PAGE_READ |
4169                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4170 
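    /* Record the attach so do_shmdt() can later find its size and clear the
     * page flags. */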
4171     for (i = 0; i < N_SHM_REGIONS; i++) {
4172         if (!shm_regions[i].in_use) {
4173             shm_regions[i].in_use = true;
4174             shm_regions[i].start = raddr;
4175             shm_regions[i].size = shm_info.shm_segsz;
4176             break;
4177         }
4178     }
4179 
4180     mmap_unlock();
4181     return raddr;
4182 
4183 }
4184 
4185 static inline abi_long do_shmdt(abi_ulong shmaddr)
4186 {
4187     int i;
4188     abi_long rv;
4189 
4190     mmap_lock();
4191 
4192     for (i = 0; i < N_SHM_REGIONS; ++i) {
4193         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4194             shm_regions[i].in_use = false;
4195             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4196             break;
4197         }
4198     }
4199     rv = get_errno(shmdt(g2h(shmaddr)));
4200 
4201     mmap_unlock();
4202 
4203     return rv;
4204 }
4205 
4206 #ifdef TARGET_NR_ipc
4207 /* ??? This only works with linear mappings.  */
4208 /* do_ipc() must return target values and target errnos. */
4209 static abi_long do_ipc(CPUArchState *cpu_env,
4210                        unsigned int call, abi_long first,
4211                        abi_long second, abi_long third,
4212                        abi_long ptr, abi_long fifth)
4213 {
4214     int version;
4215     abi_long ret = 0;
4216 
4217     version = call >> 16;
4218     call &= 0xffff;
4219 
4220     switch (call) {
4221     case IPCOP_semop:
4222         ret = do_semop(first, ptr, second);
4223         break;
4224 
4225     case IPCOP_semget:
4226         ret = get_errno(semget(first, second, third));
4227         break;
4228 
4229     case IPCOP_semctl: {
4230         /* The semun argument to semctl is passed by value, so dereference the
4231          * ptr argument. */
4232         abi_ulong atptr;
4233         get_user_ual(atptr, ptr);
4234         ret = do_semctl(first, second, third, atptr);
4235         break;
4236     }
4237 
4238     case IPCOP_msgget:
4239         ret = get_errno(msgget(first, second));
4240         break;
4241 
4242     case IPCOP_msgsnd:
4243         ret = do_msgsnd(first, ptr, second, third);
4244         break;
4245 
4246     case IPCOP_msgctl:
4247         ret = do_msgctl(first, second, ptr);
4248         break;
4249 
4250     case IPCOP_msgrcv:
4251         switch (version) {
4252         case 0:
4253             {
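                /* Version 0 callers pass msgp and msgtyp indirectly, via the
                 * kernel's struct ipc_kludge pointed to by ptr. */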
4254                 struct target_ipc_kludge {
4255                     abi_long msgp;
4256                     abi_long msgtyp;
4257                 } *tmp;
4258 
4259                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4260                     ret = -TARGET_EFAULT;
4261                     break;
4262                 }
4263 
4264                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4265 
4266                 unlock_user_struct(tmp, ptr, 0);
4267                 break;
4268             }
4269         default:
4270             ret = do_msgrcv(first, ptr, second, fifth, third);
4271         }
4272         break;
4273 
4274     case IPCOP_shmat:
4275         switch (version) {
4276         default:
4277         {
4278             abi_ulong raddr;
4279             raddr = do_shmat(cpu_env, first, ptr, second);
4280             if (is_error(raddr))
4281                 return get_errno(raddr);
4282             if (put_user_ual(raddr, third))
4283                 return -TARGET_EFAULT;
4284             break;
4285         }
4286         case 1:
4287             ret = -TARGET_EINVAL;
4288             break;
4289         }
4290         break;
4291     case IPCOP_shmdt:
4292         ret = do_shmdt(ptr);
4293         break;
4294 
4295     case IPCOP_shmget:
4296         /* IPC_* flag values are the same on all Linux platforms */
4297         ret = get_errno(shmget(first, second, third));
4298         break;
4299 
4300     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4301     case IPCOP_shmctl:
4302         ret = do_shmctl(first, second, ptr);
4303         break;
4304     default:
4305         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4306         ret = -TARGET_ENOSYS;
4307         break;
4308     }
4309     return ret;
4310 }
4311 #endif
4312 
4313 /* kernel structure types definitions */
4314 
4315 #define STRUCT(name, ...) STRUCT_ ## name,
4316 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4317 enum {
4318 #include "syscall_types.h"
4319 STRUCT_MAX
4320 };
4321 #undef STRUCT
4322 #undef STRUCT_SPECIAL
4323 
4324 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4325 #define STRUCT_SPECIAL(name)
4326 #include "syscall_types.h"
4327 #undef STRUCT
4328 #undef STRUCT_SPECIAL
4329 
4330 typedef struct IOCTLEntry IOCTLEntry;
4331 
4332 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4333                              int fd, int cmd, abi_long arg);
4334 
4335 struct IOCTLEntry {
4336     int target_cmd;
4337     unsigned int host_cmd;
4338     const char *name;
4339     int access;
4340     do_ioctl_fn *do_ioctl;
4341     const argtype arg_type[5];
4342 };
4343 
4344 #define IOC_R 0x0001
4345 #define IOC_W 0x0002
4346 #define IOC_RW (IOC_R | IOC_W)
4347 
4348 #define MAX_STRUCT_SIZE 4096
4349 
4350 #ifdef CONFIG_FIEMAP
4351 /* So fiemap access checks don't overflow on 32 bit systems.
4352  * This is very slightly smaller than the limit imposed by
4353  * the underlying kernel.
4354  */
4355 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4356                             / sizeof(struct fiemap_extent))
4357 
4358 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4359                                        int fd, int cmd, abi_long arg)
4360 {
4361     /* The parameter for this ioctl is a struct fiemap followed
4362      * by an array of struct fiemap_extent whose size is set
4363      * in fiemap->fm_extent_count. The array is filled in by the
4364      * ioctl.
4365      */
4366     int target_size_in, target_size_out;
4367     struct fiemap *fm;
4368     const argtype *arg_type = ie->arg_type;
4369     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4370     void *argptr, *p;
4371     abi_long ret;
4372     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4373     uint32_t outbufsz;
4374     int free_fm = 0;
4375 
4376     assert(arg_type[0] == TYPE_PTR);
4377     assert(ie->access == IOC_RW);
4378     arg_type++;
4379     target_size_in = thunk_type_size(arg_type, 0);
4380     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4381     if (!argptr) {
4382         return -TARGET_EFAULT;
4383     }
4384     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4385     unlock_user(argptr, arg, 0);
4386     fm = (struct fiemap *)buf_temp;
4387     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4388         return -TARGET_EINVAL;
4389     }
4390 
4391     outbufsz = sizeof (*fm) +
4392         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4393 
4394     if (outbufsz > MAX_STRUCT_SIZE) {
4395         /* We can't fit all the extents into the fixed size buffer.
4396          * Allocate one that is large enough and use it instead.
4397          */
4398         fm = g_try_malloc(outbufsz);
4399         if (!fm) {
4400             return -TARGET_ENOMEM;
4401         }
4402         memcpy(fm, buf_temp, sizeof(struct fiemap));
4403         free_fm = 1;
4404     }
4405     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4406     if (!is_error(ret)) {
4407         target_size_out = target_size_in;
4408         /* An extent_count of 0 means we were only counting the extents
4409          * so there are no structs to copy
4410          */
4411         if (fm->fm_extent_count != 0) {
4412             target_size_out += fm->fm_mapped_extents * extent_size;
4413         }
4414         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4415         if (!argptr) {
4416             ret = -TARGET_EFAULT;
4417         } else {
4418             /* Convert the struct fiemap */
4419             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4420             if (fm->fm_extent_count != 0) {
4421                 p = argptr + target_size_in;
4422                 /* ...and then all the struct fiemap_extents */
4423                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4424                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4425                                   THUNK_TARGET);
4426                     p += extent_size;
4427                 }
4428             }
4429             unlock_user(argptr, arg, target_size_out);
4430         }
4431     }
4432     if (free_fm) {
4433         g_free(fm);
4434     }
4435     return ret;
4436 }
4437 #endif
4438 
4439 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4440                                 int fd, int cmd, abi_long arg)
4441 {
4442     const argtype *arg_type = ie->arg_type;
4443     int target_size;
4444     void *argptr;
4445     int ret;
4446     struct ifconf *host_ifconf;
4447     uint32_t outbufsz;
4448     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4449     int target_ifreq_size;
4450     int nb_ifreq;
4451     int free_buf = 0;
4452     int i;
4453     int target_ifc_len;
4454     abi_long target_ifc_buf;
4455     int host_ifc_len;
4456     char *host_ifc_buf;
4457 
4458     assert(arg_type[0] == TYPE_PTR);
4459     assert(ie->access == IOC_RW);
4460 
4461     arg_type++;
4462     target_size = thunk_type_size(arg_type, 0);
4463 
4464     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4465     if (!argptr)
4466         return -TARGET_EFAULT;
4467     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4468     unlock_user(argptr, arg, 0);
4469 
4470     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4471     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4472     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4473 
4474     if (target_ifc_buf != 0) {
4475         target_ifc_len = host_ifconf->ifc_len;
4476         nb_ifreq = target_ifc_len / target_ifreq_size;
4477         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4478 
4479         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4480         if (outbufsz > MAX_STRUCT_SIZE) {
4481             /*
4482              * We can't fit all the ifreq entries into the fixed size buffer.
4483              * Allocate one that is large enough and use it instead.
4484              */
4485             host_ifconf = malloc(outbufsz);
4486             if (!host_ifconf) {
4487                 return -TARGET_ENOMEM;
4488             }
4489             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4490             free_buf = 1;
4491         }
4492         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4493 
4494         host_ifconf->ifc_len = host_ifc_len;
4495     } else {
4496       host_ifc_buf = NULL;
4497     }
4498     host_ifconf->ifc_buf = host_ifc_buf;
4499 
4500     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4501     if (!is_error(ret)) {
4502         /* convert host ifc_len to target ifc_len */
4503 
4504         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4505         target_ifc_len = nb_ifreq * target_ifreq_size;
4506         host_ifconf->ifc_len = target_ifc_len;
4507 
4508         /* restore target ifc_buf */
4509 
4510         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4511 
4512         /* copy struct ifconf to target user */
4513 
4514         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4515         if (!argptr)
4516             return -TARGET_EFAULT;
4517         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4518         unlock_user(argptr, arg, target_size);
4519 
4520         if (target_ifc_buf != 0) {
4521             /* copy ifreq[] to target user */
4522             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4523             for (i = 0; i < nb_ifreq ; i++) {
4524                 thunk_convert(argptr + i * target_ifreq_size,
4525                               host_ifc_buf + i * sizeof(struct ifreq),
4526                               ifreq_arg_type, THUNK_TARGET);
4527             }
4528             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4529         }
4530     }
4531 
4532     if (free_buf) {
4533         free(host_ifconf);
4534     }
4535 
4536     return ret;
4537 }
4538 
4539 #if defined(CONFIG_USBFS)
4540 #if HOST_LONG_BITS > 64
4541 #error USBDEVFS thunks do not support >64 bit hosts yet.
4542 #endif
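/* Pairs a host URB with the guest URB address and the locked guest buffer it
 * shadows while the URB is in flight. */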
4543 struct live_urb {
4544     uint64_t target_urb_adr;
4545     uint64_t target_buf_adr;
4546     char *target_buf_ptr;
4547     struct usbdevfs_urb host_urb;
4548 };
4549 
4550 static GHashTable *usbdevfs_urb_hashtable(void)
4551 {
4552     static GHashTable *urb_hashtable;
4553 
4554     if (!urb_hashtable) {
4555         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4556     }
4557     return urb_hashtable;
4558 }
4559 
4560 static void urb_hashtable_insert(struct live_urb *urb)
4561 {
4562     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4563     g_hash_table_insert(urb_hashtable, urb, urb);
4564 }
4565 
4566 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4567 {
4568     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4569     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4570 }
4571 
4572 static void urb_hashtable_remove(struct live_urb *urb)
4573 {
4574     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4575     g_hash_table_remove(urb_hashtable, urb);
4576 }
4577 
4578 static abi_long
4579 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4580                           int fd, int cmd, abi_long arg)
4581 {
4582     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4583     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4584     struct live_urb *lurb;
4585     void *argptr;
4586     uint64_t hurb;
4587     int target_size;
4588     uintptr_t target_urb_adr;
4589     abi_long ret;
4590 
4591     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4592 
4593     memset(buf_temp, 0, sizeof(uint64_t));
4594     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4595     if (is_error(ret)) {
4596         return ret;
4597     }
4598 
4599     memcpy(&hurb, buf_temp, sizeof(uint64_t));
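    /* The kernel hands back a pointer to host_urb; step back to the enclosing
     * live_urb (container_of style). */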
4600     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4601     if (!lurb->target_urb_adr) {
4602         return -TARGET_EFAULT;
4603     }
4604     urb_hashtable_remove(lurb);
4605     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4606         lurb->host_urb.buffer_length);
4607     lurb->target_buf_ptr = NULL;
4608 
4609     /* restore the guest buffer pointer */
4610     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4611 
4612     /* update the guest urb struct */
4613     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4614     if (!argptr) {
4615         g_free(lurb);
4616         return -TARGET_EFAULT;
4617     }
4618     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4619     unlock_user(argptr, lurb->target_urb_adr, target_size);
4620 
4621     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4622     /* write back the urb handle */
4623     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4624     if (!argptr) {
4625         g_free(lurb);
4626         return -TARGET_EFAULT;
4627     }
4628 
4629     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4630     target_urb_adr = lurb->target_urb_adr;
4631     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4632     unlock_user(argptr, arg, target_size);
4633 
4634     g_free(lurb);
4635     return ret;
4636 }
4637 
4638 static abi_long
4639 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4640                              uint8_t *buf_temp __attribute__((unused)),
4641                              int fd, int cmd, abi_long arg)
4642 {
4643     struct live_urb *lurb;
4644 
4645     /* map target address back to host URB with metadata. */
4646     lurb = urb_hashtable_lookup(arg);
4647     if (!lurb) {
4648         return -TARGET_EFAULT;
4649     }
4650     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4651 }
4652 
4653 static abi_long
4654 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4655                             int fd, int cmd, abi_long arg)
4656 {
4657     const argtype *arg_type = ie->arg_type;
4658     int target_size;
4659     abi_long ret;
4660     void *argptr;
4661     int rw_dir;
4662     struct live_urb *lurb;
4663 
4664     /*
4665      * Each submitted URB needs to map to a unique ID for the
4666      * kernel, and that unique ID needs to be a pointer to
4667      * host memory; hence we allocate a live_urb for each URB.
4668      * Isochronous transfers have a variable-length struct.
4669      */
4670     arg_type++;
4671     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4672 
4673     /* construct host copy of urb and metadata */
4674     lurb = g_try_malloc0(sizeof(struct live_urb));
4675     if (!lurb) {
4676         return -TARGET_ENOMEM;
4677     }
4678 
4679     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4680     if (!argptr) {
4681         g_free(lurb);
4682         return -TARGET_EFAULT;
4683     }
4684     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4685     unlock_user(argptr, arg, 0);
4686 
4687     lurb->target_urb_adr = arg;
4688     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4689 
4690     /* buffer space used depends on endpoint type so lock the entire buffer */
4691     /* control type urbs should check the buffer contents for true direction */
4692     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4693     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4694         lurb->host_urb.buffer_length, 1);
4695     if (lurb->target_buf_ptr == NULL) {
4696         g_free(lurb);
4697         return -TARGET_EFAULT;
4698     }
4699 
4700     /* update buffer pointer in host copy */
4701     lurb->host_urb.buffer = lurb->target_buf_ptr;
4702 
4703     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4704     if (is_error(ret)) {
4705         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4706         g_free(lurb);
4707     } else {
4708         urb_hashtable_insert(lurb);
4709     }
4710 
4711     return ret;
4712 }
4713 #endif /* CONFIG_USBFS */
4714 
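/*
 * Device-mapper ioctls pass a fixed struct dm_ioctl header followed by a
 * variable-sized payload at data_start.  buf_temp is too small for the
 * payload, so a larger buffer sized from the guest-supplied data_size is
 * allocated; command-specific input data is converted into it before the
 * host ioctl, and the command-specific results are converted back into
 * guest memory afterwards.
 */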
4715 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4716                             int cmd, abi_long arg)
4717 {
4718     void *argptr;
4719     struct dm_ioctl *host_dm;
4720     abi_long guest_data;
4721     uint32_t guest_data_size;
4722     int target_size;
4723     const argtype *arg_type = ie->arg_type;
4724     abi_long ret;
4725     void *big_buf = NULL;
4726     char *host_data;
4727 
4728     arg_type++;
4729     target_size = thunk_type_size(arg_type, 0);
4730     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4731     if (!argptr) {
4732         ret = -TARGET_EFAULT;
4733         goto out;
4734     }
4735     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4736     unlock_user(argptr, arg, 0);
4737 
4738     /* buf_temp is too small, so fetch things into a bigger buffer */
4739     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4740     memcpy(big_buf, buf_temp, target_size);
4741     buf_temp = big_buf;
4742     host_dm = big_buf;
4743 
4744     guest_data = arg + host_dm->data_start;
4745     if ((guest_data - arg) < 0) {
4746         ret = -TARGET_EINVAL;
4747         goto out;
4748     }
4749     guest_data_size = host_dm->data_size - host_dm->data_start;
4750     host_data = (char*)host_dm + host_dm->data_start;
4751 
4752     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4753     if (!argptr) {
4754         ret = -TARGET_EFAULT;
4755         goto out;
4756     }
4757 
4758     switch (ie->host_cmd) {
4759     case DM_REMOVE_ALL:
4760     case DM_LIST_DEVICES:
4761     case DM_DEV_CREATE:
4762     case DM_DEV_REMOVE:
4763     case DM_DEV_SUSPEND:
4764     case DM_DEV_STATUS:
4765     case DM_DEV_WAIT:
4766     case DM_TABLE_STATUS:
4767     case DM_TABLE_CLEAR:
4768     case DM_TABLE_DEPS:
4769     case DM_LIST_VERSIONS:
4770         /* no input data */
4771         break;
4772     case DM_DEV_RENAME:
4773     case DM_DEV_SET_GEOMETRY:
4774         /* data contains only strings */
4775         memcpy(host_data, argptr, guest_data_size);
4776         break;
4777     case DM_TARGET_MSG:
4778         memcpy(host_data, argptr, guest_data_size);
4779         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4780         break;
4781     case DM_TABLE_LOAD:
4782     {
4783         void *gspec = argptr;
4784         void *cur_data = host_data;
4785         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4786         int spec_size = thunk_type_size(arg_type, 0);
4787         int i;
4788 
4789         for (i = 0; i < host_dm->target_count; i++) {
4790             struct dm_target_spec *spec = cur_data;
4791             uint32_t next;
4792             int slen;
4793 
4794             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4795             slen = strlen((char*)gspec + spec_size) + 1;
4796             next = spec->next;
4797             spec->next = sizeof(*spec) + slen;
4798             strcpy((char*)&spec[1], gspec + spec_size);
4799             gspec += next;
4800             cur_data += spec->next;
4801         }
4802         break;
4803     }
4804     default:
4805         ret = -TARGET_EINVAL;
4806         unlock_user(argptr, guest_data, 0);
4807         goto out;
4808     }
4809     unlock_user(argptr, guest_data, 0);
4810 
4811     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4812     if (!is_error(ret)) {
4813         guest_data = arg + host_dm->data_start;
4814         guest_data_size = host_dm->data_size - host_dm->data_start;
4815         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4816         switch (ie->host_cmd) {
4817         case DM_REMOVE_ALL:
4818         case DM_DEV_CREATE:
4819         case DM_DEV_REMOVE:
4820         case DM_DEV_RENAME:
4821         case DM_DEV_SUSPEND:
4822         case DM_DEV_STATUS:
4823         case DM_TABLE_LOAD:
4824         case DM_TABLE_CLEAR:
4825         case DM_TARGET_MSG:
4826         case DM_DEV_SET_GEOMETRY:
4827             /* no return data */
4828             break;
4829         case DM_LIST_DEVICES:
4830         {
4831             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4832             uint32_t remaining_data = guest_data_size;
4833             void *cur_data = argptr;
4834             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4835             int nl_size = 12; /* can't use thunk_size due to alignment */
4836 
4837             while (1) {
4838                 uint32_t next = nl->next;
4839                 if (next) {
4840                     nl->next = nl_size + (strlen(nl->name) + 1);
4841                 }
4842                 if (remaining_data < nl->next) {
4843                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4844                     break;
4845                 }
4846                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4847                 strcpy(cur_data + nl_size, nl->name);
4848                 cur_data += nl->next;
4849                 remaining_data -= nl->next;
4850                 if (!next) {
4851                     break;
4852                 }
4853                 nl = (void*)nl + next;
4854             }
4855             break;
4856         }
4857         case DM_DEV_WAIT:
4858         case DM_TABLE_STATUS:
4859         {
4860             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4861             void *cur_data = argptr;
4862             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4863             int spec_size = thunk_type_size(arg_type, 0);
4864             int i;
4865 
4866             for (i = 0; i < host_dm->target_count; i++) {
4867                 uint32_t next = spec->next;
4868                 int slen = strlen((char*)&spec[1]) + 1;
4869                 spec->next = (cur_data - argptr) + spec_size + slen;
4870                 if (guest_data_size < spec->next) {
4871                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4872                     break;
4873                 }
4874                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4875                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4876                 cur_data = argptr + spec->next;
4877                 spec = (void*)host_dm + host_dm->data_start + next;
4878             }
4879             break;
4880         }
4881         case DM_TABLE_DEPS:
4882         {
4883             void *hdata = (void*)host_dm + host_dm->data_start;
4884             int count = *(uint32_t*)hdata;
4885             uint64_t *hdev = hdata + 8;
4886             uint64_t *gdev = argptr + 8;
4887             int i;
4888 
4889             *(uint32_t*)argptr = tswap32(count);
4890             for (i = 0; i < count; i++) {
4891                 *gdev = tswap64(*hdev);
4892                 gdev++;
4893                 hdev++;
4894             }
4895             break;
4896         }
4897         case DM_LIST_VERSIONS:
4898         {
4899             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4900             uint32_t remaining_data = guest_data_size;
4901             void *cur_data = argptr;
4902             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4903             int vers_size = thunk_type_size(arg_type, 0);
4904 
4905             while (1) {
4906                 uint32_t next = vers->next;
4907                 if (next) {
4908                     vers->next = vers_size + (strlen(vers->name) + 1);
4909                 }
4910                 if (remaining_data < vers->next) {
4911                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4912                     break;
4913                 }
4914                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4915                 strcpy(cur_data + vers_size, vers->name);
4916                 cur_data += vers->next;
4917                 remaining_data -= vers->next;
4918                 if (!next) {
4919                     break;
4920                 }
4921                 vers = (void*)vers + next;
4922             }
4923             break;
4924         }
4925         default:
4926             unlock_user(argptr, guest_data, 0);
4927             ret = -TARGET_EINVAL;
4928             goto out;
4929         }
4930         unlock_user(argptr, guest_data, guest_data_size);
4931 
4932         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4933         if (!argptr) {
4934             ret = -TARGET_EFAULT;
4935             goto out;
4936         }
4937         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4938         unlock_user(argptr, arg, target_size);
4939     }
4940 out:
4941     g_free(big_buf);
4942     return ret;
4943 }
4944 
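/*
 * BLKPG takes a struct blkpg_ioctl_arg whose data member points at a
 * struct blkpg_partition.  Only the add/delete partition operations are
 * handled: the outer struct is converted into buf_temp, the nested
 * partition struct into a local host copy, and the data pointer is
 * redirected to that copy before issuing the host ioctl.
 */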
4945 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4946                                int cmd, abi_long arg)
4947 {
4948     void *argptr;
4949     int target_size;
4950     const argtype *arg_type = ie->arg_type;
4951     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4952     abi_long ret;
4953 
4954     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4955     struct blkpg_partition host_part;
4956 
4957     /* Read and convert blkpg */
4958     arg_type++;
4959     target_size = thunk_type_size(arg_type, 0);
4960     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4961     if (!argptr) {
4962         ret = -TARGET_EFAULT;
4963         goto out;
4964     }
4965     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4966     unlock_user(argptr, arg, 0);
4967 
4968     switch (host_blkpg->op) {
4969     case BLKPG_ADD_PARTITION:
4970     case BLKPG_DEL_PARTITION:
4971         /* payload is struct blkpg_partition */
4972         break;
4973     default:
4974         /* Unknown opcode */
4975         ret = -TARGET_EINVAL;
4976         goto out;
4977     }
4978 
4979     /* Read and convert blkpg->data */
4980     arg = (abi_long)(uintptr_t)host_blkpg->data;
4981     target_size = thunk_type_size(part_arg_type, 0);
4982     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4983     if (!argptr) {
4984         ret = -TARGET_EFAULT;
4985         goto out;
4986     }
4987     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4988     unlock_user(argptr, arg, 0);
4989 
4990     /* Swizzle the data pointer to our local copy and call! */
4991     host_blkpg->data = &host_part;
4992     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4993 
4994 out:
4995     return ret;
4996 }
4997 
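/*
 * Handler for route ioctls such as SIOCADDRT/SIOCDELRT: the struct
 * rtentry argument has an rt_dev member that is a guest pointer to a
 * device-name string.  The struct is converted field by field so that
 * rt_dev can be replaced with a locked host copy of the string, which is
 * unlocked again once the host ioctl has completed.
 */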
4998 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4999                                 int fd, int cmd, abi_long arg)
5000 {
5001     const argtype *arg_type = ie->arg_type;
5002     const StructEntry *se;
5003     const argtype *field_types;
5004     const int *dst_offsets, *src_offsets;
5005     int target_size;
5006     void *argptr;
5007     abi_ulong *target_rt_dev_ptr = NULL;
5008     unsigned long *host_rt_dev_ptr = NULL;
5009     abi_long ret;
5010     int i;
5011 
5012     assert(ie->access == IOC_W);
5013     assert(*arg_type == TYPE_PTR);
5014     arg_type++;
5015     assert(*arg_type == TYPE_STRUCT);
5016     target_size = thunk_type_size(arg_type, 0);
5017     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5018     if (!argptr) {
5019         return -TARGET_EFAULT;
5020     }
5021     arg_type++;
5022     assert(*arg_type == (int)STRUCT_rtentry);
5023     se = struct_entries + *arg_type++;
5024     assert(se->convert[0] == NULL);
5025     /* convert struct here to be able to catch rt_dev string */
5026     field_types = se->field_types;
5027     dst_offsets = se->field_offsets[THUNK_HOST];
5028     src_offsets = se->field_offsets[THUNK_TARGET];
5029     for (i = 0; i < se->nb_fields; i++) {
5030         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5031             assert(*field_types == TYPE_PTRVOID);
5032             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5033             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5034             if (*target_rt_dev_ptr != 0) {
5035                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5036                                                   tswapal(*target_rt_dev_ptr));
5037                 if (!*host_rt_dev_ptr) {
5038                     unlock_user(argptr, arg, 0);
5039                     return -TARGET_EFAULT;
5040                 }
5041             } else {
5042                 *host_rt_dev_ptr = 0;
5043             }
5044             field_types++;
5045             continue;
5046         }
5047         field_types = thunk_convert(buf_temp + dst_offsets[i],
5048                                     argptr + src_offsets[i],
5049                                     field_types, THUNK_HOST);
5050     }
5051     unlock_user(argptr, arg, 0);
5052 
5053     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5054 
5055     assert(host_rt_dev_ptr != NULL);
5056     assert(target_rt_dev_ptr != NULL);
5057     if (*host_rt_dev_ptr != 0) {
5058         unlock_user((void *)*host_rt_dev_ptr,
5059                     *target_rt_dev_ptr, 0);
5060     }
5061     return ret;
5062 }
5063 
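/*
 * KDSIGACCEPT takes a signal number as its argument, which must be
 * translated from the guest signal numbering to the host numbering
 * before being handed to the host ioctl.
 */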
5064 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5065                                      int fd, int cmd, abi_long arg)
5066 {
5067     int sig = target_to_host_signal(arg);
5068     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5069 }
5070 
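/*
 * SIOCGSTAMP/SIOCGSTAMPNS handlers: the host ioctl always fills a native
 * struct timeval/timespec; the result is then copied back in either the
 * legacy layout or the 64-bit time layout, depending on whether the
 * guest used the _OLD form of the command or the newer variant.
 */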
5071 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5072                                     int fd, int cmd, abi_long arg)
5073 {
5074     struct timeval tv;
5075     abi_long ret;
5076 
5077     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5078     if (is_error(ret)) {
5079         return ret;
5080     }
5081 
5082     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5083         if (copy_to_user_timeval(arg, &tv)) {
5084             return -TARGET_EFAULT;
5085         }
5086     } else {
5087         if (copy_to_user_timeval64(arg, &tv)) {
5088             return -TARGET_EFAULT;
5089         }
5090     }
5091 
5092     return ret;
5093 }
5094 
5095 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5096                                       int fd, int cmd, abi_long arg)
5097 {
5098     struct timespec ts;
5099     abi_long ret;
5100 
5101     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5102     if (is_error(ret)) {
5103         return ret;
5104     }
5105 
5106     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5107         if (host_to_target_timespec(arg, &ts)) {
5108             return -TARGET_EFAULT;
5109         }
5110     } else {
5111         if (host_to_target_timespec64(arg, &ts)) {
5112             return -TARGET_EFAULT;
5113         }
5114     }
5115 
5116     return ret;
5117 }
5118 
5119 #ifdef TIOCGPTPEER
5120 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5121                                      int fd, int cmd, abi_long arg)
5122 {
5123     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5124     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5125 }
5126 #endif
5127 
5128 static IOCTLEntry ioctl_entries[] = {
5129 #define IOCTL(cmd, access, ...) \
5130     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5131 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5132     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5133 #define IOCTL_IGNORE(cmd) \
5134     { TARGET_ ## cmd, 0, #cmd },
5135 #include "ioctls.h"
5136     { 0, 0, },
5137 };
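/*
 * Each IOCTL() line in ioctls.h expands to one table entry holding the
 * target command number, the host command number, the name, the access
 * mode and the argument type description.  As a purely illustrative
 * (hypothetical) example, IOCTL(FOOIOC, IOC_W, MK_PTR(TYPE_INT)) would
 * expand to { TARGET_FOOIOC, FOOIOC, "FOOIOC", IOC_W, 0, { MK_PTR(TYPE_INT) } }.
 */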
5138 
5139 /* ??? Implement proper locking for ioctls.  */
5140 /* do_ioctl() must return target values and target errnos. */
5141 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5142 {
5143     const IOCTLEntry *ie;
5144     const argtype *arg_type;
5145     abi_long ret;
5146     uint8_t buf_temp[MAX_STRUCT_SIZE];
5147     int target_size;
5148     void *argptr;
5149 
5150     ie = ioctl_entries;
5151     for(;;) {
5152         if (ie->target_cmd == 0) {
5153             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5154             return -TARGET_ENOSYS;
5155         }
5156         if (ie->target_cmd == cmd)
5157             break;
5158         ie++;
5159     }
5160     arg_type = ie->arg_type;
5161     if (ie->do_ioctl) {
5162         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5163     } else if (!ie->host_cmd) {
5164         /* Some architectures define BSD ioctls in their headers
5165            that are not implemented in Linux.  */
5166         return -TARGET_ENOSYS;
5167     }
5168 
5169     switch(arg_type[0]) {
5170     case TYPE_NULL:
5171         /* no argument */
5172         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5173         break;
5174     case TYPE_PTRVOID:
5175     case TYPE_INT:
5176         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5177         break;
5178     case TYPE_PTR:
5179         arg_type++;
5180         target_size = thunk_type_size(arg_type, 0);
5181         switch(ie->access) {
5182         case IOC_R:
5183             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5184             if (!is_error(ret)) {
5185                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5186                 if (!argptr)
5187                     return -TARGET_EFAULT;
5188                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5189                 unlock_user(argptr, arg, target_size);
5190             }
5191             break;
5192         case IOC_W:
5193             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5194             if (!argptr)
5195                 return -TARGET_EFAULT;
5196             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5197             unlock_user(argptr, arg, 0);
5198             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5199             break;
5200         default:
5201         case IOC_RW:
5202             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5203             if (!argptr)
5204                 return -TARGET_EFAULT;
5205             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5206             unlock_user(argptr, arg, 0);
5207             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5208             if (!is_error(ret)) {
5209                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5210                 if (!argptr)
5211                     return -TARGET_EFAULT;
5212                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5213                 unlock_user(argptr, arg, target_size);
5214             }
5215             break;
5216         }
5217         break;
5218     default:
5219         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5220                  (long)cmd, arg_type[0]);
5221         ret = -TARGET_ENOSYS;
5222         break;
5223     }
5224     return ret;
5225 }
5226 
5227 static const bitmask_transtbl iflag_tbl[] = {
5228         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5229         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5230         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5231         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5232         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5233         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5234         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5235         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5236         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5237         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5238         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5239         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5240         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5241         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5242         { 0, 0, 0, 0 }
5243 };
5244 
5245 static const bitmask_transtbl oflag_tbl[] = {
5246 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5247 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5248 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5249 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5250 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5251 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5252 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5253 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5254 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5255 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5256 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5257 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5258 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5259 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5260 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5261 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5262 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5263 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5264 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5265 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5266 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5267 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5268 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5269 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5270 	{ 0, 0, 0, 0 }
5271 };
5272 
5273 static const bitmask_transtbl cflag_tbl[] = {
5274 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5275 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5276 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5277 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5278 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5279 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5280 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5281 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5282 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5283 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5284 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5285 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5286 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5287 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5288 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5289 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5290 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5291 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5292 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5293 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5294 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5295 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5296 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5297 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5298 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5299 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5300 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5301 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5302 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5303 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5304 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5305 	{ 0, 0, 0, 0 }
5306 };
5307 
5308 static const bitmask_transtbl lflag_tbl[] = {
5309 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5310 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5311 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5312 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5313 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5314 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5315 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5316 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5317 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5318 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5319 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5320 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5321 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5322 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5323 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5324 	{ 0, 0, 0, 0 }
5325 };
5326 
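/*
 * The termios converters below translate the iflag/oflag/cflag/lflag
 * bitmasks through the tables above and copy the control characters
 * one by one between the target and host c_cc layouts.
 */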
5327 static void target_to_host_termios (void *dst, const void *src)
5328 {
5329     struct host_termios *host = dst;
5330     const struct target_termios *target = src;
5331 
5332     host->c_iflag =
5333         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5334     host->c_oflag =
5335         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5336     host->c_cflag =
5337         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5338     host->c_lflag =
5339         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5340     host->c_line = target->c_line;
5341 
5342     memset(host->c_cc, 0, sizeof(host->c_cc));
5343     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5344     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5345     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5346     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5347     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5348     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5349     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5350     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5351     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5352     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5353     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5354     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5355     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5356     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5357     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5358     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5359     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5360 }
5361 
5362 static void host_to_target_termios (void *dst, const void *src)
5363 {
5364     struct target_termios *target = dst;
5365     const struct host_termios *host = src;
5366 
5367     target->c_iflag =
5368         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5369     target->c_oflag =
5370         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5371     target->c_cflag =
5372         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5373     target->c_lflag =
5374         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5375     target->c_line = host->c_line;
5376 
5377     memset(target->c_cc, 0, sizeof(target->c_cc));
5378     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5379     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5380     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5381     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5382     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5383     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5384     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5385     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5386     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5387     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5388     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5389     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5390     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5391     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5392     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5393     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5394     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5395 }
5396 
5397 static const StructEntry struct_termios_def = {
5398     .convert = { host_to_target_termios, target_to_host_termios },
5399     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5400     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5401 };
5402 
5403 static bitmask_transtbl mmap_flags_tbl[] = {
5404     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5405     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5406     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5407     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5408       MAP_ANONYMOUS, MAP_ANONYMOUS },
5409     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5410       MAP_GROWSDOWN, MAP_GROWSDOWN },
5411     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5412       MAP_DENYWRITE, MAP_DENYWRITE },
5413     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5414       MAP_EXECUTABLE, MAP_EXECUTABLE },
5415     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5416     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5417       MAP_NORESERVE, MAP_NORESERVE },
5418     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5419     /* MAP_STACK has been ignored by the kernel for quite some time.
5420        Recognize it for the target insofar as we do not want to pass
5421        it through to the host.  */
5422     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5423     { 0, 0, 0, 0 }
5424 };
5425 
5426 #if defined(TARGET_I386)
5427 
5428 /* NOTE: there is really one LDT for all the threads */
5429 static uint8_t *ldt_table;
5430 
5431 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5432 {
5433     int size;
5434     void *p;
5435 
5436     if (!ldt_table)
5437         return 0;
5438     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5439     if (size > bytecount)
5440         size = bytecount;
5441     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5442     if (!p)
5443         return -TARGET_EFAULT;
5444     /* ??? Should this be byteswapped?  */
5445     memcpy(p, ldt_table, size);
5446     unlock_user(p, ptr, size);
5447     return size;
5448 }
5449 
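/*
 * entry_1/entry_2 below are the low and high 32 bits of an x86 segment
 * descriptor, assembled from the user-supplied base, limit and flag bits
 * in the same way the Linux kernel does.
 */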
5450 /* XXX: add locking support */
5451 static abi_long write_ldt(CPUX86State *env,
5452                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5453 {
5454     struct target_modify_ldt_ldt_s ldt_info;
5455     struct target_modify_ldt_ldt_s *target_ldt_info;
5456     int seg_32bit, contents, read_exec_only, limit_in_pages;
5457     int seg_not_present, useable, lm;
5458     uint32_t *lp, entry_1, entry_2;
5459 
5460     if (bytecount != sizeof(ldt_info))
5461         return -TARGET_EINVAL;
5462     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5463         return -TARGET_EFAULT;
5464     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5465     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5466     ldt_info.limit = tswap32(target_ldt_info->limit);
5467     ldt_info.flags = tswap32(target_ldt_info->flags);
5468     unlock_user_struct(target_ldt_info, ptr, 0);
5469 
5470     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5471         return -TARGET_EINVAL;
5472     seg_32bit = ldt_info.flags & 1;
5473     contents = (ldt_info.flags >> 1) & 3;
5474     read_exec_only = (ldt_info.flags >> 3) & 1;
5475     limit_in_pages = (ldt_info.flags >> 4) & 1;
5476     seg_not_present = (ldt_info.flags >> 5) & 1;
5477     useable = (ldt_info.flags >> 6) & 1;
5478 #ifdef TARGET_ABI32
5479     lm = 0;
5480 #else
5481     lm = (ldt_info.flags >> 7) & 1;
5482 #endif
5483     if (contents == 3) {
5484         if (oldmode)
5485             return -TARGET_EINVAL;
5486         if (seg_not_present == 0)
5487             return -TARGET_EINVAL;
5488     }
5489     /* allocate the LDT */
5490     if (!ldt_table) {
5491         env->ldt.base = target_mmap(0,
5492                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5493                                     PROT_READ|PROT_WRITE,
5494                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5495         if (env->ldt.base == -1)
5496             return -TARGET_ENOMEM;
5497         memset(g2h(env->ldt.base), 0,
5498                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5499         env->ldt.limit = 0xffff;
5500         ldt_table = g2h(env->ldt.base);
5501     }
5502 
5503     /* NOTE: same code as Linux kernel */
5504     /* Allow LDTs to be cleared by the user. */
5505     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5506         if (oldmode ||
5507             (contents == 0		&&
5508              read_exec_only == 1	&&
5509              seg_32bit == 0		&&
5510              limit_in_pages == 0	&&
5511              seg_not_present == 1	&&
5512              useable == 0 )) {
5513             entry_1 = 0;
5514             entry_2 = 0;
5515             goto install;
5516         }
5517     }
5518 
5519     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5520         (ldt_info.limit & 0x0ffff);
5521     entry_2 = (ldt_info.base_addr & 0xff000000) |
5522         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5523         (ldt_info.limit & 0xf0000) |
5524         ((read_exec_only ^ 1) << 9) |
5525         (contents << 10) |
5526         ((seg_not_present ^ 1) << 15) |
5527         (seg_32bit << 22) |
5528         (limit_in_pages << 23) |
5529         (lm << 21) |
5530         0x7000;
5531     if (!oldmode)
5532         entry_2 |= (useable << 20);
5533 
5534     /* Install the new entry ...  */
5535 install:
5536     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5537     lp[0] = tswap32(entry_1);
5538     lp[1] = tswap32(entry_2);
5539     return 0;
5540 }
5541 
5542 /* specific and weird i386 syscalls */
5543 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5544                               unsigned long bytecount)
5545 {
5546     abi_long ret;
5547 
5548     switch (func) {
5549     case 0:
5550         ret = read_ldt(ptr, bytecount);
5551         break;
5552     case 1:
5553         ret = write_ldt(env, ptr, bytecount, 1);
5554         break;
5555     case 0x11:
5556         ret = write_ldt(env, ptr, bytecount, 0);
5557         break;
5558     default:
5559         ret = -TARGET_ENOSYS;
5560         break;
5561     }
5562     return ret;
5563 }
5564 
5565 #if defined(TARGET_I386) && defined(TARGET_ABI32)
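/*
 * set_thread_area: install a TLS descriptor into the GDT.  If the guest
 * passes entry_number == -1, a free TLS slot is picked and the chosen
 * index is written back to the guest structure.
 */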
5566 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5567 {
5568     uint64_t *gdt_table = g2h(env->gdt.base);
5569     struct target_modify_ldt_ldt_s ldt_info;
5570     struct target_modify_ldt_ldt_s *target_ldt_info;
5571     int seg_32bit, contents, read_exec_only, limit_in_pages;
5572     int seg_not_present, useable, lm;
5573     uint32_t *lp, entry_1, entry_2;
5574     int i;
5575 
5576     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5577     if (!target_ldt_info)
5578         return -TARGET_EFAULT;
5579     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5580     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5581     ldt_info.limit = tswap32(target_ldt_info->limit);
5582     ldt_info.flags = tswap32(target_ldt_info->flags);
5583     if (ldt_info.entry_number == -1) {
5584         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5585             if (gdt_table[i] == 0) {
5586                 ldt_info.entry_number = i;
5587                 target_ldt_info->entry_number = tswap32(i);
5588                 break;
5589             }
5590         }
5591     }
5592     unlock_user_struct(target_ldt_info, ptr, 1);
5593 
5594     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5595         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5596         return -TARGET_EINVAL;
5597     seg_32bit = ldt_info.flags & 1;
5598     contents = (ldt_info.flags >> 1) & 3;
5599     read_exec_only = (ldt_info.flags >> 3) & 1;
5600     limit_in_pages = (ldt_info.flags >> 4) & 1;
5601     seg_not_present = (ldt_info.flags >> 5) & 1;
5602     useable = (ldt_info.flags >> 6) & 1;
5603 #ifdef TARGET_ABI32
5604     lm = 0;
5605 #else
5606     lm = (ldt_info.flags >> 7) & 1;
5607 #endif
5608 
5609     if (contents == 3) {
5610         if (seg_not_present == 0)
5611             return -TARGET_EINVAL;
5612     }
5613 
5614     /* NOTE: same code as Linux kernel */
5615     /* Allow LDTs to be cleared by the user. */
5616     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5617         if ((contents == 0             &&
5618              read_exec_only == 1       &&
5619              seg_32bit == 0            &&
5620              limit_in_pages == 0       &&
5621              seg_not_present == 1      &&
5622              useable == 0 )) {
5623             entry_1 = 0;
5624             entry_2 = 0;
5625             goto install;
5626         }
5627     }
5628 
5629     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5630         (ldt_info.limit & 0x0ffff);
5631     entry_2 = (ldt_info.base_addr & 0xff000000) |
5632         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5633         (ldt_info.limit & 0xf0000) |
5634         ((read_exec_only ^ 1) << 9) |
5635         (contents << 10) |
5636         ((seg_not_present ^ 1) << 15) |
5637         (seg_32bit << 22) |
5638         (limit_in_pages << 23) |
5639         (useable << 20) |
5640         (lm << 21) |
5641         0x7000;
5642 
5643     /* Install the new entry ...  */
5644 install:
5645     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5646     lp[0] = tswap32(entry_1);
5647     lp[1] = tswap32(entry_2);
5648     return 0;
5649 }
5650 
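/*
 * get_thread_area: decode the GDT descriptor for the requested TLS slot
 * back into the base/limit/flags layout expected by the guest.
 */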
5651 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5652 {
5653     struct target_modify_ldt_ldt_s *target_ldt_info;
5654     uint64_t *gdt_table = g2h(env->gdt.base);
5655     uint32_t base_addr, limit, flags;
5656     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5657     int seg_not_present, useable, lm;
5658     uint32_t *lp, entry_1, entry_2;
5659 
5660     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5661     if (!target_ldt_info)
5662         return -TARGET_EFAULT;
5663     idx = tswap32(target_ldt_info->entry_number);
5664     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5665         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5666         unlock_user_struct(target_ldt_info, ptr, 1);
5667         return -TARGET_EINVAL;
5668     }
5669     lp = (uint32_t *)(gdt_table + idx);
5670     entry_1 = tswap32(lp[0]);
5671     entry_2 = tswap32(lp[1]);
5672 
5673     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5674     contents = (entry_2 >> 10) & 3;
5675     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5676     seg_32bit = (entry_2 >> 22) & 1;
5677     limit_in_pages = (entry_2 >> 23) & 1;
5678     useable = (entry_2 >> 20) & 1;
5679 #ifdef TARGET_ABI32
5680     lm = 0;
5681 #else
5682     lm = (entry_2 >> 21) & 1;
5683 #endif
5684     flags = (seg_32bit << 0) | (contents << 1) |
5685         (read_exec_only << 3) | (limit_in_pages << 4) |
5686         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5687     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5688     base_addr = (entry_1 >> 16) |
5689         (entry_2 & 0xff000000) |
5690         ((entry_2 & 0xff) << 16);
5691     target_ldt_info->base_addr = tswapal(base_addr);
5692     target_ldt_info->limit = tswap32(limit);
5693     target_ldt_info->flags = tswap32(flags);
5694     unlock_user_struct(target_ldt_info, ptr, 1);
5695     return 0;
5696 }
5697 #endif /* TARGET_I386 && TARGET_ABI32 */
5698 
5699 #ifndef TARGET_ABI32
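/*
 * arch_prctl: ARCH_SET_FS/GS store the new segment base in the CPU
 * state; ARCH_GET_FS/GS read it back into guest memory.
 */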
5700 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5701 {
5702     abi_long ret = 0;
5703     abi_ulong val;
5704     int idx;
5705 
5706     switch(code) {
5707     case TARGET_ARCH_SET_GS:
5708     case TARGET_ARCH_SET_FS:
5709         if (code == TARGET_ARCH_SET_GS)
5710             idx = R_GS;
5711         else
5712             idx = R_FS;
5713         cpu_x86_load_seg(env, idx, 0);
5714         env->segs[idx].base = addr;
5715         break;
5716     case TARGET_ARCH_GET_GS:
5717     case TARGET_ARCH_GET_FS:
5718         if (code == TARGET_ARCH_GET_GS)
5719             idx = R_GS;
5720         else
5721             idx = R_FS;
5722         val = env->segs[idx].base;
5723         if (put_user(val, addr, abi_ulong))
5724             ret = -TARGET_EFAULT;
5725         break;
5726     default:
5727         ret = -TARGET_EINVAL;
5728         break;
5729     }
5730     return ret;
5731 }
5732 #endif
5733 
5734 #endif /* defined(TARGET_I386) */
5735 
5736 #define NEW_STACK_SIZE 0x40000
5737 
5738 
5739 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5740 typedef struct {
5741     CPUArchState *env;
5742     pthread_mutex_t mutex;
5743     pthread_cond_t cond;
5744     pthread_t thread;
5745     uint32_t tid;
5746     abi_ulong child_tidptr;
5747     abi_ulong parent_tidptr;
5748     sigset_t sigmask;
5749 } new_thread_info;
5750 
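/*
 * Entry point of the host thread backing a CLONE_VM guest thread: it
 * registers with RCU and TCG, publishes its TID, restores the signal
 * mask, wakes the parent waiting on info->cond, then waits for the
 * parent to release clone_lock before entering the CPU loop.
 */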
5751 static void *clone_func(void *arg)
5752 {
5753     new_thread_info *info = arg;
5754     CPUArchState *env;
5755     CPUState *cpu;
5756     TaskState *ts;
5757 
5758     rcu_register_thread();
5759     tcg_register_thread();
5760     env = info->env;
5761     cpu = env_cpu(env);
5762     thread_cpu = cpu;
5763     ts = (TaskState *)cpu->opaque;
5764     info->tid = sys_gettid();
5765     task_settid(ts);
5766     if (info->child_tidptr)
5767         put_user_u32(info->tid, info->child_tidptr);
5768     if (info->parent_tidptr)
5769         put_user_u32(info->tid, info->parent_tidptr);
5770     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5771     /* Enable signals.  */
5772     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5773     /* Signal to the parent that we're ready.  */
5774     pthread_mutex_lock(&info->mutex);
5775     pthread_cond_broadcast(&info->cond);
5776     pthread_mutex_unlock(&info->mutex);
5777     /* Wait until the parent has finished initializing the tls state.  */
5778     pthread_mutex_lock(&clone_lock);
5779     pthread_mutex_unlock(&clone_lock);
5780     cpu_loop(env);
5781     /* never exits */
5782     return NULL;
5783 }
5784 
5785 /* do_fork() must return host values and target errnos (unlike most
5786    do_*() functions). */
5787 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5788                    abi_ulong parent_tidptr, target_ulong newtls,
5789                    abi_ulong child_tidptr)
5790 {
5791     CPUState *cpu = env_cpu(env);
5792     int ret;
5793     TaskState *ts;
5794     CPUState *new_cpu;
5795     CPUArchState *new_env;
5796     sigset_t sigmask;
5797 
5798     flags &= ~CLONE_IGNORED_FLAGS;
5799 
5800     /* Emulate vfork() with fork() */
5801     if (flags & CLONE_VFORK)
5802         flags &= ~(CLONE_VFORK | CLONE_VM);
5803 
5804     if (flags & CLONE_VM) {
5805         TaskState *parent_ts = (TaskState *)cpu->opaque;
5806         new_thread_info info;
5807         pthread_attr_t attr;
5808 
5809         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5810             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5811             return -TARGET_EINVAL;
5812         }
5813 
5814         ts = g_new0(TaskState, 1);
5815         init_task_state(ts);
5816 
5817         /* Grab a mutex so that thread setup appears atomic.  */
5818         pthread_mutex_lock(&clone_lock);
5819 
5820         /* we create a new CPU instance. */
5821         new_env = cpu_copy(env);
5822         /* Init regs that differ from the parent.  */
5823         cpu_clone_regs(new_env, newsp);
5824         new_cpu = env_cpu(new_env);
5825         new_cpu->opaque = ts;
5826         ts->bprm = parent_ts->bprm;
5827         ts->info = parent_ts->info;
5828         ts->signal_mask = parent_ts->signal_mask;
5829 
5830         if (flags & CLONE_CHILD_CLEARTID) {
5831             ts->child_tidptr = child_tidptr;
5832         }
5833 
5834         if (flags & CLONE_SETTLS) {
5835             cpu_set_tls (new_env, newtls);
5836         }
5837 
5838         memset(&info, 0, sizeof(info));
5839         pthread_mutex_init(&info.mutex, NULL);
5840         pthread_mutex_lock(&info.mutex);
5841         pthread_cond_init(&info.cond, NULL);
5842         info.env = new_env;
5843         if (flags & CLONE_CHILD_SETTID) {
5844             info.child_tidptr = child_tidptr;
5845         }
5846         if (flags & CLONE_PARENT_SETTID) {
5847             info.parent_tidptr = parent_tidptr;
5848         }
5849 
5850         ret = pthread_attr_init(&attr);
5851         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5852         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5853         /* It is not safe to deliver signals until the child has finished
5854            initializing, so temporarily block all signals.  */
5855         sigfillset(&sigmask);
5856         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5857         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5858 
5859         /* If this is our first additional thread, we need to ensure we
5860          * generate code for parallel execution and flush old translations.
5861          */
5862         if (!parallel_cpus) {
5863             parallel_cpus = true;
5864             tb_flush(cpu);
5865         }
5866 
5867         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5868         /* TODO: Free new CPU state if thread creation failed.  */
5869 
5870         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5871         pthread_attr_destroy(&attr);
5872         if (ret == 0) {
5873             /* Wait for the child to initialize.  */
5874             pthread_cond_wait(&info.cond, &info.mutex);
5875             ret = info.tid;
5876         } else {
5877             ret = -1;
5878         }
5879         pthread_mutex_unlock(&info.mutex);
5880         pthread_cond_destroy(&info.cond);
5881         pthread_mutex_destroy(&info.mutex);
5882         pthread_mutex_unlock(&clone_lock);
5883     } else {
5884         /* if CLONE_VM is not set, we consider it a fork */
5885         if (flags & CLONE_INVALID_FORK_FLAGS) {
5886             return -TARGET_EINVAL;
5887         }
5888 
5889         /* We can't support custom termination signals */
5890         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5891             return -TARGET_EINVAL;
5892         }
5893 
5894         if (block_signals()) {
5895             return -TARGET_ERESTARTSYS;
5896         }
5897 
5898         fork_start();
5899         ret = fork();
5900         if (ret == 0) {
5901             /* Child Process.  */
5902             cpu_clone_regs(env, newsp);
5903             fork_end(1);
5904             /* There is a race condition here.  The parent process could
5905                theoretically read the TID in the child process before the child
5906                tid is set.  This would require using either ptrace
5907                (not implemented) or having *_tidptr point at a shared memory
5908                mapping.  We can't repeat the spinlock hack used above because
5909                the child process gets its own copy of the lock.  */
5910             if (flags & CLONE_CHILD_SETTID)
5911                 put_user_u32(sys_gettid(), child_tidptr);
5912             if (flags & CLONE_PARENT_SETTID)
5913                 put_user_u32(sys_gettid(), parent_tidptr);
5914             ts = (TaskState *)cpu->opaque;
5915             if (flags & CLONE_SETTLS)
5916                 cpu_set_tls (env, newtls);
5917             if (flags & CLONE_CHILD_CLEARTID)
5918                 ts->child_tidptr = child_tidptr;
5919         } else {
5920             fork_end(0);
5921         }
5922     }
5923     return ret;
5924 }
5925 
5926 /* warning: doesn't handle Linux-specific flags... */
5927 static int target_to_host_fcntl_cmd(int cmd)
5928 {
5929     int ret;
5930 
5931     switch(cmd) {
5932     case TARGET_F_DUPFD:
5933     case TARGET_F_GETFD:
5934     case TARGET_F_SETFD:
5935     case TARGET_F_GETFL:
5936     case TARGET_F_SETFL:
5937         ret = cmd;
5938         break;
5939     case TARGET_F_GETLK:
5940         ret = F_GETLK64;
5941         break;
5942     case TARGET_F_SETLK:
5943         ret = F_SETLK64;
5944         break;
5945     case TARGET_F_SETLKW:
5946         ret = F_SETLKW64;
5947         break;
5948     case TARGET_F_GETOWN:
5949         ret = F_GETOWN;
5950         break;
5951     case TARGET_F_SETOWN:
5952         ret = F_SETOWN;
5953         break;
5954     case TARGET_F_GETSIG:
5955         ret = F_GETSIG;
5956         break;
5957     case TARGET_F_SETSIG:
5958         ret = F_SETSIG;
5959         break;
5960 #if TARGET_ABI_BITS == 32
5961     case TARGET_F_GETLK64:
5962         ret = F_GETLK64;
5963         break;
5964     case TARGET_F_SETLK64:
5965         ret = F_SETLK64;
5966         break;
5967     case TARGET_F_SETLKW64:
5968         ret = F_SETLKW64;
5969         break;
5970 #endif
5971     case TARGET_F_SETLEASE:
5972         ret = F_SETLEASE;
5973         break;
5974     case TARGET_F_GETLEASE:
5975         ret = F_GETLEASE;
5976         break;
5977 #ifdef F_DUPFD_CLOEXEC
5978     case TARGET_F_DUPFD_CLOEXEC:
5979         ret = F_DUPFD_CLOEXEC;
5980         break;
5981 #endif
5982     case TARGET_F_NOTIFY:
5983         ret = F_NOTIFY;
5984         break;
5985 #ifdef F_GETOWN_EX
5986     case TARGET_F_GETOWN_EX:
5987         ret = F_GETOWN_EX;
5988         break;
5989 #endif
5990 #ifdef F_SETOWN_EX
5991     case TARGET_F_SETOWN_EX:
5992         ret = F_SETOWN_EX;
5993         break;
5994 #endif
5995 #ifdef F_SETPIPE_SZ
5996     case TARGET_F_SETPIPE_SZ:
5997         ret = F_SETPIPE_SZ;
5998         break;
5999     case TARGET_F_GETPIPE_SZ:
6000         ret = F_GETPIPE_SZ;
6001         break;
6002 #endif
6003     default:
6004         ret = -TARGET_EINVAL;
6005         break;
6006     }
6007 
6008 #if defined(__powerpc64__)
6009     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6010      * which are not supported by the kernel. The glibc fcntl wrapper
6011      * adjusts them to 5, 6 and 7 before making the syscall(). Since we
6012      * make the syscall directly, adjust to what the kernel supports.
6013      */
6014     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6015         ret -= F_GETLK64 - 5;
6016     }
6017 #endif
6018 
6019     return ret;
6020 }
6021 
6022 #define FLOCK_TRANSTBL \
6023     switch (type) { \
6024     TRANSTBL_CONVERT(F_RDLCK); \
6025     TRANSTBL_CONVERT(F_WRLCK); \
6026     TRANSTBL_CONVERT(F_UNLCK); \
6027     TRANSTBL_CONVERT(F_EXLCK); \
6028     TRANSTBL_CONVERT(F_SHLCK); \
6029     }
6030 
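/*
 * FLOCK_TRANSTBL expands to a switch over the lock types; the two
 * converters below instantiate it with opposite TRANSTBL_CONVERT
 * definitions to map l_type values in either direction.
 */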
6031 static int target_to_host_flock(int type)
6032 {
6033 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6034     FLOCK_TRANSTBL
6035 #undef  TRANSTBL_CONVERT
6036     return -TARGET_EINVAL;
6037 }
6038 
6039 static int host_to_target_flock(int type)
6040 {
6041 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6042     FLOCK_TRANSTBL
6043 #undef  TRANSTBL_CONVERT
6044     /* if we don't know how to convert the value coming
6045      * from the host we copy it to the target field as-is
6046      */
6047     return type;
6048 }
6049 
6050 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6051                                             abi_ulong target_flock_addr)
6052 {
6053     struct target_flock *target_fl;
6054     int l_type;
6055 
6056     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6057         return -TARGET_EFAULT;
6058     }
6059 
6060     __get_user(l_type, &target_fl->l_type);
6061     l_type = target_to_host_flock(l_type);
6062     if (l_type < 0) {
6063         return l_type;
6064     }
6065     fl->l_type = l_type;
6066     __get_user(fl->l_whence, &target_fl->l_whence);
6067     __get_user(fl->l_start, &target_fl->l_start);
6068     __get_user(fl->l_len, &target_fl->l_len);
6069     __get_user(fl->l_pid, &target_fl->l_pid);
6070     unlock_user_struct(target_fl, target_flock_addr, 0);
6071     return 0;
6072 }
6073 
6074 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6075                                           const struct flock64 *fl)
6076 {
6077     struct target_flock *target_fl;
6078     short l_type;
6079 
6080     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6081         return -TARGET_EFAULT;
6082     }
6083 
6084     l_type = host_to_target_flock(fl->l_type);
6085     __put_user(l_type, &target_fl->l_type);
6086     __put_user(fl->l_whence, &target_fl->l_whence);
6087     __put_user(fl->l_start, &target_fl->l_start);
6088     __put_user(fl->l_len, &target_fl->l_len);
6089     __put_user(fl->l_pid, &target_fl->l_pid);
6090     unlock_user_struct(target_fl, target_flock_addr, 1);
6091     return 0;
6092 }
6093 
6094 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6095 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6096 
6097 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6098 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6099                                                    abi_ulong target_flock_addr)
6100 {
6101     struct target_oabi_flock64 *target_fl;
6102     int l_type;
6103 
6104     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6105         return -TARGET_EFAULT;
6106     }
6107 
6108     __get_user(l_type, &target_fl->l_type);
6109     l_type = target_to_host_flock(l_type);
6110     if (l_type < 0) {
6111         return l_type;
6112     }
6113     fl->l_type = l_type;
6114     __get_user(fl->l_whence, &target_fl->l_whence);
6115     __get_user(fl->l_start, &target_fl->l_start);
6116     __get_user(fl->l_len, &target_fl->l_len);
6117     __get_user(fl->l_pid, &target_fl->l_pid);
6118     unlock_user_struct(target_fl, target_flock_addr, 0);
6119     return 0;
6120 }
6121 
6122 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6123                                                  const struct flock64 *fl)
6124 {
6125     struct target_oabi_flock64 *target_fl;
6126     short l_type;
6127 
6128     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6129         return -TARGET_EFAULT;
6130     }
6131 
6132     l_type = host_to_target_flock(fl->l_type);
6133     __put_user(l_type, &target_fl->l_type);
6134     __put_user(fl->l_whence, &target_fl->l_whence);
6135     __put_user(fl->l_start, &target_fl->l_start);
6136     __put_user(fl->l_len, &target_fl->l_len);
6137     __put_user(fl->l_pid, &target_fl->l_pid);
6138     unlock_user_struct(target_fl, target_flock_addr, 1);
6139     return 0;
6140 }
6141 #endif
6142 
6143 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6144                                               abi_ulong target_flock_addr)
6145 {
6146     struct target_flock64 *target_fl;
6147     int l_type;
6148 
6149     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6150         return -TARGET_EFAULT;
6151     }
6152 
6153     __get_user(l_type, &target_fl->l_type);
6154     l_type = target_to_host_flock(l_type);
6155     if (l_type < 0) {
6156         return l_type;
6157     }
6158     fl->l_type = l_type;
6159     __get_user(fl->l_whence, &target_fl->l_whence);
6160     __get_user(fl->l_start, &target_fl->l_start);
6161     __get_user(fl->l_len, &target_fl->l_len);
6162     __get_user(fl->l_pid, &target_fl->l_pid);
6163     unlock_user_struct(target_fl, target_flock_addr, 0);
6164     return 0;
6165 }
6166 
6167 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6168                                             const struct flock64 *fl)
6169 {
6170     struct target_flock64 *target_fl;
6171     short l_type;
6172 
6173     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6174         return -TARGET_EFAULT;
6175     }
6176 
6177     l_type = host_to_target_flock(fl->l_type);
6178     __put_user(l_type, &target_fl->l_type);
6179     __put_user(fl->l_whence, &target_fl->l_whence);
6180     __put_user(fl->l_start, &target_fl->l_start);
6181     __put_user(fl->l_len, &target_fl->l_len);
6182     __put_user(fl->l_pid, &target_fl->l_pid);
6183     unlock_user_struct(target_fl, target_flock_addr, 1);
6184     return 0;
6185 }
6186 
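     /* Common fcntl() implementation: translate the target command and any
      * flock/f_owner_ex structures into host format, issue the host fcntl(),
      * and convert the results (GETLK data, GETFL flags) back for the guest.
      */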
6187 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6188 {
6189     struct flock64 fl64;
6190 #ifdef F_GETOWN_EX
6191     struct f_owner_ex fox;
6192     struct target_f_owner_ex *target_fox;
6193 #endif
6194     abi_long ret;
6195     int host_cmd = target_to_host_fcntl_cmd(cmd);
6196 
6197     if (host_cmd == -TARGET_EINVAL)
6198 	    return host_cmd;
6199 
6200     switch(cmd) {
6201     case TARGET_F_GETLK:
6202         ret = copy_from_user_flock(&fl64, arg);
6203         if (ret) {
6204             return ret;
6205         }
6206         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6207         if (ret == 0) {
6208             ret = copy_to_user_flock(arg, &fl64);
6209         }
6210         break;
6211 
6212     case TARGET_F_SETLK:
6213     case TARGET_F_SETLKW:
6214         ret = copy_from_user_flock(&fl64, arg);
6215         if (ret) {
6216             return ret;
6217         }
6218         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6219         break;
6220 
6221     case TARGET_F_GETLK64:
6222         ret = copy_from_user_flock64(&fl64, arg);
6223         if (ret) {
6224             return ret;
6225         }
6226         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6227         if (ret == 0) {
6228             ret = copy_to_user_flock64(arg, &fl64);
6229         }
6230         break;
6231     case TARGET_F_SETLK64:
6232     case TARGET_F_SETLKW64:
6233         ret = copy_from_user_flock64(&fl64, arg);
6234         if (ret) {
6235             return ret;
6236         }
6237         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6238         break;
6239 
6240     case TARGET_F_GETFL:
6241         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6242         if (ret >= 0) {
6243             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6244         }
6245         break;
6246 
6247     case TARGET_F_SETFL:
6248         ret = get_errno(safe_fcntl(fd, host_cmd,
6249                                    target_to_host_bitmask(arg,
6250                                                           fcntl_flags_tbl)));
6251         break;
6252 
6253 #ifdef F_GETOWN_EX
6254     case TARGET_F_GETOWN_EX:
6255         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6256         if (ret >= 0) {
6257             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6258                 return -TARGET_EFAULT;
6259             target_fox->type = tswap32(fox.type);
6260             target_fox->pid = tswap32(fox.pid);
6261             unlock_user_struct(target_fox, arg, 1);
6262         }
6263         break;
6264 #endif
6265 
6266 #ifdef F_SETOWN_EX
6267     case TARGET_F_SETOWN_EX:
6268         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6269             return -TARGET_EFAULT;
6270         fox.type = tswap32(target_fox->type);
6271         fox.pid = tswap32(target_fox->pid);
6272         unlock_user_struct(target_fox, arg, 0);
6273         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6274         break;
6275 #endif
6276 
6277     case TARGET_F_SETOWN:
6278     case TARGET_F_GETOWN:
6279     case TARGET_F_SETSIG:
6280     case TARGET_F_GETSIG:
6281     case TARGET_F_SETLEASE:
6282     case TARGET_F_GETLEASE:
6283     case TARGET_F_SETPIPE_SZ:
6284     case TARGET_F_GETPIPE_SZ:
6285         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6286         break;
6287 
6288     default:
6289         ret = get_errno(safe_fcntl(fd, cmd, arg));
6290         break;
6291     }
6292     return ret;
6293 }
6294 
6295 #ifdef USE_UID16
6296 
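     /* Helpers for targets that still use the legacy 16-bit uid/gid ABI:
      * wide host IDs are clamped to the 16-bit overflow value 65534, and a
      * guest 0xffff is widened back to -1 so that "unchanged" keeps its
      * meaning in the set*uid family of calls.
      */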
6297 static inline int high2lowuid(int uid)
6298 {
6299     if (uid > 65535)
6300         return 65534;
6301     else
6302         return uid;
6303 }
6304 
6305 static inline int high2lowgid(int gid)
6306 {
6307     if (gid > 65535)
6308         return 65534;
6309     else
6310         return gid;
6311 }
6312 
6313 static inline int low2highuid(int uid)
6314 {
6315     if ((int16_t)uid == -1)
6316         return -1;
6317     else
6318         return uid;
6319 }
6320 
6321 static inline int low2highgid(int gid)
6322 {
6323     if ((int16_t)gid == -1)
6324         return -1;
6325     else
6326         return gid;
6327 }
6328 static inline int tswapid(int id)
6329 {
6330     return tswap16(id);
6331 }
6332 
6333 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6334 
6335 #else /* !USE_UID16 */
6336 static inline int high2lowuid(int uid)
6337 {
6338     return uid;
6339 }
6340 static inline int high2lowgid(int gid)
6341 {
6342     return gid;
6343 }
6344 static inline int low2highuid(int uid)
6345 {
6346     return uid;
6347 }
6348 static inline int low2highgid(int gid)
6349 {
6350     return gid;
6351 }
6352 static inline int tswapid(int id)
6353 {
6354     return tswap32(id);
6355 }
6356 
6357 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6358 
6359 #endif /* USE_UID16 */
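     /* Illustrative example: on a USE_UID16 target, a host uid of 70000 is
      * reported to the guest as high2lowuid(70000) == 65534, while a guest
      * value of 0xffff comes back from low2highuid() as -1.
      */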
6360 
6361 /* We must do direct syscalls for setting UID/GID, because we want to
6362  * implement the Linux system call semantics of "change only for this thread",
6363  * not the libc/POSIX semantics of "change for all threads in process".
6364  * (See http://ewontfix.com/17/ for more details.)
6365  * We use the 32-bit version of the syscalls if present; if it is not
6366  * then either the host architecture supports 32-bit UIDs natively with
6367  * the standard syscall, or the 16-bit UID is the best we can do.
6368  */
6369 #ifdef __NR_setuid32
6370 #define __NR_sys_setuid __NR_setuid32
6371 #else
6372 #define __NR_sys_setuid __NR_setuid
6373 #endif
6374 #ifdef __NR_setgid32
6375 #define __NR_sys_setgid __NR_setgid32
6376 #else
6377 #define __NR_sys_setgid __NR_setgid
6378 #endif
6379 #ifdef __NR_setresuid32
6380 #define __NR_sys_setresuid __NR_setresuid32
6381 #else
6382 #define __NR_sys_setresuid __NR_setresuid
6383 #endif
6384 #ifdef __NR_setresgid32
6385 #define __NR_sys_setresgid __NR_setresgid32
6386 #else
6387 #define __NR_sys_setresgid __NR_setresgid
6388 #endif
6389 
6390 _syscall1(int, sys_setuid, uid_t, uid)
6391 _syscall1(int, sys_setgid, gid_t, gid)
6392 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6393 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6394 
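     /* One-time setup: register the struct layouts from syscall_types.h with
      * the thunk code, build the reverse errno table, and patch the size
      * field of ioctl numbers whose argument size is only known at run time.
      */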
6395 void syscall_init(void)
6396 {
6397     IOCTLEntry *ie;
6398     const argtype *arg_type;
6399     int size;
6400     int i;
6401 
6402     thunk_init(STRUCT_MAX);
6403 
6404 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6405 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6406 #include "syscall_types.h"
6407 #undef STRUCT
6408 #undef STRUCT_SPECIAL
6409 
6410     /* Build target_to_host_errno_table[] from
6411      * host_to_target_errno_table[]. */
6412     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6413         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6414     }
6415 
6416     /* we patch the ioctl size if necessary. We rely on the fact that
6417        no ioctl has all the bits at '1' in the size field */
6418     ie = ioctl_entries;
6419     while (ie->target_cmd != 0) {
6420         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6421             TARGET_IOC_SIZEMASK) {
6422             arg_type = ie->arg_type;
6423             if (arg_type[0] != TYPE_PTR) {
6424                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6425                         ie->target_cmd);
6426                 exit(1);
6427             }
6428             arg_type++;
6429             size = thunk_type_size(arg_type, 0);
6430             ie->target_cmd = (ie->target_cmd &
6431                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6432                 (size << TARGET_IOC_SIZESHIFT);
6433         }
6434 
6435         /* automatic consistency check if same arch */
6436 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6437     (defined(__x86_64__) && defined(TARGET_X86_64))
6438         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6439             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6440                     ie->name, ie->target_cmd, ie->host_cmd);
6441         }
6442 #endif
6443         ie++;
6444     }
6445 }
6446 
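     /* 32-bit ABIs pass 64-bit file offsets as a pair of registers; assemble
      * the two halves into a uint64_t in the order required by the target's
      * endianness.  On 64-bit ABIs the first argument already holds the
      * whole value.
      */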
6447 #if TARGET_ABI_BITS == 32
6448 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6449 {
6450 #ifdef TARGET_WORDS_BIGENDIAN
6451     return ((uint64_t)word0 << 32) | word1;
6452 #else
6453     return ((uint64_t)word1 << 32) | word0;
6454 #endif
6455 }
6456 #else /* TARGET_ABI_BITS == 32 */
6457 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6458 {
6459     return word0;
6460 }
6461 #endif /* TARGET_ABI_BITS != 32 */
6462 
6463 #ifdef TARGET_NR_truncate64
6464 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6465                                          abi_long arg2,
6466                                          abi_long arg3,
6467                                          abi_long arg4)
6468 {
6469     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6470         arg2 = arg3;
6471         arg3 = arg4;
6472     }
6473     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6474 }
6475 #endif
6476 
6477 #ifdef TARGET_NR_ftruncate64
6478 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6479                                           abi_long arg2,
6480                                           abi_long arg3,
6481                                           abi_long arg4)
6482 {
6483     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6484         arg2 = arg3;
6485         arg3 = arg4;
6486     }
6487     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6488 }
6489 #endif
6490 
6491 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6492                                                  abi_ulong target_addr)
6493 {
6494     struct target_itimerspec *target_itspec;
6495 
6496     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6497         return -TARGET_EFAULT;
6498     }
6499 
6500     host_itspec->it_interval.tv_sec =
6501                             tswapal(target_itspec->it_interval.tv_sec);
6502     host_itspec->it_interval.tv_nsec =
6503                             tswapal(target_itspec->it_interval.tv_nsec);
6504     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6505     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6506 
6507     unlock_user_struct(target_itspec, target_addr, 1);
6508     return 0;
6509 }
6510 
6511 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6512                                                struct itimerspec *host_its)
6513 {
6514     struct target_itimerspec *target_itspec;
6515 
6516     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6517         return -TARGET_EFAULT;
6518     }
6519 
6520     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6521     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6522 
6523     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6524     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6525 
6526     unlock_user_struct(target_itspec, target_addr, 0);
6527     return 0;
6528 }
6529 
6530 static inline abi_long target_to_host_timex(struct timex *host_tx,
6531                                             abi_long target_addr)
6532 {
6533     struct target_timex *target_tx;
6534 
6535     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6536         return -TARGET_EFAULT;
6537     }
6538 
6539     __get_user(host_tx->modes, &target_tx->modes);
6540     __get_user(host_tx->offset, &target_tx->offset);
6541     __get_user(host_tx->freq, &target_tx->freq);
6542     __get_user(host_tx->maxerror, &target_tx->maxerror);
6543     __get_user(host_tx->esterror, &target_tx->esterror);
6544     __get_user(host_tx->status, &target_tx->status);
6545     __get_user(host_tx->constant, &target_tx->constant);
6546     __get_user(host_tx->precision, &target_tx->precision);
6547     __get_user(host_tx->tolerance, &target_tx->tolerance);
6548     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6549     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6550     __get_user(host_tx->tick, &target_tx->tick);
6551     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6552     __get_user(host_tx->jitter, &target_tx->jitter);
6553     __get_user(host_tx->shift, &target_tx->shift);
6554     __get_user(host_tx->stabil, &target_tx->stabil);
6555     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6556     __get_user(host_tx->calcnt, &target_tx->calcnt);
6557     __get_user(host_tx->errcnt, &target_tx->errcnt);
6558     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6559     __get_user(host_tx->tai, &target_tx->tai);
6560 
6561     unlock_user_struct(target_tx, target_addr, 0);
6562     return 0;
6563 }
6564 
6565 static inline abi_long host_to_target_timex(abi_long target_addr,
6566                                             struct timex *host_tx)
6567 {
6568     struct target_timex *target_tx;
6569 
6570     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6571         return -TARGET_EFAULT;
6572     }
6573 
6574     __put_user(host_tx->modes, &target_tx->modes);
6575     __put_user(host_tx->offset, &target_tx->offset);
6576     __put_user(host_tx->freq, &target_tx->freq);
6577     __put_user(host_tx->maxerror, &target_tx->maxerror);
6578     __put_user(host_tx->esterror, &target_tx->esterror);
6579     __put_user(host_tx->status, &target_tx->status);
6580     __put_user(host_tx->constant, &target_tx->constant);
6581     __put_user(host_tx->precision, &target_tx->precision);
6582     __put_user(host_tx->tolerance, &target_tx->tolerance);
6583     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6584     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6585     __put_user(host_tx->tick, &target_tx->tick);
6586     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6587     __put_user(host_tx->jitter, &target_tx->jitter);
6588     __put_user(host_tx->shift, &target_tx->shift);
6589     __put_user(host_tx->stabil, &target_tx->stabil);
6590     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6591     __put_user(host_tx->calcnt, &target_tx->calcnt);
6592     __put_user(host_tx->errcnt, &target_tx->errcnt);
6593     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6594     __put_user(host_tx->tai, &target_tx->tai);
6595 
6596     unlock_user_struct(target_tx, target_addr, 1);
6597     return 0;
6598 }
6599 
6600 
6601 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6602                                                abi_ulong target_addr)
6603 {
6604     struct target_sigevent *target_sevp;
6605 
6606     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6607         return -TARGET_EFAULT;
6608     }
6609 
6610     /* This union is awkward on 64 bit systems because it has a 32 bit
6611      * integer and a pointer in it; we follow the conversion approach
6612      * used for handling sigval types in signal.c so the guest should get
6613      * the correct value back even if we did a 64 bit byteswap and it's
6614      * using the 32 bit integer.
6615      */
6616     host_sevp->sigev_value.sival_ptr =
6617         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6618     host_sevp->sigev_signo =
6619         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6620     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6621     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6622 
6623     unlock_user_struct(target_sevp, target_addr, 1);
6624     return 0;
6625 }
6626 
6627 #if defined(TARGET_NR_mlockall)
6628 static inline int target_to_host_mlockall_arg(int arg)
6629 {
6630     int result = 0;
6631 
6632     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6633         result |= MCL_CURRENT;
6634     }
6635     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6636         result |= MCL_FUTURE;
6637     }
6638     return result;
6639 }
6640 #endif
6641 
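     /* Write a host struct stat out in the guest's stat64 layout.  On ARM the
      * EABI and OABI layouts differ, so the EABI variant is handled first and
      * everything else falls through to the generic target_stat64 path.
      */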
6642 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6643      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6644      defined(TARGET_NR_newfstatat))
6645 static inline abi_long host_to_target_stat64(void *cpu_env,
6646                                              abi_ulong target_addr,
6647                                              struct stat *host_st)
6648 {
6649 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6650     if (((CPUARMState *)cpu_env)->eabi) {
6651         struct target_eabi_stat64 *target_st;
6652 
6653         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6654             return -TARGET_EFAULT;
6655         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6656         __put_user(host_st->st_dev, &target_st->st_dev);
6657         __put_user(host_st->st_ino, &target_st->st_ino);
6658 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6659         __put_user(host_st->st_ino, &target_st->__st_ino);
6660 #endif
6661         __put_user(host_st->st_mode, &target_st->st_mode);
6662         __put_user(host_st->st_nlink, &target_st->st_nlink);
6663         __put_user(host_st->st_uid, &target_st->st_uid);
6664         __put_user(host_st->st_gid, &target_st->st_gid);
6665         __put_user(host_st->st_rdev, &target_st->st_rdev);
6666         __put_user(host_st->st_size, &target_st->st_size);
6667         __put_user(host_st->st_blksize, &target_st->st_blksize);
6668         __put_user(host_st->st_blocks, &target_st->st_blocks);
6669         __put_user(host_st->st_atime, &target_st->target_st_atime);
6670         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6671         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6672 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6673         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6674         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6675         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6676 #endif
6677         unlock_user_struct(target_st, target_addr, 1);
6678     } else
6679 #endif
6680     {
6681 #if defined(TARGET_HAS_STRUCT_STAT64)
6682         struct target_stat64 *target_st;
6683 #else
6684         struct target_stat *target_st;
6685 #endif
6686 
6687         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6688             return -TARGET_EFAULT;
6689         memset(target_st, 0, sizeof(*target_st));
6690         __put_user(host_st->st_dev, &target_st->st_dev);
6691         __put_user(host_st->st_ino, &target_st->st_ino);
6692 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6693         __put_user(host_st->st_ino, &target_st->__st_ino);
6694 #endif
6695         __put_user(host_st->st_mode, &target_st->st_mode);
6696         __put_user(host_st->st_nlink, &target_st->st_nlink);
6697         __put_user(host_st->st_uid, &target_st->st_uid);
6698         __put_user(host_st->st_gid, &target_st->st_gid);
6699         __put_user(host_st->st_rdev, &target_st->st_rdev);
6700         /* XXX: better use of kernel struct */
6701         __put_user(host_st->st_size, &target_st->st_size);
6702         __put_user(host_st->st_blksize, &target_st->st_blksize);
6703         __put_user(host_st->st_blocks, &target_st->st_blocks);
6704         __put_user(host_st->st_atime, &target_st->target_st_atime);
6705         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6706         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6707 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6708         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6709         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6710         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6711 #endif
6712         unlock_user_struct(target_st, target_addr, 1);
6713     }
6714 
6715     return 0;
6716 }
6717 #endif
6718 
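     /* Convert a statx result into the guest's struct target_statx.  Both
      * source and destination use the target_statx layout here; __put_user()
      * only adjusts field widths and byte order.
      */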
6719 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6720 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6721                                             abi_ulong target_addr)
6722 {
6723     struct target_statx *target_stx;
6724 
6725     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6726         return -TARGET_EFAULT;
6727     }
6728     memset(target_stx, 0, sizeof(*target_stx));
6729 
6730     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6731     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6732     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6733     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6734     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6735     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6736     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6737     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6738     __put_user(host_stx->stx_size, &target_stx->stx_size);
6739     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6740     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6741     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6742     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6743     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6744     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6745     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6746     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6747     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6748     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6749     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6750     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6751     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6752     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6753 
6754     unlock_user_struct(target_stx, target_addr, 1);
6755 
6756     return 0;
6757 }
6758 #endif
6759 
6760 
6761 /* ??? Using host futex calls even when target atomic operations
6762    are not really atomic probably breaks things.  However implementing
6763    futexes locally would make futexes shared between multiple processes
6764    tricky.  They're probably useless anyway, because guest atomic
6765    operations won't work either.  */
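     /* uaddr and uaddr2 are guest addresses and are translated with g2h()
      * before reaching the host futex call; the timeout argument is only a
      * real struct timespec for the WAIT operations and is reinterpreted as
      * a plain count for REQUEUE/CMP_REQUEUE/WAKE_OP, as described below.
      */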
6766 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6767                     target_ulong uaddr2, int val3)
6768 {
6769     struct timespec ts, *pts;
6770     int base_op;
6771 
6772     /* ??? We assume FUTEX_* constants are the same on both host
6773        and target.  */
6774 #ifdef FUTEX_CMD_MASK
6775     base_op = op & FUTEX_CMD_MASK;
6776 #else
6777     base_op = op;
6778 #endif
6779     switch (base_op) {
6780     case FUTEX_WAIT:
6781     case FUTEX_WAIT_BITSET:
6782         if (timeout) {
6783             pts = &ts;
6784             target_to_host_timespec(pts, timeout);
6785         } else {
6786             pts = NULL;
6787         }
6788         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6789                          pts, NULL, val3));
6790     case FUTEX_WAKE:
6791         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6792     case FUTEX_FD:
6793         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6794     case FUTEX_REQUEUE:
6795     case FUTEX_CMP_REQUEUE:
6796     case FUTEX_WAKE_OP:
6797         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6798            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6799            But the prototype takes a `struct timespec *'; insert casts
6800            to satisfy the compiler.  We do not need to tswap TIMEOUT
6801            since it's not compared to guest memory.  */
6802         pts = (struct timespec *)(uintptr_t) timeout;
6803         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6804                                     g2h(uaddr2),
6805                                     (base_op == FUTEX_CMP_REQUEUE
6806                                      ? tswap32(val3)
6807                                      : val3)));
6808     default:
6809         return -TARGET_ENOSYS;
6810     }
6811 }
6812 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
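     /* name_to_handle_at(2): the guest passes a struct file_handle whose
      * handle_bytes field tells us how much opaque data follows; the handle
      * is copied out wholesale and only the two documented header fields are
      * byte-swapped.
      */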
6813 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6814                                      abi_long handle, abi_long mount_id,
6815                                      abi_long flags)
6816 {
6817     struct file_handle *target_fh;
6818     struct file_handle *fh;
6819     int mid = 0;
6820     abi_long ret;
6821     char *name;
6822     unsigned int size, total_size;
6823 
6824     if (get_user_s32(size, handle)) {
6825         return -TARGET_EFAULT;
6826     }
6827 
6828     name = lock_user_string(pathname);
6829     if (!name) {
6830         return -TARGET_EFAULT;
6831     }
6832 
6833     total_size = sizeof(struct file_handle) + size;
6834     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6835     if (!target_fh) {
6836         unlock_user(name, pathname, 0);
6837         return -TARGET_EFAULT;
6838     }
6839 
6840     fh = g_malloc0(total_size);
6841     fh->handle_bytes = size;
6842 
6843     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6844     unlock_user(name, pathname, 0);
6845 
6846     /* man name_to_handle_at(2):
6847      * Other than the use of the handle_bytes field, the caller should treat
6848      * the file_handle structure as an opaque data type
6849      */
6850 
6851     memcpy(target_fh, fh, total_size);
6852     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6853     target_fh->handle_type = tswap32(fh->handle_type);
6854     g_free(fh);
6855     unlock_user(target_fh, handle, total_size);
6856 
6857     if (put_user_s32(mid, mount_id)) {
6858         return -TARGET_EFAULT;
6859     }
6860 
6861     return ret;
6862 
6863 }
6864 #endif
6865 
6866 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6867 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6868                                      abi_long flags)
6869 {
6870     struct file_handle *target_fh;
6871     struct file_handle *fh;
6872     unsigned int size, total_size;
6873     abi_long ret;
6874 
6875     if (get_user_s32(size, handle)) {
6876         return -TARGET_EFAULT;
6877     }
6878 
6879     total_size = sizeof(struct file_handle) + size;
6880     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6881     if (!target_fh) {
6882         return -TARGET_EFAULT;
6883     }
6884 
6885     fh = g_memdup(target_fh, total_size);
6886     fh->handle_bytes = size;
6887     fh->handle_type = tswap32(target_fh->handle_type);
6888 
6889     ret = get_errno(open_by_handle_at(mount_fd, fh,
6890                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6891 
6892     g_free(fh);
6893 
6894     unlock_user(target_fh, handle, total_size);
6895 
6896     return ret;
6897 }
6898 #endif
6899 
6900 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6901 
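     /* signalfd()/signalfd4(): convert the guest signal mask and flags, then
      * register an fd translator so that data read from the new descriptor is
      * converted back into the guest's signalfd_siginfo layout.
      */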
6902 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6903 {
6904     int host_flags;
6905     target_sigset_t *target_mask;
6906     sigset_t host_mask;
6907     abi_long ret;
6908 
6909     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6910         return -TARGET_EINVAL;
6911     }
6912     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6913         return -TARGET_EFAULT;
6914     }
6915 
6916     target_to_host_sigset(&host_mask, target_mask);
6917 
6918     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6919 
6920     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6921     if (ret >= 0) {
6922         fd_trans_register(ret, &target_signalfd_trans);
6923     }
6924 
6925     unlock_user_struct(target_mask, mask, 0);
6926 
6927     return ret;
6928 }
6929 #endif
6930 
6931 /* Map host to target signal numbers for the wait family of syscalls.
6932    Assume all other status bits are the same.  */
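     /* In the standard Linux encoding the termination signal lives in the low
      * 7 bits (with bit 7 as the core-dump flag) and the stop signal in bits
      * 8-15, which is what the masking below relies on.
      */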
6933 int host_to_target_waitstatus(int status)
6934 {
6935     if (WIFSIGNALED(status)) {
6936         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6937     }
6938     if (WIFSTOPPED(status)) {
6939         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6940                | (status & 0xff);
6941     }
6942     return status;
6943 }
6944 
6945 static int open_self_cmdline(void *cpu_env, int fd)
6946 {
6947     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6948     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6949     int i;
6950 
6951     for (i = 0; i < bprm->argc; i++) {
6952         size_t len = strlen(bprm->argv[i]) + 1;
6953 
6954         if (write(fd, bprm->argv[i], len) != len) {
6955             return -1;
6956         }
6957     }
6958 
6959     return 0;
6960 }
6961 
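     /* Fake /proc/self/maps: read the host's maps, keep only ranges that are
      * valid in the guest address space, and rewrite the addresses with h2g()
      * so the guest sees its own view of memory.
      */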
6962 static int open_self_maps(void *cpu_env, int fd)
6963 {
6964     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6965     TaskState *ts = cpu->opaque;
6966     FILE *fp;
6967     char *line = NULL;
6968     size_t len = 0;
6969     ssize_t read;
6970 
6971     fp = fopen("/proc/self/maps", "r");
6972     if (fp == NULL) {
6973         return -1;
6974     }
6975 
6976     while ((read = getline(&line, &len, fp)) != -1) {
6977         int fields, dev_maj, dev_min, inode;
6978         uint64_t min, max, offset;
6979         char flag_r, flag_w, flag_x, flag_p;
6980         char path[512] = "";
6981         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6982                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6983                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6984 
6985         if ((fields < 10) || (fields > 11)) {
6986             continue;
6987         }
6988         if (h2g_valid(min)) {
6989             int flags = page_get_flags(h2g(min));
6990             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6991             if (page_check_range(h2g(min), max - min, flags) == -1) {
6992                 continue;
6993             }
6994             if (h2g(min) == ts->info->stack_limit) {
6995                 pstrcpy(path, sizeof(path), "      [stack]");
6996             }
6997             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6998                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6999                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7000                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7001                     path[0] ? "         " : "", path);
7002         }
7003     }
7004 
7005     free(line);
7006     fclose(fp);
7007 
7008     return 0;
7009 }
7010 
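     /* Fake /proc/self/stat: only the pid, the command name and the stack
      * start field (field 28) are filled in; every other field is written
      * as 0.
      */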
7011 static int open_self_stat(void *cpu_env, int fd)
7012 {
7013     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7014     TaskState *ts = cpu->opaque;
7015     abi_ulong start_stack = ts->info->start_stack;
7016     int i;
7017 
7018     for (i = 0; i < 44; i++) {
7019       char buf[128];
7020       int len;
7021       uint64_t val = 0;
7022 
7023       if (i == 0) {
7024         /* pid */
7025         val = getpid();
7026         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7027       } else if (i == 1) {
7028         /* app name */
7029         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7030       } else if (i == 27) {
7031         /* stack bottom */
7032         val = start_stack;
7033         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7034       } else {
7035         /* for the rest, there is MasterCard */
7036         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7037       }
7038 
7039       len = strlen(buf);
7040       if (write(fd, buf, len) != len) {
7041           return -1;
7042       }
7043     }
7044 
7045     return 0;
7046 }
7047 
7048 static int open_self_auxv(void *cpu_env, int fd)
7049 {
7050     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7051     TaskState *ts = cpu->opaque;
7052     abi_ulong auxv = ts->info->saved_auxv;
7053     abi_ulong len = ts->info->auxv_len;
7054     char *ptr;
7055 
7056     /*
7057      * The auxiliary vector is stored on the target process's stack;
7058      * read the whole vector in and copy it out to the file.
7059      */
7060     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7061     if (ptr != NULL) {
7062         while (len > 0) {
7063             ssize_t r;
7064             r = write(fd, ptr, len);
7065             if (r <= 0) {
7066                 break;
7067             }
7068             len -= r;
7069             ptr += r;
7070         }
7071         lseek(fd, 0, SEEK_SET);
7072         unlock_user(ptr, auxv, len);
7073     }
7074 
7075     return 0;
7076 }
7077 
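     /* Return nonzero if filename names /proc/self/<entry> or
      * /proc/<our own pid>/<entry>; used to decide when to substitute the
      * synthesized files above for the host's versions.
      */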
7078 static int is_proc_myself(const char *filename, const char *entry)
7079 {
7080     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7081         filename += strlen("/proc/");
7082         if (!strncmp(filename, "self/", strlen("self/"))) {
7083             filename += strlen("self/");
7084         } else if (*filename >= '1' && *filename <= '9') {
7085             char myself[80];
7086             snprintf(myself, sizeof(myself), "%d/", getpid());
7087             if (!strncmp(filename, myself, strlen(myself))) {
7088                 filename += strlen(myself);
7089             } else {
7090                 return 0;
7091             }
7092         } else {
7093             return 0;
7094         }
7095         if (!strcmp(filename, entry)) {
7096             return 1;
7097         }
7098     }
7099     return 0;
7100 }
7101 
7102 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7103     defined(TARGET_SPARC) || defined(TARGET_M68K)
7104 static int is_proc(const char *filename, const char *entry)
7105 {
7106     return strcmp(filename, entry) == 0;
7107 }
7108 #endif
7109 
7110 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7111 static int open_net_route(void *cpu_env, int fd)
7112 {
7113     FILE *fp;
7114     char *line = NULL;
7115     size_t len = 0;
7116     ssize_t read;
7117 
7118     fp = fopen("/proc/net/route", "r");
7119     if (fp == NULL) {
7120         return -1;
7121     }
7122 
7123     /* read header */
7124 
7125     read = getline(&line, &len, fp);
7126     dprintf(fd, "%s", line);
7127 
7128     /* read routes */
7129 
7130     while ((read = getline(&line, &len, fp)) != -1) {
7131         char iface[16];
7132         uint32_t dest, gw, mask;
7133         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7134         int fields;
7135 
7136         fields = sscanf(line,
7137                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7138                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7139                         &mask, &mtu, &window, &irtt);
7140         if (fields != 11) {
7141             continue;
7142         }
7143         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7144                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7145                 metric, tswap32(mask), mtu, window, irtt);
7146     }
7147 
7148     free(line);
7149     fclose(fp);
7150 
7151     return 0;
7152 }
7153 #endif
7154 
7155 #if defined(TARGET_SPARC)
7156 static int open_cpuinfo(void *cpu_env, int fd)
7157 {
7158     dprintf(fd, "type\t\t: sun4u\n");
7159     return 0;
7160 }
7161 #endif
7162 
7163 #if defined(TARGET_M68K)
7164 static int open_hardware(void *cpu_env, int fd)
7165 {
7166     dprintf(fd, "Model:\t\tqemu-m68k\n");
7167     return 0;
7168 }
7169 #endif
7170 
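     /* openat() with special-casing of the guest's own /proc files: entries
      * listed in fakes[] are synthesized into an unlinked temporary file and
      * returned in place of the host's version; everything else goes to the
      * host via safe_openat().
      */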
7171 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7172 {
7173     struct fake_open {
7174         const char *filename;
7175         int (*fill)(void *cpu_env, int fd);
7176         int (*cmp)(const char *s1, const char *s2);
7177     };
7178     const struct fake_open *fake_open;
7179     static const struct fake_open fakes[] = {
7180         { "maps", open_self_maps, is_proc_myself },
7181         { "stat", open_self_stat, is_proc_myself },
7182         { "auxv", open_self_auxv, is_proc_myself },
7183         { "cmdline", open_self_cmdline, is_proc_myself },
7184 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7185         { "/proc/net/route", open_net_route, is_proc },
7186 #endif
7187 #if defined(TARGET_SPARC)
7188         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7189 #endif
7190 #if defined(TARGET_M68K)
7191         { "/proc/hardware", open_hardware, is_proc },
7192 #endif
7193         { NULL, NULL, NULL }
7194     };
7195 
7196     if (is_proc_myself(pathname, "exe")) {
7197         int execfd = qemu_getauxval(AT_EXECFD);
7198         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7199     }
7200 
7201     for (fake_open = fakes; fake_open->filename; fake_open++) {
7202         if (fake_open->cmp(pathname, fake_open->filename)) {
7203             break;
7204         }
7205     }
7206 
7207     if (fake_open->filename) {
7208         const char *tmpdir;
7209         char filename[PATH_MAX];
7210         int fd, r;
7211 
7212         /* create a temporary file to hold the synthesized contents */
7213         tmpdir = getenv("TMPDIR");
7214         if (!tmpdir)
7215             tmpdir = "/tmp";
7216         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7217         fd = mkstemp(filename);
7218         if (fd < 0) {
7219             return fd;
7220         }
7221         unlink(filename);
7222 
7223         if ((r = fake_open->fill(cpu_env, fd))) {
7224             int e = errno;
7225             close(fd);
7226             errno = e;
7227             return r;
7228         }
7229         lseek(fd, 0, SEEK_SET);
7230 
7231         return fd;
7232     }
7233 
7234     return safe_openat(dirfd, path(pathname), flags, mode);
7235 }
7236 
7237 #define TIMER_MAGIC 0x0caf0000
7238 #define TIMER_MAGIC_MASK 0xffff0000
7239 
7240 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7241 static target_timer_t get_timer_id(abi_long arg)
7242 {
7243     target_timer_t timerid = arg;
7244 
7245     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7246         return -TARGET_EINVAL;
7247     }
7248 
7249     timerid &= 0xffff;
7250 
7251     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7252         return -TARGET_EINVAL;
7253     }
7254 
7255     return timerid;
7256 }
7257 
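     /* Convert a guest CPU affinity mask (an array of abi_ulong words) into
      * the host's unsigned long bitmap bit by bit, so that word size and
      * byte order differences are handled; host_to_target_cpu_mask() below
      * does the reverse.
      */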
7258 static int target_to_host_cpu_mask(unsigned long *host_mask,
7259                                    size_t host_size,
7260                                    abi_ulong target_addr,
7261                                    size_t target_size)
7262 {
7263     unsigned target_bits = sizeof(abi_ulong) * 8;
7264     unsigned host_bits = sizeof(*host_mask) * 8;
7265     abi_ulong *target_mask;
7266     unsigned i, j;
7267 
7268     assert(host_size >= target_size);
7269 
7270     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7271     if (!target_mask) {
7272         return -TARGET_EFAULT;
7273     }
7274     memset(host_mask, 0, host_size);
7275 
7276     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7277         unsigned bit = i * target_bits;
7278         abi_ulong val;
7279 
7280         __get_user(val, &target_mask[i]);
7281         for (j = 0; j < target_bits; j++, bit++) {
7282             if (val & (1UL << j)) {
7283                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7284             }
7285         }
7286     }
7287 
7288     unlock_user(target_mask, target_addr, 0);
7289     return 0;
7290 }
7291 
7292 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7293                                    size_t host_size,
7294                                    abi_ulong target_addr,
7295                                    size_t target_size)
7296 {
7297     unsigned target_bits = sizeof(abi_ulong) * 8;
7298     unsigned host_bits = sizeof(*host_mask) * 8;
7299     abi_ulong *target_mask;
7300     unsigned i, j;
7301 
7302     assert(host_size >= target_size);
7303 
7304     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7305     if (!target_mask) {
7306         return -TARGET_EFAULT;
7307     }
7308 
7309     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7310         unsigned bit = i * target_bits;
7311         abi_ulong val = 0;
7312 
7313         for (j = 0; j < target_bits; j++, bit++) {
7314             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7315                 val |= 1UL << j;
7316             }
7317         }
7318         __put_user(val, &target_mask[i]);
7319     }
7320 
7321     unlock_user(target_mask, target_addr, target_size);
7322     return 0;
7323 }
7324 
7325 /* This is an internal helper for do_syscall so that it is easier
7326  * to have a single return point, so that actions, such as logging
7327  * of syscall results, can be performed.
7328  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7329  */
7330 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7331                             abi_long arg2, abi_long arg3, abi_long arg4,
7332                             abi_long arg5, abi_long arg6, abi_long arg7,
7333                             abi_long arg8)
7334 {
7335     CPUState *cpu = env_cpu(cpu_env);
7336     abi_long ret;
7337 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7338     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7339     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7340     || defined(TARGET_NR_statx)
7341     struct stat st;
7342 #endif
7343 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7344     || defined(TARGET_NR_fstatfs)
7345     struct statfs stfs;
7346 #endif
7347     void *p;
7348 
7349     switch(num) {
7350     case TARGET_NR_exit:
7351         /* In old applications this may be used to implement _exit(2).
7352            However in threaded applications it is used for thread termination,
7353            and _exit_group is used for application termination.
7354            Do thread termination if we have more than one thread.  */
7355 
7356         if (block_signals()) {
7357             return -TARGET_ERESTARTSYS;
7358         }
7359 
7360         cpu_list_lock();
7361 
7362         if (CPU_NEXT(first_cpu)) {
7363             TaskState *ts;
7364 
7365             /* Remove the CPU from the list.  */
7366             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7367 
7368             cpu_list_unlock();
7369 
7370             ts = cpu->opaque;
7371             if (ts->child_tidptr) {
7372                 put_user_u32(0, ts->child_tidptr);
7373                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7374                           NULL, NULL, 0);
7375             }
7376             thread_cpu = NULL;
7377             object_unref(OBJECT(cpu));
7378             g_free(ts);
7379             rcu_unregister_thread();
7380             pthread_exit(NULL);
7381         }
7382 
7383         cpu_list_unlock();
7384         preexit_cleanup(cpu_env, arg1);
7385         _exit(arg1);
7386         return 0; /* avoid warning */
7387     case TARGET_NR_read:
7388         if (arg2 == 0 && arg3 == 0) {
7389             return get_errno(safe_read(arg1, 0, 0));
7390         } else {
7391             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7392                 return -TARGET_EFAULT;
7393             ret = get_errno(safe_read(arg1, p, arg3));
7394             if (ret >= 0 &&
7395                 fd_trans_host_to_target_data(arg1)) {
7396                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7397             }
7398             unlock_user(p, arg2, ret);
7399         }
7400         return ret;
7401     case TARGET_NR_write:
7402         if (arg2 == 0 && arg3 == 0) {
7403             return get_errno(safe_write(arg1, 0, 0));
7404         }
7405         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7406             return -TARGET_EFAULT;
7407         if (fd_trans_target_to_host_data(arg1)) {
7408             void *copy = g_malloc(arg3);
7409             memcpy(copy, p, arg3);
7410             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7411             if (ret >= 0) {
7412                 ret = get_errno(safe_write(arg1, copy, ret));
7413             }
7414             g_free(copy);
7415         } else {
7416             ret = get_errno(safe_write(arg1, p, arg3));
7417         }
7418         unlock_user(p, arg2, 0);
7419         return ret;
7420 
7421 #ifdef TARGET_NR_open
7422     case TARGET_NR_open:
7423         if (!(p = lock_user_string(arg1)))
7424             return -TARGET_EFAULT;
7425         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7426                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7427                                   arg3));
7428         fd_trans_unregister(ret);
7429         unlock_user(p, arg1, 0);
7430         return ret;
7431 #endif
7432     case TARGET_NR_openat:
7433         if (!(p = lock_user_string(arg2)))
7434             return -TARGET_EFAULT;
7435         ret = get_errno(do_openat(cpu_env, arg1, p,
7436                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7437                                   arg4));
7438         fd_trans_unregister(ret);
7439         unlock_user(p, arg2, 0);
7440         return ret;
7441 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7442     case TARGET_NR_name_to_handle_at:
7443         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7444         return ret;
7445 #endif
7446 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7447     case TARGET_NR_open_by_handle_at:
7448         ret = do_open_by_handle_at(arg1, arg2, arg3);
7449         fd_trans_unregister(ret);
7450         return ret;
7451 #endif
7452     case TARGET_NR_close:
7453         fd_trans_unregister(arg1);
7454         return get_errno(close(arg1));
7455 
7456     case TARGET_NR_brk:
7457         return do_brk(arg1);
7458 #ifdef TARGET_NR_fork
7459     case TARGET_NR_fork:
7460         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7461 #endif
7462 #ifdef TARGET_NR_waitpid
7463     case TARGET_NR_waitpid:
7464         {
7465             int status;
7466             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7467             if (!is_error(ret) && arg2 && ret
7468                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7469                 return -TARGET_EFAULT;
7470         }
7471         return ret;
7472 #endif
7473 #ifdef TARGET_NR_waitid
7474     case TARGET_NR_waitid:
7475         {
7476             siginfo_t info;
7477             info.si_pid = 0;
7478             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7479             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7480                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7481                     return -TARGET_EFAULT;
7482                 host_to_target_siginfo(p, &info);
7483                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7484             }
7485         }
7486         return ret;
7487 #endif
7488 #ifdef TARGET_NR_creat /* not on alpha */
7489     case TARGET_NR_creat:
7490         if (!(p = lock_user_string(arg1)))
7491             return -TARGET_EFAULT;
7492         ret = get_errno(creat(p, arg2));
7493         fd_trans_unregister(ret);
7494         unlock_user(p, arg1, 0);
7495         return ret;
7496 #endif
7497 #ifdef TARGET_NR_link
7498     case TARGET_NR_link:
7499         {
7500             void * p2;
7501             p = lock_user_string(arg1);
7502             p2 = lock_user_string(arg2);
7503             if (!p || !p2)
7504                 ret = -TARGET_EFAULT;
7505             else
7506                 ret = get_errno(link(p, p2));
7507             unlock_user(p2, arg2, 0);
7508             unlock_user(p, arg1, 0);
7509         }
7510         return ret;
7511 #endif
7512 #if defined(TARGET_NR_linkat)
7513     case TARGET_NR_linkat:
7514         {
7515             void * p2 = NULL;
7516             if (!arg2 || !arg4)
7517                 return -TARGET_EFAULT;
7518             p  = lock_user_string(arg2);
7519             p2 = lock_user_string(arg4);
7520             if (!p || !p2)
7521                 ret = -TARGET_EFAULT;
7522             else
7523                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7524             unlock_user(p, arg2, 0);
7525             unlock_user(p2, arg4, 0);
7526         }
7527         return ret;
7528 #endif
7529 #ifdef TARGET_NR_unlink
7530     case TARGET_NR_unlink:
7531         if (!(p = lock_user_string(arg1)))
7532             return -TARGET_EFAULT;
7533         ret = get_errno(unlink(p));
7534         unlock_user(p, arg1, 0);
7535         return ret;
7536 #endif
7537 #if defined(TARGET_NR_unlinkat)
7538     case TARGET_NR_unlinkat:
7539         if (!(p = lock_user_string(arg2)))
7540             return -TARGET_EFAULT;
7541         ret = get_errno(unlinkat(arg1, p, arg3));
7542         unlock_user(p, arg2, 0);
7543         return ret;
7544 #endif
7545     case TARGET_NR_execve:
7546         {
7547             char **argp, **envp;
7548             int argc, envc;
7549             abi_ulong gp;
7550             abi_ulong guest_argp;
7551             abi_ulong guest_envp;
7552             abi_ulong addr;
7553             char **q;
7554             int total_size = 0;
7555 
7556             argc = 0;
7557             guest_argp = arg2;
7558             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7559                 if (get_user_ual(addr, gp))
7560                     return -TARGET_EFAULT;
7561                 if (!addr)
7562                     break;
7563                 argc++;
7564             }
7565             envc = 0;
7566             guest_envp = arg3;
7567             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7568                 if (get_user_ual(addr, gp))
7569                     return -TARGET_EFAULT;
7570                 if (!addr)
7571                     break;
7572                 envc++;
7573             }
7574 
7575             argp = g_new0(char *, argc + 1);
7576             envp = g_new0(char *, envc + 1);
7577 
7578             for (gp = guest_argp, q = argp; gp;
7579                   gp += sizeof(abi_ulong), q++) {
7580                 if (get_user_ual(addr, gp))
7581                     goto execve_efault;
7582                 if (!addr)
7583                     break;
7584                 if (!(*q = lock_user_string(addr)))
7585                     goto execve_efault;
7586                 total_size += strlen(*q) + 1;
7587             }
7588             *q = NULL;
7589 
7590             for (gp = guest_envp, q = envp; gp;
7591                   gp += sizeof(abi_ulong), q++) {
7592                 if (get_user_ual(addr, gp))
7593                     goto execve_efault;
7594                 if (!addr)
7595                     break;
7596                 if (!(*q = lock_user_string(addr)))
7597                     goto execve_efault;
7598                 total_size += strlen(*q) + 1;
7599             }
7600             *q = NULL;
7601 
7602             if (!(p = lock_user_string(arg1)))
7603                 goto execve_efault;
7604             /* Although execve() is not an interruptible syscall it is
7605              * a special case where we must use the safe_syscall wrapper:
7606              * if we allow a signal to happen before we make the host
7607              * syscall then we will 'lose' it, because at the point of
7608              * execve the process leaves QEMU's control. So we use the
7609              * safe syscall wrapper to ensure that we either take the
7610              * signal as a guest signal, or else it does not happen
7611              * before the execve completes and makes it the other
7612              * program's problem.
7613              */
7614             ret = get_errno(safe_execve(p, argp, envp));
7615             unlock_user(p, arg1, 0);
7616 
7617             goto execve_end;
7618 
7619         execve_efault:
7620             ret = -TARGET_EFAULT;
7621 
7622         execve_end:
7623             for (gp = guest_argp, q = argp; *q;
7624                   gp += sizeof(abi_ulong), q++) {
7625                 if (get_user_ual(addr, gp)
7626                     || !addr)
7627                     break;
7628                 unlock_user(*q, addr, 0);
7629             }
7630             for (gp = guest_envp, q = envp; *q;
7631                   gp += sizeof(abi_ulong), q++) {
7632                 if (get_user_ual(addr, gp)
7633                     || !addr)
7634                     break;
7635                 unlock_user(*q, addr, 0);
7636             }
7637 
7638             g_free(argp);
7639             g_free(envp);
7640         }
7641         return ret;
7642     case TARGET_NR_chdir:
7643         if (!(p = lock_user_string(arg1)))
7644             return -TARGET_EFAULT;
7645         ret = get_errno(chdir(p));
7646         unlock_user(p, arg1, 0);
7647         return ret;
7648 #ifdef TARGET_NR_time
7649     case TARGET_NR_time:
7650         {
7651             time_t host_time;
7652             ret = get_errno(time(&host_time));
7653             if (!is_error(ret)
7654                 && arg1
7655                 && put_user_sal(host_time, arg1))
7656                 return -TARGET_EFAULT;
7657         }
7658         return ret;
7659 #endif
7660 #ifdef TARGET_NR_mknod
7661     case TARGET_NR_mknod:
7662         if (!(p = lock_user_string(arg1)))
7663             return -TARGET_EFAULT;
7664         ret = get_errno(mknod(p, arg2, arg3));
7665         unlock_user(p, arg1, 0);
7666         return ret;
7667 #endif
7668 #if defined(TARGET_NR_mknodat)
7669     case TARGET_NR_mknodat:
7670         if (!(p = lock_user_string(arg2)))
7671             return -TARGET_EFAULT;
7672         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7673         unlock_user(p, arg2, 0);
7674         return ret;
7675 #endif
7676 #ifdef TARGET_NR_chmod
7677     case TARGET_NR_chmod:
7678         if (!(p = lock_user_string(arg1)))
7679             return -TARGET_EFAULT;
7680         ret = get_errno(chmod(p, arg2));
7681         unlock_user(p, arg1, 0);
7682         return ret;
7683 #endif
7684 #ifdef TARGET_NR_lseek
7685     case TARGET_NR_lseek:
7686         return get_errno(lseek(arg1, arg2, arg3));
7687 #endif
7688 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7689     /* Alpha specific */
7690     case TARGET_NR_getxpid:
7691         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7692         return get_errno(getpid());
7693 #endif
7694 #ifdef TARGET_NR_getpid
7695     case TARGET_NR_getpid:
7696         return get_errno(getpid());
7697 #endif
7698     case TARGET_NR_mount:
7699         {
7700             /* need to look at the data field */
7701             void *p2, *p3;
7702 
7703             if (arg1) {
7704                 p = lock_user_string(arg1);
7705                 if (!p) {
7706                     return -TARGET_EFAULT;
7707                 }
7708             } else {
7709                 p = NULL;
7710             }
7711 
7712             p2 = lock_user_string(arg2);
7713             if (!p2) {
7714                 if (arg1) {
7715                     unlock_user(p, arg1, 0);
7716                 }
7717                 return -TARGET_EFAULT;
7718             }
7719 
7720             if (arg3) {
7721                 p3 = lock_user_string(arg3);
7722                 if (!p3) {
7723                     if (arg1) {
7724                         unlock_user(p, arg1, 0);
7725                     }
7726                     unlock_user(p2, arg2, 0);
7727                     return -TARGET_EFAULT;
7728                 }
7729             } else {
7730                 p3 = NULL;
7731             }
7732 
7733             /* FIXME - arg5 should be locked, but it isn't clear how to
7734              * do that since it's not guaranteed to be a NULL-terminated
7735              * string.
7736              */
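                 /* The data argument is passed through untranslated via g2h(),
                  * as its layout depends on the filesystem being mounted.
                  */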
7737             if (!arg5) {
7738                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7739             } else {
7740                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7741             }
7742             ret = get_errno(ret);
7743 
7744             if (arg1) {
7745                 unlock_user(p, arg1, 0);
7746             }
7747             unlock_user(p2, arg2, 0);
7748             if (arg3) {
7749                 unlock_user(p3, arg3, 0);
7750             }
7751         }
7752         return ret;
7753 #ifdef TARGET_NR_umount
7754     case TARGET_NR_umount:
7755         if (!(p = lock_user_string(arg1)))
7756             return -TARGET_EFAULT;
7757         ret = get_errno(umount(p));
7758         unlock_user(p, arg1, 0);
7759         return ret;
7760 #endif
7761 #ifdef TARGET_NR_stime /* not on alpha */
7762     case TARGET_NR_stime:
7763         {
7764             time_t host_time;
7765             if (get_user_sal(host_time, arg1))
7766                 return -TARGET_EFAULT;
7767             return get_errno(stime(&host_time));
7768         }
7769 #endif
7770 #ifdef TARGET_NR_alarm /* not on alpha */
7771     case TARGET_NR_alarm:
7772         return alarm(arg1);
7773 #endif
7774 #ifdef TARGET_NR_pause /* not on alpha */
7775     case TARGET_NR_pause:
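             /* block_signals() returns non-zero if a signal is already pending;
              * in that case skip the suspend, since pause() completes with
              * -EINTR as soon as a signal is taken anyway.
              */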
7776         if (!block_signals()) {
7777             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7778         }
7779         return -TARGET_EINTR;
7780 #endif
7781 #ifdef TARGET_NR_utime
7782     case TARGET_NR_utime:
7783         {
7784             struct utimbuf tbuf, *host_tbuf;
7785             struct target_utimbuf *target_tbuf;
7786             if (arg2) {
7787                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7788                     return -TARGET_EFAULT;
7789                 tbuf.actime = tswapal(target_tbuf->actime);
7790                 tbuf.modtime = tswapal(target_tbuf->modtime);
7791                 unlock_user_struct(target_tbuf, arg2, 0);
7792                 host_tbuf = &tbuf;
7793             } else {
7794                 host_tbuf = NULL;
7795             }
7796             if (!(p = lock_user_string(arg1)))
7797                 return -TARGET_EFAULT;
7798             ret = get_errno(utime(p, host_tbuf));
7799             unlock_user(p, arg1, 0);
7800         }
7801         return ret;
7802 #endif
7803 #ifdef TARGET_NR_utimes
7804     case TARGET_NR_utimes:
7805         {
7806             struct timeval *tvp, tv[2];
7807             if (arg2) {
7808                 if (copy_from_user_timeval(&tv[0], arg2)
7809                     || copy_from_user_timeval(&tv[1],
7810                                               arg2 + sizeof(struct target_timeval)))
7811                     return -TARGET_EFAULT;
7812                 tvp = tv;
7813             } else {
7814                 tvp = NULL;
7815             }
7816             if (!(p = lock_user_string(arg1)))
7817                 return -TARGET_EFAULT;
7818             ret = get_errno(utimes(p, tvp));
7819             unlock_user(p, arg1, 0);
7820         }
7821         return ret;
7822 #endif
7823 #if defined(TARGET_NR_futimesat)
7824     case TARGET_NR_futimesat:
7825         {
7826             struct timeval *tvp, tv[2];
7827             if (arg3) {
7828                 if (copy_from_user_timeval(&tv[0], arg3)
7829                     || copy_from_user_timeval(&tv[1],
7830                                               arg3 + sizeof(struct target_timeval)))
7831                     return -TARGET_EFAULT;
7832                 tvp = tv;
7833             } else {
7834                 tvp = NULL;
7835             }
7836             if (!(p = lock_user_string(arg2))) {
7837                 return -TARGET_EFAULT;
7838             }
7839             ret = get_errno(futimesat(arg1, path(p), tvp));
7840             unlock_user(p, arg2, 0);
7841         }
7842         return ret;
7843 #endif
7844 #ifdef TARGET_NR_access
7845     case TARGET_NR_access:
7846         if (!(p = lock_user_string(arg1))) {
7847             return -TARGET_EFAULT;
7848         }
7849         ret = get_errno(access(path(p), arg2));
7850         unlock_user(p, arg1, 0);
7851         return ret;
7852 #endif
7853 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7854     case TARGET_NR_faccessat:
7855         if (!(p = lock_user_string(arg2))) {
7856             return -TARGET_EFAULT;
7857         }
7858         ret = get_errno(faccessat(arg1, p, arg3, 0));
7859         unlock_user(p, arg2, 0);
7860         return ret;
7861 #endif
7862 #ifdef TARGET_NR_nice /* not on alpha */
7863     case TARGET_NR_nice:
7864         return get_errno(nice(arg1));
7865 #endif
7866     case TARGET_NR_sync:
7867         sync();
7868         return 0;
7869 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7870     case TARGET_NR_syncfs:
7871         return get_errno(syncfs(arg1));
7872 #endif
7873     case TARGET_NR_kill:
7874         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7875 #ifdef TARGET_NR_rename
7876     case TARGET_NR_rename:
7877         {
7878             void *p2;
7879             p = lock_user_string(arg1);
7880             p2 = lock_user_string(arg2);
7881             if (!p || !p2)
7882                 ret = -TARGET_EFAULT;
7883             else
7884                 ret = get_errno(rename(p, p2));
7885             unlock_user(p2, arg2, 0);
7886             unlock_user(p, arg1, 0);
7887         }
7888         return ret;
7889 #endif
7890 #if defined(TARGET_NR_renameat)
7891     case TARGET_NR_renameat:
7892         {
7893             void *p2;
7894             p  = lock_user_string(arg2);
7895             p2 = lock_user_string(arg4);
7896             if (!p || !p2)
7897                 ret = -TARGET_EFAULT;
7898             else
7899                 ret = get_errno(renameat(arg1, p, arg3, p2));
7900             unlock_user(p2, arg4, 0);
7901             unlock_user(p, arg2, 0);
7902         }
7903         return ret;
7904 #endif
7905 #if defined(TARGET_NR_renameat2)
7906     case TARGET_NR_renameat2:
7907         {
7908             void *p2;
7909             p  = lock_user_string(arg2);
7910             p2 = lock_user_string(arg4);
7911             if (!p || !p2) {
7912                 ret = -TARGET_EFAULT;
7913             } else {
7914                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7915             }
7916             unlock_user(p2, arg4, 0);
7917             unlock_user(p, arg2, 0);
7918         }
7919         return ret;
7920 #endif
7921 #ifdef TARGET_NR_mkdir
7922     case TARGET_NR_mkdir:
7923         if (!(p = lock_user_string(arg1)))
7924             return -TARGET_EFAULT;
7925         ret = get_errno(mkdir(p, arg2));
7926         unlock_user(p, arg1, 0);
7927         return ret;
7928 #endif
7929 #if defined(TARGET_NR_mkdirat)
7930     case TARGET_NR_mkdirat:
7931         if (!(p = lock_user_string(arg2)))
7932             return -TARGET_EFAULT;
7933         ret = get_errno(mkdirat(arg1, p, arg3));
7934         unlock_user(p, arg2, 0);
7935         return ret;
7936 #endif
7937 #ifdef TARGET_NR_rmdir
7938     case TARGET_NR_rmdir:
7939         if (!(p = lock_user_string(arg1)))
7940             return -TARGET_EFAULT;
7941         ret = get_errno(rmdir(p));
7942         unlock_user(p, arg1, 0);
7943         return ret;
7944 #endif
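         /* dup/dup2/dup3 also propagate any fd translator registered for the
          * old descriptor to the new one via fd_trans_dup().
          */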
7945     case TARGET_NR_dup:
7946         ret = get_errno(dup(arg1));
7947         if (ret >= 0) {
7948             fd_trans_dup(arg1, ret);
7949         }
7950         return ret;
7951 #ifdef TARGET_NR_pipe
7952     case TARGET_NR_pipe:
7953         return do_pipe(cpu_env, arg1, 0, 0);
7954 #endif
7955 #ifdef TARGET_NR_pipe2
7956     case TARGET_NR_pipe2:
7957         return do_pipe(cpu_env, arg1,
7958                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7959 #endif
7960     case TARGET_NR_times:
7961         {
7962             struct target_tms *tmsp;
7963             struct tms tms;
7964             ret = get_errno(times(&tms));
7965             if (arg1) {
7966                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7967                 if (!tmsp)
7968                     return -TARGET_EFAULT;
7969                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7970                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7971                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7972                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7973             }
7974             if (!is_error(ret))
7975                 ret = host_to_target_clock_t(ret);
7976         }
7977         return ret;
7978     case TARGET_NR_acct:
7979         if (arg1 == 0) {
7980             ret = get_errno(acct(NULL));
7981         } else {
7982             if (!(p = lock_user_string(arg1))) {
7983                 return -TARGET_EFAULT;
7984             }
7985             ret = get_errno(acct(path(p)));
7986             unlock_user(p, arg1, 0);
7987         }
7988         return ret;
7989 #ifdef TARGET_NR_umount2
7990     case TARGET_NR_umount2:
7991         if (!(p = lock_user_string(arg1)))
7992             return -TARGET_EFAULT;
7993         ret = get_errno(umount2(p, arg2));
7994         unlock_user(p, arg1, 0);
7995         return ret;
7996 #endif
7997     case TARGET_NR_ioctl:
7998         return do_ioctl(arg1, arg2, arg3);
7999 #ifdef TARGET_NR_fcntl
8000     case TARGET_NR_fcntl:
8001         return do_fcntl(arg1, arg2, arg3);
8002 #endif
8003     case TARGET_NR_setpgid:
8004         return get_errno(setpgid(arg1, arg2));
8005     case TARGET_NR_umask:
8006         return get_errno(umask(arg1));
8007     case TARGET_NR_chroot:
8008         if (!(p = lock_user_string(arg1)))
8009             return -TARGET_EFAULT;
8010         ret = get_errno(chroot(p));
8011         unlock_user(p, arg1, 0);
8012         return ret;
8013 #ifdef TARGET_NR_dup2
8014     case TARGET_NR_dup2:
8015         ret = get_errno(dup2(arg1, arg2));
8016         if (ret >= 0) {
8017             fd_trans_dup(arg1, arg2);
8018         }
8019         return ret;
8020 #endif
8021 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8022     case TARGET_NR_dup3:
8023     {
8024         int host_flags;
8025 
8026         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8027             return -TARGET_EINVAL;
8028         }
8029         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8030         ret = get_errno(dup3(arg1, arg2, host_flags));
8031         if (ret >= 0) {
8032             fd_trans_dup(arg1, arg2);
8033         }
8034         return ret;
8035     }
8036 #endif
8037 #ifdef TARGET_NR_getppid /* not on alpha */
8038     case TARGET_NR_getppid:
8039         return get_errno(getppid());
8040 #endif
8041 #ifdef TARGET_NR_getpgrp
8042     case TARGET_NR_getpgrp:
8043         return get_errno(getpgrp());
8044 #endif
8045     case TARGET_NR_setsid:
8046         return get_errno(setsid());
8047 #ifdef TARGET_NR_sigaction
8048     case TARGET_NR_sigaction:
8049         {
8050 #if defined(TARGET_ALPHA)
8051             struct target_sigaction act, oact, *pact = 0;
8052             struct target_old_sigaction *old_act;
8053             if (arg2) {
8054                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8055                     return -TARGET_EFAULT;
8056                 act._sa_handler = old_act->_sa_handler;
8057                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8058                 act.sa_flags = old_act->sa_flags;
8059                 act.sa_restorer = 0;
8060                 unlock_user_struct(old_act, arg2, 0);
8061                 pact = &act;
8062             }
8063             ret = get_errno(do_sigaction(arg1, pact, &oact));
8064             if (!is_error(ret) && arg3) {
8065                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8066                     return -TARGET_EFAULT;
8067                 old_act->_sa_handler = oact._sa_handler;
8068                 old_act->sa_mask = oact.sa_mask.sig[0];
8069                 old_act->sa_flags = oact.sa_flags;
8070                 unlock_user_struct(old_act, arg3, 1);
8071             }
8072 #elif defined(TARGET_MIPS)
8073             struct target_sigaction act, oact, *pact, *old_act;
8074 
8075             if (arg2) {
8076                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8077                     return -TARGET_EFAULT;
8078                 act._sa_handler = old_act->_sa_handler;
8079                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8080                 act.sa_flags = old_act->sa_flags;
8081                 unlock_user_struct(old_act, arg2, 0);
8082                 pact = &act;
8083             } else {
8084                 pact = NULL;
8085             }
8086 
8087             ret = get_errno(do_sigaction(arg1, pact, &oact));
8088 
8089             if (!is_error(ret) && arg3) {
8090                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8091                     return -TARGET_EFAULT;
8092                 old_act->_sa_handler = oact._sa_handler;
8093                 old_act->sa_flags = oact.sa_flags;
8094                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8095                 old_act->sa_mask.sig[1] = 0;
8096                 old_act->sa_mask.sig[2] = 0;
8097                 old_act->sa_mask.sig[3] = 0;
8098                 unlock_user_struct(old_act, arg3, 1);
8099             }
8100 #else
8101             struct target_old_sigaction *old_act;
8102             struct target_sigaction act, oact, *pact;
8103             if (arg2) {
8104                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8105                     return -TARGET_EFAULT;
8106                 act._sa_handler = old_act->_sa_handler;
8107                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8108                 act.sa_flags = old_act->sa_flags;
8109                 act.sa_restorer = old_act->sa_restorer;
8110 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8111                 act.ka_restorer = 0;
8112 #endif
8113                 unlock_user_struct(old_act, arg2, 0);
8114                 pact = &act;
8115             } else {
8116                 pact = NULL;
8117             }
8118             ret = get_errno(do_sigaction(arg1, pact, &oact));
8119             if (!is_error(ret) && arg3) {
8120                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8121                     return -TARGET_EFAULT;
8122                 old_act->_sa_handler = oact._sa_handler;
8123                 old_act->sa_mask = oact.sa_mask.sig[0];
8124                 old_act->sa_flags = oact.sa_flags;
8125                 old_act->sa_restorer = oact.sa_restorer;
8126                 unlock_user_struct(old_act, arg3, 1);
8127             }
8128 #endif
8129         }
8130         return ret;
8131 #endif
8132     case TARGET_NR_rt_sigaction:
8133         {
8134 #if defined(TARGET_ALPHA)
8135             /* For Alpha and SPARC this is a 5 argument syscall, with
8136              * a 'restorer' parameter which must be copied into the
8137              * sa_restorer field of the sigaction struct.
8138              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8139              * and arg5 is the sigsetsize.
8140              * Alpha also has a separate rt_sigaction struct that it uses
8141              * here; SPARC uses the usual sigaction struct.
8142              */
8143             struct target_rt_sigaction *rt_act;
8144             struct target_sigaction act, oact, *pact = 0;
8145 
8146             if (arg4 != sizeof(target_sigset_t)) {
8147                 return -TARGET_EINVAL;
8148             }
8149             if (arg2) {
8150                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8151                     return -TARGET_EFAULT;
8152                 act._sa_handler = rt_act->_sa_handler;
8153                 act.sa_mask = rt_act->sa_mask;
8154                 act.sa_flags = rt_act->sa_flags;
8155                 act.sa_restorer = arg5;
8156                 unlock_user_struct(rt_act, arg2, 0);
8157                 pact = &act;
8158             }
8159             ret = get_errno(do_sigaction(arg1, pact, &oact));
8160             if (!is_error(ret) && arg3) {
8161                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8162                     return -TARGET_EFAULT;
8163                 rt_act->_sa_handler = oact._sa_handler;
8164                 rt_act->sa_mask = oact.sa_mask;
8165                 rt_act->sa_flags = oact.sa_flags;
8166                 unlock_user_struct(rt_act, arg3, 1);
8167             }
8168 #else
8169 #ifdef TARGET_SPARC
8170             target_ulong restorer = arg4;
8171             target_ulong sigsetsize = arg5;
8172 #else
8173             target_ulong sigsetsize = arg4;
8174 #endif
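             /* For SPARC the extra 'restorer' argument is stored into
              * ka_restorer below, when the target defines
              * TARGET_ARCH_HAS_KA_RESTORER.
              */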
8175             struct target_sigaction *act;
8176             struct target_sigaction *oact;
8177 
8178             if (sigsetsize != sizeof(target_sigset_t)) {
8179                 return -TARGET_EINVAL;
8180             }
8181             if (arg2) {
8182                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8183                     return -TARGET_EFAULT;
8184                 }
8185 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8186                 act->ka_restorer = restorer;
8187 #endif
8188             } else {
8189                 act = NULL;
8190             }
8191             if (arg3) {
8192                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8193                     ret = -TARGET_EFAULT;
8194                     goto rt_sigaction_fail;
8195                 }
8196             } else
8197                 oact = NULL;
8198             ret = get_errno(do_sigaction(arg1, act, oact));
8199         rt_sigaction_fail:
8200             if (act)
8201                 unlock_user_struct(act, arg2, 0);
8202             if (oact)
8203                 unlock_user_struct(oact, arg3, 1);
8204 #endif
8205         }
8206         return ret;
8207 #ifdef TARGET_NR_sgetmask /* not on alpha */
8208     case TARGET_NR_sgetmask:
8209         {
8210             sigset_t cur_set;
8211             abi_ulong target_set;
8212             ret = do_sigprocmask(0, NULL, &cur_set);
8213             if (!ret) {
8214                 host_to_target_old_sigset(&target_set, &cur_set);
8215                 ret = target_set;
8216             }
8217         }
8218         return ret;
8219 #endif
8220 #ifdef TARGET_NR_ssetmask /* not on alpha */
8221     case TARGET_NR_ssetmask:
8222         {
8223             sigset_t set, oset;
8224             abi_ulong target_set = arg1;
8225             target_to_host_old_sigset(&set, &target_set);
8226             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8227             if (!ret) {
8228                 host_to_target_old_sigset(&target_set, &oset);
8229                 ret = target_set;
8230             }
8231         }
8232         return ret;
8233 #endif
8234 #ifdef TARGET_NR_sigprocmask
8235     case TARGET_NR_sigprocmask:
8236         {
8237 #if defined(TARGET_ALPHA)
8238             sigset_t set, oldset;
8239             abi_ulong mask;
8240             int how;
8241 
8242             switch (arg1) {
8243             case TARGET_SIG_BLOCK:
8244                 how = SIG_BLOCK;
8245                 break;
8246             case TARGET_SIG_UNBLOCK:
8247                 how = SIG_UNBLOCK;
8248                 break;
8249             case TARGET_SIG_SETMASK:
8250                 how = SIG_SETMASK;
8251                 break;
8252             default:
8253                 return -TARGET_EINVAL;
8254             }
8255             mask = arg2;
8256             target_to_host_old_sigset(&set, &mask);
8257 
8258             ret = do_sigprocmask(how, &set, &oldset);
8259             if (!is_error(ret)) {
8260                 host_to_target_old_sigset(&mask, &oldset);
8261                 ret = mask;
8262                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8263             }
8264 #else
8265             sigset_t set, oldset, *set_ptr;
8266             int how;
8267 
8268             if (arg2) {
8269                 switch (arg1) {
8270                 case TARGET_SIG_BLOCK:
8271                     how = SIG_BLOCK;
8272                     break;
8273                 case TARGET_SIG_UNBLOCK:
8274                     how = SIG_UNBLOCK;
8275                     break;
8276                 case TARGET_SIG_SETMASK:
8277                     how = SIG_SETMASK;
8278                     break;
8279                 default:
8280                     return -TARGET_EINVAL;
8281                 }
8282                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8283                     return -TARGET_EFAULT;
8284                 target_to_host_old_sigset(&set, p);
8285                 unlock_user(p, arg2, 0);
8286                 set_ptr = &set;
8287             } else {
8288                 how = 0;
8289                 set_ptr = NULL;
8290             }
8291             ret = do_sigprocmask(how, set_ptr, &oldset);
8292             if (!is_error(ret) && arg3) {
8293                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8294                     return -TARGET_EFAULT;
8295                 host_to_target_old_sigset(p, &oldset);
8296                 unlock_user(p, arg3, sizeof(target_sigset_t));
8297             }
8298 #endif
8299         }
8300         return ret;
8301 #endif
8302     case TARGET_NR_rt_sigprocmask:
8303         {
8304             int how = arg1;
8305             sigset_t set, oldset, *set_ptr;
8306 
8307             if (arg4 != sizeof(target_sigset_t)) {
8308                 return -TARGET_EINVAL;
8309             }
8310 
8311             if (arg2) {
8312                 switch(how) {
8313                 case TARGET_SIG_BLOCK:
8314                     how = SIG_BLOCK;
8315                     break;
8316                 case TARGET_SIG_UNBLOCK:
8317                     how = SIG_UNBLOCK;
8318                     break;
8319                 case TARGET_SIG_SETMASK:
8320                     how = SIG_SETMASK;
8321                     break;
8322                 default:
8323                     return -TARGET_EINVAL;
8324                 }
8325                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8326                     return -TARGET_EFAULT;
8327                 target_to_host_sigset(&set, p);
8328                 unlock_user(p, arg2, 0);
8329                 set_ptr = &set;
8330             } else {
8331                 how = 0;
8332                 set_ptr = NULL;
8333             }
8334             ret = do_sigprocmask(how, set_ptr, &oldset);
8335             if (!is_error(ret) && arg3) {
8336                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8337                     return -TARGET_EFAULT;
8338                 host_to_target_sigset(p, &oldset);
8339                 unlock_user(p, arg3, sizeof(target_sigset_t));
8340             }
8341         }
8342         return ret;
8343 #ifdef TARGET_NR_sigpending
8344     case TARGET_NR_sigpending:
8345         {
8346             sigset_t set;
8347             ret = get_errno(sigpending(&set));
8348             if (!is_error(ret)) {
8349                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8350                     return -TARGET_EFAULT;
8351                 host_to_target_old_sigset(p, &set);
8352                 unlock_user(p, arg1, sizeof(target_sigset_t));
8353             }
8354         }
8355         return ret;
8356 #endif
8357     case TARGET_NR_rt_sigpending:
8358         {
8359             sigset_t set;
8360 
8361             /* Yes, this check is >, not != like most. We follow the kernel's
8362              * logic and it does it like this because it implements
8363              * NR_sigpending through the same code path, and in that case
8364              * the old_sigset_t is smaller in size.
8365              */
8366             if (arg2 > sizeof(target_sigset_t)) {
8367                 return -TARGET_EINVAL;
8368             }
8369 
8370             ret = get_errno(sigpending(&set));
8371             if (!is_error(ret)) {
8372                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8373                     return -TARGET_EFAULT;
8374                 host_to_target_sigset(p, &set);
8375                 unlock_user(p, arg1, sizeof(target_sigset_t));
8376             }
8377         }
8378         return ret;
8379 #ifdef TARGET_NR_sigsuspend
8380     case TARGET_NR_sigsuspend:
8381         {
8382             TaskState *ts = cpu->opaque;
8383 #if defined(TARGET_ALPHA)
8384             abi_ulong mask = arg1;
8385             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8386 #else
8387             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8388                 return -TARGET_EFAULT;
8389             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8390             unlock_user(p, arg1, 0);
8391 #endif
8392             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8393                                                SIGSET_T_SIZE));
8394             if (ret != -TARGET_ERESTARTSYS) {
8395                 ts->in_sigsuspend = 1;
8396             }
8397         }
8398         return ret;
8399 #endif
8400     case TARGET_NR_rt_sigsuspend:
8401         {
8402             TaskState *ts = cpu->opaque;
8403 
8404             if (arg2 != sizeof(target_sigset_t)) {
8405                 return -TARGET_EINVAL;
8406             }
8407             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8408                 return -TARGET_EFAULT;
8409             target_to_host_sigset(&ts->sigsuspend_mask, p);
8410             unlock_user(p, arg1, 0);
8411             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8412                                                SIGSET_T_SIZE));
8413             if (ret != -TARGET_ERESTARTSYS) {
8414                 ts->in_sigsuspend = 1;
8415             }
8416         }
8417         return ret;
8418     case TARGET_NR_rt_sigtimedwait:
8419         {
8420             sigset_t set;
8421             struct timespec uts, *puts;
8422             siginfo_t uinfo;
8423 
8424             if (arg4 != sizeof(target_sigset_t)) {
8425                 return -TARGET_EINVAL;
8426             }
8427 
8428             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8429                 return -TARGET_EFAULT;
8430             target_to_host_sigset(&set, p);
8431             unlock_user(p, arg1, 0);
8432             if (arg3) {
8433                 puts = &uts;
8434                 if (target_to_host_timespec(puts, arg3))
                         return -TARGET_EFAULT;
8435             } else {
8436                 puts = NULL;
8437             }
8438             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8439                                                  SIGSET_T_SIZE));
8440             if (!is_error(ret)) {
8441                 if (arg2) {
8442                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8443                                   0);
8444                     if (!p) {
8445                         return -TARGET_EFAULT;
8446                     }
8447                     host_to_target_siginfo(p, &uinfo);
8448                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8449                 }
8450                 ret = host_to_target_signal(ret);
8451             }
8452         }
8453         return ret;
8454     case TARGET_NR_rt_sigqueueinfo:
8455         {
8456             siginfo_t uinfo;
8457 
8458             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8459             if (!p) {
8460                 return -TARGET_EFAULT;
8461             }
8462             target_to_host_siginfo(&uinfo, p);
8463             unlock_user(p, arg3, 0);
8464             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8465         }
8466         return ret;
8467     case TARGET_NR_rt_tgsigqueueinfo:
8468         {
8469             siginfo_t uinfo;
8470 
8471             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8472             if (!p) {
8473                 return -TARGET_EFAULT;
8474             }
8475             target_to_host_siginfo(&uinfo, p);
8476             unlock_user(p, arg4, 0);
8477             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8478         }
8479         return ret;
8480 #ifdef TARGET_NR_sigreturn
8481     case TARGET_NR_sigreturn:
8482         if (block_signals()) {
8483             return -TARGET_ERESTARTSYS;
8484         }
8485         return do_sigreturn(cpu_env);
8486 #endif
8487     case TARGET_NR_rt_sigreturn:
8488         if (block_signals()) {
8489             return -TARGET_ERESTARTSYS;
8490         }
8491         return do_rt_sigreturn(cpu_env);
8492     case TARGET_NR_sethostname:
8493         if (!(p = lock_user_string(arg1)))
8494             return -TARGET_EFAULT;
8495         ret = get_errno(sethostname(p, arg2));
8496         unlock_user(p, arg1, 0);
8497         return ret;
8498 #ifdef TARGET_NR_setrlimit
8499     case TARGET_NR_setrlimit:
8500         {
8501             int resource = target_to_host_resource(arg1);
8502             struct target_rlimit *target_rlim;
8503             struct rlimit rlim;
8504             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8505                 return -TARGET_EFAULT;
8506             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8507             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8508             unlock_user_struct(target_rlim, arg2, 0);
8509             /*
8510              * If we just passed through resource limit settings for memory then
8511              * they would also apply to QEMU's own allocations, and QEMU will
8512              * crash or hang or die if its allocations fail. Ideally we would
8513              * track the guest allocations in QEMU and apply the limits ourselves.
8514              * For now, just tell the guest the call succeeded but don't actually
8515              * limit anything.
8516              */
8517             if (resource != RLIMIT_AS &&
8518                 resource != RLIMIT_DATA &&
8519                 resource != RLIMIT_STACK) {
8520                 return get_errno(setrlimit(resource, &rlim));
8521             } else {
8522                 return 0;
8523             }
8524         }
8525 #endif
8526 #ifdef TARGET_NR_getrlimit
8527     case TARGET_NR_getrlimit:
8528         {
8529             int resource = target_to_host_resource(arg1);
8530             struct target_rlimit *target_rlim;
8531             struct rlimit rlim;
8532 
8533             ret = get_errno(getrlimit(resource, &rlim));
8534             if (!is_error(ret)) {
8535                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8536                     return -TARGET_EFAULT;
8537                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8538                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8539                 unlock_user_struct(target_rlim, arg2, 1);
8540             }
8541         }
8542         return ret;
8543 #endif
8544     case TARGET_NR_getrusage:
8545         {
8546             struct rusage rusage;
8547             ret = get_errno(getrusage(arg1, &rusage));
8548             if (!is_error(ret)) {
8549                 ret = host_to_target_rusage(arg2, &rusage);
8550             }
8551         }
8552         return ret;
8553     case TARGET_NR_gettimeofday:
8554         {
8555             struct timeval tv;
8556             ret = get_errno(gettimeofday(&tv, NULL));
8557             if (!is_error(ret)) {
8558                 if (copy_to_user_timeval(arg1, &tv))
8559                     return -TARGET_EFAULT;
8560             }
8561         }
8562         return ret;
8563     case TARGET_NR_settimeofday:
8564         {
8565             struct timeval tv, *ptv = NULL;
8566             struct timezone tz, *ptz = NULL;
8567 
8568             if (arg1) {
8569                 if (copy_from_user_timeval(&tv, arg1)) {
8570                     return -TARGET_EFAULT;
8571                 }
8572                 ptv = &tv;
8573             }
8574 
8575             if (arg2) {
8576                 if (copy_from_user_timezone(&tz, arg2)) {
8577                     return -TARGET_EFAULT;
8578                 }
8579                 ptz = &tz;
8580             }
8581 
8582             return get_errno(settimeofday(ptv, ptz));
8583         }
8584 #if defined(TARGET_NR_select)
8585     case TARGET_NR_select:
8586 #if defined(TARGET_WANT_NI_OLD_SELECT)
8587         /* some architectures used to have old_select here
8588          * but now return ENOSYS for it.
8589          */
8590         ret = -TARGET_ENOSYS;
8591 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8592         ret = do_old_select(arg1);
8593 #else
8594         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8595 #endif
8596         return ret;
8597 #endif
8598 #ifdef TARGET_NR_pselect6
8599     case TARGET_NR_pselect6:
8600         {
8601             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8602             fd_set rfds, wfds, efds;
8603             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8604             struct timespec ts, *ts_ptr;
8605 
8606             /*
8607              * The 6th arg is actually two args smashed together,
8608              * so we cannot use the C library.
8609              */
8610             sigset_t set;
8611             struct {
8612                 sigset_t *set;
8613                 size_t size;
8614             } sig, *sig_ptr;
8615 
8616             abi_ulong arg_sigset, arg_sigsize, *arg7;
8617             target_sigset_t *target_sigset;
8618 
8619             n = arg1;
8620             rfd_addr = arg2;
8621             wfd_addr = arg3;
8622             efd_addr = arg4;
8623             ts_addr = arg5;
8624 
8625             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8626             if (ret) {
8627                 return ret;
8628             }
8629             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8630             if (ret) {
8631                 return ret;
8632             }
8633             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8634             if (ret) {
8635                 return ret;
8636             }
8637 
8638             /*
8639              * This takes a timespec, and not a timeval, so we cannot
8640              * use the do_select() helper ...
8641              */
8642             if (ts_addr) {
8643                 if (target_to_host_timespec(&ts, ts_addr)) {
8644                     return -TARGET_EFAULT;
8645                 }
8646                 ts_ptr = &ts;
8647             } else {
8648                 ts_ptr = NULL;
8649             }
8650 
8651             /* Extract the two packed args for the sigset */
8652             if (arg6) {
8653                 sig_ptr = &sig;
8654                 sig.size = SIGSET_T_SIZE;
8655 
8656                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8657                 if (!arg7) {
8658                     return -TARGET_EFAULT;
8659                 }
8660                 arg_sigset = tswapal(arg7[0]);
8661                 arg_sigsize = tswapal(arg7[1]);
8662                 unlock_user(arg7, arg6, 0);
8663 
8664                 if (arg_sigset) {
8665                     sig.set = &set;
8666                     if (arg_sigsize != sizeof(*target_sigset)) {
8667                         /* Like the kernel, we enforce correct size sigsets */
8668                         return -TARGET_EINVAL;
8669                     }
8670                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8671                                               sizeof(*target_sigset), 1);
8672                     if (!target_sigset) {
8673                         return -TARGET_EFAULT;
8674                     }
8675                     target_to_host_sigset(&set, target_sigset);
8676                     unlock_user(target_sigset, arg_sigset, 0);
8677                 } else {
8678                     sig.set = NULL;
8679                 }
8680             } else {
8681                 sig_ptr = NULL;
8682             }
8683 
8684             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8685                                           ts_ptr, sig_ptr));
8686 
8687             if (!is_error(ret)) {
8688                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8689                     return -TARGET_EFAULT;
8690                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8691                     return -TARGET_EFAULT;
8692                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8693                     return -TARGET_EFAULT;
8694 
8695                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8696                     return -TARGET_EFAULT;
8697             }
8698         }
8699         return ret;
8700 #endif
8701 #ifdef TARGET_NR_symlink
8702     case TARGET_NR_symlink:
8703         {
8704             void *p2;
8705             p = lock_user_string(arg1);
8706             p2 = lock_user_string(arg2);
8707             if (!p || !p2)
8708                 ret = -TARGET_EFAULT;
8709             else
8710                 ret = get_errno(symlink(p, p2));
8711             unlock_user(p2, arg2, 0);
8712             unlock_user(p, arg1, 0);
8713         }
8714         return ret;
8715 #endif
8716 #if defined(TARGET_NR_symlinkat)
8717     case TARGET_NR_symlinkat:
8718         {
8719             void *p2;
8720             p  = lock_user_string(arg1);
8721             p2 = lock_user_string(arg3);
8722             if (!p || !p2)
8723                 ret = -TARGET_EFAULT;
8724             else
8725                 ret = get_errno(symlinkat(p, arg2, p2));
8726             unlock_user(p2, arg3, 0);
8727             unlock_user(p, arg1, 0);
8728         }
8729         return ret;
8730 #endif
8731 #ifdef TARGET_NR_readlink
8732     case TARGET_NR_readlink:
8733         {
8734             void *p2;
8735             p = lock_user_string(arg1);
8736             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8737             if (!p || !p2) {
8738                 ret = -TARGET_EFAULT;
8739             } else if (!arg3) {
8740                 /* A zero-length buffer fails with EINVAL; handle it here,
                      * before the /proc/self/exe special case below. */
8741                 ret = -TARGET_EINVAL;
8742             } else if (is_proc_myself((const char *)p, "exe")) {
8743                 char real[PATH_MAX], *temp;
8744                 temp = realpath(exec_path, real);
8745                 /* Return value is # of bytes that we wrote to the buffer. */
8746                 if (temp == NULL) {
8747                     ret = get_errno(-1);
8748                 } else {
8749                     /* Don't worry about sign mismatch as earlier mapping
8750                      * logic would have thrown a bad address error. */
8751                     ret = MIN(strlen(real), arg3);
8752                     /* We cannot NUL terminate the string. */
8753                     memcpy(p2, real, ret);
8754                 }
8755             } else {
8756                 ret = get_errno(readlink(path(p), p2, arg3));
8757             }
8758             unlock_user(p2, arg2, ret);
8759             unlock_user(p, arg1, 0);
8760         }
8761         return ret;
8762 #endif
8763 #if defined(TARGET_NR_readlinkat)
8764     case TARGET_NR_readlinkat:
8765         {
8766             void *p2;
8767             p  = lock_user_string(arg2);
8768             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8769             if (!p || !p2) {
8770                 ret = -TARGET_EFAULT;
8771             } else if (is_proc_myself((const char *)p, "exe")) {
8772                 char real[PATH_MAX], *temp;
8773                 temp = realpath(exec_path, real);
8774                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8775                 snprintf((char *)p2, arg4, "%s", real);
8776             } else {
8777                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8778             }
8779             unlock_user(p2, arg3, ret);
8780             unlock_user(p, arg2, 0);
8781         }
8782         return ret;
8783 #endif
8784 #ifdef TARGET_NR_swapon
8785     case TARGET_NR_swapon:
8786         if (!(p = lock_user_string(arg1)))
8787             return -TARGET_EFAULT;
8788         ret = get_errno(swapon(p, arg2));
8789         unlock_user(p, arg1, 0);
8790         return ret;
8791 #endif
8792     case TARGET_NR_reboot:
8793         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8794            /* arg4 must be ignored in all other cases */
8795            p = lock_user_string(arg4);
8796            if (!p) {
8797                return -TARGET_EFAULT;
8798            }
8799            ret = get_errno(reboot(arg1, arg2, arg3, p));
8800            unlock_user(p, arg4, 0);
8801         } else {
8802            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8803         }
8804         return ret;
8805 #ifdef TARGET_NR_mmap
8806     case TARGET_NR_mmap:
8807 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8808     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8809     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8810     || defined(TARGET_S390X)
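         /* These ABIs pass a single guest pointer to a block of six mmap
          * arguments instead of passing them as separate syscall arguments.
          */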
8811         {
8812             abi_ulong *v;
8813             abi_ulong v1, v2, v3, v4, v5, v6;
8814             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8815                 return -TARGET_EFAULT;
8816             v1 = tswapal(v[0]);
8817             v2 = tswapal(v[1]);
8818             v3 = tswapal(v[2]);
8819             v4 = tswapal(v[3]);
8820             v5 = tswapal(v[4]);
8821             v6 = tswapal(v[5]);
8822             unlock_user(v, arg1, 0);
8823             ret = get_errno(target_mmap(v1, v2, v3,
8824                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8825                                         v5, v6));
8826         }
8827 #else
8828         ret = get_errno(target_mmap(arg1, arg2, arg3,
8829                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8830                                     arg5,
8831                                     arg6));
8832 #endif
8833         return ret;
8834 #endif
8835 #ifdef TARGET_NR_mmap2
8836     case TARGET_NR_mmap2:
8837 #ifndef MMAP_SHIFT
8838 #define MMAP_SHIFT 12
8839 #endif
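         /* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes
          * (4 KiB unless the target overrides it), so scale it back to a
          * byte offset for target_mmap().
          */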
8840         ret = target_mmap(arg1, arg2, arg3,
8841                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8842                           arg5, arg6 << MMAP_SHIFT);
8843         return get_errno(ret);
8844 #endif
8845     case TARGET_NR_munmap:
8846         return get_errno(target_munmap(arg1, arg2));
8847     case TARGET_NR_mprotect:
8848         {
8849             TaskState *ts = cpu->opaque;
8850             /* Special hack to detect libc making the stack executable.  */
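             /* Extend the change down to the recorded stack limit so the whole
              * guest stack mapping receives the new protection.
              */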
8851             if ((arg3 & PROT_GROWSDOWN)
8852                 && arg1 >= ts->info->stack_limit
8853                 && arg1 <= ts->info->start_stack) {
8854                 arg3 &= ~PROT_GROWSDOWN;
8855                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8856                 arg1 = ts->info->stack_limit;
8857             }
8858         }
8859         return get_errno(target_mprotect(arg1, arg2, arg3));
8860 #ifdef TARGET_NR_mremap
8861     case TARGET_NR_mremap:
8862         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8863 #endif
8864         /* ??? msync/mlock/munlock are broken for softmmu.  */
8865 #ifdef TARGET_NR_msync
8866     case TARGET_NR_msync:
8867         return get_errno(msync(g2h(arg1), arg2, arg3));
8868 #endif
8869 #ifdef TARGET_NR_mlock
8870     case TARGET_NR_mlock:
8871         return get_errno(mlock(g2h(arg1), arg2));
8872 #endif
8873 #ifdef TARGET_NR_munlock
8874     case TARGET_NR_munlock:
8875         return get_errno(munlock(g2h(arg1), arg2));
8876 #endif
8877 #ifdef TARGET_NR_mlockall
8878     case TARGET_NR_mlockall:
8879         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8880 #endif
8881 #ifdef TARGET_NR_munlockall
8882     case TARGET_NR_munlockall:
8883         return get_errno(munlockall());
8884 #endif
8885 #ifdef TARGET_NR_truncate
8886     case TARGET_NR_truncate:
8887         if (!(p = lock_user_string(arg1)))
8888             return -TARGET_EFAULT;
8889         ret = get_errno(truncate(p, arg2));
8890         unlock_user(p, arg1, 0);
8891         return ret;
8892 #endif
8893 #ifdef TARGET_NR_ftruncate
8894     case TARGET_NR_ftruncate:
8895         return get_errno(ftruncate(arg1, arg2));
8896 #endif
8897     case TARGET_NR_fchmod:
8898         return get_errno(fchmod(arg1, arg2));
8899 #if defined(TARGET_NR_fchmodat)
8900     case TARGET_NR_fchmodat:
8901         if (!(p = lock_user_string(arg2)))
8902             return -TARGET_EFAULT;
8903         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8904         unlock_user(p, arg2, 0);
8905         return ret;
8906 #endif
8907     case TARGET_NR_getpriority:
8908         /* Note that negative values are valid for getpriority, so we must
8909            differentiate based on errno settings.  */
8910         errno = 0;
8911         ret = getpriority(arg1, arg2);
8912         if (ret == -1 && errno != 0) {
8913             return -host_to_target_errno(errno);
8914         }
8915 #ifdef TARGET_ALPHA
8916         /* Return value is the unbiased priority.  Signal no error.  */
8917         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8918 #else
8919         /* Return value is a biased priority to avoid negative numbers.  */
8920         ret = 20 - ret;
8921 #endif
8922         return ret;
8923     case TARGET_NR_setpriority:
8924         return get_errno(setpriority(arg1, arg2, arg3));
8925 #ifdef TARGET_NR_statfs
8926     case TARGET_NR_statfs:
8927         if (!(p = lock_user_string(arg1))) {
8928             return -TARGET_EFAULT;
8929         }
8930         ret = get_errno(statfs(path(p), &stfs));
8931         unlock_user(p, arg1, 0);
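         /* Conversion tail shared with fstatfs: copy the host statfs into the
          * guest target_statfs layout.
          */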
8932     convert_statfs:
8933         if (!is_error(ret)) {
8934             struct target_statfs *target_stfs;
8935 
8936             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8937                 return -TARGET_EFAULT;
8938             __put_user(stfs.f_type, &target_stfs->f_type);
8939             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8940             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8941             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8942             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8943             __put_user(stfs.f_files, &target_stfs->f_files);
8944             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8945             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8946             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8947             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8948             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8949 #ifdef _STATFS_F_FLAGS
8950             __put_user(stfs.f_flags, &target_stfs->f_flags);
8951 #else
8952             __put_user(0, &target_stfs->f_flags);
8953 #endif
8954             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8955             unlock_user_struct(target_stfs, arg2, 1);
8956         }
8957         return ret;
8958 #endif
8959 #ifdef TARGET_NR_fstatfs
8960     case TARGET_NR_fstatfs:
8961         ret = get_errno(fstatfs(arg1, &stfs));
8962         goto convert_statfs;
8963 #endif
8964 #ifdef TARGET_NR_statfs64
8965     case TARGET_NR_statfs64:
8966         if (!(p = lock_user_string(arg1))) {
8967             return -TARGET_EFAULT;
8968         }
8969         ret = get_errno(statfs(path(p), &stfs));
8970         unlock_user(p, arg1, 0);
8971     convert_statfs64:
8972         if (!is_error(ret)) {
8973             struct target_statfs64 *target_stfs;
8974 
8975             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8976                 return -TARGET_EFAULT;
8977             __put_user(stfs.f_type, &target_stfs->f_type);
8978             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8979             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8980             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8981             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8982             __put_user(stfs.f_files, &target_stfs->f_files);
8983             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8984             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8985             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8986             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8987             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8988             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8989             unlock_user_struct(target_stfs, arg3, 1);
8990         }
8991         return ret;
8992     case TARGET_NR_fstatfs64:
8993         ret = get_errno(fstatfs(arg1, &stfs));
8994         goto convert_statfs64;
8995 #endif
8996 #ifdef TARGET_NR_socketcall
8997     case TARGET_NR_socketcall:
8998         return do_socketcall(arg1, arg2);
8999 #endif
9000 #ifdef TARGET_NR_accept
9001     case TARGET_NR_accept:
9002         return do_accept4(arg1, arg2, arg3, 0);
9003 #endif
9004 #ifdef TARGET_NR_accept4
9005     case TARGET_NR_accept4:
9006         return do_accept4(arg1, arg2, arg3, arg4);
9007 #endif
9008 #ifdef TARGET_NR_bind
9009     case TARGET_NR_bind:
9010         return do_bind(arg1, arg2, arg3);
9011 #endif
9012 #ifdef TARGET_NR_connect
9013     case TARGET_NR_connect:
9014         return do_connect(arg1, arg2, arg3);
9015 #endif
9016 #ifdef TARGET_NR_getpeername
9017     case TARGET_NR_getpeername:
9018         return do_getpeername(arg1, arg2, arg3);
9019 #endif
9020 #ifdef TARGET_NR_getsockname
9021     case TARGET_NR_getsockname:
9022         return do_getsockname(arg1, arg2, arg3);
9023 #endif
9024 #ifdef TARGET_NR_getsockopt
9025     case TARGET_NR_getsockopt:
9026         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9027 #endif
9028 #ifdef TARGET_NR_listen
9029     case TARGET_NR_listen:
9030         return get_errno(listen(arg1, arg2));
9031 #endif
9032 #ifdef TARGET_NR_recv
9033     case TARGET_NR_recv:
9034         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9035 #endif
9036 #ifdef TARGET_NR_recvfrom
9037     case TARGET_NR_recvfrom:
9038         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9039 #endif
9040 #ifdef TARGET_NR_recvmsg
9041     case TARGET_NR_recvmsg:
9042         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9043 #endif
9044 #ifdef TARGET_NR_send
9045     case TARGET_NR_send:
9046         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9047 #endif
9048 #ifdef TARGET_NR_sendmsg
9049     case TARGET_NR_sendmsg:
9050         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9051 #endif
9052 #ifdef TARGET_NR_sendmmsg
9053     case TARGET_NR_sendmmsg:
9054         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9055     case TARGET_NR_recvmmsg:
9056         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9057 #endif
9058 #ifdef TARGET_NR_sendto
9059     case TARGET_NR_sendto:
9060         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9061 #endif
9062 #ifdef TARGET_NR_shutdown
9063     case TARGET_NR_shutdown:
9064         return get_errno(shutdown(arg1, arg2));
9065 #endif
9066 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9067     case TARGET_NR_getrandom:
9068         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9069         if (!p) {
9070             return -TARGET_EFAULT;
9071         }
9072         ret = get_errno(getrandom(p, arg2, arg3));
9073         unlock_user(p, arg1, ret);
9074         return ret;
9075 #endif
9076 #ifdef TARGET_NR_socket
9077     case TARGET_NR_socket:
9078         return do_socket(arg1, arg2, arg3);
9079 #endif
9080 #ifdef TARGET_NR_socketpair
9081     case TARGET_NR_socketpair:
9082         return do_socketpair(arg1, arg2, arg3, arg4);
9083 #endif
9084 #ifdef TARGET_NR_setsockopt
9085     case TARGET_NR_setsockopt:
9086         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9087 #endif
9088 #if defined(TARGET_NR_syslog)
9089     case TARGET_NR_syslog:
9090         {
9091             int len = arg3;    /* buffer length used by the READ actions */
9092 
9093             switch (arg1) {
9094             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9095             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9096             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9097             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9098             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9099             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9100             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9101             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9102                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9103             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9104             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9105             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9106                 {
9107                     if (len < 0) {
9108                         return -TARGET_EINVAL;
9109                     }
9110                     if (len == 0) {
9111                         return 0;
9112                     }
9113                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9114                     if (!p) {
9115                         return -TARGET_EFAULT;
9116                     }
9117                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9118                     unlock_user(p, arg2, arg3);
9119                 }
9120                 return ret;
9121             default:
9122                 return -TARGET_EINVAL;
9123             }
9124         }
9125         break;
9126 #endif
9127     case TARGET_NR_setitimer:
9128         {
9129             struct itimerval value, ovalue, *pvalue;
9130 
9131             if (arg2) {
9132                 pvalue = &value;
9133                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9134                     || copy_from_user_timeval(&pvalue->it_value,
9135                                               arg2 + sizeof(struct target_timeval)))
9136                     return -TARGET_EFAULT;
9137             } else {
9138                 pvalue = NULL;
9139             }
9140             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9141             if (!is_error(ret) && arg3) {
9142                 if (copy_to_user_timeval(arg3,
9143                                          &ovalue.it_interval)
9144                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9145                                             &ovalue.it_value))
9146                     return -TARGET_EFAULT;
9147             }
9148         }
9149         return ret;
9150     case TARGET_NR_getitimer:
9151         {
9152             struct itimerval value;
9153 
9154             ret = get_errno(getitimer(arg1, &value));
9155             if (!is_error(ret) && arg2) {
9156                 if (copy_to_user_timeval(arg2,
9157                                          &value.it_interval)
9158                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9159                                             &value.it_value))
9160                     return -TARGET_EFAULT;
9161             }
9162         }
9163         return ret;
9164 #ifdef TARGET_NR_stat
9165     case TARGET_NR_stat:
9166         if (!(p = lock_user_string(arg1))) {
9167             return -TARGET_EFAULT;
9168         }
9169         ret = get_errno(stat(path(p), &st));
9170         unlock_user(p, arg1, 0);
9171         goto do_stat;
9172 #endif
9173 #ifdef TARGET_NR_lstat
9174     case TARGET_NR_lstat:
9175         if (!(p = lock_user_string(arg1))) {
9176             return -TARGET_EFAULT;
9177         }
9178         ret = get_errno(lstat(path(p), &st));
9179         unlock_user(p, arg1, 0);
9180         goto do_stat;
9181 #endif
9182 #ifdef TARGET_NR_fstat
9183     case TARGET_NR_fstat:
9184         {
9185             ret = get_errno(fstat(arg1, &st));
9186 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9187         do_stat:
9188 #endif
9189             if (!is_error(ret)) {
9190                 struct target_stat *target_st;
9191 
9192                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9193                     return -TARGET_EFAULT;
9194                 memset(target_st, 0, sizeof(*target_st));
9195                 __put_user(st.st_dev, &target_st->st_dev);
9196                 __put_user(st.st_ino, &target_st->st_ino);
9197                 __put_user(st.st_mode, &target_st->st_mode);
9198                 __put_user(st.st_uid, &target_st->st_uid);
9199                 __put_user(st.st_gid, &target_st->st_gid);
9200                 __put_user(st.st_nlink, &target_st->st_nlink);
9201                 __put_user(st.st_rdev, &target_st->st_rdev);
9202                 __put_user(st.st_size, &target_st->st_size);
9203                 __put_user(st.st_blksize, &target_st->st_blksize);
9204                 __put_user(st.st_blocks, &target_st->st_blocks);
9205                 __put_user(st.st_atime, &target_st->target_st_atime);
9206                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9207                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9208 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9209     defined(TARGET_STAT_HAVE_NSEC)
9210                 __put_user(st.st_atim.tv_nsec,
9211                            &target_st->target_st_atime_nsec);
9212                 __put_user(st.st_mtim.tv_nsec,
9213                            &target_st->target_st_mtime_nsec);
9214                 __put_user(st.st_ctim.tv_nsec,
9215                            &target_st->target_st_ctime_nsec);
9216 #endif
9217                 unlock_user_struct(target_st, arg2, 1);
9218             }
9219         }
9220         return ret;
9221 #endif
9222     case TARGET_NR_vhangup:
9223         return get_errno(vhangup());
9224 #ifdef TARGET_NR_syscall
9225     case TARGET_NR_syscall:
9226         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9227                           arg6, arg7, arg8, 0);
9228 #endif
9229     case TARGET_NR_wait4:
9230         {
9231             int status;
9232             abi_long status_ptr = arg2;
9233             struct rusage rusage, *rusage_ptr;
9234             abi_ulong target_rusage = arg4;
9235             abi_long rusage_err;
9236             if (target_rusage)
9237                 rusage_ptr = &rusage;
9238             else
9239                 rusage_ptr = NULL;
9240             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9241             if (!is_error(ret)) {
9242                 if (status_ptr && ret) {
9243                     status = host_to_target_waitstatus(status);
9244                     if (put_user_s32(status, status_ptr))
9245                         return -TARGET_EFAULT;
9246                 }
9247                 if (target_rusage) {
9248                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9249                     if (rusage_err) {
9250                         ret = rusage_err;
9251                     }
9252                 }
9253             }
9254         }
9255         return ret;
9256 #ifdef TARGET_NR_swapoff
9257     case TARGET_NR_swapoff:
9258         if (!(p = lock_user_string(arg1)))
9259             return -TARGET_EFAULT;
9260         ret = get_errno(swapoff(p));
9261         unlock_user(p, arg1, 0);
9262         return ret;
9263 #endif
9264     case TARGET_NR_sysinfo:
9265         {
9266             struct target_sysinfo *target_value;
9267             struct sysinfo value;
9268             ret = get_errno(sysinfo(&value));
9269             if (!is_error(ret) && arg1)
9270             {
9271                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9272                     return -TARGET_EFAULT;
9273                 __put_user(value.uptime, &target_value->uptime);
9274                 __put_user(value.loads[0], &target_value->loads[0]);
9275                 __put_user(value.loads[1], &target_value->loads[1]);
9276                 __put_user(value.loads[2], &target_value->loads[2]);
9277                 __put_user(value.totalram, &target_value->totalram);
9278                 __put_user(value.freeram, &target_value->freeram);
9279                 __put_user(value.sharedram, &target_value->sharedram);
9280                 __put_user(value.bufferram, &target_value->bufferram);
9281                 __put_user(value.totalswap, &target_value->totalswap);
9282                 __put_user(value.freeswap, &target_value->freeswap);
9283                 __put_user(value.procs, &target_value->procs);
9284                 __put_user(value.totalhigh, &target_value->totalhigh);
9285                 __put_user(value.freehigh, &target_value->freehigh);
9286                 __put_user(value.mem_unit, &target_value->mem_unit);
9287                 unlock_user_struct(target_value, arg1, 1);
9288             }
9289         }
9290         return ret;
9291 #ifdef TARGET_NR_ipc
9292     case TARGET_NR_ipc:
9293         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9294 #endif
9295 #ifdef TARGET_NR_semget
9296     case TARGET_NR_semget:
9297         return get_errno(semget(arg1, arg2, arg3));
9298 #endif
9299 #ifdef TARGET_NR_semop
9300     case TARGET_NR_semop:
9301         return do_semop(arg1, arg2, arg3);
9302 #endif
9303 #ifdef TARGET_NR_semctl
9304     case TARGET_NR_semctl:
9305         return do_semctl(arg1, arg2, arg3, arg4);
9306 #endif
9307 #ifdef TARGET_NR_msgctl
9308     case TARGET_NR_msgctl:
9309         return do_msgctl(arg1, arg2, arg3);
9310 #endif
9311 #ifdef TARGET_NR_msgget
9312     case TARGET_NR_msgget:
9313         return get_errno(msgget(arg1, arg2));
9314 #endif
9315 #ifdef TARGET_NR_msgrcv
9316     case TARGET_NR_msgrcv:
9317         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9318 #endif
9319 #ifdef TARGET_NR_msgsnd
9320     case TARGET_NR_msgsnd:
9321         return do_msgsnd(arg1, arg2, arg3, arg4);
9322 #endif
9323 #ifdef TARGET_NR_shmget
9324     case TARGET_NR_shmget:
9325         return get_errno(shmget(arg1, arg2, arg3));
9326 #endif
9327 #ifdef TARGET_NR_shmctl
9328     case TARGET_NR_shmctl:
9329         return do_shmctl(arg1, arg2, arg3);
9330 #endif
9331 #ifdef TARGET_NR_shmat
9332     case TARGET_NR_shmat:
9333         return do_shmat(cpu_env, arg1, arg2, arg3);
9334 #endif
9335 #ifdef TARGET_NR_shmdt
9336     case TARGET_NR_shmdt:
9337         return do_shmdt(arg1);
9338 #endif
9339     case TARGET_NR_fsync:
9340         return get_errno(fsync(arg1));
9341     case TARGET_NR_clone:
9342         /* Linux manages to have three different orderings for its
9343          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9344          * match the kernel's CONFIG_CLONE_* settings.
9345          * Microblaze is further special in that it uses a sixth
9346          * implicit argument to clone for the TLS pointer.
9347          */
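        /*
         * For orientation (argument mapping inferred from the do_fork()
         * calls below, which take (env, flags, newsp, parent_tidptr, tls,
         * child_tidptr)), the guest-visible orderings are roughly:
         *   default:     clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         */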
9348 #if defined(TARGET_MICROBLAZE)
9349         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9350 #elif defined(TARGET_CLONE_BACKWARDS)
9351         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9352 #elif defined(TARGET_CLONE_BACKWARDS2)
9353         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9354 #else
9355         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9356 #endif
9357         return ret;
9358 #ifdef __NR_exit_group
9359         /* new thread calls */
9360     case TARGET_NR_exit_group:
9361         preexit_cleanup(cpu_env, arg1);
9362         return get_errno(exit_group(arg1));
9363 #endif
9364     case TARGET_NR_setdomainname:
9365         if (!(p = lock_user_string(arg1)))
9366             return -TARGET_EFAULT;
9367         ret = get_errno(setdomainname(p, arg2));
9368         unlock_user(p, arg1, 0);
9369         return ret;
9370     case TARGET_NR_uname:
9371         /* no need to transcode because we use the Linux syscall */
9372         {
9373             struct new_utsname * buf;
9374 
9375             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9376                 return -TARGET_EFAULT;
9377             ret = get_errno(sys_uname(buf));
9378             if (!is_error(ret)) {
9379                 /* Overwrite the native machine name with whatever is being
9380                    emulated. */
9381                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9382                           sizeof(buf->machine));
9383                 /* Allow the user to override the reported release.  */
9384                 if (qemu_uname_release && *qemu_uname_release) {
9385                     g_strlcpy(buf->release, qemu_uname_release,
9386                               sizeof(buf->release));
9387                 }
9388             }
9389             unlock_user_struct(buf, arg1, 1);
9390         }
9391         return ret;
9392 #ifdef TARGET_I386
9393     case TARGET_NR_modify_ldt:
9394         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9395 #if !defined(TARGET_X86_64)
9396     case TARGET_NR_vm86:
9397         return do_vm86(cpu_env, arg1, arg2);
9398 #endif
9399 #endif
9400     case TARGET_NR_adjtimex:
9401         {
9402             struct timex host_buf;
9403 
9404             if (target_to_host_timex(&host_buf, arg1) != 0) {
9405                 return -TARGET_EFAULT;
9406             }
9407             ret = get_errno(adjtimex(&host_buf));
9408             if (!is_error(ret)) {
9409                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9410                     return -TARGET_EFAULT;
9411                 }
9412             }
9413         }
9414         return ret;
9415 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9416     case TARGET_NR_clock_adjtime:
9417         {
9418             struct timex htx, *phtx = &htx;
9419 
9420             if (target_to_host_timex(phtx, arg2) != 0) {
9421                 return -TARGET_EFAULT;
9422             }
9423             ret = get_errno(clock_adjtime(arg1, phtx));
9424             if (!is_error(ret) && phtx) {
9425                 if (host_to_target_timex(arg2, phtx) != 0) {
9426                     return -TARGET_EFAULT;
9427                 }
9428             }
9429         }
9430         return ret;
9431 #endif
9432     case TARGET_NR_getpgid:
9433         return get_errno(getpgid(arg1));
9434     case TARGET_NR_fchdir:
9435         return get_errno(fchdir(arg1));
9436     case TARGET_NR_personality:
9437         return get_errno(personality(arg1));
9438 #ifdef TARGET_NR__llseek /* Not on alpha */
9439     case TARGET_NR__llseek:
9440         {
9441             int64_t res;
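            /*
             * arg2:arg3 hold the high:low halves of the 64-bit offset,
             * arg4 points at the guest's 64-bit result slot, and arg5 is
             * the whence value.
             */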
9442 #if !defined(__NR_llseek)
9443             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9444             if (res == -1) {
9445                 ret = get_errno(res);
9446             } else {
9447                 ret = 0;
9448             }
9449 #else
9450             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9451 #endif
9452             if ((ret == 0) && put_user_s64(res, arg4)) {
9453                 return -TARGET_EFAULT;
9454             }
9455         }
9456         return ret;
9457 #endif
9458 #ifdef TARGET_NR_getdents
9459     case TARGET_NR_getdents:
9460 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9461 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9462         {
9463             struct target_dirent *target_dirp;
9464             struct linux_dirent *dirp;
9465             abi_long count = arg3;
9466 
9467             dirp = g_try_malloc(count);
9468             if (!dirp) {
9469                 return -TARGET_ENOMEM;
9470             }
9471 
9472             ret = get_errno(sys_getdents(arg1, dirp, count));
9473             if (!is_error(ret)) {
9474                 struct linux_dirent *de;
9475                 struct target_dirent *tde;
9476                 int len = ret;
9477                 int reclen, treclen;
9478                 int count1, tnamelen;
9479
9480                 count1 = 0;
9481                 de = dirp;
9482                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9483                     return -TARGET_EFAULT;
9484                 tde = target_dirp;
9485                 while (len > 0) {
9486                     reclen = de->d_reclen;
9487                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9488                     assert(tnamelen >= 0);
9489                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9490                     assert(count1 + treclen <= count);
9491                     tde->d_reclen = tswap16(treclen);
9492                     tde->d_ino = tswapal(de->d_ino);
9493                     tde->d_off = tswapal(de->d_off);
9494                     memcpy(tde->d_name, de->d_name, tnamelen);
9495                     de = (struct linux_dirent *)((char *)de + reclen);
9496                     len -= reclen;
9497                     tde = (struct target_dirent *)((char *)tde + treclen);
9498                     count1 += treclen;
9499                 }
9500                 ret = count1;
9501                 unlock_user(target_dirp, arg2, ret);
9502             }
9503             g_free(dirp);
9504         }
9505 #else
9506         {
9507             struct linux_dirent *dirp;
9508             abi_long count = arg3;
9509 
9510             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9511                 return -TARGET_EFAULT;
9512             ret = get_errno(sys_getdents(arg1, dirp, count));
9513             if (!is_error(ret)) {
9514                 struct linux_dirent *de;
9515                 int len = ret;
9516                 int reclen;
9517                 de = dirp;
9518                 while (len > 0) {
9519                     reclen = de->d_reclen;
9520                     if (reclen > len)
9521                         break;
9522                     de->d_reclen = tswap16(reclen);
9523                     tswapls(&de->d_ino);
9524                     tswapls(&de->d_off);
9525                     de = (struct linux_dirent *)((char *)de + reclen);
9526                     len -= reclen;
9527                 }
9528             }
9529             unlock_user(dirp, arg2, ret);
9530         }
9531 #endif
9532 #else
9533         /* Implement getdents in terms of getdents64 */
9534         {
9535             struct linux_dirent64 *dirp;
9536             abi_long count = arg3;
9537 
9538             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9539             if (!dirp) {
9540                 return -TARGET_EFAULT;
9541             }
9542             ret = get_errno(sys_getdents64(arg1, dirp, count));
9543             if (!is_error(ret)) {
9544                 /* Convert the dirent64 structs to target dirent.  We do this
9545                  * in-place, since we can guarantee that a target_dirent is no
9546                  * larger than a dirent64; however this means we have to be
9547                  * careful to read everything before writing in the new format.
9548                  */
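                /*
                 * Layout summary (assuming the usual definitions): dirent64
                 * records carry 64-bit d_ino/d_off plus an explicit d_type
                 * byte, while target_dirent uses abi_long d_ino/d_off and
                 * hides d_type in the final byte of the record, so each
                 * converted record can only shrink or keep its size.
                 */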
9549                 struct linux_dirent64 *de;
9550                 struct target_dirent *tde;
9551                 int len = ret;
9552                 int tlen = 0;
9553 
9554                 de = dirp;
9555                 tde = (struct target_dirent *)dirp;
9556                 while (len > 0) {
9557                     int namelen, treclen;
9558                     int reclen = de->d_reclen;
9559                     uint64_t ino = de->d_ino;
9560                     int64_t off = de->d_off;
9561                     uint8_t type = de->d_type;
9562 
9563                     namelen = strlen(de->d_name);
9564                     treclen = offsetof(struct target_dirent, d_name)
9565                         + namelen + 2;
9566                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9567 
9568                     memmove(tde->d_name, de->d_name, namelen + 1);
9569                     tde->d_ino = tswapal(ino);
9570                     tde->d_off = tswapal(off);
9571                     tde->d_reclen = tswap16(treclen);
9572                     /* The target_dirent d_type is stored in what was
9573                      * formerly a padding byte at the end of the structure:
9574                      */
9575                     *(((char *)tde) + treclen - 1) = type;
9576 
9577                     de = (struct linux_dirent64 *)((char *)de + reclen);
9578                     tde = (struct target_dirent *)((char *)tde + treclen);
9579                     len -= reclen;
9580                     tlen += treclen;
9581                 }
9582                 ret = tlen;
9583             }
9584             unlock_user(dirp, arg2, ret);
9585         }
9586 #endif
9587         return ret;
9588 #endif /* TARGET_NR_getdents */
9589 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9590     case TARGET_NR_getdents64:
9591         {
9592             struct linux_dirent64 *dirp;
9593             abi_long count = arg3;
9594             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9595                 return -TARGET_EFAULT;
9596             ret = get_errno(sys_getdents64(arg1, dirp, count));
9597             if (!is_error(ret)) {
9598                 struct linux_dirent64 *de;
9599                 int len = ret;
9600                 int reclen;
9601                 de = dirp;
9602                 while (len > 0) {
9603                     reclen = de->d_reclen;
9604                     if (reclen > len)
9605                         break;
9606                     de->d_reclen = tswap16(reclen);
9607                     tswap64s((uint64_t *)&de->d_ino);
9608                     tswap64s((uint64_t *)&de->d_off);
9609                     de = (struct linux_dirent64 *)((char *)de + reclen);
9610                     len -= reclen;
9611                 }
9612             }
9613             unlock_user(dirp, arg2, ret);
9614         }
9615         return ret;
9616 #endif /* TARGET_NR_getdents64 */
9617 #if defined(TARGET_NR__newselect)
9618     case TARGET_NR__newselect:
9619         return do_select(arg1, arg2, arg3, arg4, arg5);
9620 #endif
9621 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9622 # ifdef TARGET_NR_poll
9623     case TARGET_NR_poll:
9624 # endif
9625 # ifdef TARGET_NR_ppoll
9626     case TARGET_NR_ppoll:
9627 # endif
9628         {
9629             struct target_pollfd *target_pfd;
9630             unsigned int nfds = arg2;
9631             struct pollfd *pfd;
9632             unsigned int i;
9633 
9634             pfd = NULL;
9635             target_pfd = NULL;
9636             if (nfds) {
9637                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9638                     return -TARGET_EINVAL;
9639                 }
9640 
9641                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9642                                        sizeof(struct target_pollfd) * nfds, 1);
9643                 if (!target_pfd) {
9644                     return -TARGET_EFAULT;
9645                 }
9646 
9647                 pfd = alloca(sizeof(struct pollfd) * nfds);
9648                 for (i = 0; i < nfds; i++) {
9649                     pfd[i].fd = tswap32(target_pfd[i].fd);
9650                     pfd[i].events = tswap16(target_pfd[i].events);
9651                 }
9652             }
9653 
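            /*
             * Both poll and ppoll are serviced by the host ppoll below;
             * plain poll's millisecond timeout is first converted to a
             * timespec, and only ppoll carries a signal mask.
             */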
9654             switch (num) {
9655 # ifdef TARGET_NR_ppoll
9656             case TARGET_NR_ppoll:
9657             {
9658                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9659                 target_sigset_t *target_set;
9660                 sigset_t _set, *set = &_set;
9661 
9662                 if (arg3) {
9663                     if (target_to_host_timespec(timeout_ts, arg3)) {
9664                         unlock_user(target_pfd, arg1, 0);
9665                         return -TARGET_EFAULT;
9666                     }
9667                 } else {
9668                     timeout_ts = NULL;
9669                 }
9670 
9671                 if (arg4) {
9672                     if (arg5 != sizeof(target_sigset_t)) {
9673                         unlock_user(target_pfd, arg1, 0);
9674                         return -TARGET_EINVAL;
9675                     }
9676 
9677                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9678                     if (!target_set) {
9679                         unlock_user(target_pfd, arg1, 0);
9680                         return -TARGET_EFAULT;
9681                     }
9682                     target_to_host_sigset(set, target_set);
9683                 } else {
9684                     set = NULL;
9685                 }
9686 
9687                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9688                                            set, SIGSET_T_SIZE));
9689 
9690                 if (!is_error(ret) && arg3) {
9691                     host_to_target_timespec(arg3, timeout_ts);
9692                 }
9693                 if (arg4) {
9694                     unlock_user(target_set, arg4, 0);
9695                 }
9696                 break;
9697             }
9698 # endif
9699 # ifdef TARGET_NR_poll
9700             case TARGET_NR_poll:
9701             {
9702                 struct timespec ts, *pts;
9703 
9704                 if (arg3 >= 0) {
9705                     /* Convert ms to secs, ns */
9706                     ts.tv_sec = arg3 / 1000;
9707                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9708                     pts = &ts;
9709                 } else {
9710                     /* -ve poll() timeout means "infinite" */
9711                     /* A negative poll() timeout means "infinite" */
9712                 }
9713                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9714                 break;
9715             }
9716 # endif
9717             default:
9718                 g_assert_not_reached();
9719             }
9720 
9721             if (!is_error(ret)) {
9722                 for (i = 0; i < nfds; i++) {
9723                     target_pfd[i].revents = tswap16(pfd[i].revents);
9724                 }
9725             }
9726             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9727         }
9728         return ret;
9729 #endif
9730     case TARGET_NR_flock:
9731         /* NOTE: the flock constants seem to be the same on every
9732            Linux platform */
9733         return get_errno(safe_flock(arg1, arg2));
9734     case TARGET_NR_readv:
9735         {
9736             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9737             if (vec != NULL) {
9738                 ret = get_errno(safe_readv(arg1, vec, arg3));
9739                 unlock_iovec(vec, arg2, arg3, 1);
9740             } else {
9741                 ret = -host_to_target_errno(errno);
9742             }
9743         }
9744         return ret;
9745     case TARGET_NR_writev:
9746         {
9747             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9748             if (vec != NULL) {
9749                 ret = get_errno(safe_writev(arg1, vec, arg3));
9750                 unlock_iovec(vec, arg2, arg3, 0);
9751             } else {
9752                 ret = -host_to_target_errno(errno);
9753             }
9754         }
9755         return ret;
9756 #if defined(TARGET_NR_preadv)
9757     case TARGET_NR_preadv:
9758         {
9759             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9760             if (vec != NULL) {
9761                 unsigned long low, high;
9762 
9763                 target_to_host_low_high(arg4, arg5, &low, &high);
9764                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9765                 unlock_iovec(vec, arg2, arg3, 1);
9766             } else {
9767                 ret = -host_to_target_errno(errno);
9768             }
9769         }
9770         return ret;
9771 #endif
9772 #if defined(TARGET_NR_pwritev)
9773     case TARGET_NR_pwritev:
9774         {
9775             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9776             if (vec != NULL) {
9777                 unsigned long low, high;
9778 
9779                 target_to_host_low_high(arg4, arg5, &low, &high);
9780                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9781                 unlock_iovec(vec, arg2, arg3, 0);
9782             } else {
9783                 ret = -host_to_target_errno(errno);
9784             }
9785         }
9786         return ret;
9787 #endif
9788     case TARGET_NR_getsid:
9789         return get_errno(getsid(arg1));
9790 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9791     case TARGET_NR_fdatasync:
9792         return get_errno(fdatasync(arg1));
9793 #endif
9794 #ifdef TARGET_NR__sysctl
9795     case TARGET_NR__sysctl:
9796         /* We don't implement this, but ENOTDIR is always a safe
9797            return value. */
9798         return -TARGET_ENOTDIR;
9799 #endif
9800     case TARGET_NR_sched_getaffinity:
9801         {
9802             unsigned int mask_size;
9803             unsigned long *mask;
9804 
9805             /*
9806              * sched_getaffinity needs multiples of ulong, so need to take
9807              * care of mismatches between target ulong and host ulong sizes.
9808              */
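            /*
             * For example, a 32-bit guest passing arg2 == 4 on a 64-bit host
             * has mask_size rounded up to 8 below, which is why the "more
             * data than the caller's buffer" case further down can trigger.
             */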
9809             if (arg2 & (sizeof(abi_ulong) - 1)) {
9810                 return -TARGET_EINVAL;
9811             }
9812             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9813 
9814             mask = alloca(mask_size);
9815             memset(mask, 0, mask_size);
9816             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9817 
9818             if (!is_error(ret)) {
9819                 if (ret > arg2) {
9820                     /* More data returned than the caller's buffer will fit.
9821                      * This only happens if sizeof(abi_long) < sizeof(long)
9822                      * and the caller passed us a buffer holding an odd number
9823                      * of abi_longs. If the host kernel is actually using the
9824                      * extra 4 bytes then fail EINVAL; otherwise we can just
9825                      * ignore them and only copy the interesting part.
9826                      */
9827                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9828                     if (numcpus > arg2 * 8) {
9829                         return -TARGET_EINVAL;
9830                     }
9831                     ret = arg2;
9832                 }
9833 
9834                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9835                     return -TARGET_EFAULT;
9836                 }
9837             }
9838         }
9839         return ret;
9840     case TARGET_NR_sched_setaffinity:
9841         {
9842             unsigned int mask_size;
9843             unsigned long *mask;
9844 
9845             /*
9846              * sched_setaffinity needs multiples of ulong, so need to take
9847              * care of mismatches between target ulong and host ulong sizes.
9848              */
9849             if (arg2 & (sizeof(abi_ulong) - 1)) {
9850                 return -TARGET_EINVAL;
9851             }
9852             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9853             mask = alloca(mask_size);
9854 
9855             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9856             if (ret) {
9857                 return ret;
9858             }
9859 
9860             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9861         }
9862     case TARGET_NR_getcpu:
9863         {
9864             unsigned cpu, node;
9865             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9866                                        arg2 ? &node : NULL,
9867                                        NULL));
9868             if (is_error(ret)) {
9869                 return ret;
9870             }
9871             if (arg1 && put_user_u32(cpu, arg1)) {
9872                 return -TARGET_EFAULT;
9873             }
9874             if (arg2 && put_user_u32(node, arg2)) {
9875                 return -TARGET_EFAULT;
9876             }
9877         }
9878         return ret;
9879     case TARGET_NR_sched_setparam:
9880         {
9881             struct sched_param *target_schp;
9882             struct sched_param schp;
9883 
9884             if (arg2 == 0) {
9885                 return -TARGET_EINVAL;
9886             }
9887             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9888                 return -TARGET_EFAULT;
9889             schp.sched_priority = tswap32(target_schp->sched_priority);
9890             unlock_user_struct(target_schp, arg2, 0);
9891             return get_errno(sched_setparam(arg1, &schp));
9892         }
9893     case TARGET_NR_sched_getparam:
9894         {
9895             struct sched_param *target_schp;
9896             struct sched_param schp;
9897 
9898             if (arg2 == 0) {
9899                 return -TARGET_EINVAL;
9900             }
9901             ret = get_errno(sched_getparam(arg1, &schp));
9902             if (!is_error(ret)) {
9903                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9904                     return -TARGET_EFAULT;
9905                 target_schp->sched_priority = tswap32(schp.sched_priority);
9906                 unlock_user_struct(target_schp, arg2, 1);
9907             }
9908         }
9909         return ret;
9910     case TARGET_NR_sched_setscheduler:
9911         {
9912             struct sched_param *target_schp;
9913             struct sched_param schp;
9914             if (arg3 == 0) {
9915                 return -TARGET_EINVAL;
9916             }
9917             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9918                 return -TARGET_EFAULT;
9919             schp.sched_priority = tswap32(target_schp->sched_priority);
9920             unlock_user_struct(target_schp, arg3, 0);
9921             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9922         }
9923     case TARGET_NR_sched_getscheduler:
9924         return get_errno(sched_getscheduler(arg1));
9925     case TARGET_NR_sched_yield:
9926         return get_errno(sched_yield());
9927     case TARGET_NR_sched_get_priority_max:
9928         return get_errno(sched_get_priority_max(arg1));
9929     case TARGET_NR_sched_get_priority_min:
9930         return get_errno(sched_get_priority_min(arg1));
9931     case TARGET_NR_sched_rr_get_interval:
9932         {
9933             struct timespec ts;
9934             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9935             if (!is_error(ret)) {
9936                 ret = host_to_target_timespec(arg2, &ts);
9937             }
9938         }
9939         return ret;
9940     case TARGET_NR_nanosleep:
9941         {
9942             struct timespec req, rem;
9943             target_to_host_timespec(&req, arg1);
9944             ret = get_errno(safe_nanosleep(&req, &rem));
9945             if (is_error(ret) && arg2) {
9946                 host_to_target_timespec(arg2, &rem);
9947             }
9948         }
9949         return ret;
9950     case TARGET_NR_prctl:
9951         switch (arg1) {
9952         case PR_GET_PDEATHSIG:
9953         {
9954             int deathsig;
9955             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9956             if (!is_error(ret) && arg2
9957                 && put_user_ual(deathsig, arg2)) {
9958                 return -TARGET_EFAULT;
9959             }
9960             return ret;
9961         }
9962 #ifdef PR_GET_NAME
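        /* The task name buffer is a fixed 16 bytes (TASK_COMM_LEN). */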
9963         case PR_GET_NAME:
9964         {
9965             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9966             if (!name) {
9967                 return -TARGET_EFAULT;
9968             }
9969             ret = get_errno(prctl(arg1, (unsigned long)name,
9970                                   arg3, arg4, arg5));
9971             unlock_user(name, arg2, 16);
9972             return ret;
9973         }
9974         case PR_SET_NAME:
9975         {
9976             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9977             if (!name) {
9978                 return -TARGET_EFAULT;
9979             }
9980             ret = get_errno(prctl(arg1, (unsigned long)name,
9981                                   arg3, arg4, arg5));
9982             unlock_user(name, arg2, 0);
9983             return ret;
9984         }
9985 #endif
9986 #ifdef TARGET_MIPS
9987         case TARGET_PR_GET_FP_MODE:
9988         {
9989             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9990             ret = 0;
9991             if (env->CP0_Status & (1 << CP0St_FR)) {
9992                 ret |= TARGET_PR_FP_MODE_FR;
9993             }
9994             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9995                 ret |= TARGET_PR_FP_MODE_FRE;
9996             }
9997             return ret;
9998         }
9999         case TARGET_PR_SET_FP_MODE:
10000         {
10001             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10002             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10003             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10004             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10005             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10006 
10007             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10008                                             TARGET_PR_FP_MODE_FRE;
10009 
10010             /* If nothing to change, return right away, successfully.  */
10011             if (old_fr == new_fr && old_fre == new_fre) {
10012                 return 0;
10013             }
10014             /* Check the value is valid */
10015             if (arg2 & ~known_bits) {
10016                 return -TARGET_EOPNOTSUPP;
10017             }
10018             /* Setting FRE without FR is not supported.  */
10019             if (new_fre && !new_fr) {
10020                 return -TARGET_EOPNOTSUPP;
10021             }
10022             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10023                 /* FR1 is not supported */
10024                 return -TARGET_EOPNOTSUPP;
10025             }
10026             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10027                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10028                 /* cannot set FR=0 */
10029                 return -TARGET_EOPNOTSUPP;
10030             }
10031             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10032                 /* Cannot set FRE=1 */
10033                 return -TARGET_EOPNOTSUPP;
10034             }
10035 
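            /*
             * Repack the FPU register file so values survive the mode
             * switch: with FR=0 the odd-numbered single register holds what
             * is the upper word of the even-numbered double with FR=1.
             */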
10036             int i;
10037             fpr_t *fpr = env->active_fpu.fpr;
10038             for (i = 0; i < 32 ; i += 2) {
10039                 if (!old_fr && new_fr) {
10040                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10041                 } else if (old_fr && !new_fr) {
10042                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10043                 }
10044             }
10045 
10046             if (new_fr) {
10047                 env->CP0_Status |= (1 << CP0St_FR);
10048                 env->hflags |= MIPS_HFLAG_F64;
10049             } else {
10050                 env->CP0_Status &= ~(1 << CP0St_FR);
10051                 env->hflags &= ~MIPS_HFLAG_F64;
10052             }
10053             if (new_fre) {
10054                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10055                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10056                     env->hflags |= MIPS_HFLAG_FRE;
10057                 }
10058             } else {
10059                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10060                 env->hflags &= ~MIPS_HFLAG_FRE;
10061             }
10062 
10063             return 0;
10064         }
10065 #endif /* MIPS */
10066 #ifdef TARGET_AARCH64
10067         case TARGET_PR_SVE_SET_VL:
10068             /*
10069              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10070              * PR_SVE_VL_INHERIT.  Note the kernel definition
10071              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10072              * even though the current architectural maximum is VQ=16.
10073              */
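            /*
             * arg2 is the requested vector length VL in bytes; VQ = VL / 16
             * and ZCR_EL1.LEN holds VQ - 1, which is what gets written to
             * zcr_el[1] below.  The syscall returns the new VL in bytes.
             */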
10074             ret = -TARGET_EINVAL;
10075             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10076                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10077                 CPUARMState *env = cpu_env;
10078                 ARMCPU *cpu = env_archcpu(env);
10079                 uint32_t vq, old_vq;
10080 
10081                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10082                 vq = MAX(arg2 / 16, 1);
10083                 vq = MIN(vq, cpu->sve_max_vq);
10084 
10085                 if (vq < old_vq) {
10086                     aarch64_sve_narrow_vq(env, vq);
10087                 }
10088                 env->vfp.zcr_el[1] = vq - 1;
10089                 arm_rebuild_hflags(env);
10090                 ret = vq * 16;
10091             }
10092             return ret;
10093         case TARGET_PR_SVE_GET_VL:
10094             ret = -TARGET_EINVAL;
10095             {
10096                 ARMCPU *cpu = env_archcpu(cpu_env);
10097                 if (cpu_isar_feature(aa64_sve, cpu)) {
10098                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10099                 }
10100             }
10101             return ret;
10102         case TARGET_PR_PAC_RESET_KEYS:
10103             {
10104                 CPUARMState *env = cpu_env;
10105                 ARMCPU *cpu = env_archcpu(env);
10106 
10107                 if (arg3 || arg4 || arg5) {
10108                     return -TARGET_EINVAL;
10109                 }
10110                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10111                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10112                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10113                                TARGET_PR_PAC_APGAKEY);
10114                     int ret = 0;
10115                     Error *err = NULL;
10116 
10117                     if (arg2 == 0) {
10118                         arg2 = all;
10119                     } else if (arg2 & ~all) {
10120                         return -TARGET_EINVAL;
10121                     }
10122                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10123                         ret |= qemu_guest_getrandom(&env->keys.apia,
10124                                                     sizeof(ARMPACKey), &err);
10125                     }
10126                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10127                         ret |= qemu_guest_getrandom(&env->keys.apib,
10128                                                     sizeof(ARMPACKey), &err);
10129                     }
10130                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10131                         ret |= qemu_guest_getrandom(&env->keys.apda,
10132                                                     sizeof(ARMPACKey), &err);
10133                     }
10134                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10135                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10136                                                     sizeof(ARMPACKey), &err);
10137                     }
10138                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10139                         ret |= qemu_guest_getrandom(&env->keys.apga,
10140                                                     sizeof(ARMPACKey), &err);
10141                     }
10142                     if (ret != 0) {
10143                         /*
10144                          * Some unknown failure in the crypto.  The best
10145                          * we can do is log it and fail the syscall.
10146                          * The real syscall cannot fail this way.
10147                          */
10148                         qemu_log_mask(LOG_UNIMP,
10149                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10150                                       error_get_pretty(err));
10151                         error_free(err);
10152                         return -TARGET_EIO;
10153                     }
10154                     return 0;
10155                 }
10156             }
10157             return -TARGET_EINVAL;
10158 #endif /* AARCH64 */
10159         case PR_GET_SECCOMP:
10160         case PR_SET_SECCOMP:
10161             /* Disable seccomp to prevent the target disabling syscalls we
10162              * need. */
10163             return -TARGET_EINVAL;
10164         default:
10165             /* Most prctl options have no pointer arguments */
10166             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10167         }
10168         break;
10169 #ifdef TARGET_NR_arch_prctl
10170     case TARGET_NR_arch_prctl:
10171 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10172         return do_arch_prctl(cpu_env, arg1, arg2);
10173 #else
10174 #error unreachable
10175 #endif
10176 #endif
10177 #ifdef TARGET_NR_pread64
10178     case TARGET_NR_pread64:
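        /*
         * Some 32-bit ABIs pass 64-bit arguments in aligned register pairs,
         * inserting a pad slot; regpairs_aligned() detects that so the
         * offset halves are taken from the right registers.
         */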
10179         if (regpairs_aligned(cpu_env, num)) {
10180             arg4 = arg5;
10181             arg5 = arg6;
10182         }
10183         if (arg2 == 0 && arg3 == 0) {
10184             /* Special-case NULL buffer and zero length, which should succeed */
10185             p = 0;
10186         } else {
10187             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10188             if (!p) {
10189                 return -TARGET_EFAULT;
10190             }
10191         }
10192         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10193         unlock_user(p, arg2, ret);
10194         return ret;
10195     case TARGET_NR_pwrite64:
10196         if (regpairs_aligned(cpu_env, num)) {
10197             arg4 = arg5;
10198             arg5 = arg6;
10199         }
10200         if (arg2 == 0 && arg3 == 0) {
10201             /* Special-case NULL buffer and zero length, which should succeed */
10202             p = 0;
10203         } else {
10204             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10205             if (!p) {
10206                 return -TARGET_EFAULT;
10207             }
10208         }
10209         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10210         unlock_user(p, arg2, 0);
10211         return ret;
10212 #endif
10213     case TARGET_NR_getcwd:
10214         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10215             return -TARGET_EFAULT;
10216         ret = get_errno(sys_getcwd1(p, arg2));
10217         unlock_user(p, arg1, ret);
10218         return ret;
10219     case TARGET_NR_capget:
10220     case TARGET_NR_capset:
10221     {
10222         struct target_user_cap_header *target_header;
10223         struct target_user_cap_data *target_data = NULL;
10224         struct __user_cap_header_struct header;
10225         struct __user_cap_data_struct data[2];
10226         struct __user_cap_data_struct *dataptr = NULL;
10227         int i, target_datalen;
10228         int data_items = 1;
10229 
10230         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10231             return -TARGET_EFAULT;
10232         }
10233         header.version = tswap32(target_header->version);
10234         header.pid = tswap32(target_header->pid);
10235 
10236         if (header.version != _LINUX_CAPABILITY_VERSION) {
10237             /* Versions 2 and up take a pointer to two user_data structs */
10238             data_items = 2;
10239         }
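        /*
         * With two data items the 64-bit capability sets are split across
         * two 32-bit words each (low word first), matching the kernel's
         * _LINUX_CAPABILITY_VERSION_2/_3 layout.
         */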
10240 
10241         target_datalen = sizeof(*target_data) * data_items;
10242 
10243         if (arg2) {
10244             if (num == TARGET_NR_capget) {
10245                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10246             } else {
10247                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10248             }
10249             if (!target_data) {
10250                 unlock_user_struct(target_header, arg1, 0);
10251                 return -TARGET_EFAULT;
10252             }
10253 
10254             if (num == TARGET_NR_capset) {
10255                 for (i = 0; i < data_items; i++) {
10256                     data[i].effective = tswap32(target_data[i].effective);
10257                     data[i].permitted = tswap32(target_data[i].permitted);
10258                     data[i].inheritable = tswap32(target_data[i].inheritable);
10259                 }
10260             }
10261 
10262             dataptr = data;
10263         }
10264 
10265         if (num == TARGET_NR_capget) {
10266             ret = get_errno(capget(&header, dataptr));
10267         } else {
10268             ret = get_errno(capset(&header, dataptr));
10269         }
10270 
10271         /* The kernel always updates version for both capget and capset */
10272         target_header->version = tswap32(header.version);
10273         unlock_user_struct(target_header, arg1, 1);
10274 
10275         if (arg2) {
10276             if (num == TARGET_NR_capget) {
10277                 for (i = 0; i < data_items; i++) {
10278                     target_data[i].effective = tswap32(data[i].effective);
10279                     target_data[i].permitted = tswap32(data[i].permitted);
10280                     target_data[i].inheritable = tswap32(data[i].inheritable);
10281                 }
10282                 unlock_user(target_data, arg2, target_datalen);
10283             } else {
10284                 unlock_user(target_data, arg2, 0);
10285             }
10286         }
10287         return ret;
10288     }
10289     case TARGET_NR_sigaltstack:
10290         return do_sigaltstack(arg1, arg2,
10291                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10292 
10293 #ifdef CONFIG_SENDFILE
10294 #ifdef TARGET_NR_sendfile
10295     case TARGET_NR_sendfile:
10296     {
10297         off_t *offp = NULL;
10298         off_t off;
10299         if (arg3) {
10300             ret = get_user_sal(off, arg3);
10301             if (is_error(ret)) {
10302                 return ret;
10303             }
10304             offp = &off;
10305         }
10306         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10307         if (!is_error(ret) && arg3) {
10308             abi_long ret2 = put_user_sal(off, arg3);
10309             if (is_error(ret2)) {
10310                 ret = ret2;
10311             }
10312         }
10313         return ret;
10314     }
10315 #endif
10316 #ifdef TARGET_NR_sendfile64
10317     case TARGET_NR_sendfile64:
10318     {
10319         off_t *offp = NULL;
10320         off_t off;
10321         if (arg3) {
10322             ret = get_user_s64(off, arg3);
10323             if (is_error(ret)) {
10324                 return ret;
10325             }
10326             offp = &off;
10327         }
10328         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10329         if (!is_error(ret) && arg3) {
10330             abi_long ret2 = put_user_s64(off, arg3);
10331             if (is_error(ret2)) {
10332                 ret = ret2;
10333             }
10334         }
10335         return ret;
10336     }
10337 #endif
10338 #endif
10339 #ifdef TARGET_NR_vfork
10340     case TARGET_NR_vfork:
10341         return get_errno(do_fork(cpu_env,
10342                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10343                          0, 0, 0, 0));
10344 #endif
10345 #ifdef TARGET_NR_ugetrlimit
10346     case TARGET_NR_ugetrlimit:
10347     {
10348         struct rlimit rlim;
10349         int resource = target_to_host_resource(arg1);
10350         ret = get_errno(getrlimit(resource, &rlim));
10351         if (!is_error(ret)) {
10352             struct target_rlimit *target_rlim;
10353             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10354                 return -TARGET_EFAULT;
10355             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10356             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10357             unlock_user_struct(target_rlim, arg2, 1);
10358         }
10359         return ret;
10360     }
10361 #endif
10362 #ifdef TARGET_NR_truncate64
10363     case TARGET_NR_truncate64:
10364         if (!(p = lock_user_string(arg1)))
10365             return -TARGET_EFAULT;
10366         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10367         unlock_user(p, arg1, 0);
10368         return ret;
10369 #endif
10370 #ifdef TARGET_NR_ftruncate64
10371     case TARGET_NR_ftruncate64:
10372         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10373 #endif
10374 #ifdef TARGET_NR_stat64
10375     case TARGET_NR_stat64:
10376         if (!(p = lock_user_string(arg1))) {
10377             return -TARGET_EFAULT;
10378         }
10379         ret = get_errno(stat(path(p), &st));
10380         unlock_user(p, arg1, 0);
10381         if (!is_error(ret))
10382             ret = host_to_target_stat64(cpu_env, arg2, &st);
10383         return ret;
10384 #endif
10385 #ifdef TARGET_NR_lstat64
10386     case TARGET_NR_lstat64:
10387         if (!(p = lock_user_string(arg1))) {
10388             return -TARGET_EFAULT;
10389         }
10390         ret = get_errno(lstat(path(p), &st));
10391         unlock_user(p, arg1, 0);
10392         if (!is_error(ret))
10393             ret = host_to_target_stat64(cpu_env, arg2, &st);
10394         return ret;
10395 #endif
10396 #ifdef TARGET_NR_fstat64
10397     case TARGET_NR_fstat64:
10398         ret = get_errno(fstat(arg1, &st));
10399         if (!is_error(ret))
10400             ret = host_to_target_stat64(cpu_env, arg2, &st);
10401         return ret;
10402 #endif
10403 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10404 #ifdef TARGET_NR_fstatat64
10405     case TARGET_NR_fstatat64:
10406 #endif
10407 #ifdef TARGET_NR_newfstatat
10408     case TARGET_NR_newfstatat:
10409 #endif
10410         if (!(p = lock_user_string(arg2))) {
10411             return -TARGET_EFAULT;
10412         }
10413         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10414         unlock_user(p, arg2, 0);
10415         if (!is_error(ret))
10416             ret = host_to_target_stat64(cpu_env, arg3, &st);
10417         return ret;
10418 #endif
10419 #if defined(TARGET_NR_statx)
10420     case TARGET_NR_statx:
10421         {
10422             struct target_statx *target_stx;
10423             int dirfd = arg1;
10424             int flags = arg3;
10425 
10426             p = lock_user_string(arg2);
10427             if (p == NULL) {
10428                 return -TARGET_EFAULT;
10429             }
10430 #if defined(__NR_statx)
10431             {
10432                 /*
10433                  * It is assumed that struct statx is architecture independent.
10434                  */
10435                 struct target_statx host_stx;
10436                 int mask = arg4;
10437 
10438                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10439                 if (!is_error(ret)) {
10440                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10441                         unlock_user(p, arg2, 0);
10442                         return -TARGET_EFAULT;
10443                     }
10444                 }
10445 
10446                 if (ret != -TARGET_ENOSYS) {
10447                     unlock_user(p, arg2, 0);
10448                     return ret;
10449                 }
10450             }
10451 #endif
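            /*
             * Fallback when the host lacks statx: use fstatat() and fill in
             * only the statx fields that struct stat can provide.
             */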
10452             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10453             unlock_user(p, arg2, 0);
10454 
10455             if (!is_error(ret)) {
10456                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10457                     return -TARGET_EFAULT;
10458                 }
10459                 memset(target_stx, 0, sizeof(*target_stx));
10460                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10461                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10462                 __put_user(st.st_ino, &target_stx->stx_ino);
10463                 __put_user(st.st_mode, &target_stx->stx_mode);
10464                 __put_user(st.st_uid, &target_stx->stx_uid);
10465                 __put_user(st.st_gid, &target_stx->stx_gid);
10466                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10467                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10468                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10469                 __put_user(st.st_size, &target_stx->stx_size);
10470                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10471                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10472                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10473                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10474                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10475                 unlock_user_struct(target_stx, arg5, 1);
10476             }
10477         }
10478         return ret;
10479 #endif
10480 #ifdef TARGET_NR_lchown
10481     case TARGET_NR_lchown:
10482         if (!(p = lock_user_string(arg1)))
10483             return -TARGET_EFAULT;
10484         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10485         unlock_user(p, arg1, 0);
10486         return ret;
10487 #endif
10488 #ifdef TARGET_NR_getuid
10489     case TARGET_NR_getuid:
10490         return get_errno(high2lowuid(getuid()));
10491 #endif
10492 #ifdef TARGET_NR_getgid
10493     case TARGET_NR_getgid:
10494         return get_errno(high2lowgid(getgid()));
10495 #endif
10496 #ifdef TARGET_NR_geteuid
10497     case TARGET_NR_geteuid:
10498         return get_errno(high2lowuid(geteuid()));
10499 #endif
10500 #ifdef TARGET_NR_getegid
10501     case TARGET_NR_getegid:
10502         return get_errno(high2lowgid(getegid()));
10503 #endif
10504     case TARGET_NR_setreuid:
10505         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10506     case TARGET_NR_setregid:
10507         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10508     case TARGET_NR_getgroups:
10509         {
10510             int gidsetsize = arg1;
10511             target_id *target_grouplist;
10512             gid_t *grouplist;
10513             int i;
10514 
10515             grouplist = alloca(gidsetsize * sizeof(gid_t));
10516             ret = get_errno(getgroups(gidsetsize, grouplist));
10517             if (gidsetsize == 0)
10518                 return ret;
10519             if (!is_error(ret)) {
10520                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10521                 if (!target_grouplist)
10522                     return -TARGET_EFAULT;
10523                 for (i = 0; i < ret; i++)
10524                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10525                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10526             }
10527         }
10528         return ret;
10529     case TARGET_NR_setgroups:
10530         {
10531             int gidsetsize = arg1;
10532             target_id *target_grouplist;
10533             gid_t *grouplist = NULL;
10534             int i;
10535             if (gidsetsize) {
10536                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10537                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10538                 if (!target_grouplist) {
10539                     return -TARGET_EFAULT;
10540                 }
10541                 for (i = 0; i < gidsetsize; i++) {
10542                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10543                 }
10544                 unlock_user(target_grouplist, arg2, 0);
10545             }
10546             return get_errno(setgroups(gidsetsize, grouplist));
10547         }
10548     case TARGET_NR_fchown:
10549         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10550 #if defined(TARGET_NR_fchownat)
10551     case TARGET_NR_fchownat:
10552         if (!(p = lock_user_string(arg2)))
10553             return -TARGET_EFAULT;
10554         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10555                                  low2highgid(arg4), arg5));
10556         unlock_user(p, arg2, 0);
10557         return ret;
10558 #endif
10559 #ifdef TARGET_NR_setresuid
10560     case TARGET_NR_setresuid:
10561         return get_errno(sys_setresuid(low2highuid(arg1),
10562                                        low2highuid(arg2),
10563                                        low2highuid(arg3)));
10564 #endif
10565 #ifdef TARGET_NR_getresuid
10566     case TARGET_NR_getresuid:
10567         {
10568             uid_t ruid, euid, suid;
10569             ret = get_errno(getresuid(&ruid, &euid, &suid));
10570             if (!is_error(ret)) {
10571                 if (put_user_id(high2lowuid(ruid), arg1)
10572                     || put_user_id(high2lowuid(euid), arg2)
10573                     || put_user_id(high2lowuid(suid), arg3))
10574                     return -TARGET_EFAULT;
10575             }
10576         }
10577         return ret;
10578 #endif
10579 #ifdef TARGET_NR_setresgid
10580     case TARGET_NR_setresgid:
10581         return get_errno(sys_setresgid(low2highgid(arg1),
10582                                        low2highgid(arg2),
10583                                        low2highgid(arg3)));
10584 #endif
10585 #ifdef TARGET_NR_getresgid
10586     case TARGET_NR_getresgid:
10587         {
10588             gid_t rgid, egid, sgid;
10589             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10590             if (!is_error(ret)) {
10591                 if (put_user_id(high2lowgid(rgid), arg1)
10592                     || put_user_id(high2lowgid(egid), arg2)
10593                     || put_user_id(high2lowgid(sgid), arg3))
10594                     return -TARGET_EFAULT;
10595             }
10596         }
10597         return ret;
10598 #endif
10599 #ifdef TARGET_NR_chown
10600     case TARGET_NR_chown:
10601         if (!(p = lock_user_string(arg1)))
10602             return -TARGET_EFAULT;
10603         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10604         unlock_user(p, arg1, 0);
10605         return ret;
10606 #endif
10607     case TARGET_NR_setuid:
10608         return get_errno(sys_setuid(low2highuid(arg1)));
10609     case TARGET_NR_setgid:
10610         return get_errno(sys_setgid(low2highgid(arg1)));
10611     case TARGET_NR_setfsuid:
10612         return get_errno(setfsuid(arg1));
10613     case TARGET_NR_setfsgid:
10614         return get_errno(setfsgid(arg1));
10615 
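    /*
     * The *32 syscall variants below take full 32-bit UIDs/GIDs, so the
     * guest arguments are passed straight through to the host calls.
     */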
10616 #ifdef TARGET_NR_lchown32
10617     case TARGET_NR_lchown32:
10618         if (!(p = lock_user_string(arg1)))
10619             return -TARGET_EFAULT;
10620         ret = get_errno(lchown(p, arg2, arg3));
10621         unlock_user(p, arg1, 0);
10622         return ret;
10623 #endif
10624 #ifdef TARGET_NR_getuid32
10625     case TARGET_NR_getuid32:
10626         return get_errno(getuid());
10627 #endif
10628 
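   /*
    * The Alpha OSF get*id calls return two values: the handlers below put
    * the effective ID into register a4 and return the real ID as the
    * syscall result.
    */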
10629 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10630    /* Alpha specific */
10631     case TARGET_NR_getxuid:
10632          {
10633             uid_t euid;
10634             euid = geteuid();
10635             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10636          }
10637         return get_errno(getuid());
10638 #endif
10639 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10640    /* Alpha specific */
10641     case TARGET_NR_getxgid:
10642          {
10643             gid_t egid;
10644             egid = getegid();
10645             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10646          }
10647         return get_errno(getgid());
10648 #endif
10649 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10650     /* Alpha specific */
10651     case TARGET_NR_osf_getsysinfo:
10652         ret = -TARGET_EOPNOTSUPP;
10653         switch (arg1) {
10654           case TARGET_GSI_IEEE_FP_CONTROL:
10655             {
10656                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10657                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10658 
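                /*
                 * Merge the dynamic status bits kept in the hardware FPCR
                 * into the software control word before returning it to the
                 * guest.
                 */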
10659                 swcr &= ~SWCR_STATUS_MASK;
10660                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10661 
10662                 if (put_user_u64 (swcr, arg2))
10663                         return -TARGET_EFAULT;
10664                 ret = 0;
10665             }
10666             break;
10667 
10668           /* case GSI_IEEE_STATE_AT_SIGNAL:
10669              -- Not implemented in linux kernel.
10670              case GSI_UACPROC:
10671              -- Retrieves current unaligned access state; not much used.
10672              case GSI_PROC_TYPE:
10673              -- Retrieves implver information; surely not used.
10674              case GSI_GET_HWRPB:
10675              -- Grabs a copy of the HWRPB; surely not used.
10676           */
10677         }
10678         return ret;
10679 #endif
10680 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10681     /* Alpha specific */
10682     case TARGET_NR_osf_setsysinfo:
10683         ret = -TARGET_EOPNOTSUPP;
10684         switch (arg1) {
10685           case TARGET_SSI_IEEE_FP_CONTROL:
10686             {
10687                 uint64_t swcr, fpcr;
10688 
10689                 if (get_user_u64 (swcr, arg2)) {
10690                     return -TARGET_EFAULT;
10691                 }
10692 
10693                 /*
10694                  * The kernel calls swcr_update_status to update the
10695                  * status bits from the fpcr at every point that it
10696                  * could be queried.  Therefore, we store the status
10697                  * bits only in FPCR.
10698                  */
10699                 ((CPUAlphaState *)cpu_env)->swcr
10700                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10701 
10702                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10703                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10704                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10705                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10706                 ret = 0;
10707             }
10708             break;
10709 
10710           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10711             {
10712                 uint64_t exc, fpcr, fex;
10713 
10714                 if (get_user_u64(exc, arg2)) {
10715                     return -TARGET_EFAULT;
10716                 }
10717                 exc &= SWCR_STATUS_MASK;
10718                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10719 
10720                 /* Old exceptions are not signaled.  */
10721                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10722                 fex = exc & ~fex;
10723                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10724                 fex &= ((CPUArchState *)cpu_env)->swcr;
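                /*
                 * fex now holds only the newly raised exceptions that the
                 * guest has trap-enabled; those are reported with SIGFPE
                 * below.
                 */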
10725 
10726                 /* Update the hardware fpcr.  */
10727                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10728                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10729 
10730                 if (fex) {
10731                     int si_code = TARGET_FPE_FLTUNK;
10732                     target_siginfo_t info;
10733 
10734                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10735                         si_code = TARGET_FPE_FLTUND;
10736                     }
10737                     if (fex & SWCR_TRAP_ENABLE_INE) {
10738                         si_code = TARGET_FPE_FLTRES;
10739                     }
10740                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10741                         si_code = TARGET_FPE_FLTUND;
10742                     }
10743                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10744                         si_code = TARGET_FPE_FLTOVF;
10745                     }
10746                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10747                         si_code = TARGET_FPE_FLTDIV;
10748                     }
10749                     if (fex & SWCR_TRAP_ENABLE_INV) {
10750                         si_code = TARGET_FPE_FLTINV;
10751                     }
10752 
10753                     info.si_signo = SIGFPE;
10754                     info.si_errno = 0;
10755                     info.si_code = si_code;
10756                     info._sifields._sigfault._addr
10757                         = ((CPUArchState *)cpu_env)->pc;
10758                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10759                                  QEMU_SI_FAULT, &info);
10760                 }
10761                 ret = 0;
10762             }
10763             break;
10764 
10765           /* case SSI_NVPAIRS:
10766              -- Used with SSIN_UACPROC to enable unaligned accesses.
10767              case SSI_IEEE_STATE_AT_SIGNAL:
10768              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10769              -- Not implemented in linux kernel
10770           */
10771         }
10772         return ret;
10773 #endif
10774 #ifdef TARGET_NR_osf_sigprocmask
10775     /* Alpha specific.  */
10776     case TARGET_NR_osf_sigprocmask:
10777         {
10778             abi_ulong mask;
10779             int how;
10780             sigset_t set, oldset;
10781 
10782             switch(arg1) {
10783             case TARGET_SIG_BLOCK:
10784                 how = SIG_BLOCK;
10785                 break;
10786             case TARGET_SIG_UNBLOCK:
10787                 how = SIG_UNBLOCK;
10788                 break;
10789             case TARGET_SIG_SETMASK:
10790                 how = SIG_SETMASK;
10791                 break;
10792             default:
10793                 return -TARGET_EINVAL;
10794             }
10795             mask = arg2;
10796             target_to_host_old_sigset(&set, &mask);
10797             ret = do_sigprocmask(how, &set, &oldset);
10798             if (!ret) {
10799                 host_to_target_old_sigset(&mask, &oldset);
10800                 ret = mask;
10801             }
10802         }
10803         return ret;
10804 #endif
10805 
10806 #ifdef TARGET_NR_getgid32
10807     case TARGET_NR_getgid32:
10808         return get_errno(getgid());
10809 #endif
10810 #ifdef TARGET_NR_geteuid32
10811     case TARGET_NR_geteuid32:
10812         return get_errno(geteuid());
10813 #endif
10814 #ifdef TARGET_NR_getegid32
10815     case TARGET_NR_getegid32:
10816         return get_errno(getegid());
10817 #endif
10818 #ifdef TARGET_NR_setreuid32
10819     case TARGET_NR_setreuid32:
10820         return get_errno(setreuid(arg1, arg2));
10821 #endif
10822 #ifdef TARGET_NR_setregid32
10823     case TARGET_NR_setregid32:
10824         return get_errno(setregid(arg1, arg2));
10825 #endif
10826 #ifdef TARGET_NR_getgroups32
10827     case TARGET_NR_getgroups32:
10828         {
10829             int gidsetsize = arg1;
10830             uint32_t *target_grouplist;
10831             gid_t *grouplist;
10832             int i;
10833 
10834             grouplist = alloca(gidsetsize * sizeof(gid_t));
10835             ret = get_errno(getgroups(gidsetsize, grouplist));
10836             if (gidsetsize == 0)
10837                 return ret;
10838             if (!is_error(ret)) {
10839                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10840                 if (!target_grouplist) {
10841                     return -TARGET_EFAULT;
10842                 }
10843                 for (i = 0; i < ret; i++)
10844                     target_grouplist[i] = tswap32(grouplist[i]);
10845                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10846             }
10847         }
10848         return ret;
10849 #endif
10850 #ifdef TARGET_NR_setgroups32
10851     case TARGET_NR_setgroups32:
10852         {
10853             int gidsetsize = arg1;
10854             uint32_t *target_grouplist;
10855             gid_t *grouplist;
10856             int i;
10857 
10858             grouplist = alloca(gidsetsize * sizeof(gid_t));
10859             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10860             if (!target_grouplist) {
10861                 return -TARGET_EFAULT;
10862             }
10863             for (i = 0; i < gidsetsize; i++)
10864                 grouplist[i] = tswap32(target_grouplist[i]);
10865             unlock_user(target_grouplist, arg2, 0);
10866             return get_errno(setgroups(gidsetsize, grouplist));
10867         }
10868 #endif
10869 #ifdef TARGET_NR_fchown32
10870     case TARGET_NR_fchown32:
10871         return get_errno(fchown(arg1, arg2, arg3));
10872 #endif
10873 #ifdef TARGET_NR_setresuid32
10874     case TARGET_NR_setresuid32:
10875         return get_errno(sys_setresuid(arg1, arg2, arg3));
10876 #endif
10877 #ifdef TARGET_NR_getresuid32
10878     case TARGET_NR_getresuid32:
10879         {
10880             uid_t ruid, euid, suid;
10881             ret = get_errno(getresuid(&ruid, &euid, &suid));
10882             if (!is_error(ret)) {
10883                 if (put_user_u32(ruid, arg1)
10884                     || put_user_u32(euid, arg2)
10885                     || put_user_u32(suid, arg3))
10886                     return -TARGET_EFAULT;
10887             }
10888         }
10889         return ret;
10890 #endif
10891 #ifdef TARGET_NR_setresgid32
10892     case TARGET_NR_setresgid32:
10893         return get_errno(sys_setresgid(arg1, arg2, arg3));
10894 #endif
10895 #ifdef TARGET_NR_getresgid32
10896     case TARGET_NR_getresgid32:
10897         {
10898             gid_t rgid, egid, sgid;
10899             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10900             if (!is_error(ret)) {
10901                 if (put_user_u32(rgid, arg1)
10902                     || put_user_u32(egid, arg2)
10903                     || put_user_u32(sgid, arg3))
10904                     return -TARGET_EFAULT;
10905             }
10906         }
10907         return ret;
10908 #endif
10909 #ifdef TARGET_NR_chown32
10910     case TARGET_NR_chown32:
10911         if (!(p = lock_user_string(arg1)))
10912             return -TARGET_EFAULT;
10913         ret = get_errno(chown(p, arg2, arg3));
10914         unlock_user(p, arg1, 0);
10915         return ret;
10916 #endif
10917 #ifdef TARGET_NR_setuid32
10918     case TARGET_NR_setuid32:
10919         return get_errno(sys_setuid(arg1));
10920 #endif
10921 #ifdef TARGET_NR_setgid32
10922     case TARGET_NR_setgid32:
10923         return get_errno(sys_setgid(arg1));
10924 #endif
10925 #ifdef TARGET_NR_setfsuid32
10926     case TARGET_NR_setfsuid32:
10927         return get_errno(setfsuid(arg1));
10928 #endif
10929 #ifdef TARGET_NR_setfsgid32
10930     case TARGET_NR_setfsgid32:
10931         return get_errno(setfsgid(arg1));
10932 #endif
10933 #ifdef TARGET_NR_mincore
10934     case TARGET_NR_mincore:
10935         {
10936             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10937             if (!a) {
10938                 return -TARGET_ENOMEM;
10939             }
10940             p = lock_user_string(arg3);
10941             if (!p) {
10942                 ret = -TARGET_EFAULT;
10943             } else {
10944                 ret = get_errno(mincore(a, arg2, p));
10945                 unlock_user(p, arg3, ret);
10946             }
10947             unlock_user(a, arg1, 0);
10948         }
10949         return ret;
10950 #endif
10951 #ifdef TARGET_NR_arm_fadvise64_64
10952     case TARGET_NR_arm_fadvise64_64:
10953         /* arm_fadvise64_64 looks like fadvise64_64 but
10954          * with different argument order: fd, advice, offset, len
10955          * rather than the usual fd, offset, len, advice.
10956          * Note that offset and len are both 64-bit so appear as
10957          * pairs of 32-bit registers.
10958          */
10959         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10960                             target_offset64(arg5, arg6), arg2);
10961         return -host_to_target_errno(ret);
10962 #endif
10963 
10964 #if TARGET_ABI_BITS == 32
10965 
10966 #ifdef TARGET_NR_fadvise64_64
10967     case TARGET_NR_fadvise64_64:
10968 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10969         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10970         ret = arg2;
10971         arg2 = arg3;
10972         arg3 = arg4;
10973         arg4 = arg5;
10974         arg5 = arg6;
10975         arg6 = ret;
10976 #else
10977         /* 6 args: fd, offset (high, low), len (high, low), advice */
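        /*
         * regpairs_aligned() is true for ABIs that pass 64-bit values in
         * aligned register pairs; a padding register is then inserted and
         * every argument after the fd moves up by one slot.
         */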
10978         if (regpairs_aligned(cpu_env, num)) {
10979             /* offset is in (3,4), len in (5,6) and advice in 7 */
10980             arg2 = arg3;
10981             arg3 = arg4;
10982             arg4 = arg5;
10983             arg5 = arg6;
10984             arg6 = arg7;
10985         }
10986 #endif
10987         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10988                             target_offset64(arg4, arg5), arg6);
10989         return -host_to_target_errno(ret);
10990 #endif
10991 
10992 #ifdef TARGET_NR_fadvise64
10993     case TARGET_NR_fadvise64:
10994         /* 5 args: fd, offset (high, low), len, advice */
10995         if (regpairs_aligned(cpu_env, num)) {
10996             /* offset is in (3,4), len in 5 and advice in 6 */
10997             arg2 = arg3;
10998             arg3 = arg4;
10999             arg4 = arg5;
11000             arg5 = arg6;
11001         }
11002         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11003         return -host_to_target_errno(ret);
11004 #endif
11005 
11006 #else /* not a 32-bit ABI */
11007 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11008 #ifdef TARGET_NR_fadvise64_64
11009     case TARGET_NR_fadvise64_64:
11010 #endif
11011 #ifdef TARGET_NR_fadvise64
11012     case TARGET_NR_fadvise64:
11013 #endif
11014 #ifdef TARGET_S390X
11015         switch (arg4) {
11016         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11017         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11018         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11019         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11020         default: break;
11021         }
11022 #endif
11023         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11024 #endif
11025 #endif /* end of 64-bit ABI fadvise handling */
11026 
11027 #ifdef TARGET_NR_madvise
11028     case TARGET_NR_madvise:
11029         /* A straight passthrough may not be safe because qemu sometimes
11030            turns private file-backed mappings into anonymous mappings.
11031            This will break MADV_DONTNEED.
11032            This is a hint, so ignoring and returning success is ok.  */
11033         return 0;
11034 #endif
11035 #if TARGET_ABI_BITS == 32
11036     case TARGET_NR_fcntl64:
11037     {
11038         int cmd;
11039         struct flock64 fl;
11040         from_flock64_fn *copyfrom = copy_from_user_flock64;
11041         to_flock64_fn *copyto = copy_to_user_flock64;
11042 
11043 #ifdef TARGET_ARM
11044         if (!((CPUARMState *)cpu_env)->eabi) {
11045             copyfrom = copy_from_user_oabi_flock64;
11046             copyto = copy_to_user_oabi_flock64;
11047         }
11048 #endif
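        /*
         * F_GETLK64/F_SETLK64/F_SETLKW64 operate on a struct flock64 whose
         * layout differs between guest and host (and again for old-ABI Arm),
         * so the lock description is converted with the copyfrom/copyto
         * helpers selected above.
         */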
11049 
11050         cmd = target_to_host_fcntl_cmd(arg2);
11051         if (cmd == -TARGET_EINVAL) {
11052             return cmd;
11053         }
11054 
11055         switch(arg2) {
11056         case TARGET_F_GETLK64:
11057             ret = copyfrom(&fl, arg3);
11058             if (ret) {
11059                 break;
11060             }
11061             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11062             if (ret == 0) {
11063                 ret = copyto(arg3, &fl);
11064             }
11065             break;
11066 
11067         case TARGET_F_SETLK64:
11068         case TARGET_F_SETLKW64:
11069             ret = copyfrom(&fl, arg3);
11070             if (ret) {
11071                 break;
11072             }
11073             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11074             break;
11075         default:
11076             ret = do_fcntl(arg1, arg2, arg3);
11077             break;
11078         }
11079         return ret;
11080     }
11081 #endif
11082 #ifdef TARGET_NR_cacheflush
11083     case TARGET_NR_cacheflush:
11084         /* self-modifying code is handled automatically, so nothing needed */
11085         return 0;
11086 #endif
11087 #ifdef TARGET_NR_getpagesize
11088     case TARGET_NR_getpagesize:
11089         return TARGET_PAGE_SIZE;
11090 #endif
11091     case TARGET_NR_gettid:
11092         return get_errno(sys_gettid());
11093 #ifdef TARGET_NR_readahead
11094     case TARGET_NR_readahead:
11095 #if TARGET_ABI_BITS == 32
11096         if (regpairs_aligned(cpu_env, num)) {
11097             arg2 = arg3;
11098             arg3 = arg4;
11099             arg4 = arg5;
11100         }
11101         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11102 #else
11103         ret = get_errno(readahead(arg1, arg2, arg3));
11104 #endif
11105         return ret;
11106 #endif
11107 #ifdef CONFIG_ATTR
11108 #ifdef TARGET_NR_setxattr
11109     case TARGET_NR_listxattr:
11110     case TARGET_NR_llistxattr:
11111     {
11112         void *p, *b = 0;
11113         if (arg2) {
11114             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11115             if (!b) {
11116                 return -TARGET_EFAULT;
11117             }
11118         }
11119         p = lock_user_string(arg1);
11120         if (p) {
11121             if (num == TARGET_NR_listxattr) {
11122                 ret = get_errno(listxattr(p, b, arg3));
11123             } else {
11124                 ret = get_errno(llistxattr(p, b, arg3));
11125             }
11126         } else {
11127             ret = -TARGET_EFAULT;
11128         }
11129         unlock_user(p, arg1, 0);
11130         unlock_user(b, arg2, arg3);
11131         return ret;
11132     }
11133     case TARGET_NR_flistxattr:
11134     {
11135         void *b = 0;
11136         if (arg2) {
11137             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11138             if (!b) {
11139                 return -TARGET_EFAULT;
11140             }
11141         }
11142         ret = get_errno(flistxattr(arg1, b, arg3));
11143         unlock_user(b, arg2, arg3);
11144         return ret;
11145     }
11146     case TARGET_NR_setxattr:
11147     case TARGET_NR_lsetxattr:
11148         {
11149             void *p, *n, *v = 0;
11150             if (arg3) {
11151                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11152                 if (!v) {
11153                     return -TARGET_EFAULT;
11154                 }
11155             }
11156             p = lock_user_string(arg1);
11157             n = lock_user_string(arg2);
11158             if (p && n) {
11159                 if (num == TARGET_NR_setxattr) {
11160                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11161                 } else {
11162                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11163                 }
11164             } else {
11165                 ret = -TARGET_EFAULT;
11166             }
11167             unlock_user(p, arg1, 0);
11168             unlock_user(n, arg2, 0);
11169             unlock_user(v, arg3, 0);
11170         }
11171         return ret;
11172     case TARGET_NR_fsetxattr:
11173         {
11174             void *n, *v = 0;
11175             if (arg3) {
11176                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11177                 if (!v) {
11178                     return -TARGET_EFAULT;
11179                 }
11180             }
11181             n = lock_user_string(arg2);
11182             if (n) {
11183                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11184             } else {
11185                 ret = -TARGET_EFAULT;
11186             }
11187             unlock_user(n, arg2, 0);
11188             unlock_user(v, arg3, 0);
11189         }
11190         return ret;
11191     case TARGET_NR_getxattr:
11192     case TARGET_NR_lgetxattr:
11193         {
11194             void *p, *n, *v = 0;
11195             if (arg3) {
11196                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11197                 if (!v) {
11198                     return -TARGET_EFAULT;
11199                 }
11200             }
11201             p = lock_user_string(arg1);
11202             n = lock_user_string(arg2);
11203             if (p && n) {
11204                 if (num == TARGET_NR_getxattr) {
11205                     ret = get_errno(getxattr(p, n, v, arg4));
11206                 } else {
11207                     ret = get_errno(lgetxattr(p, n, v, arg4));
11208                 }
11209             } else {
11210                 ret = -TARGET_EFAULT;
11211             }
11212             unlock_user(p, arg1, 0);
11213             unlock_user(n, arg2, 0);
11214             unlock_user(v, arg3, arg4);
11215         }
11216         return ret;
11217     case TARGET_NR_fgetxattr:
11218         {
11219             void *n, *v = 0;
11220             if (arg3) {
11221                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11222                 if (!v) {
11223                     return -TARGET_EFAULT;
11224                 }
11225             }
11226             n = lock_user_string(arg2);
11227             if (n) {
11228                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11229             } else {
11230                 ret = -TARGET_EFAULT;
11231             }
11232             unlock_user(n, arg2, 0);
11233             unlock_user(v, arg3, arg4);
11234         }
11235         return ret;
11236     case TARGET_NR_removexattr:
11237     case TARGET_NR_lremovexattr:
11238         {
11239             void *p, *n;
11240             p = lock_user_string(arg1);
11241             n = lock_user_string(arg2);
11242             if (p && n) {
11243                 if (num == TARGET_NR_removexattr) {
11244                     ret = get_errno(removexattr(p, n));
11245                 } else {
11246                     ret = get_errno(lremovexattr(p, n));
11247                 }
11248             } else {
11249                 ret = -TARGET_EFAULT;
11250             }
11251             unlock_user(p, arg1, 0);
11252             unlock_user(n, arg2, 0);
11253         }
11254         return ret;
11255     case TARGET_NR_fremovexattr:
11256         {
11257             void *n;
11258             n = lock_user_string(arg2);
11259             if (n) {
11260                 ret = get_errno(fremovexattr(arg1, n));
11261             } else {
11262                 ret = -TARGET_EFAULT;
11263             }
11264             unlock_user(n, arg2, 0);
11265         }
11266         return ret;
11267 #endif
11268 #endif /* CONFIG_ATTR */
11269 #ifdef TARGET_NR_set_thread_area
11270     case TARGET_NR_set_thread_area:
11271 #if defined(TARGET_MIPS)
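      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */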
11272       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11273       return 0;
11274 #elif defined(TARGET_CRIS)
11275       if (arg1 & 0xff)
11276           ret = -TARGET_EINVAL;
11277       else {
11278           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11279           ret = 0;
11280       }
11281       return ret;
11282 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11283       return do_set_thread_area(cpu_env, arg1);
11284 #elif defined(TARGET_M68K)
11285       {
11286           TaskState *ts = cpu->opaque;
11287           ts->tp_value = arg1;
11288           return 0;
11289       }
11290 #else
11291       return -TARGET_ENOSYS;
11292 #endif
11293 #endif
11294 #ifdef TARGET_NR_get_thread_area
11295     case TARGET_NR_get_thread_area:
11296 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11297         return do_get_thread_area(cpu_env, arg1);
11298 #elif defined(TARGET_M68K)
11299         {
11300             TaskState *ts = cpu->opaque;
11301             return ts->tp_value;
11302         }
11303 #else
11304         return -TARGET_ENOSYS;
11305 #endif
11306 #endif
11307 #ifdef TARGET_NR_getdomainname
11308     case TARGET_NR_getdomainname:
11309         return -TARGET_ENOSYS;
11310 #endif
11311 
11312 #ifdef TARGET_NR_clock_settime
11313     case TARGET_NR_clock_settime:
11314     {
11315         struct timespec ts;
11316 
11317         ret = target_to_host_timespec(&ts, arg2);
11318         if (!is_error(ret)) {
11319             ret = get_errno(clock_settime(arg1, &ts));
11320         }
11321         return ret;
11322     }
11323 #endif
11324 #ifdef TARGET_NR_clock_gettime
11325     case TARGET_NR_clock_gettime:
11326     {
11327         struct timespec ts;
11328         ret = get_errno(clock_gettime(arg1, &ts));
11329         if (!is_error(ret)) {
11330             ret = host_to_target_timespec(arg2, &ts);
11331         }
11332         return ret;
11333     }
11334 #endif
11335 #ifdef TARGET_NR_clock_getres
11336     case TARGET_NR_clock_getres:
11337     {
11338         struct timespec ts;
11339         ret = get_errno(clock_getres(arg1, &ts));
11340         if (!is_error(ret)) {
11341             host_to_target_timespec(arg2, &ts);
11342         }
11343         return ret;
11344     }
11345 #endif
11346 #ifdef TARGET_NR_clock_nanosleep
11347     case TARGET_NR_clock_nanosleep:
11348     {
11349         struct timespec ts;
11350         target_to_host_timespec(&ts, arg3);
11351         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11352                                              &ts, arg4 ? &ts : NULL));
11353         if (arg4)
11354             host_to_target_timespec(arg4, &ts);
11355 
11356 #if defined(TARGET_PPC)
11357         /* clock_nanosleep is odd in that it returns positive errno values.
11358          * On PPC, CR0 bit 3 should be set in such a situation. */
11359         if (ret && ret != -TARGET_ERESTARTSYS) {
11360             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11361         }
11362 #endif
11363         return ret;
11364     }
11365 #endif
11366 
11367 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11368     case TARGET_NR_set_tid_address:
11369         return get_errno(set_tid_address((int *)g2h(arg1)));
11370 #endif
11371 
11372     case TARGET_NR_tkill:
11373         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11374 
11375     case TARGET_NR_tgkill:
11376         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11377                          target_to_host_signal(arg3)));
11378 
11379 #ifdef TARGET_NR_set_robust_list
11380     case TARGET_NR_set_robust_list:
11381     case TARGET_NR_get_robust_list:
11382         /* The ABI for supporting robust futexes has userspace pass
11383          * the kernel a pointer to a linked list which is updated by
11384          * userspace after the syscall; the list is walked by the kernel
11385          * when the thread exits. Since the linked list in QEMU guest
11386          * memory isn't a valid linked list for the host and we have
11387          * no way to reliably intercept the thread-death event, we can't
11388          * support these. Silently return ENOSYS so that guest userspace
11389          * falls back to a non-robust futex implementation (which should
11390          * be OK except in the corner case of the guest crashing while
11391          * holding a mutex that is shared with another process via
11392          * shared memory).
11393          */
11394         return -TARGET_ENOSYS;
11395 #endif
11396 
11397 #if defined(TARGET_NR_utimensat)
11398     case TARGET_NR_utimensat:
11399         {
11400             struct timespec *tsp, ts[2];
11401             if (!arg3) {
11402                 tsp = NULL;
11403             } else {
11404                 target_to_host_timespec(ts, arg3);
11405                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11406                 tsp = ts;
11407             }
11408             if (!arg2)
11409                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11410             else {
11411                 if (!(p = lock_user_string(arg2))) {
11412                     return -TARGET_EFAULT;
11413                 }
11414                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11415                 unlock_user(p, arg2, 0);
11416             }
11417         }
11418         return ret;
11419 #endif
11420     case TARGET_NR_futex:
11421         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11422 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11423     case TARGET_NR_inotify_init:
11424         ret = get_errno(sys_inotify_init());
11425         if (ret >= 0) {
11426             fd_trans_register(ret, &target_inotify_trans);
11427         }
11428         return ret;
11429 #endif
11430 #ifdef CONFIG_INOTIFY1
11431 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11432     case TARGET_NR_inotify_init1:
11433         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11434                                           fcntl_flags_tbl)));
11435         if (ret >= 0) {
11436             fd_trans_register(ret, &target_inotify_trans);
11437         }
11438         return ret;
11439 #endif
11440 #endif
11441 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11442     case TARGET_NR_inotify_add_watch:
11443         p = lock_user_string(arg2);
11444         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11445         unlock_user(p, arg2, 0);
11446         return ret;
11447 #endif
11448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11449     case TARGET_NR_inotify_rm_watch:
11450         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11451 #endif
11452 
11453 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11454     case TARGET_NR_mq_open:
11455         {
11456             struct mq_attr posix_mq_attr;
11457             struct mq_attr *pposix_mq_attr;
11458             int host_flags;
11459 
11460             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11461             pposix_mq_attr = NULL;
11462             if (arg4) {
11463                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11464                     return -TARGET_EFAULT;
11465                 }
11466                 pposix_mq_attr = &posix_mq_attr;
11467             }
11468             p = lock_user_string(arg1 - 1);
11469             if (!p) {
11470                 return -TARGET_EFAULT;
11471             }
11472             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11473             unlock_user (p, arg1, 0);
11474         }
11475         return ret;
11476 
11477     case TARGET_NR_mq_unlink:
11478         p = lock_user_string(arg1 - 1);
11479         if (!p) {
11480             return -TARGET_EFAULT;
11481         }
11482         ret = get_errno(mq_unlink(p));
11483         unlock_user (p, arg1, 0);
11484         return ret;
11485 
11486     case TARGET_NR_mq_timedsend:
11487         {
11488             struct timespec ts;
11489 
11490             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11491             if (arg5 != 0) {
11492                 target_to_host_timespec(&ts, arg5);
11493                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11494                 host_to_target_timespec(arg5, &ts);
11495             } else {
11496                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11497             }
11498             unlock_user (p, arg2, arg3);
11499         }
11500         return ret;
11501 
11502     case TARGET_NR_mq_timedreceive:
11503         {
11504             struct timespec ts;
11505             unsigned int prio;
11506 
11507             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11508             if (arg5 != 0) {
11509                 target_to_host_timespec(&ts, arg5);
11510                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11511                                                      &prio, &ts));
11512                 host_to_target_timespec(arg5, &ts);
11513             } else {
11514                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11515                                                      &prio, NULL));
11516             }
11517             unlock_user (p, arg2, arg3);
11518             if (arg4 != 0)
11519                 put_user_u32(prio, arg4);
11520         }
11521         return ret;
11522 
11523     /* Not implemented for now... */
11524 /*     case TARGET_NR_mq_notify: */
11525 /*         break; */
11526 
11527     case TARGET_NR_mq_getsetattr:
11528         {
11529             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11530             ret = 0;
11531             if (arg2 != 0) {
11532                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11533                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11534                                            &posix_mq_attr_out));
11535             } else if (arg3 != 0) {
11536                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11537             }
11538             if (ret == 0 && arg3 != 0) {
11539                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11540             }
11541         }
11542         return ret;
11543 #endif
11544 
11545 #ifdef CONFIG_SPLICE
11546 #ifdef TARGET_NR_tee
11547     case TARGET_NR_tee:
11548         {
11549             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11550         }
11551         return ret;
11552 #endif
11553 #ifdef TARGET_NR_splice
11554     case TARGET_NR_splice:
11555         {
11556             loff_t loff_in, loff_out;
11557             loff_t *ploff_in = NULL, *ploff_out = NULL;
11558             if (arg2) {
11559                 if (get_user_u64(loff_in, arg2)) {
11560                     return -TARGET_EFAULT;
11561                 }
11562                 ploff_in = &loff_in;
11563             }
11564             if (arg4) {
11565                 if (get_user_u64(loff_out, arg4)) {
11566                     return -TARGET_EFAULT;
11567                 }
11568                 ploff_out = &loff_out;
11569             }
11570             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11571             if (arg2) {
11572                 if (put_user_u64(loff_in, arg2)) {
11573                     return -TARGET_EFAULT;
11574                 }
11575             }
11576             if (arg4) {
11577                 if (put_user_u64(loff_out, arg4)) {
11578                     return -TARGET_EFAULT;
11579                 }
11580             }
11581         }
11582         return ret;
11583 #endif
11584 #ifdef TARGET_NR_vmsplice
11585     case TARGET_NR_vmsplice:
11586         {
11587             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11588             if (vec != NULL) {
11589                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11590                 unlock_iovec(vec, arg2, arg3, 0);
11591             } else {
11592                 ret = -host_to_target_errno(errno);
11593             }
11594         }
11595         return ret;
11596 #endif
11597 #endif /* CONFIG_SPLICE */
11598 #ifdef CONFIG_EVENTFD
11599 #if defined(TARGET_NR_eventfd)
11600     case TARGET_NR_eventfd:
11601         ret = get_errno(eventfd(arg1, 0));
11602         if (ret >= 0) {
11603             fd_trans_register(ret, &target_eventfd_trans);
11604         }
11605         return ret;
11606 #endif
11607 #if defined(TARGET_NR_eventfd2)
11608     case TARGET_NR_eventfd2:
11609     {
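        /*
         * eventfd2 flags share the open() flag namespace, so translate the
         * target's O_NONBLOCK/O_CLOEXEC bits to their host values by hand.
         */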
11610         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11611         if (arg2 & TARGET_O_NONBLOCK) {
11612             host_flags |= O_NONBLOCK;
11613         }
11614         if (arg2 & TARGET_O_CLOEXEC) {
11615             host_flags |= O_CLOEXEC;
11616         }
11617         ret = get_errno(eventfd(arg1, host_flags));
11618         if (ret >= 0) {
11619             fd_trans_register(ret, &target_eventfd_trans);
11620         }
11621         return ret;
11622     }
11623 #endif
11624 #endif /* CONFIG_EVENTFD  */
11625 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11626     case TARGET_NR_fallocate:
11627 #if TARGET_ABI_BITS == 32
11628         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11629                                   target_offset64(arg5, arg6)));
11630 #else
11631         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11632 #endif
11633         return ret;
11634 #endif
11635 #if defined(CONFIG_SYNC_FILE_RANGE)
11636 #if defined(TARGET_NR_sync_file_range)
11637     case TARGET_NR_sync_file_range:
11638 #if TARGET_ABI_BITS == 32
11639 #if defined(TARGET_MIPS)
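        /* On 32-bit MIPS a padding argument follows the fd, so offset/len
           arrive in arg3..arg6 and the flags in arg7. */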
11640         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11641                                         target_offset64(arg5, arg6), arg7));
11642 #else
11643         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11644                                         target_offset64(arg4, arg5), arg6));
11645 #endif /* !TARGET_MIPS */
11646 #else
11647         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11648 #endif
11649         return ret;
11650 #endif
11651 #if defined(TARGET_NR_sync_file_range2)
11652     case TARGET_NR_sync_file_range2:
11653         /* This is like sync_file_range but the arguments are reordered */
11654 #if TARGET_ABI_BITS == 32
11655         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11656                                         target_offset64(arg5, arg6), arg2));
11657 #else
11658         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11659 #endif
11660         return ret;
11661 #endif
11662 #endif
11663 #if defined(TARGET_NR_signalfd4)
11664     case TARGET_NR_signalfd4:
11665         return do_signalfd4(arg1, arg2, arg4);
11666 #endif
11667 #if defined(TARGET_NR_signalfd)
11668     case TARGET_NR_signalfd:
11669         return do_signalfd4(arg1, arg2, 0);
11670 #endif
11671 #if defined(CONFIG_EPOLL)
11672 #if defined(TARGET_NR_epoll_create)
11673     case TARGET_NR_epoll_create:
11674         return get_errno(epoll_create(arg1));
11675 #endif
11676 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11677     case TARGET_NR_epoll_create1:
11678         return get_errno(epoll_create1(arg1));
11679 #endif
11680 #if defined(TARGET_NR_epoll_ctl)
11681     case TARGET_NR_epoll_ctl:
11682     {
11683         struct epoll_event ep;
11684         struct epoll_event *epp = 0;
11685         if (arg4) {
11686             struct target_epoll_event *target_ep;
11687             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11688                 return -TARGET_EFAULT;
11689             }
11690             ep.events = tswap32(target_ep->events);
11691             /* The epoll_data_t union is just opaque data to the kernel,
11692              * so we transfer all 64 bits across and need not worry what
11693              * actual data type it is.
11694              */
11695             ep.data.u64 = tswap64(target_ep->data.u64);
11696             unlock_user_struct(target_ep, arg4, 0);
11697             epp = &ep;
11698         }
11699         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11700     }
11701 #endif
11702 
11703 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11704 #if defined(TARGET_NR_epoll_wait)
11705     case TARGET_NR_epoll_wait:
11706 #endif
11707 #if defined(TARGET_NR_epoll_pwait)
11708     case TARGET_NR_epoll_pwait:
11709 #endif
11710     {
11711         struct target_epoll_event *target_ep;
11712         struct epoll_event *ep;
11713         int epfd = arg1;
11714         int maxevents = arg3;
11715         int timeout = arg4;
11716 
11717         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11718             return -TARGET_EINVAL;
11719         }
11720 
11721         target_ep = lock_user(VERIFY_WRITE, arg2,
11722                               maxevents * sizeof(struct target_epoll_event), 1);
11723         if (!target_ep) {
11724             return -TARGET_EFAULT;
11725         }
11726 
11727         ep = g_try_new(struct epoll_event, maxevents);
11728         if (!ep) {
11729             unlock_user(target_ep, arg2, 0);
11730             return -TARGET_ENOMEM;
11731         }
11732 
11733         switch (num) {
11734 #if defined(TARGET_NR_epoll_pwait)
11735         case TARGET_NR_epoll_pwait:
11736         {
11737             target_sigset_t *target_set;
11738             sigset_t _set, *set = &_set;
11739 
11740             if (arg5) {
11741                 if (arg6 != sizeof(target_sigset_t)) {
11742                     ret = -TARGET_EINVAL;
11743                     break;
11744                 }
11745 
11746                 target_set = lock_user(VERIFY_READ, arg5,
11747                                        sizeof(target_sigset_t), 1);
11748                 if (!target_set) {
11749                     ret = -TARGET_EFAULT;
11750                     break;
11751                 }
11752                 target_to_host_sigset(set, target_set);
11753                 unlock_user(target_set, arg5, 0);
11754             } else {
11755                 set = NULL;
11756             }
11757 
11758             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11759                                              set, SIGSET_T_SIZE));
11760             break;
11761         }
11762 #endif
11763 #if defined(TARGET_NR_epoll_wait)
11764         case TARGET_NR_epoll_wait:
11765             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11766                                              NULL, 0));
11767             break;
11768 #endif
11769         default:
11770             ret = -TARGET_ENOSYS;
11771         }
11772         if (!is_error(ret)) {
11773             int i;
11774             for (i = 0; i < ret; i++) {
11775                 target_ep[i].events = tswap32(ep[i].events);
11776                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11777             }
11778             unlock_user(target_ep, arg2,
11779                         ret * sizeof(struct target_epoll_event));
11780         } else {
11781             unlock_user(target_ep, arg2, 0);
11782         }
11783         g_free(ep);
11784         return ret;
11785     }
11786 #endif
11787 #endif
11788 #ifdef TARGET_NR_prlimit64
11789     case TARGET_NR_prlimit64:
11790     {
11791         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11792         struct target_rlimit64 *target_rnew, *target_rold;
11793         struct host_rlimit64 rnew, rold, *rnewp = 0;
11794         int resource = target_to_host_resource(arg2);
11795         if (arg3) {
11796             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11797                 return -TARGET_EFAULT;
11798             }
11799             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11800             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11801             unlock_user_struct(target_rnew, arg3, 0);
11802             rnewp = &rnew;
11803         }
11804 
11805         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11806         if (!is_error(ret) && arg4) {
11807             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11808                 return -TARGET_EFAULT;
11809             }
11810             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11811             target_rold->rlim_max = tswap64(rold.rlim_max);
11812             unlock_user_struct(target_rold, arg4, 1);
11813         }
11814         return ret;
11815     }
11816 #endif
11817 #ifdef TARGET_NR_gethostname
11818     case TARGET_NR_gethostname:
11819     {
11820         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11821         if (name) {
11822             ret = get_errno(gethostname(name, arg2));
11823             unlock_user(name, arg1, arg2);
11824         } else {
11825             ret = -TARGET_EFAULT;
11826         }
11827         return ret;
11828     }
11829 #endif
11830 #ifdef TARGET_NR_atomic_cmpxchg_32
11831     case TARGET_NR_atomic_cmpxchg_32:
11832     {
11833         /* should use start_exclusive from main.c */
11834         abi_ulong mem_value;
11835         if (get_user_u32(mem_value, arg6)) {
11836             target_siginfo_t info;
11837             info.si_signo = SIGSEGV;
11838             info.si_errno = 0;
11839             info.si_code = TARGET_SEGV_MAPERR;
11840             info._sifields._sigfault._addr = arg6;
11841             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11842                          QEMU_SI_FAULT, &info);
11843             ret = 0xdeadbeef;
11844 
11845         }
11846         if (mem_value == arg2)
11847             put_user_u32(arg1, arg6);
11848         return mem_value;
11849     }
11850 #endif
11851 #ifdef TARGET_NR_atomic_barrier
11852     case TARGET_NR_atomic_barrier:
11853         /* Like the kernel implementation and the QEMU ARM barrier
11854            helper, this can safely be treated as a no-op. */
11855         return 0;
11856 #endif
11857 
11858 #ifdef TARGET_NR_timer_create
11859     case TARGET_NR_timer_create:
11860     {
11861         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
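        /*
         * Host timers are multiplexed through the g_posix_timers[] table;
         * the guest-visible timer id is the table index tagged with
         * TIMER_MAGIC (see the put_user() below).
         */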
11862 
11863         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11864 
11865         int clkid = arg1;
11866         int timer_index = next_free_host_timer();
11867 
11868         if (timer_index < 0) {
11869             ret = -TARGET_EAGAIN;
11870         } else {
11871             timer_t *phtimer = g_posix_timers  + timer_index;
11872 
11873             if (arg2) {
11874                 phost_sevp = &host_sevp;
11875                 ret = target_to_host_sigevent(phost_sevp, arg2);
11876                 if (ret != 0) {
11877                     return ret;
11878                 }
11879             }
11880 
11881             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11882             if (ret) {
11883                 phtimer = NULL;
11884             } else {
11885                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11886                     return -TARGET_EFAULT;
11887                 }
11888             }
11889         }
11890         return ret;
11891     }
11892 #endif
11893 
11894 #ifdef TARGET_NR_timer_settime
11895     case TARGET_NR_timer_settime:
11896     {
11897         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11898          * struct itimerspec * old_value */
11899         target_timer_t timerid = get_timer_id(arg1);
11900 
11901         if (timerid < 0) {
11902             ret = timerid;
11903         } else if (arg3 == 0) {
11904             ret = -TARGET_EINVAL;
11905         } else {
11906             timer_t htimer = g_posix_timers[timerid];
11907             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11908 
11909             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11910                 return -TARGET_EFAULT;
11911             }
11912             ret = get_errno(
11913                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11914             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11915                 return -TARGET_EFAULT;
11916             }
11917         }
11918         return ret;
11919     }
11920 #endif
11921 
11922 #ifdef TARGET_NR_timer_gettime
11923     case TARGET_NR_timer_gettime:
11924     {
11925         /* args: timer_t timerid, struct itimerspec *curr_value */
11926         target_timer_t timerid = get_timer_id(arg1);
11927 
11928         if (timerid < 0) {
11929             ret = timerid;
11930         } else if (!arg2) {
11931             ret = -TARGET_EFAULT;
11932         } else {
11933             timer_t htimer = g_posix_timers[timerid];
11934             struct itimerspec hspec;
11935             ret = get_errno(timer_gettime(htimer, &hspec));
11936 
11937             if (host_to_target_itimerspec(arg2, &hspec)) {
11938                 ret = -TARGET_EFAULT;
11939             }
11940         }
11941         return ret;
11942     }
11943 #endif
11944 
11945 #ifdef TARGET_NR_timer_getoverrun
11946     case TARGET_NR_timer_getoverrun:
11947     {
11948         /* args: timer_t timerid */
11949         target_timer_t timerid = get_timer_id(arg1);
11950 
11951         if (timerid < 0) {
11952             ret = timerid;
11953         } else {
11954             timer_t htimer = g_posix_timers[timerid];
11955             ret = get_errno(timer_getoverrun(htimer));
11956         }
11957         return ret;
11958     }
11959 #endif
11960 
11961 #ifdef TARGET_NR_timer_delete
11962     case TARGET_NR_timer_delete:
11963     {
11964         /* args: timer_t timerid */
11965         target_timer_t timerid = get_timer_id(arg1);
11966 
11967         if (timerid < 0) {
11968             ret = timerid;
11969         } else {
11970             timer_t htimer = g_posix_timers[timerid];
11971             ret = get_errno(timer_delete(htimer));
11972             g_posix_timers[timerid] = 0;
11973         }
11974         return ret;
11975     }
11976 #endif
11977 
11978 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11979     case TARGET_NR_timerfd_create:
11980         return get_errno(timerfd_create(arg1,
11981                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11982 #endif
11983 
11984 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11985     case TARGET_NR_timerfd_gettime:
11986         {
11987             struct itimerspec its_curr;
11988 
11989             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11990 
11991             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11992                 return -TARGET_EFAULT;
11993             }
11994         }
11995         return ret;
11996 #endif
11997 
11998 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11999     case TARGET_NR_timerfd_settime:
12000         {
12001             struct itimerspec its_new, its_old, *p_new;
12002 
12003             if (arg3) {
12004                 if (target_to_host_itimerspec(&its_new, arg3)) {
12005                     return -TARGET_EFAULT;
12006                 }
12007                 p_new = &its_new;
12008             } else {
12009                 p_new = NULL;
12010             }
12011 
12012             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12013 
12014             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12015                 return -TARGET_EFAULT;
12016             }
12017         }
12018         return ret;
12019 #endif
12020 
12021 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12022     case TARGET_NR_ioprio_get:
12023         return get_errno(ioprio_get(arg1, arg2));
12024 #endif
12025 
12026 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12027     case TARGET_NR_ioprio_set:
12028         return get_errno(ioprio_set(arg1, arg2, arg3));
12029 #endif
12030 
12031 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12032     case TARGET_NR_setns:
12033         return get_errno(setns(arg1, arg2));
12034 #endif
12035 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12036     case TARGET_NR_unshare:
12037         return get_errno(unshare(arg1));
12038 #endif
12039 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12040     case TARGET_NR_kcmp:
12041         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12042 #endif
12043 #ifdef TARGET_NR_swapcontext
12044     case TARGET_NR_swapcontext:
12045         /* PowerPC specific.  */
12046         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12047 #endif
12048 #ifdef TARGET_NR_memfd_create
12049     case TARGET_NR_memfd_create:
12050         p = lock_user_string(arg1);
12051         if (!p) {
12052             return -TARGET_EFAULT;
12053         }
12054         ret = get_errno(memfd_create(p, arg2));
12055         fd_trans_unregister(ret);
12056         unlock_user(p, arg1, 0);
12057         return ret;
12058 #endif
12059 
12060     default:
12061         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12062         return -TARGET_ENOSYS;
12063     }
12064     return ret;
12065 }
12066 
12067 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12068                     abi_long arg2, abi_long arg3, abi_long arg4,
12069                     abi_long arg5, abi_long arg6, abi_long arg7,
12070                     abi_long arg8)
12071 {
12072     CPUState *cpu = env_cpu(cpu_env);
12073     abi_long ret;
12074 
12075 #ifdef DEBUG_ERESTARTSYS
12076     /* Debug-only code for exercising the syscall-restart code paths
12077      * in the per-architecture cpu main loops: restart every syscall
12078      * the guest makes once before letting it through.
12079      */
12080     {
12081         static bool flag;
12082         flag = !flag;
12083         if (flag) {
12084             return -TARGET_ERESTARTSYS;
12085         }
12086     }
12087 #endif
12088 
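    /*
     * Record the syscall for the trace backends, dispatch it through
     * do_syscall1(), and optionally log it via the strace machinery.
     */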
12089     record_syscall_start(cpu, num, arg1,
12090                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12091 
12092     if (unlikely(do_strace)) {
12093         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12094         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12095                           arg5, arg6, arg7, arg8);
12096         print_syscall_ret(num, ret);
12097     } else {
12098         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12099                           arg5, arg6, arg7, arg8);
12100     }
12101 
12102     record_syscall_return(cpu, num, ret);
12103     return ret;
12104 }
12105