xref: /openbmc/qemu/linux-user/syscall.c (revision ef45f7b3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
118 
119 #ifndef CLONE_IO
120 #define CLONE_IO                0x80000000      /* Clone io context */
121 #endif
122 
123 /* We can't directly call the host clone syscall, because this will
124  * badly confuse libc (breaking mutexes, for example). So we must
125  * divide clone flags into:
126  *  * flag combinations that look like pthread_create()
127  *  * flag combinations that look like fork()
128  *  * flags we can implement within QEMU itself
129  *  * flags we can't support and will return an error for
130  */
131 /* For thread creation, all these flags must be present; for
132  * fork, none must be present.
133  */
134 #define CLONE_THREAD_FLAGS                              \
135     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
136      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137 
138 /* These flags are ignored:
139  * CLONE_DETACHED is now ignored by the kernel;
140  * CLONE_IO is just an optimisation hint to the I/O scheduler
141  */
142 #define CLONE_IGNORED_FLAGS                     \
143     (CLONE_DETACHED | CLONE_IO)
144 
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS               \
147     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
148      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149 
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
154 
155 #define CLONE_INVALID_FORK_FLAGS                                        \
156     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157 
158 #define CLONE_INVALID_THREAD_FLAGS                                      \
159     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
160        CLONE_IGNORED_FLAGS))
161 
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163  * have almost all been allocated. We cannot support any of
164  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166  * The checks against the invalid thread masks above will catch these.
167  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168  */
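/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the masks above can be used to classify a clone flags word.  The
 * helper names are hypothetical; the real checks are open-coded in
 * do_fork() further down in this file.
 */
static inline int clone_flags_look_like_thread(unsigned int flags)
{
    /* pthread_create()-style clone: all of the thread flags present and
     * nothing set outside the supported, optional and ignored sets. */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           !(flags & CLONE_INVALID_THREAD_FLAGS);
}

static inline int clone_flags_look_like_fork(unsigned int flags)
{
    /* fork()-style clone: none of the thread flags, and nothing set
     * outside CSIGNAL plus the optional and ignored fork flags. */
    return !(flags & CLONE_THREAD_FLAGS) &&
           !(flags & CLONE_INVALID_FORK_FLAGS);
}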
169 
170 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
171  * once. This exercises the codepaths for restart.
172  */
173 //#define DEBUG_ERESTARTSYS
174 
175 //#include <linux/msdos_fs.h>
176 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
177 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
178 
179 #undef _syscall0
180 #undef _syscall1
181 #undef _syscall2
182 #undef _syscall3
183 #undef _syscall4
184 #undef _syscall5
185 #undef _syscall6
186 
187 #define _syscall0(type,name)		\
188 static type name (void)			\
189 {					\
190 	return syscall(__NR_##name);	\
191 }
192 
193 #define _syscall1(type,name,type1,arg1)		\
194 static type name (type1 arg1)			\
195 {						\
196 	return syscall(__NR_##name, arg1);	\
197 }
198 
199 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
200 static type name (type1 arg1,type2 arg2)		\
201 {							\
202 	return syscall(__NR_##name, arg1, arg2);	\
203 }
204 
205 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
206 static type name (type1 arg1,type2 arg2,type3 arg3)		\
207 {								\
208 	return syscall(__NR_##name, arg1, arg2, arg3);		\
209 }
210 
211 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
212 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
213 {										\
214 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
215 }
216 
217 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
218 		  type5,arg5)							\
219 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
220 {										\
221 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
222 }
223 
224 
225 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
226 		  type5,arg5,type6,arg6)					\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
228                   type6 arg6)							\
229 {										\
230 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
231 }
232 
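/*
 * Illustrative note (editor's addition, not part of the original file):
 * each _syscallN(type, name, ...) invocation below defines a thin static
 * wrapper around the raw host syscall.  For example,
 *
 *     #define __NR_sys_gettid __NR_gettid
 *     _syscall0(int, sys_gettid)
 *
 * (used further down) expands to
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * i.e. it calls the host kernel directly rather than going through any
 * libc wrapper that might exist for the same syscall.
 */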
233 
234 #define __NR_sys_uname __NR_uname
235 #define __NR_sys_getcwd1 __NR_getcwd
236 #define __NR_sys_getdents __NR_getdents
237 #define __NR_sys_getdents64 __NR_getdents64
238 #define __NR_sys_getpriority __NR_getpriority
239 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
240 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
246 #define __NR_sys_statx __NR_statx
247 
248 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
249 #define __NR__llseek __NR_lseek
250 #endif
251 
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
255 #endif
256 
257 #define __NR_sys_gettid __NR_gettid
258 _syscall0(int, sys_gettid)
259 
260 /* For the 64-bit guest on 32-bit host case we must emulate
261  * getdents using getdents64, because otherwise the host
262  * might hand us back more dirent records than we can fit
263  * into the guest buffer after structure format conversion.
264  * Otherwise we emulate getdents using the host's own getdents, if it has one.
265  */
266 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
267 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #endif
269 
270 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
271 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
272 #endif
273 #if (defined(TARGET_NR_getdents) && \
274       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
275     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
276 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
277 #endif
278 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
279 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
280           loff_t *, res, uint, wh);
281 #endif
282 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
283 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
284           siginfo_t *, uinfo)
285 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
286 #ifdef __NR_exit_group
287 _syscall1(int,exit_group,int,error_code)
288 #endif
289 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
290 _syscall1(int,set_tid_address,int *,tidptr)
291 #endif
292 #if defined(TARGET_NR_futex) && defined(__NR_futex)
293 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
294           const struct timespec *,timeout,int *,uaddr2,int,val3)
295 #endif
296 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
297 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
298           unsigned long *, user_mask_ptr);
299 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
300 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_getcpu __NR_getcpu
303 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
304 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
305           void *, arg);
306 _syscall2(int, capget, struct __user_cap_header_struct *, header,
307           struct __user_cap_data_struct *, data);
308 _syscall2(int, capset, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
311 _syscall2(int, ioprio_get, int, which, int, who)
312 #endif
313 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
314 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
315 #endif
316 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
317 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
318 #endif
319 
320 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
321 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
322           unsigned long, idx1, unsigned long, idx2)
323 #endif
324 
325 /*
326  * It is assumed that struct statx is architecture independent.
327  */
328 #if defined(TARGET_NR_statx) && defined(__NR_statx)
329 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
330           unsigned int, mask, struct target_statx *, statxbuf)
331 #endif
332 
333 static bitmask_transtbl fcntl_flags_tbl[] = {
334   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
335   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
336   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
337   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
338   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
339   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
340   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
341   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
342   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
343   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
344   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
345   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
346   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
347 #if defined(O_DIRECT)
348   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
349 #endif
350 #if defined(O_NOATIME)
351   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
352 #endif
353 #if defined(O_CLOEXEC)
354   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
355 #endif
356 #if defined(O_PATH)
357   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
358 #endif
359 #if defined(O_TMPFILE)
360   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
361 #endif
362   /* Don't terminate the list prematurely on 64-bit host+guest.  */
363 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
364   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
365 #endif
366   { 0, 0, 0, 0 }
367 };
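/*
 * Illustrative note (editor's addition): tables like fcntl_flags_tbl are
 * consumed by the generic target_to_host_bitmask()/host_to_target_bitmask()
 * helpers defined later in this file.  Roughly, each row carries a
 * (mask, bits) pair for each side: when (flags & mask) == bits on the source
 * side, the corresponding bits are OR-ed into the result for the other side.
 * A typical call site looks like:
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 */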
368 
369 static int sys_getcwd1(char *buf, size_t size)
370 {
371   if (getcwd(buf, size) == NULL) {
372       /* getcwd() sets errno */
373       return (-1);
374   }
375   return strlen(buf)+1;
376 }
377 
378 #ifdef TARGET_NR_utimensat
379 #if defined(__NR_utimensat)
380 #define __NR_sys_utimensat __NR_utimensat
381 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
382           const struct timespec *,tsp,int,flags)
383 #else
384 static int sys_utimensat(int dirfd, const char *pathname,
385                          const struct timespec times[2], int flags)
386 {
387     errno = ENOSYS;
388     return -1;
389 }
390 #endif
391 #endif /* TARGET_NR_utimensat */
392 
393 #ifdef TARGET_NR_renameat2
394 #if defined(__NR_renameat2)
395 #define __NR_sys_renameat2 __NR_renameat2
396 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
397           const char *, new, unsigned int, flags)
398 #else
399 static int sys_renameat2(int oldfd, const char *old,
400                          int newfd, const char *new, int flags)
401 {
402     if (flags == 0) {
403         return renameat(oldfd, old, newfd, new);
404     }
405     errno = ENOSYS;
406     return -1;
407 }
408 #endif
409 #endif /* TARGET_NR_renameat2 */
410 
411 #ifdef CONFIG_INOTIFY
412 #include <sys/inotify.h>
413 
414 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
415 static int sys_inotify_init(void)
416 {
417   return (inotify_init());
418 }
419 #endif
420 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
421 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
422 {
423   return (inotify_add_watch(fd, pathname, mask));
424 }
425 #endif
426 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
427 static int sys_inotify_rm_watch(int fd, int32_t wd)
428 {
429   return (inotify_rm_watch(fd, wd));
430 }
431 #endif
432 #ifdef CONFIG_INOTIFY1
433 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
434 static int sys_inotify_init1(int flags)
435 {
436   return (inotify_init1(flags));
437 }
438 #endif
439 #endif
440 #else
441 /* Userspace can usually survive runtime without inotify */
442 #undef TARGET_NR_inotify_init
443 #undef TARGET_NR_inotify_init1
444 #undef TARGET_NR_inotify_add_watch
445 #undef TARGET_NR_inotify_rm_watch
446 #endif /* CONFIG_INOTIFY  */
447 
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
451 #endif
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not be the same as that used by the underlying syscall */
454 struct host_rlimit64 {
455     uint64_t rlim_cur;
456     uint64_t rlim_max;
457 };
458 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
459           const struct host_rlimit64 *, new_limit,
460           struct host_rlimit64 *, old_limit)
461 #endif
462 
463 
464 #if defined(TARGET_NR_timer_create)
465 /* Maximum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers[32] = { 0, } ;
467 
468 static inline int next_free_host_timer(void)
469 {
470     int k;
471     /* FIXME: Does finding the next free slot require a lock? */
472     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
473         if (g_posix_timers[k] == 0) {
474             g_posix_timers[k] = (timer_t) 1;
475             return k;
476         }
477     }
478     return -1;
479 }
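/*
 * Editor's sketch (not part of the original file), one possible answer to
 * the FIXME above: the slot could be claimed without a lock by replacing
 * the plain test-and-set with a host atomic compare-and-swap.  The helper
 * name is hypothetical.
 */
static inline int next_free_host_timer_atomic(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        /* Atomically claim slot k if it is still unused. */
        if (__sync_bool_compare_and_swap(&g_posix_timers[k],
                                         (timer_t)0, (timer_t)1)) {
            return k;
        }
    }
    return -1;
}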
480 #endif
481 
482 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
483 #ifdef TARGET_ARM
484 static inline int regpairs_aligned(void *cpu_env, int num)
485 {
486     return ((((CPUARMState *)cpu_env)->eabi) == 1);
487 }
488 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
489 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
490 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
491 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
492  * register pairs, which works out the same as ARM/MIPS because we start with
493  * r3 as arg1 */
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_SH4)
496 /* SH4 doesn't align register pairs, except for p{read,write}64 */
497 static inline int regpairs_aligned(void *cpu_env, int num)
498 {
499     switch (num) {
500     case TARGET_NR_pread64:
501     case TARGET_NR_pwrite64:
502         return 1;
503 
504     default:
505         return 0;
506     }
507 }
508 #elif defined(TARGET_XTENSA)
509 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
510 #else
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
512 #endif
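/*
 * Illustrative note (editor's addition): regpairs_aligned() is consulted by
 * the syscall dispatcher whenever a 64-bit argument is split across two
 * guest registers.  A simplified version of a call site later in this file
 * (the extra shift skips the unused padding register):
 *
 *     case TARGET_NR_pread64:
 *         if (regpairs_aligned(cpu_env, num)) {
 *             arg4 = arg5;
 *             arg5 = arg6;
 *         }
 *         ret = get_errno(pread64(arg1, p, arg3,
 *                                 target_offset64(arg4, arg5)));
 */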
513 
514 #define ERRNO_TABLE_SIZE 1200
515 
516 /* target_to_host_errno_table[] is initialized from
517  * host_to_target_errno_table[] in syscall_init(). */
518 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
519 };
520 
521 /*
522  * This list is the union of errno values overridden in asm-<arch>/errno.h
523  * minus the errnos that are not actually generic to all archs.
524  */
525 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
526     [EAGAIN]		= TARGET_EAGAIN,
527     [EIDRM]		= TARGET_EIDRM,
528     [ECHRNG]		= TARGET_ECHRNG,
529     [EL2NSYNC]		= TARGET_EL2NSYNC,
530     [EL3HLT]		= TARGET_EL3HLT,
531     [EL3RST]		= TARGET_EL3RST,
532     [ELNRNG]		= TARGET_ELNRNG,
533     [EUNATCH]		= TARGET_EUNATCH,
534     [ENOCSI]		= TARGET_ENOCSI,
535     [EL2HLT]		= TARGET_EL2HLT,
536     [EDEADLK]		= TARGET_EDEADLK,
537     [ENOLCK]		= TARGET_ENOLCK,
538     [EBADE]		= TARGET_EBADE,
539     [EBADR]		= TARGET_EBADR,
540     [EXFULL]		= TARGET_EXFULL,
541     [ENOANO]		= TARGET_ENOANO,
542     [EBADRQC]		= TARGET_EBADRQC,
543     [EBADSLT]		= TARGET_EBADSLT,
544     [EBFONT]		= TARGET_EBFONT,
545     [ENOSTR]		= TARGET_ENOSTR,
546     [ENODATA]		= TARGET_ENODATA,
547     [ETIME]		= TARGET_ETIME,
548     [ENOSR]		= TARGET_ENOSR,
549     [ENONET]		= TARGET_ENONET,
550     [ENOPKG]		= TARGET_ENOPKG,
551     [EREMOTE]		= TARGET_EREMOTE,
552     [ENOLINK]		= TARGET_ENOLINK,
553     [EADV]		= TARGET_EADV,
554     [ESRMNT]		= TARGET_ESRMNT,
555     [ECOMM]		= TARGET_ECOMM,
556     [EPROTO]		= TARGET_EPROTO,
557     [EDOTDOT]		= TARGET_EDOTDOT,
558     [EMULTIHOP]		= TARGET_EMULTIHOP,
559     [EBADMSG]		= TARGET_EBADMSG,
560     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
561     [EOVERFLOW]		= TARGET_EOVERFLOW,
562     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
563     [EBADFD]		= TARGET_EBADFD,
564     [EREMCHG]		= TARGET_EREMCHG,
565     [ELIBACC]		= TARGET_ELIBACC,
566     [ELIBBAD]		= TARGET_ELIBBAD,
567     [ELIBSCN]		= TARGET_ELIBSCN,
568     [ELIBMAX]		= TARGET_ELIBMAX,
569     [ELIBEXEC]		= TARGET_ELIBEXEC,
570     [EILSEQ]		= TARGET_EILSEQ,
571     [ENOSYS]		= TARGET_ENOSYS,
572     [ELOOP]		= TARGET_ELOOP,
573     [ERESTART]		= TARGET_ERESTART,
574     [ESTRPIPE]		= TARGET_ESTRPIPE,
575     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
576     [EUSERS]		= TARGET_EUSERS,
577     [ENOTSOCK]		= TARGET_ENOTSOCK,
578     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
579     [EMSGSIZE]		= TARGET_EMSGSIZE,
580     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
581     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
582     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
583     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
584     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
585     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
586     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
587     [EADDRINUSE]	= TARGET_EADDRINUSE,
588     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
589     [ENETDOWN]		= TARGET_ENETDOWN,
590     [ENETUNREACH]	= TARGET_ENETUNREACH,
591     [ENETRESET]		= TARGET_ENETRESET,
592     [ECONNABORTED]	= TARGET_ECONNABORTED,
593     [ECONNRESET]	= TARGET_ECONNRESET,
594     [ENOBUFS]		= TARGET_ENOBUFS,
595     [EISCONN]		= TARGET_EISCONN,
596     [ENOTCONN]		= TARGET_ENOTCONN,
597     [EUCLEAN]		= TARGET_EUCLEAN,
598     [ENOTNAM]		= TARGET_ENOTNAM,
599     [ENAVAIL]		= TARGET_ENAVAIL,
600     [EISNAM]		= TARGET_EISNAM,
601     [EREMOTEIO]		= TARGET_EREMOTEIO,
602     [EDQUOT]            = TARGET_EDQUOT,
603     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
604     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
605     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
606     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
607     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
608     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
609     [EALREADY]		= TARGET_EALREADY,
610     [EINPROGRESS]	= TARGET_EINPROGRESS,
611     [ESTALE]		= TARGET_ESTALE,
612     [ECANCELED]		= TARGET_ECANCELED,
613     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
614     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
615 #ifdef ENOKEY
616     [ENOKEY]		= TARGET_ENOKEY,
617 #endif
618 #ifdef EKEYEXPIRED
619     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
620 #endif
621 #ifdef EKEYREVOKED
622     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
623 #endif
624 #ifdef EKEYREJECTED
625     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
626 #endif
627 #ifdef EOWNERDEAD
628     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
629 #endif
630 #ifdef ENOTRECOVERABLE
631     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
632 #endif
633 #ifdef ENOMSG
634     [ENOMSG]            = TARGET_ENOMSG,
635 #endif
636 #ifdef ERFKILL
637     [ERFKILL]           = TARGET_ERFKILL,
638 #endif
639 #ifdef EHWPOISON
640     [EHWPOISON]         = TARGET_EHWPOISON,
641 #endif
642 };
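/*
 * Illustrative note (editor's addition): target_to_host_errno_table[] above
 * is not written out by hand; syscall_init() later in this file fills it by
 * inverting host_to_target_errno_table[], roughly:
 *
 *     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
 *         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
 *     }
 *
 * Errno values with no entry in either table fall through to the identity
 * mapping in host_to_target_errno()/target_to_host_errno() below.
 */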
643 
644 static inline int host_to_target_errno(int err)
645 {
646     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
647         host_to_target_errno_table[err]) {
648         return host_to_target_errno_table[err];
649     }
650     return err;
651 }
652 
653 static inline int target_to_host_errno(int err)
654 {
655     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
656         target_to_host_errno_table[err]) {
657         return target_to_host_errno_table[err];
658     }
659     return err;
660 }
661 
662 static inline abi_long get_errno(abi_long ret)
663 {
664     if (ret == -1)
665         return -host_to_target_errno(errno);
666     else
667         return ret;
668 }
669 
670 const char *target_strerror(int err)
671 {
672     if (err == TARGET_ERESTARTSYS) {
673         return "To be restarted";
674     }
675     if (err == TARGET_QEMU_ESIGRETURN) {
676         return "Successful exit from sigreturn";
677     }
678 
679     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
680         return NULL;
681     }
682     return strerror(target_to_host_errno(err));
683 }
684 
685 #define safe_syscall0(type, name) \
686 static type safe_##name(void) \
687 { \
688     return safe_syscall(__NR_##name); \
689 }
690 
691 #define safe_syscall1(type, name, type1, arg1) \
692 static type safe_##name(type1 arg1) \
693 { \
694     return safe_syscall(__NR_##name, arg1); \
695 }
696 
697 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
698 static type safe_##name(type1 arg1, type2 arg2) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2); \
701 }
702 
703 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
705 { \
706     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
707 }
708 
709 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
710     type4, arg4) \
711 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
714 }
715 
716 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
717     type4, arg4, type5, arg5) \
718 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
719     type5 arg5) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
722 }
723 
724 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4, type5, arg5, type6, arg6) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727     type5 arg5, type6 arg6) \
728 { \
729     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
730 }
731 
732 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
733 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
734 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
735               int, flags, mode_t, mode)
736 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
737               struct rusage *, rusage)
738 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
739               int, options, struct rusage *, rusage)
740 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
741 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
744               struct timespec *, tsp, const sigset_t *, sigmask,
745               size_t, sigsetsize)
746 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
747               int, maxevents, int, timeout, const sigset_t *, sigmask,
748               size_t, sigsetsize)
749 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
771               const struct timespec *, uts, size_t, sigsetsize)
772 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
773               int, flags)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #ifdef TARGET_NR_clock_nanosleep
777 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
778               const struct timespec *, req, struct timespec *, rem)
779 #endif
780 #ifdef __NR_ipc
781 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
782               void *, ptr, long, fifth)
783 #endif
784 #ifdef __NR_msgsnd
785 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
786               int, flags)
787 #endif
788 #ifdef __NR_msgrcv
789 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
790               long, msgtype, int, flags)
791 #endif
792 #ifdef __NR_semtimedop
793 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
794               unsigned, nsops, const struct timespec *, timeout)
795 #endif
796 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
797 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
798               size_t, len, unsigned, prio, const struct timespec *, timeout)
799 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
800               size_t, len, unsigned *, prio, const struct timespec *, timeout)
801 #endif
802 /* We do ioctl like this rather than via safe_syscall3 to preserve the
803  * "third argument might be integer or pointer or not present" behaviour of
804  * the libc function.
805  */
806 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
807 /* Similarly for fcntl. Note that callers must always:
808  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
809  *  use the flock64 struct rather than unsuffixed flock
810  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
811  */
812 #ifdef __NR_fcntl64
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
814 #else
815 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
816 #endif
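/*
 * Illustrative note (editor's addition): the safe_*() wrappers above are
 * meant to be combined with get_errno() at the call site, so that a failing
 * host syscall is folded into a negative target errno.  Simplified from the
 * read emulation later in this file:
 *
 *     ret = get_errno(safe_read(fd, host_buf, count));
 *
 * The point of safe_syscall() (see safe-syscall.h) is to close the race
 * where a guest signal arrives just before a blocking syscall: in that case
 * the call fails with TARGET_ERESTARTSYS and is restarted once the signal
 * has been delivered to the guest.
 */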
817 
818 static inline int host_to_target_sock_type(int host_type)
819 {
820     int target_type;
821 
822     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
823     case SOCK_DGRAM:
824         target_type = TARGET_SOCK_DGRAM;
825         break;
826     case SOCK_STREAM:
827         target_type = TARGET_SOCK_STREAM;
828         break;
829     default:
830         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
831         break;
832     }
833 
834 #if defined(SOCK_CLOEXEC)
835     if (host_type & SOCK_CLOEXEC) {
836         target_type |= TARGET_SOCK_CLOEXEC;
837     }
838 #endif
839 
840 #if defined(SOCK_NONBLOCK)
841     if (host_type & SOCK_NONBLOCK) {
842         target_type |= TARGET_SOCK_NONBLOCK;
843     }
844 #endif
845 
846     return target_type;
847 }
848 
849 static abi_ulong target_brk;
850 static abi_ulong target_original_brk;
851 static abi_ulong brk_page;
852 
853 void target_set_brk(abi_ulong new_brk)
854 {
855     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
856     brk_page = HOST_PAGE_ALIGN(target_brk);
857 }
858 
859 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
860 #define DEBUGF_BRK(message, args...)
861 
862 /* do_brk() must return target values and target errnos. */
863 abi_long do_brk(abi_ulong new_brk)
864 {
865     abi_long mapped_addr;
866     abi_ulong new_alloc_size;
867 
868     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
869 
870     if (!new_brk) {
871         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
872         return target_brk;
873     }
874     if (new_brk < target_original_brk) {
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
876                    target_brk);
877         return target_brk;
878     }
879 
880     /* If the new brk is less than the highest page reserved to the
881      * target heap allocation, set it and we're almost done...  */
882     if (new_brk <= brk_page) {
883         /* Heap contents are initialized to zero, as for anonymous
884          * mapped pages.  */
885         if (new_brk > target_brk) {
886             memset(g2h(target_brk), 0, new_brk - target_brk);
887         }
888         target_brk = new_brk;
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
890         return target_brk;
891     }
892 
893     /* We need to allocate more memory after the brk... Note that
894      * we don't use MAP_FIXED because that will map over the top of
895      * any existing mapping (like the one with the host libc or qemu
896      * itself); instead we treat "mapped but at wrong address" as
897      * a failure and unmap again.
898      */
899     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
900     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
901                                         PROT_READ|PROT_WRITE,
902                                         MAP_ANON|MAP_PRIVATE, 0, 0));
903 
904     if (mapped_addr == brk_page) {
905         /* Heap contents are initialized to zero, as for anonymous
906          * mapped pages.  Technically the new pages are already
907          * initialized to zero since they *are* anonymous mapped
908          * pages, however we have to take care with the contents that
909          * come from the remaining part of the previous page: it may
910          * contain garbage data due to previous heap usage (grown
911          * then shrunk).  */
912         memset(g2h(target_brk), 0, brk_page - target_brk);
913 
914         target_brk = new_brk;
915         brk_page = HOST_PAGE_ALIGN(target_brk);
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
917             target_brk);
918         return target_brk;
919     } else if (mapped_addr != -1) {
920         /* Mapped but at wrong address, meaning there wasn't actually
921          * enough space for this brk.
922          */
923         target_munmap(mapped_addr, new_alloc_size);
924         mapped_addr = -1;
925         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
926     }
927     else {
928         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
929     }
930 
931 #if defined(TARGET_ALPHA)
932     /* We (partially) emulate OSF/1 on Alpha, which requires we
933        return a proper errno, not an unchanged brk value.  */
934     return -TARGET_ENOMEM;
935 #endif
936     /* For everything else, return the previous break. */
937     return target_brk;
938 }
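/*
 * Worked example (editor's addition): a guest growing its heap calls brk()
 * with increasing addresses.  While the request stays at or below brk_page
 * (the end of the last host page already reserved), do_brk() only zeroes
 * the newly exposed bytes and advances target_brk.  Once the request
 * crosses brk_page, a fresh anonymous mapping is requested at exactly
 * brk_page; if the host returns any other address, the mapping is undone
 * and the old break is returned, which the guest's allocator treats as
 * an out-of-memory condition.
 */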
939 
940 static inline abi_long copy_from_user_fdset(fd_set *fds,
941                                             abi_ulong target_fds_addr,
942                                             int n)
943 {
944     int i, nw, j, k;
945     abi_ulong b, *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_READ,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  1)))
952         return -TARGET_EFAULT;
953 
954     FD_ZERO(fds);
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         /* grab the abi_ulong */
958         __get_user(b, &target_fds[i]);
959         for (j = 0; j < TARGET_ABI_BITS; j++) {
960             /* check the bit inside the abi_ulong */
961             if ((b >> j) & 1)
962                 FD_SET(k, fds);
963             k++;
964         }
965     }
966 
967     unlock_user(target_fds, target_fds_addr, 0);
968 
969     return 0;
970 }
971 
972 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
973                                                  abi_ulong target_fds_addr,
974                                                  int n)
975 {
976     if (target_fds_addr) {
977         if (copy_from_user_fdset(fds, target_fds_addr, n))
978             return -TARGET_EFAULT;
979         *fds_ptr = fds;
980     } else {
981         *fds_ptr = NULL;
982     }
983     return 0;
984 }
985 
986 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
987                                           const fd_set *fds,
988                                           int n)
989 {
990     int i, nw, j, k;
991     abi_long v;
992     abi_ulong *target_fds;
993 
994     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
995     if (!(target_fds = lock_user(VERIFY_WRITE,
996                                  target_fds_addr,
997                                  sizeof(abi_ulong) * nw,
998                                  0)))
999         return -TARGET_EFAULT;
1000 
1001     k = 0;
1002     for (i = 0; i < nw; i++) {
1003         v = 0;
1004         for (j = 0; j < TARGET_ABI_BITS; j++) {
1005             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1006             k++;
1007         }
1008         __put_user(v, &target_fds[i]);
1009     }
1010 
1011     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1012 
1013     return 0;
1014 }
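/*
 * Worked example (editor's addition): the two helpers above repack a host
 * fd_set into the guest's array of abi_ulong words and back.  With
 * TARGET_ABI_BITS == 32, a descriptor such as fd 70 lives in word
 * 70 / 32 == 2 at bit 70 % 32 == 6; with TARGET_ABI_BITS == 64 it would be
 * word 1, bit 6.  Byte order inside each word is handled by
 * __get_user()/__put_user().
 */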
1015 
1016 #if defined(__alpha__)
1017 #define HOST_HZ 1024
1018 #else
1019 #define HOST_HZ 100
1020 #endif
1021 
1022 static inline abi_long host_to_target_clock_t(long ticks)
1023 {
1024 #if HOST_HZ == TARGET_HZ
1025     return ticks;
1026 #else
1027     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1028 #endif
1029 }
1030 
1031 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1032                                              const struct rusage *rusage)
1033 {
1034     struct target_rusage *target_rusage;
1035 
1036     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1037         return -TARGET_EFAULT;
1038     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1039     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1040     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1041     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1042     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1043     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1044     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1045     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1046     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1047     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1048     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1049     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1050     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1051     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1052     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1053     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1054     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1055     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1056     unlock_user_struct(target_rusage, target_addr, 1);
1057 
1058     return 0;
1059 }
1060 
1061 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1062 {
1063     abi_ulong target_rlim_swap;
1064     rlim_t result;
1065 
1066     target_rlim_swap = tswapal(target_rlim);
1067     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1068         return RLIM_INFINITY;
1069 
1070     result = target_rlim_swap;
1071     if (target_rlim_swap != (rlim_t)result)
1072         return RLIM_INFINITY;
1073 
1074     return result;
1075 }
1076 
1077 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1078 {
1079     abi_ulong target_rlim_swap;
1080     abi_ulong result;
1081 
1082     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1083         target_rlim_swap = TARGET_RLIM_INFINITY;
1084     else
1085         target_rlim_swap = rlim;
1086     result = tswapal(target_rlim_swap);
1087 
1088     return result;
1089 }
1090 
1091 static inline int target_to_host_resource(int code)
1092 {
1093     switch (code) {
1094     case TARGET_RLIMIT_AS:
1095         return RLIMIT_AS;
1096     case TARGET_RLIMIT_CORE:
1097         return RLIMIT_CORE;
1098     case TARGET_RLIMIT_CPU:
1099         return RLIMIT_CPU;
1100     case TARGET_RLIMIT_DATA:
1101         return RLIMIT_DATA;
1102     case TARGET_RLIMIT_FSIZE:
1103         return RLIMIT_FSIZE;
1104     case TARGET_RLIMIT_LOCKS:
1105         return RLIMIT_LOCKS;
1106     case TARGET_RLIMIT_MEMLOCK:
1107         return RLIMIT_MEMLOCK;
1108     case TARGET_RLIMIT_MSGQUEUE:
1109         return RLIMIT_MSGQUEUE;
1110     case TARGET_RLIMIT_NICE:
1111         return RLIMIT_NICE;
1112     case TARGET_RLIMIT_NOFILE:
1113         return RLIMIT_NOFILE;
1114     case TARGET_RLIMIT_NPROC:
1115         return RLIMIT_NPROC;
1116     case TARGET_RLIMIT_RSS:
1117         return RLIMIT_RSS;
1118     case TARGET_RLIMIT_RTPRIO:
1119         return RLIMIT_RTPRIO;
1120     case TARGET_RLIMIT_SIGPENDING:
1121         return RLIMIT_SIGPENDING;
1122     case TARGET_RLIMIT_STACK:
1123         return RLIMIT_STACK;
1124     default:
1125         return code;
1126     }
1127 }
1128 
1129 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1130                                               abi_ulong target_tv_addr)
1131 {
1132     struct target_timeval *target_tv;
1133 
1134     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1135         return -TARGET_EFAULT;
1136     }
1137 
1138     __get_user(tv->tv_sec, &target_tv->tv_sec);
1139     __get_user(tv->tv_usec, &target_tv->tv_usec);
1140 
1141     unlock_user_struct(target_tv, target_tv_addr, 0);
1142 
1143     return 0;
1144 }
1145 
1146 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1147                                             const struct timeval *tv)
1148 {
1149     struct target_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __put_user(tv->tv_sec, &target_tv->tv_sec);
1156     __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 1);
1159 
1160     return 0;
1161 }
1162 
1163 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1164                                              const struct timeval *tv)
1165 {
1166     struct target__kernel_sock_timeval *target_tv;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171 
1172     __put_user(tv->tv_sec, &target_tv->tv_sec);
1173     __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 
1175     unlock_user_struct(target_tv, target_tv_addr, 1);
1176 
1177     return 0;
1178 }
1179 
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 
1194 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1195                                                struct timespec *host_ts)
1196 {
1197     struct target_timespec *target_ts;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1203     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1204     unlock_user_struct(target_ts, target_addr, 1);
1205     return 0;
1206 }
1207 
1208 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1209                                                  struct timespec *host_ts)
1210 {
1211     struct target__kernel_timespec *target_ts;
1212 
1213     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1214         return -TARGET_EFAULT;
1215     }
1216     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1217     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1218     unlock_user_struct(target_ts, target_addr, 1);
1219     return 0;
1220 }
1221 
1222 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1223                                                abi_ulong target_tz_addr)
1224 {
1225     struct target_timezone *target_tz;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230 
1231     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1232     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1233 
1234     unlock_user_struct(target_tz, target_tz_addr, 0);
1235 
1236     return 0;
1237 }
1238 
1239 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1240 #include <mqueue.h>
1241 
1242 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1243                                               abi_ulong target_mq_attr_addr)
1244 {
1245     struct target_mq_attr *target_mq_attr;
1246 
1247     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1248                           target_mq_attr_addr, 1))
1249         return -TARGET_EFAULT;
1250 
1251     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1252     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1253     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1254     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1255 
1256     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1257 
1258     return 0;
1259 }
1260 
1261 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1262                                             const struct mq_attr *attr)
1263 {
1264     struct target_mq_attr *target_mq_attr;
1265 
1266     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1267                           target_mq_attr_addr, 0))
1268         return -TARGET_EFAULT;
1269 
1270     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1271     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1272     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1273     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1274 
1275     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1276 
1277     return 0;
1278 }
1279 #endif
1280 
1281 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1282 /* do_select() must return target values and target errnos. */
1283 static abi_long do_select(int n,
1284                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1285                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1286 {
1287     fd_set rfds, wfds, efds;
1288     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1289     struct timeval tv;
1290     struct timespec ts, *ts_ptr;
1291     abi_long ret;
1292 
1293     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1302     if (ret) {
1303         return ret;
1304     }
1305 
1306     if (target_tv_addr) {
1307         if (copy_from_user_timeval(&tv, target_tv_addr))
1308             return -TARGET_EFAULT;
1309         ts.tv_sec = tv.tv_sec;
1310         ts.tv_nsec = tv.tv_usec * 1000;
1311         ts_ptr = &ts;
1312     } else {
1313         ts_ptr = NULL;
1314     }
1315 
1316     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1317                                   ts_ptr, NULL));
1318 
1319     if (!is_error(ret)) {
1320         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1321             return -TARGET_EFAULT;
1322         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1323             return -TARGET_EFAULT;
1324         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1325             return -TARGET_EFAULT;
1326 
1327         if (target_tv_addr) {
1328             tv.tv_sec = ts.tv_sec;
1329             tv.tv_usec = ts.tv_nsec / 1000;
1330             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1331                 return -TARGET_EFAULT;
1332             }
1333         }
1334     }
1335 
1336     return ret;
1337 }
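/*
 * Illustrative note (editor's addition): do_select() is reached from the
 * syscall dispatcher later in this file, roughly as
 *
 *     case TARGET_NR__newselect:
 *         return do_select(arg1, arg2, arg3, arg4, arg5);
 *
 * i.e. the guest fd count and the four guest pointers are passed through
 * unchanged and all guest-memory access happens inside do_select() itself.
 */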
1338 
1339 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1340 static abi_long do_old_select(abi_ulong arg1)
1341 {
1342     struct target_sel_arg_struct *sel;
1343     abi_ulong inp, outp, exp, tvp;
1344     long nsel;
1345 
1346     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1347         return -TARGET_EFAULT;
1348     }
1349 
1350     nsel = tswapal(sel->n);
1351     inp = tswapal(sel->inp);
1352     outp = tswapal(sel->outp);
1353     exp = tswapal(sel->exp);
1354     tvp = tswapal(sel->tvp);
1355 
1356     unlock_user_struct(sel, arg1, 0);
1357 
1358     return do_select(nsel, inp, outp, exp, tvp);
1359 }
1360 #endif
1361 #endif
1362 
1363 static abi_long do_pipe2(int host_pipe[], int flags)
1364 {
1365 #ifdef CONFIG_PIPE2
1366     return pipe2(host_pipe, flags);
1367 #else
1368     return -ENOSYS;
1369 #endif
1370 }
1371 
1372 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1373                         int flags, int is_pipe2)
1374 {
1375     int host_pipe[2];
1376     abi_long ret;
1377     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1378 
1379     if (is_error(ret))
1380         return get_errno(ret);
1381 
1382     /* Several targets have special calling conventions for the original
1383        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1384     if (!is_pipe2) {
1385 #if defined(TARGET_ALPHA)
1386         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1387         return host_pipe[0];
1388 #elif defined(TARGET_MIPS)
1389         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1390         return host_pipe[0];
1391 #elif defined(TARGET_SH4)
1392         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1393         return host_pipe[0];
1394 #elif defined(TARGET_SPARC)
1395         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1396         return host_pipe[0];
1397 #endif
1398     }
1399 
1400     if (put_user_s32(host_pipe[0], pipedes)
1401         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1402         return -TARGET_EFAULT;
1403     return get_errno(ret);
1404 }
1405 
1406 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1407                                               abi_ulong target_addr,
1408                                               socklen_t len)
1409 {
1410     struct target_ip_mreqn *target_smreqn;
1411 
1412     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1413     if (!target_smreqn)
1414         return -TARGET_EFAULT;
1415     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1416     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1417     if (len == sizeof(struct target_ip_mreqn))
1418         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1419     unlock_user(target_smreqn, target_addr, 0);
1420 
1421     return 0;
1422 }
1423 
1424 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1425                                                abi_ulong target_addr,
1426                                                socklen_t len)
1427 {
1428     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1429     sa_family_t sa_family;
1430     struct target_sockaddr *target_saddr;
1431 
1432     if (fd_trans_target_to_host_addr(fd)) {
1433         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1434     }
1435 
1436     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1437     if (!target_saddr)
1438         return -TARGET_EFAULT;
1439 
1440     sa_family = tswap16(target_saddr->sa_family);
1441 
1442     /* Oops. The caller might send an incomplete sun_path; sun_path
1443      * must be terminated by \0 (see the manual page), but
1444      * unfortunately it is quite common to specify sockaddr_un
1445      * length as "strlen(x->sun_path)" while it should be
1446      * "strlen(...) + 1". We'll fix that here if needed.
1447      * The Linux kernel has a similar workaround.
1448      */
1449 
1450     if (sa_family == AF_UNIX) {
1451         if (len < unix_maxlen && len > 0) {
1452             char *cp = (char*)target_saddr;
1453 
1454             if (cp[len - 1] && !cp[len])
1455                 len++;
1456         }
1457         if (len > unix_maxlen)
1458             len = unix_maxlen;
1459     }
1460 
1461     memcpy(addr, target_saddr, len);
1462     addr->sa_family = sa_family;
1463     if (sa_family == AF_NETLINK) {
1464         struct sockaddr_nl *nladdr;
1465 
1466         nladdr = (struct sockaddr_nl *)addr;
1467         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1468         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1469     } else if (sa_family == AF_PACKET) {
1470         struct target_sockaddr_ll *lladdr;
1471 
1472         lladdr = (struct target_sockaddr_ll *)addr;
1473         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1474         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1475     }
1476     unlock_user(target_saddr, target_addr, 0);
1477 
1478     return 0;
1479 }
1480 
1481 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1482                                                struct sockaddr *addr,
1483                                                socklen_t len)
1484 {
1485     struct target_sockaddr *target_saddr;
1486 
1487     if (len == 0) {
1488         return 0;
1489     }
1490     assert(addr);
1491 
1492     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1493     if (!target_saddr)
1494         return -TARGET_EFAULT;
1495     memcpy(target_saddr, addr, len);
1496     if (len >= offsetof(struct target_sockaddr, sa_family) +
1497         sizeof(target_saddr->sa_family)) {
1498         target_saddr->sa_family = tswap16(addr->sa_family);
1499     }
1500     if (addr->sa_family == AF_NETLINK &&
1501         len >= sizeof(struct target_sockaddr_nl)) {
1502         struct target_sockaddr_nl *target_nl =
1503                (struct target_sockaddr_nl *)target_saddr;
1504         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1505         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1506     } else if (addr->sa_family == AF_PACKET) {
1507         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1508         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1509         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1510     } else if (addr->sa_family == AF_INET6 &&
1511                len >= sizeof(struct target_sockaddr_in6)) {
1512         struct target_sockaddr_in6 *target_in6 =
1513                (struct target_sockaddr_in6 *)target_saddr;
1514         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1515     }
1516     unlock_user(target_saddr, target_addr, len);
1517 
1518     return 0;
1519 }
1520 
1521 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1522                                            struct target_msghdr *target_msgh)
1523 {
1524     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1525     abi_long msg_controllen;
1526     abi_ulong target_cmsg_addr;
1527     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1528     socklen_t space = 0;
1529 
1530     msg_controllen = tswapal(target_msgh->msg_controllen);
1531     if (msg_controllen < sizeof (struct target_cmsghdr))
1532         goto the_end;
1533     target_cmsg_addr = tswapal(target_msgh->msg_control);
1534     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1535     target_cmsg_start = target_cmsg;
1536     if (!target_cmsg)
1537         return -TARGET_EFAULT;
1538 
1539     while (cmsg && target_cmsg) {
1540         void *data = CMSG_DATA(cmsg);
1541         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1542 
1543         int len = tswapal(target_cmsg->cmsg_len)
1544             - sizeof(struct target_cmsghdr);
1545 
1546         space += CMSG_SPACE(len);
1547         if (space > msgh->msg_controllen) {
1548             space -= CMSG_SPACE(len);
1549             /* This is a QEMU bug, since we allocated the payload
1550              * area ourselves (unlike overflow in host-to-target
1551              * conversion, which is just the guest giving us a buffer
1552              * that's too small). It can't happen for the payload types
1553              * we currently support; if it becomes an issue in future
1554              * we would need to improve our allocation strategy to
1555              * something more intelligent than "twice the size of the
1556              * target buffer we're reading from".
1557              */
1558             gemu_log("Host cmsg overflow\n");
1559             break;
1560         }
1561 
1562         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1563             cmsg->cmsg_level = SOL_SOCKET;
1564         } else {
1565             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1566         }
1567         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1568         cmsg->cmsg_len = CMSG_LEN(len);
1569 
1570         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1571             int *fd = (int *)data;
1572             int *target_fd = (int *)target_data;
1573             int i, numfds = len / sizeof(int);
1574 
1575             for (i = 0; i < numfds; i++) {
1576                 __get_user(fd[i], target_fd + i);
1577             }
1578         } else if (cmsg->cmsg_level == SOL_SOCKET
1579                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1580             struct ucred *cred = (struct ucred *)data;
1581             struct target_ucred *target_cred =
1582                 (struct target_ucred *)target_data;
1583 
1584             __get_user(cred->pid, &target_cred->pid);
1585             __get_user(cred->uid, &target_cred->uid);
1586             __get_user(cred->gid, &target_cred->gid);
1587         } else {
1588             gemu_log("Unsupported ancillary data: %d/%d\n",
1589                                         cmsg->cmsg_level, cmsg->cmsg_type);
1590             memcpy(data, target_data, len);
1591         }
1592 
1593         cmsg = CMSG_NXTHDR(msgh, cmsg);
1594         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1595                                          target_cmsg_start);
1596     }
1597     unlock_user(target_cmsg, target_cmsg_addr, 0);
1598  the_end:
1599     msgh->msg_controllen = space;
1600     return 0;
1601 }
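
/*
 * Editorial sketch (illustration only): a guest sendmsg() that passes two
 * file descriptors in a single SCM_RIGHTS control message reaches the
 * loop above with
 *
 *     len    = 2 * sizeof(int)   (payload after the target cmsghdr)
 *     numfds = 2
 *
 * and each descriptor is fetched with __get_user(), which also converts
 * it from guest to host byte order before the host sendmsg() is issued.
 */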
1602 
1603 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1604                                            struct msghdr *msgh)
1605 {
1606     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1607     abi_long msg_controllen;
1608     abi_ulong target_cmsg_addr;
1609     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1610     socklen_t space = 0;
1611 
1612     msg_controllen = tswapal(target_msgh->msg_controllen);
1613     if (msg_controllen < sizeof (struct target_cmsghdr))
1614         goto the_end;
1615     target_cmsg_addr = tswapal(target_msgh->msg_control);
1616     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1617     target_cmsg_start = target_cmsg;
1618     if (!target_cmsg)
1619         return -TARGET_EFAULT;
1620 
1621     while (cmsg && target_cmsg) {
1622         void *data = CMSG_DATA(cmsg);
1623         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1624 
1625         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1626         int tgt_len, tgt_space;
1627 
1628         /* We never copy a half-header but may copy half-data;
1629          * this is Linux's behaviour in put_cmsg(). Note that
1630          * truncation here is a guest problem (which we report
1631          * to the guest via the CTRUNC bit), unlike truncation
1632          * in target_to_host_cmsg, which is a QEMU bug.
1633          */
1634         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1635             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1636             break;
1637         }
1638 
1639         if (cmsg->cmsg_level == SOL_SOCKET) {
1640             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1641         } else {
1642             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1643         }
1644         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1645 
1646         /* Payload types which need a different size of payload on
1647          * the target must adjust tgt_len here.
1648          */
1649         tgt_len = len;
1650         switch (cmsg->cmsg_level) {
1651         case SOL_SOCKET:
1652             switch (cmsg->cmsg_type) {
1653             case SO_TIMESTAMP:
1654                 tgt_len = sizeof(struct target_timeval);
1655                 break;
1656             default:
1657                 break;
1658             }
1659             break;
1660         default:
1661             break;
1662         }
1663 
1664         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1665             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1666             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1667         }
1668 
1669         /* We must now copy-and-convert len bytes of payload
1670          * into tgt_len bytes of destination space. Bear in mind
1671          * that in both source and destination we may be dealing
1672          * with a truncated value!
1673          */
1674         switch (cmsg->cmsg_level) {
1675         case SOL_SOCKET:
1676             switch (cmsg->cmsg_type) {
1677             case SCM_RIGHTS:
1678             {
1679                 int *fd = (int *)data;
1680                 int *target_fd = (int *)target_data;
1681                 int i, numfds = tgt_len / sizeof(int);
1682 
1683                 for (i = 0; i < numfds; i++) {
1684                     __put_user(fd[i], target_fd + i);
1685                 }
1686                 break;
1687             }
1688             case SO_TIMESTAMP:
1689             {
1690                 struct timeval *tv = (struct timeval *)data;
1691                 struct target_timeval *target_tv =
1692                     (struct target_timeval *)target_data;
1693 
1694                 if (len != sizeof(struct timeval) ||
1695                     tgt_len != sizeof(struct target_timeval)) {
1696                     goto unimplemented;
1697                 }
1698 
1699                 /* copy struct timeval to target */
1700                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1701                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1702                 break;
1703             }
1704             case SCM_CREDENTIALS:
1705             {
1706                 struct ucred *cred = (struct ucred *)data;
1707                 struct target_ucred *target_cred =
1708                     (struct target_ucred *)target_data;
1709 
1710                 __put_user(cred->pid, &target_cred->pid);
1711                 __put_user(cred->uid, &target_cred->uid);
1712                 __put_user(cred->gid, &target_cred->gid);
1713                 break;
1714             }
1715             default:
1716                 goto unimplemented;
1717             }
1718             break;
1719 
1720         case SOL_IP:
1721             switch (cmsg->cmsg_type) {
1722             case IP_TTL:
1723             {
1724                 uint32_t *v = (uint32_t *)data;
1725                 uint32_t *t_int = (uint32_t *)target_data;
1726 
1727                 if (len != sizeof(uint32_t) ||
1728                     tgt_len != sizeof(uint32_t)) {
1729                     goto unimplemented;
1730                 }
1731                 __put_user(*v, t_int);
1732                 break;
1733             }
1734             case IP_RECVERR:
1735             {
1736                 struct errhdr_t {
1737                    struct sock_extended_err ee;
1738                    struct sockaddr_in offender;
1739                 };
1740                 struct errhdr_t *errh = (struct errhdr_t *)data;
1741                 struct errhdr_t *target_errh =
1742                     (struct errhdr_t *)target_data;
1743 
1744                 if (len != sizeof(struct errhdr_t) ||
1745                     tgt_len != sizeof(struct errhdr_t)) {
1746                     goto unimplemented;
1747                 }
1748                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1749                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1750                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1751                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1752                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1753                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1754                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1755                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1756                     (void *) &errh->offender, sizeof(errh->offender));
1757                 break;
1758             }
1759             default:
1760                 goto unimplemented;
1761             }
1762             break;
1763 
1764         case SOL_IPV6:
1765             switch (cmsg->cmsg_type) {
1766             case IPV6_HOPLIMIT:
1767             {
1768                 uint32_t *v = (uint32_t *)data;
1769                 uint32_t *t_int = (uint32_t *)target_data;
1770 
1771                 if (len != sizeof(uint32_t) ||
1772                     tgt_len != sizeof(uint32_t)) {
1773                     goto unimplemented;
1774                 }
1775                 __put_user(*v, t_int);
1776                 break;
1777             }
1778             case IPV6_RECVERR:
1779             {
1780                 struct errhdr6_t {
1781                    struct sock_extended_err ee;
1782                    struct sockaddr_in6 offender;
1783                 };
1784                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1785                 struct errhdr6_t *target_errh =
1786                     (struct errhdr6_t *)target_data;
1787 
1788                 if (len != sizeof(struct errhdr6_t) ||
1789                     tgt_len != sizeof(struct errhdr6_t)) {
1790                     goto unimplemented;
1791                 }
1792                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1793                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1794                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1795                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1796                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1797                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1798                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1799                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1800                     (void *) &errh->offender, sizeof(errh->offender));
1801                 break;
1802             }
1803             default:
1804                 goto unimplemented;
1805             }
1806             break;
1807 
1808         default:
1809         unimplemented:
1810             gemu_log("Unsupported ancillary data: %d/%d\n",
1811                                         cmsg->cmsg_level, cmsg->cmsg_type);
1812             memcpy(target_data, data, MIN(len, tgt_len));
1813             if (tgt_len > len) {
1814                 memset(target_data + len, 0, tgt_len - len);
1815             }
1816         }
1817 
1818         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1819         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1820         if (msg_controllen < tgt_space) {
1821             tgt_space = msg_controllen;
1822         }
1823         msg_controllen -= tgt_space;
1824         space += tgt_space;
1825         cmsg = CMSG_NXTHDR(msgh, cmsg);
1826         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1827                                          target_cmsg_start);
1828     }
1829     unlock_user(target_cmsg, target_cmsg_addr, space);
1830  the_end:
1831     target_msgh->msg_controllen = tswapal(space);
1832     return 0;
1833 }
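
/*
 * Editorial worked example (illustration only): suppose the host kernel
 * returns three SCM_RIGHTS descriptors (len == 3 * sizeof(int)) but the
 * guest's control buffer only leaves room for two target ints after the
 * target cmsghdr. Then msg_controllen < TARGET_CMSG_LEN(tgt_len), so
 * MSG_CTRUNC is reported, tgt_len is clamped to 2 * sizeof(int), numfds
 * becomes 2, and only the first two descriptors are delivered; this is
 * the guest-visible truncation described in the comment at the top of
 * the loop.
 */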
1834 
1835 /* do_setsockopt() Must return target values and target errnos. */
1836 static abi_long do_setsockopt(int sockfd, int level, int optname,
1837                               abi_ulong optval_addr, socklen_t optlen)
1838 {
1839     abi_long ret;
1840     int val;
1841     struct ip_mreqn *ip_mreq;
1842     struct ip_mreq_source *ip_mreq_source;
1843 
1844     switch(level) {
1845     case SOL_TCP:
1846         /* TCP options all take an 'int' value.  */
1847         if (optlen < sizeof(uint32_t))
1848             return -TARGET_EINVAL;
1849 
1850         if (get_user_u32(val, optval_addr))
1851             return -TARGET_EFAULT;
1852         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1853         break;
1854     case SOL_IP:
1855         switch(optname) {
1856         case IP_TOS:
1857         case IP_TTL:
1858         case IP_HDRINCL:
1859         case IP_ROUTER_ALERT:
1860         case IP_RECVOPTS:
1861         case IP_RETOPTS:
1862         case IP_PKTINFO:
1863         case IP_MTU_DISCOVER:
1864         case IP_RECVERR:
1865         case IP_RECVTTL:
1866         case IP_RECVTOS:
1867 #ifdef IP_FREEBIND
1868         case IP_FREEBIND:
1869 #endif
1870         case IP_MULTICAST_TTL:
1871         case IP_MULTICAST_LOOP:
1872             val = 0;
1873             if (optlen >= sizeof(uint32_t)) {
1874                 if (get_user_u32(val, optval_addr))
1875                     return -TARGET_EFAULT;
1876             } else if (optlen >= 1) {
1877                 if (get_user_u8(val, optval_addr))
1878                     return -TARGET_EFAULT;
1879             }
1880             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1881             break;
1882         case IP_ADD_MEMBERSHIP:
1883         case IP_DROP_MEMBERSHIP:
1884             if (optlen < sizeof (struct target_ip_mreq) ||
1885                 optlen > sizeof (struct target_ip_mreqn))
1886                 return -TARGET_EINVAL;
1887 
1888             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1889             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1890             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1891             break;
1892 
1893         case IP_BLOCK_SOURCE:
1894         case IP_UNBLOCK_SOURCE:
1895         case IP_ADD_SOURCE_MEMBERSHIP:
1896         case IP_DROP_SOURCE_MEMBERSHIP:
1897             if (optlen != sizeof (struct target_ip_mreq_source))
1898                 return -TARGET_EINVAL;
1899 
1900             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1901             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1902             unlock_user (ip_mreq_source, optval_addr, 0);
1903             break;
1904 
1905         default:
1906             goto unimplemented;
1907         }
1908         break;
1909     case SOL_IPV6:
1910         switch (optname) {
1911         case IPV6_MTU_DISCOVER:
1912         case IPV6_MTU:
1913         case IPV6_V6ONLY:
1914         case IPV6_RECVPKTINFO:
1915         case IPV6_UNICAST_HOPS:
1916         case IPV6_MULTICAST_HOPS:
1917         case IPV6_MULTICAST_LOOP:
1918         case IPV6_RECVERR:
1919         case IPV6_RECVHOPLIMIT:
1920         case IPV6_2292HOPLIMIT:
1921         case IPV6_CHECKSUM:
1922         case IPV6_ADDRFORM:
1923         case IPV6_2292PKTINFO:
1924         case IPV6_RECVTCLASS:
1925         case IPV6_RECVRTHDR:
1926         case IPV6_2292RTHDR:
1927         case IPV6_RECVHOPOPTS:
1928         case IPV6_2292HOPOPTS:
1929         case IPV6_RECVDSTOPTS:
1930         case IPV6_2292DSTOPTS:
1931         case IPV6_TCLASS:
1932 #ifdef IPV6_RECVPATHMTU
1933         case IPV6_RECVPATHMTU:
1934 #endif
1935 #ifdef IPV6_TRANSPARENT
1936         case IPV6_TRANSPARENT:
1937 #endif
1938 #ifdef IPV6_FREEBIND
1939         case IPV6_FREEBIND:
1940 #endif
1941 #ifdef IPV6_RECVORIGDSTADDR
1942         case IPV6_RECVORIGDSTADDR:
1943 #endif
1944             val = 0;
1945             if (optlen < sizeof(uint32_t)) {
1946                 return -TARGET_EINVAL;
1947             }
1948             if (get_user_u32(val, optval_addr)) {
1949                 return -TARGET_EFAULT;
1950             }
1951             ret = get_errno(setsockopt(sockfd, level, optname,
1952                                        &val, sizeof(val)));
1953             break;
1954         case IPV6_PKTINFO:
1955         {
1956             struct in6_pktinfo pki;
1957 
1958             if (optlen < sizeof(pki)) {
1959                 return -TARGET_EINVAL;
1960             }
1961 
1962             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1963                 return -TARGET_EFAULT;
1964             }
1965 
1966             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1967 
1968             ret = get_errno(setsockopt(sockfd, level, optname,
1969                                        &pki, sizeof(pki)));
1970             break;
1971         }
1972         case IPV6_ADD_MEMBERSHIP:
1973         case IPV6_DROP_MEMBERSHIP:
1974         {
1975             struct ipv6_mreq ipv6mreq;
1976 
1977             if (optlen < sizeof(ipv6mreq)) {
1978                 return -TARGET_EINVAL;
1979             }
1980 
1981             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1982                 return -TARGET_EFAULT;
1983             }
1984 
1985             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1986 
1987             ret = get_errno(setsockopt(sockfd, level, optname,
1988                                        &ipv6mreq, sizeof(ipv6mreq)));
1989             break;
1990         }
1991         default:
1992             goto unimplemented;
1993         }
1994         break;
1995     case SOL_ICMPV6:
1996         switch (optname) {
1997         case ICMPV6_FILTER:
1998         {
1999             struct icmp6_filter icmp6f;
2000 
2001             if (optlen > sizeof(icmp6f)) {
2002                 optlen = sizeof(icmp6f);
2003             }
2004 
2005             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2006                 return -TARGET_EFAULT;
2007             }
2008 
2009             for (val = 0; val < 8; val++) {
2010                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2011             }
2012 
2013             ret = get_errno(setsockopt(sockfd, level, optname,
2014                                        &icmp6f, optlen));
2015             break;
2016         }
2017         default:
2018             goto unimplemented;
2019         }
2020         break;
2021     case SOL_RAW:
2022         switch (optname) {
2023         case ICMP_FILTER:
2024         case IPV6_CHECKSUM:
2025             /* these options take a u32 value */
2026             if (optlen < sizeof(uint32_t)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (get_user_u32(val, optval_addr)) {
2031                 return -TARGET_EFAULT;
2032             }
2033             ret = get_errno(setsockopt(sockfd, level, optname,
2034                                        &val, sizeof(val)));
2035             break;
2036 
2037         default:
2038             goto unimplemented;
2039         }
2040         break;
2041 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2042     case SOL_ALG:
2043         switch (optname) {
2044         case ALG_SET_KEY:
2045         {
2046             char *alg_key = g_try_malloc(optlen);
2047 
2048             if (!alg_key) {
2049                 return -TARGET_ENOMEM;
2050             }
2051             if (copy_from_user(alg_key, optval_addr, optlen)) {
2052                 g_free(alg_key);
2053                 return -TARGET_EFAULT;
2054             }
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        alg_key, optlen));
2057             g_free(alg_key);
2058             break;
2059         }
2060         case ALG_SET_AEAD_AUTHSIZE:
2061         {
2062             ret = get_errno(setsockopt(sockfd, level, optname,
2063                                        NULL, optlen));
2064             break;
2065         }
2066         default:
2067             goto unimplemented;
2068         }
2069         break;
2070 #endif
2071     case TARGET_SOL_SOCKET:
2072         switch (optname) {
2073         case TARGET_SO_RCVTIMEO:
2074         {
2075                 struct timeval tv;
2076 
2077                 optname = SO_RCVTIMEO;
2078 
2079 set_timeout:
2080                 if (optlen != sizeof(struct target_timeval)) {
2081                     return -TARGET_EINVAL;
2082                 }
2083 
2084                 if (copy_from_user_timeval(&tv, optval_addr)) {
2085                     return -TARGET_EFAULT;
2086                 }
2087 
2088                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2089                                 &tv, sizeof(tv)));
2090                 return ret;
2091         }
2092         case TARGET_SO_SNDTIMEO:
2093                 optname = SO_SNDTIMEO;
2094                 goto set_timeout;
2095         case TARGET_SO_ATTACH_FILTER:
2096         {
2097                 struct target_sock_fprog *tfprog;
2098                 struct target_sock_filter *tfilter;
2099                 struct sock_fprog fprog;
2100                 struct sock_filter *filter;
2101                 int i;
2102 
2103                 if (optlen != sizeof(*tfprog)) {
2104                     return -TARGET_EINVAL;
2105                 }
2106                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2107                     return -TARGET_EFAULT;
2108                 }
2109                 if (!lock_user_struct(VERIFY_READ, tfilter,
2110                                       tswapal(tfprog->filter), 0)) {
2111                     unlock_user_struct(tfprog, optval_addr, 1);
2112                     return -TARGET_EFAULT;
2113                 }
2114 
2115                 fprog.len = tswap16(tfprog->len);
2116                 filter = g_try_new(struct sock_filter, fprog.len);
2117                 if (filter == NULL) {
2118                     unlock_user_struct(tfilter, tfprog->filter, 1);
2119                     unlock_user_struct(tfprog, optval_addr, 1);
2120                     return -TARGET_ENOMEM;
2121                 }
2122                 for (i = 0; i < fprog.len; i++) {
2123                     filter[i].code = tswap16(tfilter[i].code);
2124                     filter[i].jt = tfilter[i].jt;
2125                     filter[i].jf = tfilter[i].jf;
2126                     filter[i].k = tswap32(tfilter[i].k);
2127                 }
2128                 fprog.filter = filter;
2129 
2130                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2131                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2132                 g_free(filter);
2133 
2134                 unlock_user_struct(tfilter, tfprog->filter, 1);
2135                 unlock_user_struct(tfprog, optval_addr, 1);
2136                 return ret;
2137         }
2138 	case TARGET_SO_BINDTODEVICE:
2139 	{
2140 		char *dev_ifname, *addr_ifname;
2141 
2142 		if (optlen > IFNAMSIZ - 1) {
2143 		    optlen = IFNAMSIZ - 1;
2144 		}
2145 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2146 		if (!dev_ifname) {
2147 		    return -TARGET_EFAULT;
2148 		}
2149 		optname = SO_BINDTODEVICE;
2150 		addr_ifname = alloca(IFNAMSIZ);
2151 		memcpy(addr_ifname, dev_ifname, optlen);
2152 		addr_ifname[optlen] = 0;
2153 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2154                                            addr_ifname, optlen));
2155 		unlock_user (dev_ifname, optval_addr, 0);
2156 		return ret;
2157 	}
2158         case TARGET_SO_LINGER:
2159         {
2160                 struct linger lg;
2161                 struct target_linger *tlg;
2162 
2163                 if (optlen != sizeof(struct target_linger)) {
2164                     return -TARGET_EINVAL;
2165                 }
2166                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2167                     return -TARGET_EFAULT;
2168                 }
2169                 __get_user(lg.l_onoff, &tlg->l_onoff);
2170                 __get_user(lg.l_linger, &tlg->l_linger);
2171                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2172                                 &lg, sizeof(lg)));
2173                 unlock_user_struct(tlg, optval_addr, 0);
2174                 return ret;
2175         }
2176             /* Options with 'int' argument.  */
2177         case TARGET_SO_DEBUG:
2178 		optname = SO_DEBUG;
2179 		break;
2180         case TARGET_SO_REUSEADDR:
2181 		optname = SO_REUSEADDR;
2182 		break;
2183 #ifdef SO_REUSEPORT
2184         case TARGET_SO_REUSEPORT:
2185                 optname = SO_REUSEPORT;
2186                 break;
2187 #endif
2188         case TARGET_SO_TYPE:
2189 		optname = SO_TYPE;
2190 		break;
2191         case TARGET_SO_ERROR:
2192 		optname = SO_ERROR;
2193 		break;
2194         case TARGET_SO_DONTROUTE:
2195 		optname = SO_DONTROUTE;
2196 		break;
2197         case TARGET_SO_BROADCAST:
2198 		optname = SO_BROADCAST;
2199 		break;
2200         case TARGET_SO_SNDBUF:
2201 		optname = SO_SNDBUF;
2202 		break;
2203         case TARGET_SO_SNDBUFFORCE:
2204                 optname = SO_SNDBUFFORCE;
2205                 break;
2206         case TARGET_SO_RCVBUF:
2207 		optname = SO_RCVBUF;
2208 		break;
2209         case TARGET_SO_RCVBUFFORCE:
2210                 optname = SO_RCVBUFFORCE;
2211                 break;
2212         case TARGET_SO_KEEPALIVE:
2213 		optname = SO_KEEPALIVE;
2214 		break;
2215         case TARGET_SO_OOBINLINE:
2216 		optname = SO_OOBINLINE;
2217 		break;
2218         case TARGET_SO_NO_CHECK:
2219 		optname = SO_NO_CHECK;
2220 		break;
2221         case TARGET_SO_PRIORITY:
2222 		optname = SO_PRIORITY;
2223 		break;
2224 #ifdef SO_BSDCOMPAT
2225         case TARGET_SO_BSDCOMPAT:
2226 		optname = SO_BSDCOMPAT;
2227 		break;
2228 #endif
2229         case TARGET_SO_PASSCRED:
2230 		optname = SO_PASSCRED;
2231 		break;
2232         case TARGET_SO_PASSSEC:
2233                 optname = SO_PASSSEC;
2234                 break;
2235         case TARGET_SO_TIMESTAMP:
2236 		optname = SO_TIMESTAMP;
2237 		break;
2238         case TARGET_SO_RCVLOWAT:
2239 		optname = SO_RCVLOWAT;
2240 		break;
2241         default:
2242             goto unimplemented;
2243         }
2244 	if (optlen < sizeof(uint32_t))
2245             return -TARGET_EINVAL;
2246 
2247 	if (get_user_u32(val, optval_addr))
2248             return -TARGET_EFAULT;
2249 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2250         break;
2251 #ifdef SOL_NETLINK
2252     case SOL_NETLINK:
2253         switch (optname) {
2254         case NETLINK_PKTINFO:
2255         case NETLINK_ADD_MEMBERSHIP:
2256         case NETLINK_DROP_MEMBERSHIP:
2257         case NETLINK_BROADCAST_ERROR:
2258         case NETLINK_NO_ENOBUFS:
2259 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2260         case NETLINK_LISTEN_ALL_NSID:
2261         case NETLINK_CAP_ACK:
2262 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2264         case NETLINK_EXT_ACK:
2265 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2266 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2267         case NETLINK_GET_STRICT_CHK:
2268 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2269             break;
2270         default:
2271             goto unimplemented;
2272         }
2273         val = 0;
2274         if (optlen < sizeof(uint32_t)) {
2275             return -TARGET_EINVAL;
2276         }
2277         if (get_user_u32(val, optval_addr)) {
2278             return -TARGET_EFAULT;
2279         }
2280         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2281                                    sizeof(val)));
2282         break;
2283 #endif /* SOL_NETLINK */
2284     default:
2285     unimplemented:
2286         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2287         ret = -TARGET_ENOPROTOOPT;
2288     }
2289     return ret;
2290 }
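
/*
 * Editorial usage sketch (hypothetical guest code, shown only to
 * illustrate the TARGET_SO_RCVTIMEO path above; 's' is an assumed
 * socket descriptor):
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *     setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * The guest's SOL_SOCKET/SO_RCVTIMEO pair arrives here as
 * TARGET_SOL_SOCKET/TARGET_SO_RCVTIMEO, the timeval is converted with
 * copy_from_user_timeval(), and the host setsockopt() is issued with the
 * host's own SO_RCVTIMEO value.
 */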
2291 
2292 /* do_getsockopt() Must return target values and target errnos. */
2293 static abi_long do_getsockopt(int sockfd, int level, int optname,
2294                               abi_ulong optval_addr, abi_ulong optlen)
2295 {
2296     abi_long ret;
2297     int len, val;
2298     socklen_t lv;
2299 
2300     switch(level) {
2301     case TARGET_SOL_SOCKET:
2302         level = SOL_SOCKET;
2303         switch (optname) {
2304         /* These don't just return a single integer */
2305         case TARGET_SO_RCVTIMEO:
2306         case TARGET_SO_SNDTIMEO:
2307         case TARGET_SO_PEERNAME:
2308             goto unimplemented;
2309         case TARGET_SO_PEERCRED: {
2310             struct ucred cr;
2311             socklen_t crlen;
2312             struct target_ucred *tcr;
2313 
2314             if (get_user_u32(len, optlen)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             if (len < 0) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             crlen = sizeof(cr);
2322             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2323                                        &cr, &crlen));
2324             if (ret < 0) {
2325                 return ret;
2326             }
2327             if (len > crlen) {
2328                 len = crlen;
2329             }
2330             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2331                 return -TARGET_EFAULT;
2332             }
2333             __put_user(cr.pid, &tcr->pid);
2334             __put_user(cr.uid, &tcr->uid);
2335             __put_user(cr.gid, &tcr->gid);
2336             unlock_user_struct(tcr, optval_addr, 1);
2337             if (put_user_u32(len, optlen)) {
2338                 return -TARGET_EFAULT;
2339             }
2340             break;
2341         }
2342         case TARGET_SO_LINGER:
2343         {
2344             struct linger lg;
2345             socklen_t lglen;
2346             struct target_linger *tlg;
2347 
2348             if (get_user_u32(len, optlen)) {
2349                 return -TARGET_EFAULT;
2350             }
2351             if (len < 0) {
2352                 return -TARGET_EINVAL;
2353             }
2354 
2355             lglen = sizeof(lg);
2356             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2357                                        &lg, &lglen));
2358             if (ret < 0) {
2359                 return ret;
2360             }
2361             if (len > lglen) {
2362                 len = lglen;
2363             }
2364             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2365                 return -TARGET_EFAULT;
2366             }
2367             __put_user(lg.l_onoff, &tlg->l_onoff);
2368             __put_user(lg.l_linger, &tlg->l_linger);
2369             unlock_user_struct(tlg, optval_addr, 1);
2370             if (put_user_u32(len, optlen)) {
2371                 return -TARGET_EFAULT;
2372             }
2373             break;
2374         }
2375         /* Options with 'int' argument.  */
2376         case TARGET_SO_DEBUG:
2377             optname = SO_DEBUG;
2378             goto int_case;
2379         case TARGET_SO_REUSEADDR:
2380             optname = SO_REUSEADDR;
2381             goto int_case;
2382 #ifdef SO_REUSEPORT
2383         case TARGET_SO_REUSEPORT:
2384             optname = SO_REUSEPORT;
2385             goto int_case;
2386 #endif
2387         case TARGET_SO_TYPE:
2388             optname = SO_TYPE;
2389             goto int_case;
2390         case TARGET_SO_ERROR:
2391             optname = SO_ERROR;
2392             goto int_case;
2393         case TARGET_SO_DONTROUTE:
2394             optname = SO_DONTROUTE;
2395             goto int_case;
2396         case TARGET_SO_BROADCAST:
2397             optname = SO_BROADCAST;
2398             goto int_case;
2399         case TARGET_SO_SNDBUF:
2400             optname = SO_SNDBUF;
2401             goto int_case;
2402         case TARGET_SO_RCVBUF:
2403             optname = SO_RCVBUF;
2404             goto int_case;
2405         case TARGET_SO_KEEPALIVE:
2406             optname = SO_KEEPALIVE;
2407             goto int_case;
2408         case TARGET_SO_OOBINLINE:
2409             optname = SO_OOBINLINE;
2410             goto int_case;
2411         case TARGET_SO_NO_CHECK:
2412             optname = SO_NO_CHECK;
2413             goto int_case;
2414         case TARGET_SO_PRIORITY:
2415             optname = SO_PRIORITY;
2416             goto int_case;
2417 #ifdef SO_BSDCOMPAT
2418         case TARGET_SO_BSDCOMPAT:
2419             optname = SO_BSDCOMPAT;
2420             goto int_case;
2421 #endif
2422         case TARGET_SO_PASSCRED:
2423             optname = SO_PASSCRED;
2424             goto int_case;
2425         case TARGET_SO_TIMESTAMP:
2426             optname = SO_TIMESTAMP;
2427             goto int_case;
2428         case TARGET_SO_RCVLOWAT:
2429             optname = SO_RCVLOWAT;
2430             goto int_case;
2431         case TARGET_SO_ACCEPTCONN:
2432             optname = SO_ACCEPTCONN;
2433             goto int_case;
2434         default:
2435             goto int_case;
2436         }
2437         break;
2438     case SOL_TCP:
2439         /* TCP options all take an 'int' value.  */
2440     int_case:
2441         if (get_user_u32(len, optlen))
2442             return -TARGET_EFAULT;
2443         if (len < 0)
2444             return -TARGET_EINVAL;
2445         lv = sizeof(lv);
2446         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2447         if (ret < 0)
2448             return ret;
2449         if (optname == SO_TYPE) {
2450             val = host_to_target_sock_type(val);
2451         }
2452         if (len > lv)
2453             len = lv;
2454         if (len == 4) {
2455             if (put_user_u32(val, optval_addr))
2456                 return -TARGET_EFAULT;
2457         } else {
2458             if (put_user_u8(val, optval_addr))
2459                 return -TARGET_EFAULT;
2460         }
2461         if (put_user_u32(len, optlen))
2462             return -TARGET_EFAULT;
2463         break;
2464     case SOL_IP:
2465         switch(optname) {
2466         case IP_TOS:
2467         case IP_TTL:
2468         case IP_HDRINCL:
2469         case IP_ROUTER_ALERT:
2470         case IP_RECVOPTS:
2471         case IP_RETOPTS:
2472         case IP_PKTINFO:
2473         case IP_MTU_DISCOVER:
2474         case IP_RECVERR:
2475         case IP_RECVTOS:
2476 #ifdef IP_FREEBIND
2477         case IP_FREEBIND:
2478 #endif
2479         case IP_MULTICAST_TTL:
2480         case IP_MULTICAST_LOOP:
2481             if (get_user_u32(len, optlen))
2482                 return -TARGET_EFAULT;
2483             if (len < 0)
2484                 return -TARGET_EINVAL;
2485             lv = sizeof(lv);
2486             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2487             if (ret < 0)
2488                 return ret;
2489             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2490                 len = 1;
2491                 if (put_user_u32(len, optlen)
2492                     || put_user_u8(val, optval_addr))
2493                     return -TARGET_EFAULT;
2494             } else {
2495                 if (len > sizeof(int))
2496                     len = sizeof(int);
2497                 if (put_user_u32(len, optlen)
2498                     || put_user_u32(val, optval_addr))
2499                     return -TARGET_EFAULT;
2500             }
2501             break;
2502         default:
2503             ret = -TARGET_ENOPROTOOPT;
2504             break;
2505         }
2506         break;
2507     case SOL_IPV6:
2508         switch (optname) {
2509         case IPV6_MTU_DISCOVER:
2510         case IPV6_MTU:
2511         case IPV6_V6ONLY:
2512         case IPV6_RECVPKTINFO:
2513         case IPV6_UNICAST_HOPS:
2514         case IPV6_MULTICAST_HOPS:
2515         case IPV6_MULTICAST_LOOP:
2516         case IPV6_RECVERR:
2517         case IPV6_RECVHOPLIMIT:
2518         case IPV6_2292HOPLIMIT:
2519         case IPV6_CHECKSUM:
2520         case IPV6_ADDRFORM:
2521         case IPV6_2292PKTINFO:
2522         case IPV6_RECVTCLASS:
2523         case IPV6_RECVRTHDR:
2524         case IPV6_2292RTHDR:
2525         case IPV6_RECVHOPOPTS:
2526         case IPV6_2292HOPOPTS:
2527         case IPV6_RECVDSTOPTS:
2528         case IPV6_2292DSTOPTS:
2529         case IPV6_TCLASS:
2530 #ifdef IPV6_RECVPATHMTU
2531         case IPV6_RECVPATHMTU:
2532 #endif
2533 #ifdef IPV6_TRANSPARENT
2534         case IPV6_TRANSPARENT:
2535 #endif
2536 #ifdef IPV6_FREEBIND
2537         case IPV6_FREEBIND:
2538 #endif
2539 #ifdef IPV6_RECVORIGDSTADDR
2540         case IPV6_RECVORIGDSTADDR:
2541 #endif
2542             if (get_user_u32(len, optlen))
2543                 return -TARGET_EFAULT;
2544             if (len < 0)
2545                 return -TARGET_EINVAL;
2546             lv = sizeof(lv);
2547             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2548             if (ret < 0)
2549                 return ret;
2550             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2551                 len = 1;
2552                 if (put_user_u32(len, optlen)
2553                     || put_user_u8(val, optval_addr))
2554                     return -TARGET_EFAULT;
2555             } else {
2556                 if (len > sizeof(int))
2557                     len = sizeof(int);
2558                 if (put_user_u32(len, optlen)
2559                     || put_user_u32(val, optval_addr))
2560                     return -TARGET_EFAULT;
2561             }
2562             break;
2563         default:
2564             ret = -TARGET_ENOPROTOOPT;
2565             break;
2566         }
2567         break;
2568 #ifdef SOL_NETLINK
2569     case SOL_NETLINK:
2570         switch (optname) {
2571         case NETLINK_PKTINFO:
2572         case NETLINK_BROADCAST_ERROR:
2573         case NETLINK_NO_ENOBUFS:
2574 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2575         case NETLINK_LISTEN_ALL_NSID:
2576         case NETLINK_CAP_ACK:
2577 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2578 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2579         case NETLINK_EXT_ACK:
2580 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2581 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2582         case NETLINK_GET_STRICT_CHK:
2583 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2584             if (get_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             if (len != sizeof(val)) {
2588                 return -TARGET_EINVAL;
2589             }
2590             lv = len;
2591             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (put_user_u32(lv, optlen)
2596                 || put_user_u32(val, optval_addr)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601         case NETLINK_LIST_MEMBERSHIPS:
2602         {
2603             uint32_t *results;
2604             int i;
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2612             if (!results) {
2613                 return -TARGET_EFAULT;
2614             }
2615             lv = len;
2616             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2617             if (ret < 0) {
2618                 unlock_user(results, optval_addr, 0);
2619                 return ret;
2620             }
2621             /* Swap the results from host to target endianness. */
2622             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2623                 results[i] = tswap32(results[i]);
2624             }
2625             unlock_user(results, optval_addr, len);
2626             if (put_user_u32(lv, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2632         default:
2633             goto unimplemented;
2634         }
2635         break;
2636 #endif /* SOL_NETLINK */
2637     default:
2638     unimplemented:
2639         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2640                  level, optname);
2641         ret = -TARGET_EOPNOTSUPP;
2642         break;
2643     }
2644     return ret;
2645 }
2646 
2647 /* Convert target low/high pair representing file offset into the host
2648  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2649  * as the kernel doesn't handle them either.
2650  */
2651 static void target_to_host_low_high(abi_ulong tlow,
2652                                     abi_ulong thigh,
2653                                     unsigned long *hlow,
2654                                     unsigned long *hhigh)
2655 {
2656     uint64_t off = tlow |
2657         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2658         TARGET_LONG_BITS / 2;
2659 
2660     *hlow = off;
2661     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2662 }
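
/*
 * Editorial worked example (assuming a 32-bit guest on a 64-bit host,
 * i.e. TARGET_LONG_BITS == 32 and HOST_LONG_BITS == 64):
 *
 *     tlow   = 0x89abcdef, thigh = 0x01234567
 *     off    = 0x0123456789abcdef
 *     *hlow  = 0x0123456789abcdef
 *     *hhigh = 0
 *
 * Splitting each shift into two half-width shifts keeps the code well
 * defined when the relevant word size is 64 bits, where a single shift
 * by the full width would be undefined behaviour.
 */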
2663 
2664 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2665                                 abi_ulong count, int copy)
2666 {
2667     struct target_iovec *target_vec;
2668     struct iovec *vec;
2669     abi_ulong total_len, max_len;
2670     int i;
2671     int err = 0;
2672     bool bad_address = false;
2673 
2674     if (count == 0) {
2675         errno = 0;
2676         return NULL;
2677     }
2678     if (count > IOV_MAX) {
2679         errno = EINVAL;
2680         return NULL;
2681     }
2682 
2683     vec = g_try_new0(struct iovec, count);
2684     if (vec == NULL) {
2685         errno = ENOMEM;
2686         return NULL;
2687     }
2688 
2689     target_vec = lock_user(VERIFY_READ, target_addr,
2690                            count * sizeof(struct target_iovec), 1);
2691     if (target_vec == NULL) {
2692         err = EFAULT;
2693         goto fail2;
2694     }
2695 
2696     /* ??? If host page size > target page size, this will result in a
2697        value larger than what we can actually support.  */
2698     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2699     total_len = 0;
2700 
2701     for (i = 0; i < count; i++) {
2702         abi_ulong base = tswapal(target_vec[i].iov_base);
2703         abi_long len = tswapal(target_vec[i].iov_len);
2704 
2705         if (len < 0) {
2706             err = EINVAL;
2707             goto fail;
2708         } else if (len == 0) {
2709             /* Zero length pointer is ignored.  */
2710             vec[i].iov_base = 0;
2711         } else {
2712             vec[i].iov_base = lock_user(type, base, len, copy);
2713             /* If the first buffer pointer is bad, this is a fault.  But
2714              * subsequent bad buffers will result in a partial write; this
2715              * is realized by filling the vector with null pointers and
2716              * zero lengths. */
2717             if (!vec[i].iov_base) {
2718                 if (i == 0) {
2719                     err = EFAULT;
2720                     goto fail;
2721                 } else {
2722                     bad_address = true;
2723                 }
2724             }
2725             if (bad_address) {
2726                 len = 0;
2727             }
2728             if (len > max_len - total_len) {
2729                 len = max_len - total_len;
2730             }
2731         }
2732         vec[i].iov_len = len;
2733         total_len += len;
2734     }
2735 
2736     unlock_user(target_vec, target_addr, 0);
2737     return vec;
2738 
2739  fail:
2740     while (--i >= 0) {
2741         if (tswapal(target_vec[i].iov_len) > 0) {
2742             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2743         }
2744     }
2745     unlock_user(target_vec, target_addr, 0);
2746  fail2:
2747     g_free(vec);
2748     errno = err;
2749     return NULL;
2750 }
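
/*
 * Editorial example (illustration only): if a guest writev() passes three
 * iovec entries and the second one points at unmapped memory, the loop
 * above keeps entry 0 intact and forces entries 1 and 2 to zero length,
 * so the host call performs a partial write of the leading good buffer
 * instead of failing outright with EFAULT, as the in-line comment
 * describes.
 */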
2751 
2752 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2753                          abi_ulong count, int copy)
2754 {
2755     struct target_iovec *target_vec;
2756     int i;
2757 
2758     target_vec = lock_user(VERIFY_READ, target_addr,
2759                            count * sizeof(struct target_iovec), 1);
2760     if (target_vec) {
2761         for (i = 0; i < count; i++) {
2762             abi_ulong base = tswapal(target_vec[i].iov_base);
2763             abi_long len = tswapal(target_vec[i].iov_len);
2764             if (len < 0) {
2765                 break;
2766             }
2767             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2768         }
2769         unlock_user(target_vec, target_addr, 0);
2770     }
2771 
2772     g_free(vec);
2773 }
2774 
2775 static inline int target_to_host_sock_type(int *type)
2776 {
2777     int host_type = 0;
2778     int target_type = *type;
2779 
2780     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2781     case TARGET_SOCK_DGRAM:
2782         host_type = SOCK_DGRAM;
2783         break;
2784     case TARGET_SOCK_STREAM:
2785         host_type = SOCK_STREAM;
2786         break;
2787     default:
2788         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2789         break;
2790     }
2791     if (target_type & TARGET_SOCK_CLOEXEC) {
2792 #if defined(SOCK_CLOEXEC)
2793         host_type |= SOCK_CLOEXEC;
2794 #else
2795         return -TARGET_EINVAL;
2796 #endif
2797     }
2798     if (target_type & TARGET_SOCK_NONBLOCK) {
2799 #if defined(SOCK_NONBLOCK)
2800         host_type |= SOCK_NONBLOCK;
2801 #elif !defined(O_NONBLOCK)
2802         return -TARGET_EINVAL;
2803 #endif
2804     }
2805     *type = host_type;
2806     return 0;
2807 }
2808 
2809 /* Try to emulate socket type flags after socket creation.  */
2810 static int sock_flags_fixup(int fd, int target_type)
2811 {
2812 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2813     if (target_type & TARGET_SOCK_NONBLOCK) {
2814         int flags = fcntl(fd, F_GETFL);
2815         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2816             close(fd);
2817             return -TARGET_EINVAL;
2818         }
2819     }
2820 #endif
2821     return fd;
2822 }
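
/*
 * Editorial note (illustration only): on a host that lacks SOCK_NONBLOCK
 * but has O_NONBLOCK, target_to_host_sock_type() simply leaves the flag
 * out of the host type, and sock_flags_fixup() then emulates it after
 * socket creation, roughly as if the guest had done
 *
 *     int flags = fcntl(fd, F_GETFL);
 *     fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * on the newly created descriptor.
 */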
2823 
2824 /* do_socket() Must return target values and target errnos. */
2825 static abi_long do_socket(int domain, int type, int protocol)
2826 {
2827     int target_type = type;
2828     int ret;
2829 
2830     ret = target_to_host_sock_type(&type);
2831     if (ret) {
2832         return ret;
2833     }
2834 
2835     if (domain == PF_NETLINK && !(
2836 #ifdef CONFIG_RTNETLINK
2837          protocol == NETLINK_ROUTE ||
2838 #endif
2839          protocol == NETLINK_KOBJECT_UEVENT ||
2840          protocol == NETLINK_AUDIT)) {
2841         return -EPFNOSUPPORT;
2842     }
2843 
2844     if (domain == AF_PACKET ||
2845         (domain == AF_INET && type == SOCK_PACKET)) {
2846         protocol = tswap16(protocol);
2847     }
2848 
2849     ret = get_errno(socket(domain, type, protocol));
2850     if (ret >= 0) {
2851         ret = sock_flags_fixup(ret, target_type);
2852         if (type == SOCK_PACKET) {
2853             /* Handle the obsolete SOCK_PACKET case:
2854              * such sockets are bound by interface name.
2855              */
2856             fd_trans_register(ret, &target_packet_trans);
2857         } else if (domain == PF_NETLINK) {
2858             switch (protocol) {
2859 #ifdef CONFIG_RTNETLINK
2860             case NETLINK_ROUTE:
2861                 fd_trans_register(ret, &target_netlink_route_trans);
2862                 break;
2863 #endif
2864             case NETLINK_KOBJECT_UEVENT:
2865                 /* nothing to do: messages are strings */
2866                 break;
2867             case NETLINK_AUDIT:
2868                 fd_trans_register(ret, &target_netlink_audit_trans);
2869                 break;
2870             default:
2871                 g_assert_not_reached();
2872             }
2873         }
2874     }
2875     return ret;
2876 }
2877 
2878 /* do_bind() Must return target values and target errnos. */
2879 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2880                         socklen_t addrlen)
2881 {
2882     void *addr;
2883     abi_long ret;
2884 
2885     if ((int)addrlen < 0) {
2886         return -TARGET_EINVAL;
2887     }
2888 
2889     addr = alloca(addrlen+1);
2890 
2891     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2892     if (ret)
2893         return ret;
2894 
2895     return get_errno(bind(sockfd, addr, addrlen));
2896 }
2897 
2898 /* do_connect() Must return target values and target errnos. */
2899 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2900                            socklen_t addrlen)
2901 {
2902     void *addr;
2903     abi_long ret;
2904 
2905     if ((int)addrlen < 0) {
2906         return -TARGET_EINVAL;
2907     }
2908 
2909     addr = alloca(addrlen+1);
2910 
2911     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2912     if (ret)
2913         return ret;
2914 
2915     return get_errno(safe_connect(sockfd, addr, addrlen));
2916 }
2917 
2918 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2919 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2920                                       int flags, int send)
2921 {
2922     abi_long ret, len;
2923     struct msghdr msg;
2924     abi_ulong count;
2925     struct iovec *vec;
2926     abi_ulong target_vec;
2927 
2928     if (msgp->msg_name) {
2929         msg.msg_namelen = tswap32(msgp->msg_namelen);
2930         msg.msg_name = alloca(msg.msg_namelen+1);
2931         ret = target_to_host_sockaddr(fd, msg.msg_name,
2932                                       tswapal(msgp->msg_name),
2933                                       msg.msg_namelen);
2934         if (ret == -TARGET_EFAULT) {
2935             /* For connected sockets msg_name and msg_namelen must
2936              * be ignored, so returning EFAULT immediately is wrong.
2937              * Instead, pass a bad msg_name to the host kernel, and
2938              * let it decide whether to return EFAULT or not.
2939              */
2940             msg.msg_name = (void *)-1;
2941         } else if (ret) {
2942             goto out2;
2943         }
2944     } else {
2945         msg.msg_name = NULL;
2946         msg.msg_namelen = 0;
2947     }
2948     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2949     msg.msg_control = alloca(msg.msg_controllen);
2950     memset(msg.msg_control, 0, msg.msg_controllen);
2951 
2952     msg.msg_flags = tswap32(msgp->msg_flags);
2953 
2954     count = tswapal(msgp->msg_iovlen);
2955     target_vec = tswapal(msgp->msg_iov);
2956 
2957     if (count > IOV_MAX) {
2958         /* sendmsg/recvmsg return a different errno for this condition than
2959          * readv/writev, so we must catch it here before lock_iovec() does.
2960          */
2961         ret = -TARGET_EMSGSIZE;
2962         goto out2;
2963     }
2964 
2965     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2966                      target_vec, count, send);
2967     if (vec == NULL) {
2968         ret = -host_to_target_errno(errno);
2969         goto out2;
2970     }
2971     msg.msg_iovlen = count;
2972     msg.msg_iov = vec;
2973 
2974     if (send) {
2975         if (fd_trans_target_to_host_data(fd)) {
2976             void *host_msg;
2977 
2978             host_msg = g_malloc(msg.msg_iov->iov_len);
2979             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2980             ret = fd_trans_target_to_host_data(fd)(host_msg,
2981                                                    msg.msg_iov->iov_len);
2982             if (ret >= 0) {
2983                 msg.msg_iov->iov_base = host_msg;
2984                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2985             }
2986             g_free(host_msg);
2987         } else {
2988             ret = target_to_host_cmsg(&msg, msgp);
2989             if (ret == 0) {
2990                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2991             }
2992         }
2993     } else {
2994         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2995         if (!is_error(ret)) {
2996             len = ret;
2997             if (fd_trans_host_to_target_data(fd)) {
2998                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2999                                                MIN(msg.msg_iov->iov_len, len));
3000             } else {
3001                 ret = host_to_target_cmsg(msgp, &msg);
3002             }
3003             if (!is_error(ret)) {
3004                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3005                 msgp->msg_flags = tswap32(msg.msg_flags);
3006                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3007                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3008                                     msg.msg_name, msg.msg_namelen);
3009                     if (ret) {
3010                         goto out;
3011                     }
3012                 }
3013 
3014                 ret = len;
3015             }
3016         }
3017     }
3018 
3019 out:
3020     unlock_iovec(vec, target_vec, count, !send);
3021 out2:
3022     return ret;
3023 }
3024 
3025 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3026                                int flags, int send)
3027 {
3028     abi_long ret;
3029     struct target_msghdr *msgp;
3030 
3031     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3032                           msgp,
3033                           target_msg,
3034                           send ? 1 : 0)) {
3035         return -TARGET_EFAULT;
3036     }
3037     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3038     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3039     return ret;
3040 }
3041 
3042 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3043  * so it might not have this *mmsg-specific flag either.
3044  */
3045 #ifndef MSG_WAITFORONE
3046 #define MSG_WAITFORONE 0x10000
3047 #endif
3048 
3049 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3050                                 unsigned int vlen, unsigned int flags,
3051                                 int send)
3052 {
3053     struct target_mmsghdr *mmsgp;
3054     abi_long ret = 0;
3055     int i;
3056 
3057     if (vlen > UIO_MAXIOV) {
3058         vlen = UIO_MAXIOV;
3059     }
3060 
3061     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3062     if (!mmsgp) {
3063         return -TARGET_EFAULT;
3064     }
3065 
3066     for (i = 0; i < vlen; i++) {
3067         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3068         if (is_error(ret)) {
3069             break;
3070         }
3071         mmsgp[i].msg_len = tswap32(ret);
3072         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3073         if (flags & MSG_WAITFORONE) {
3074             flags |= MSG_DONTWAIT;
3075         }
3076     }
3077 
3078     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3079 
3080     /* Return the number of datagrams sent or received if any were
3081      * transferred at all; otherwise return the error.
3082      */
3083     if (i) {
3084         return i;
3085     }
3086     return ret;
3087 }
3088 
3089 /* do_accept4() must return target values and target errnos. */
3090 static abi_long do_accept4(int fd, abi_ulong target_addr,
3091                            abi_ulong target_addrlen_addr, int flags)
3092 {
3093     socklen_t addrlen, ret_addrlen;
3094     void *addr;
3095     abi_long ret;
3096     int host_flags;
3097 
3098     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3099 
3100     if (target_addr == 0) {
3101         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3102     }
3103 
3104     /* Linux returns EINVAL if addrlen pointer is invalid */
3105     if (get_user_u32(addrlen, target_addrlen_addr))
3106         return -TARGET_EINVAL;
3107 
3108     if ((int)addrlen < 0) {
3109         return -TARGET_EINVAL;
3110     }
3111 
3112     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3113         return -TARGET_EINVAL;
3114 
3115     addr = alloca(addrlen);
3116 
3117     ret_addrlen = addrlen;
3118     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3119     if (!is_error(ret)) {
3120         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3121         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3122             ret = -TARGET_EFAULT;
3123         }
3124     }
3125     return ret;
3126 }
3127 
3128 /* do_getpeername() must return target values and target errnos. */
3129 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3130                                abi_ulong target_addrlen_addr)
3131 {
3132     socklen_t addrlen, ret_addrlen;
3133     void *addr;
3134     abi_long ret;
3135 
3136     if (get_user_u32(addrlen, target_addrlen_addr))
3137         return -TARGET_EFAULT;
3138 
3139     if ((int)addrlen < 0) {
3140         return -TARGET_EINVAL;
3141     }
3142 
3143     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3144         return -TARGET_EFAULT;
3145 
3146     addr = alloca(addrlen);
3147 
3148     ret_addrlen = addrlen;
3149     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3150     if (!is_error(ret)) {
3151         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3152         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3153             ret = -TARGET_EFAULT;
3154         }
3155     }
3156     return ret;
3157 }
3158 
3159 /* do_getsockname() must return target values and target errnos. */
3160 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3161                                abi_ulong target_addrlen_addr)
3162 {
3163     socklen_t addrlen, ret_addrlen;
3164     void *addr;
3165     abi_long ret;
3166 
3167     if (get_user_u32(addrlen, target_addrlen_addr))
3168         return -TARGET_EFAULT;
3169 
3170     if ((int)addrlen < 0) {
3171         return -TARGET_EINVAL;
3172     }
3173 
3174     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3175         return -TARGET_EFAULT;
3176 
3177     addr = alloca(addrlen);
3178 
3179     ret_addrlen = addrlen;
3180     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3181     if (!is_error(ret)) {
3182         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3183         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3184             ret = -TARGET_EFAULT;
3185         }
3186     }
3187     return ret;
3188 }
3189 
3190 /* do_socketpair() must return target values and target errnos. */
3191 static abi_long do_socketpair(int domain, int type, int protocol,
3192                               abi_ulong target_tab_addr)
3193 {
3194     int tab[2];
3195     abi_long ret;
3196 
3197     target_to_host_sock_type(&type);
3198 
3199     ret = get_errno(socketpair(domain, type, protocol, tab));
3200     if (!is_error(ret)) {
3201         if (put_user_s32(tab[0], target_tab_addr)
3202             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3203             ret = -TARGET_EFAULT;
3204     }
3205     return ret;
3206 }
3207 
3208 /* do_sendto() must return target values and target errnos. */
3209 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3210                           abi_ulong target_addr, socklen_t addrlen)
3211 {
3212     void *addr;
3213     void *host_msg;
3214     void *copy_msg = NULL;
3215     abi_long ret;
3216 
3217     if ((int)addrlen < 0) {
3218         return -TARGET_EINVAL;
3219     }
3220 
3221     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3222     if (!host_msg)
3223         return -TARGET_EFAULT;
3224     if (fd_trans_target_to_host_data(fd)) {
3225         copy_msg = host_msg;
3226         host_msg = g_malloc(len);
3227         memcpy(host_msg, copy_msg, len);
3228         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3229         if (ret < 0) {
3230             goto fail;
3231         }
3232     }
3233     if (target_addr) {
3234         addr = alloca(addrlen+1);
3235         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3236         if (ret) {
3237             goto fail;
3238         }
3239         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3240     } else {
3241         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3242     }
3243 fail:
3244     if (copy_msg) {
3245         g_free(host_msg);
3246         host_msg = copy_msg;
3247     }
3248     unlock_user(host_msg, msg, 0);
3249     return ret;
3250 }
3251 
3252 /* do_recvfrom() must return target values and target errnos. */
3253 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3254                             abi_ulong target_addr,
3255                             abi_ulong target_addrlen)
3256 {
3257     socklen_t addrlen, ret_addrlen;
3258     void *addr;
3259     void *host_msg;
3260     abi_long ret;
3261 
3262     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3263     if (!host_msg)
3264         return -TARGET_EFAULT;
3265     if (target_addr) {
3266         if (get_user_u32(addrlen, target_addrlen)) {
3267             ret = -TARGET_EFAULT;
3268             goto fail;
3269         }
3270         if ((int)addrlen < 0) {
3271             ret = -TARGET_EINVAL;
3272             goto fail;
3273         }
3274         addr = alloca(addrlen);
3275         ret_addrlen = addrlen;
3276         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3277                                       addr, &ret_addrlen));
3278     } else {
3279         addr = NULL; /* To keep compiler quiet.  */
3280         addrlen = 0; /* To keep compiler quiet.  */
3281         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3282     }
3283     if (!is_error(ret)) {
3284         if (fd_trans_host_to_target_data(fd)) {
3285             abi_long trans;
3286             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3287             if (is_error(trans)) {
3288                 ret = trans;
3289                 goto fail;
3290             }
3291         }
3292         if (target_addr) {
3293             host_to_target_sockaddr(target_addr, addr,
3294                                     MIN(addrlen, ret_addrlen));
3295             if (put_user_u32(ret_addrlen, target_addrlen)) {
3296                 ret = -TARGET_EFAULT;
3297                 goto fail;
3298             }
3299         }
3300         unlock_user(host_msg, msg, len);
3301     } else {
3302 fail:
3303         unlock_user(host_msg, msg, 0);
3304     }
3305     return ret;
3306 }
3307 
3308 #ifdef TARGET_NR_socketcall
3309 /* do_socketcall() must return target values and target errnos. */
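/* Illustrative guest-side usage (not code from this file): on targets that
 * multiplex the socket calls, a libc connect(fd, addr, len) typically ends
 * up as something like
 *
 *     long args[3] = { fd, (long)addr, len };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * so do_socketcall() is entered with num == TARGET_SYS_CONNECT and fetches
 * the three abi_long arguments from vptr before calling do_connect().
 */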
3310 static abi_long do_socketcall(int num, abi_ulong vptr)
3311 {
3312     static const unsigned nargs[] = { /* number of arguments per operation */
3313         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3314         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3315         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3316         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3317         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3318         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3319         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3320         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3321         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3322         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3323         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3324         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3325         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3326         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3327         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3328         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3329         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3330         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3331         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3332         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3333     };
3334     abi_long a[6]; /* max 6 args */
3335     unsigned i;
3336 
3337     /* check the range of the first argument num */
3338     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3339     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3340         return -TARGET_EINVAL;
3341     }
3342     /* ensure we have space for args */
3343     if (nargs[num] > ARRAY_SIZE(a)) {
3344         return -TARGET_EINVAL;
3345     }
3346     /* collect the arguments in a[] according to nargs[] */
3347     for (i = 0; i < nargs[num]; ++i) {
3348         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3349             return -TARGET_EFAULT;
3350         }
3351     }
3352     /* now that we have the args, invoke the appropriate underlying function */
3353     switch (num) {
3354     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3355         return do_socket(a[0], a[1], a[2]);
3356     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3357         return do_bind(a[0], a[1], a[2]);
3358     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3359         return do_connect(a[0], a[1], a[2]);
3360     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3361         return get_errno(listen(a[0], a[1]));
3362     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3363         return do_accept4(a[0], a[1], a[2], 0);
3364     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3365         return do_getsockname(a[0], a[1], a[2]);
3366     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3367         return do_getpeername(a[0], a[1], a[2]);
3368     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3369         return do_socketpair(a[0], a[1], a[2], a[3]);
3370     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3371         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3372     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3373         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3374     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3375         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3376     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3377         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3378     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3379         return get_errno(shutdown(a[0], a[1]));
3380     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3381         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3382     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3383         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3384     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3385         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3386     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3387         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3388     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3389         return do_accept4(a[0], a[1], a[2], a[3]);
3390     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3391         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3392     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3393         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3394     default:
3395         gemu_log("Unsupported socketcall: %d\n", num);
3396         return -TARGET_EINVAL;
3397     }
3398 }
3399 #endif
3400 
3401 #define N_SHM_REGIONS	32
3402 
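/* Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * address and size of each attached segment here so that do_shmdt() can
 * later clear the page flags for the right range.
 */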
3403 static struct shm_region {
3404     abi_ulong start;
3405     abi_ulong size;
3406     bool in_use;
3407 } shm_regions[N_SHM_REGIONS];
3408 
3409 #ifndef TARGET_SEMID64_DS
3410 /* asm-generic version of this struct */
3411 struct target_semid64_ds
3412 {
3413   struct target_ipc_perm sem_perm;
3414   abi_ulong sem_otime;
3415 #if TARGET_ABI_BITS == 32
3416   abi_ulong __unused1;
3417 #endif
3418   abi_ulong sem_ctime;
3419 #if TARGET_ABI_BITS == 32
3420   abi_ulong __unused2;
3421 #endif
3422   abi_ulong sem_nsems;
3423   abi_ulong __unused3;
3424   abi_ulong __unused4;
3425 };
3426 #endif
3427 
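/* Convert a struct ipc_perm embedded in a target_semid64_ds between target
 * and host layouts.  mode is 32-bit on Alpha, MIPS and PPC but 16-bit
 * elsewhere, and __seq is 32-bit only on PPC, hence the ifdefs below.
 */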
3428 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3429                                                abi_ulong target_addr)
3430 {
3431     struct target_ipc_perm *target_ip;
3432     struct target_semid64_ds *target_sd;
3433 
3434     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3435         return -TARGET_EFAULT;
3436     target_ip = &(target_sd->sem_perm);
3437     host_ip->__key = tswap32(target_ip->__key);
3438     host_ip->uid = tswap32(target_ip->uid);
3439     host_ip->gid = tswap32(target_ip->gid);
3440     host_ip->cuid = tswap32(target_ip->cuid);
3441     host_ip->cgid = tswap32(target_ip->cgid);
3442 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3443     host_ip->mode = tswap32(target_ip->mode);
3444 #else
3445     host_ip->mode = tswap16(target_ip->mode);
3446 #endif
3447 #if defined(TARGET_PPC)
3448     host_ip->__seq = tswap32(target_ip->__seq);
3449 #else
3450     host_ip->__seq = tswap16(target_ip->__seq);
3451 #endif
3452     unlock_user_struct(target_sd, target_addr, 0);
3453     return 0;
3454 }
3455 
3456 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3457                                                struct ipc_perm *host_ip)
3458 {
3459     struct target_ipc_perm *target_ip;
3460     struct target_semid64_ds *target_sd;
3461 
3462     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3463         return -TARGET_EFAULT;
3464     target_ip = &(target_sd->sem_perm);
3465     target_ip->__key = tswap32(host_ip->__key);
3466     target_ip->uid = tswap32(host_ip->uid);
3467     target_ip->gid = tswap32(host_ip->gid);
3468     target_ip->cuid = tswap32(host_ip->cuid);
3469     target_ip->cgid = tswap32(host_ip->cgid);
3470 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3471     target_ip->mode = tswap32(host_ip->mode);
3472 #else
3473     target_ip->mode = tswap16(host_ip->mode);
3474 #endif
3475 #if defined(TARGET_PPC)
3476     target_ip->__seq = tswap32(host_ip->__seq);
3477 #else
3478     target_ip->__seq = tswap16(host_ip->__seq);
3479 #endif
3480     unlock_user_struct(target_sd, target_addr, 1);
3481     return 0;
3482 }
3483 
3484 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3485                                                abi_ulong target_addr)
3486 {
3487     struct target_semid64_ds *target_sd;
3488 
3489     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3490         return -TARGET_EFAULT;
3491     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3492         return -TARGET_EFAULT;
3493     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3494     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3495     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3496     unlock_user_struct(target_sd, target_addr, 0);
3497     return 0;
3498 }
3499 
3500 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3501                                                struct semid_ds *host_sd)
3502 {
3503     struct target_semid64_ds *target_sd;
3504 
3505     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3506         return -TARGET_EFAULT;
3507     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3508         return -TARGET_EFAULT;
3509     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3510     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3511     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3512     unlock_user_struct(target_sd, target_addr, 1);
3513     return 0;
3514 }
3515 
3516 struct target_seminfo {
3517     int semmap;
3518     int semmni;
3519     int semmns;
3520     int semmnu;
3521     int semmsl;
3522     int semopm;
3523     int semume;
3524     int semusz;
3525     int semvmx;
3526     int semaem;
3527 };
3528 
3529 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3530                                               struct seminfo *host_seminfo)
3531 {
3532     struct target_seminfo *target_seminfo;
3533     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3534         return -TARGET_EFAULT;
3535     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3536     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3537     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3538     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3539     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3540     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3541     __put_user(host_seminfo->semume, &target_seminfo->semume);
3542     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3543     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3544     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3545     unlock_user_struct(target_seminfo, target_addr, 1);
3546     return 0;
3547 }
3548 
3549 union semun {
3550     int val;
3551     struct semid_ds *buf;
3552     unsigned short *array;
3553     struct seminfo *__buf;
3554 };
3555 
3556 union target_semun {
3557     int val;
3558     abi_ulong buf;
3559     abi_ulong array;
3560     abi_ulong __buf;
3561 };
3562 
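/* Copy a semaphore value array (GETALL/SETALL) between guest and host.
 * The number of semaphores is obtained via IPC_STAT on the semaphore set;
 * target_to_host_semarray() allocates the host array, which
 * host_to_target_semarray() frees again after copying the values back.
 */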
3563 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3564                                                abi_ulong target_addr)
3565 {
3566     int nsems;
3567     unsigned short *array;
3568     union semun semun;
3569     struct semid_ds semid_ds;
3570     int i, ret;
3571 
3572     semun.buf = &semid_ds;
3573 
3574     ret = semctl(semid, 0, IPC_STAT, semun);
3575     if (ret == -1)
3576         return get_errno(ret);
3577 
3578     nsems = semid_ds.sem_nsems;
3579 
3580     *host_array = g_try_new(unsigned short, nsems);
3581     if (!*host_array) {
3582         return -TARGET_ENOMEM;
3583     }
3584     array = lock_user(VERIFY_READ, target_addr,
3585                       nsems*sizeof(unsigned short), 1);
3586     if (!array) {
3587         g_free(*host_array);
3588         return -TARGET_EFAULT;
3589     }
3590 
3591     for (i = 0; i < nsems; i++) {
3592         __get_user((*host_array)[i], &array[i]);
3593     }
3594     unlock_user(array, target_addr, 0);
3595 
3596     return 0;
3597 }
3598 
3599 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3600                                                unsigned short **host_array)
3601 {
3602     int nsems;
3603     unsigned short *array;
3604     union semun semun;
3605     struct semid_ds semid_ds;
3606     int i, ret;
3607 
3608     semun.buf = &semid_ds;
3609 
3610     ret = semctl(semid, 0, IPC_STAT, semun);
3611     if (ret == -1)
3612         return get_errno(ret);
3613 
3614     nsems = semid_ds.sem_nsems;
3615 
3616     array = lock_user(VERIFY_WRITE, target_addr,
3617                       nsems*sizeof(unsigned short), 0);
3618     if (!array)
3619         return -TARGET_EFAULT;
3620 
3621     for (i = 0; i < nsems; i++) {
3622         __put_user((*host_array)[i], &array[i]);
3623     }
3624     g_free(*host_array);
3625     unlock_user(array, target_addr, 1);
3626 
3627     return 0;
3628 }
3629 
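/* Emulate semctl(2).  target_arg holds the guest's union semun, passed by
 * value; cmd is masked to its low byte before dispatch.  As an illustrative
 * guest-side example (not code from this file), a call such as
 *     semctl(semid, 0, GETVAL, 0);
 * lands in the GETVAL/SETVAL case below, while GETALL/SETALL,
 * IPC_STAT/IPC_SET/SEM_STAT and IPC_INFO/SEM_INFO go through the semarray,
 * semid_ds and seminfo conversion helpers respectively.
 */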
3630 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3631                                  abi_ulong target_arg)
3632 {
3633     union target_semun target_su = { .buf = target_arg };
3634     union semun arg;
3635     struct semid_ds dsarg;
3636     unsigned short *array = NULL;
3637     struct seminfo seminfo;
3638     abi_long ret = -TARGET_EINVAL;
3639     abi_long err;
3640     cmd &= 0xff;
3641 
3642     switch (cmd) {
3643         case GETVAL:
3644         case SETVAL:
3645             /* In 64-bit cross-endian situations, we will erroneously pick up
3646              * the wrong half of the union for the "val" element.  To rectify
3647              * this, the entire 8-byte structure is byteswapped, followed by
3648              * a swap of the 4-byte val field. In other cases, the data is
3649              * already in proper host byte order. */
3650             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3651                 target_su.buf = tswapal(target_su.buf);
3652                 arg.val = tswap32(target_su.val);
3653             } else {
3654                 arg.val = target_su.val;
3655             }
3656             ret = get_errno(semctl(semid, semnum, cmd, arg));
3657             break;
3658         case GETALL:
3659         case SETALL:
3660             err = target_to_host_semarray(semid, &array, target_su.array);
3661             if (err)
3662                 return err;
3663             arg.array = array;
3664             ret = get_errno(semctl(semid, semnum, cmd, arg));
3665             err = host_to_target_semarray(semid, target_su.array, &array);
3666             if (err)
3667                 return err;
3668             break;
3669         case IPC_STAT:
3670         case IPC_SET:
3671         case SEM_STAT:
3672             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3673             if (err)
3674                 return err;
3675             arg.buf = &dsarg;
3676             ret = get_errno(semctl(semid, semnum, cmd, arg));
3677             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3678             if (err)
3679                 return err;
3680             break;
3681         case IPC_INFO:
3682         case SEM_INFO:
3683             arg.__buf = &seminfo;
3684             ret = get_errno(semctl(semid, semnum, cmd, arg));
3685             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3686             if (err)
3687                 return err;
3688             break;
3689         case IPC_RMID:
3690         case GETPID:
3691         case GETNCNT:
3692         case GETZCNT:
3693             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3694             break;
3695     }
3696 
3697     return ret;
3698 }
3699 
3700 struct target_sembuf {
3701     unsigned short sem_num;
3702     short sem_op;
3703     short sem_flg;
3704 };
3705 
3706 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3707                                              abi_ulong target_addr,
3708                                              unsigned nsops)
3709 {
3710     struct target_sembuf *target_sembuf;
3711     int i;
3712 
3713     target_sembuf = lock_user(VERIFY_READ, target_addr,
3714                               nsops*sizeof(struct target_sembuf), 1);
3715     if (!target_sembuf)
3716         return -TARGET_EFAULT;
3717 
3718     for (i = 0; i < nsops; i++) {
3719         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3720         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3721         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3722     }
3723 
3724     unlock_user(target_sembuf, target_addr, 0);
3725 
3726     return 0;
3727 }
3728 
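/* Emulate semop(2): convert the guest sembuf array and issue semtimedop
 * with a NULL timeout, falling back to the ipc() multiplexer on hosts that
 * only provide __NR_ipc.
 */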
3729 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3730 {
3731     struct sembuf sops[nsops];
3732     abi_long ret;
3733 
3734     if (target_to_host_sembuf(sops, ptr, nsops))
3735         return -TARGET_EFAULT;
3736 
3737     ret = -TARGET_ENOSYS;
3738 #ifdef __NR_semtimedop
3739     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3740 #endif
3741 #ifdef __NR_ipc
3742     if (ret == -TARGET_ENOSYS) {
3743         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3744     }
3745 #endif
3746     return ret;
3747 }
3748 
3749 struct target_msqid_ds
3750 {
3751     struct target_ipc_perm msg_perm;
3752     abi_ulong msg_stime;
3753 #if TARGET_ABI_BITS == 32
3754     abi_ulong __unused1;
3755 #endif
3756     abi_ulong msg_rtime;
3757 #if TARGET_ABI_BITS == 32
3758     abi_ulong __unused2;
3759 #endif
3760     abi_ulong msg_ctime;
3761 #if TARGET_ABI_BITS == 32
3762     abi_ulong __unused3;
3763 #endif
3764     abi_ulong __msg_cbytes;
3765     abi_ulong msg_qnum;
3766     abi_ulong msg_qbytes;
3767     abi_ulong msg_lspid;
3768     abi_ulong msg_lrpid;
3769     abi_ulong __unused4;
3770     abi_ulong __unused5;
3771 };
3772 
3773 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3774                                                abi_ulong target_addr)
3775 {
3776     struct target_msqid_ds *target_md;
3777 
3778     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3779         return -TARGET_EFAULT;
3780     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3781         return -TARGET_EFAULT;
3782     host_md->msg_stime = tswapal(target_md->msg_stime);
3783     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3784     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3785     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3786     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3787     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3788     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3789     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3790     unlock_user_struct(target_md, target_addr, 0);
3791     return 0;
3792 }
3793 
3794 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3795                                                struct msqid_ds *host_md)
3796 {
3797     struct target_msqid_ds *target_md;
3798 
3799     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3800         return -TARGET_EFAULT;
3801     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3802         return -TARGET_EFAULT;
3803     target_md->msg_stime = tswapal(host_md->msg_stime);
3804     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3805     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3806     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3807     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3808     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3809     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3810     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3811     unlock_user_struct(target_md, target_addr, 1);
3812     return 0;
3813 }
3814 
3815 struct target_msginfo {
3816     int msgpool;
3817     int msgmap;
3818     int msgmax;
3819     int msgmnb;
3820     int msgmni;
3821     int msgssz;
3822     int msgtql;
3823     unsigned short int msgseg;
3824 };
3825 
3826 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3827                                               struct msginfo *host_msginfo)
3828 {
3829     struct target_msginfo *target_msginfo;
3830     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3831         return -TARGET_EFAULT;
3832     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3833     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3834     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3835     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3836     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3837     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3838     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3839     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3840     unlock_user_struct(target_msginfo, target_addr, 1);
3841     return 0;
3842 }
3843 
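/* Emulate msgctl(2): cmd is masked to its low byte, and msqid_ds or msginfo
 * structures are converted as the command requires.
 */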
3844 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3845 {
3846     struct msqid_ds dsarg;
3847     struct msginfo msginfo;
3848     abi_long ret = -TARGET_EINVAL;
3849 
3850     cmd &= 0xff;
3851 
3852     switch (cmd) {
3853     case IPC_STAT:
3854     case IPC_SET:
3855     case MSG_STAT:
3856         if (target_to_host_msqid_ds(&dsarg,ptr))
3857             return -TARGET_EFAULT;
3858         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3859         if (host_to_target_msqid_ds(ptr,&dsarg))
3860             return -TARGET_EFAULT;
3861         break;
3862     case IPC_RMID:
3863         ret = get_errno(msgctl(msgid, cmd, NULL));
3864         break;
3865     case IPC_INFO:
3866     case MSG_INFO:
3867         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3868         if (host_to_target_msginfo(ptr, &msginfo))
3869             return -TARGET_EFAULT;
3870         break;
3871     }
3872 
3873     return ret;
3874 }
3875 
3876 struct target_msgbuf {
3877     abi_long mtype;
3878     char	mtext[1];
3879 };
3880 
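/* Emulate msgsnd(2): copy the guest message into a freshly allocated host
 * msgbuf (byte-swapping mtype), then try the native msgsnd syscall with a
 * fallback to the ipc() multiplexer.
 */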
3881 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3882                                  ssize_t msgsz, int msgflg)
3883 {
3884     struct target_msgbuf *target_mb;
3885     struct msgbuf *host_mb;
3886     abi_long ret = 0;
3887 
3888     if (msgsz < 0) {
3889         return -TARGET_EINVAL;
3890     }
3891 
3892     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3893         return -TARGET_EFAULT;
3894     host_mb = g_try_malloc(msgsz + sizeof(long));
3895     if (!host_mb) {
3896         unlock_user_struct(target_mb, msgp, 0);
3897         return -TARGET_ENOMEM;
3898     }
3899     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3900     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3901     ret = -TARGET_ENOSYS;
3902 #ifdef __NR_msgsnd
3903     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3904 #endif
3905 #ifdef __NR_ipc
3906     if (ret == -TARGET_ENOSYS) {
3907         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3908                                  host_mb, 0));
3909     }
3910 #endif
3911     g_free(host_mb);
3912     unlock_user_struct(target_mb, msgp, 0);
3913 
3914     return ret;
3915 }
3916 
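/* Emulate msgrcv(2): receive into a host buffer first, then copy mtype and
 * up to 'ret' bytes of mtext back out to the guest msgbuf.
 */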
3917 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3918                                  ssize_t msgsz, abi_long msgtyp,
3919                                  int msgflg)
3920 {
3921     struct target_msgbuf *target_mb;
3922     char *target_mtext;
3923     struct msgbuf *host_mb;
3924     abi_long ret = 0;
3925 
3926     if (msgsz < 0) {
3927         return -TARGET_EINVAL;
3928     }
3929 
3930     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3931         return -TARGET_EFAULT;
3932 
3933     host_mb = g_try_malloc(msgsz + sizeof(long));
3934     if (!host_mb) {
3935         ret = -TARGET_ENOMEM;
3936         goto end;
3937     }
3938     ret = -TARGET_ENOSYS;
3939 #ifdef __NR_msgrcv
3940     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3941 #endif
3942 #ifdef __NR_ipc
3943     if (ret == -TARGET_ENOSYS) {
3944         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3945                         msgflg, host_mb, msgtyp));
3946     }
3947 #endif
3948 
3949     if (ret > 0) {
3950         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3951         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3952         if (!target_mtext) {
3953             ret = -TARGET_EFAULT;
3954             goto end;
3955         }
3956         memcpy(target_mb->mtext, host_mb->mtext, ret);
3957         unlock_user(target_mtext, target_mtext_addr, ret);
3958     }
3959 
3960     target_mb->mtype = tswapal(host_mb->mtype);
3961 
3962 end:
3963     if (target_mb)
3964         unlock_user_struct(target_mb, msgp, 1);
3965     g_free(host_mb);
3966     return ret;
3967 }
3968 
3969 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3970                                                abi_ulong target_addr)
3971 {
3972     struct target_shmid_ds *target_sd;
3973 
3974     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3975         return -TARGET_EFAULT;
3976     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3977         return -TARGET_EFAULT;
3978     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3979     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3980     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3981     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3982     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3983     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3984     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3985     unlock_user_struct(target_sd, target_addr, 0);
3986     return 0;
3987 }
3988 
3989 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3990                                                struct shmid_ds *host_sd)
3991 {
3992     struct target_shmid_ds *target_sd;
3993 
3994     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3995         return -TARGET_EFAULT;
3996     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3997         return -TARGET_EFAULT;
3998     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3999     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4000     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4001     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4002     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4003     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4004     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4005     unlock_user_struct(target_sd, target_addr, 1);
4006     return 0;
4007 }
4008 
4009 struct  target_shminfo {
4010     abi_ulong shmmax;
4011     abi_ulong shmmin;
4012     abi_ulong shmmni;
4013     abi_ulong shmseg;
4014     abi_ulong shmall;
4015 };
4016 
4017 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4018                                               struct shminfo *host_shminfo)
4019 {
4020     struct target_shminfo *target_shminfo;
4021     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4022         return -TARGET_EFAULT;
4023     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4024     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4025     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4026     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4027     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4028     unlock_user_struct(target_shminfo, target_addr, 1);
4029     return 0;
4030 }
4031 
4032 struct target_shm_info {
4033     int used_ids;
4034     abi_ulong shm_tot;
4035     abi_ulong shm_rss;
4036     abi_ulong shm_swp;
4037     abi_ulong swap_attempts;
4038     abi_ulong swap_successes;
4039 };
4040 
4041 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4042                                                struct shm_info *host_shm_info)
4043 {
4044     struct target_shm_info *target_shm_info;
4045     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4046         return -TARGET_EFAULT;
4047     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4048     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4049     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4050     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4051     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4052     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4053     unlock_user_struct(target_shm_info, target_addr, 1);
4054     return 0;
4055 }
4056 
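/* Emulate shmctl(2): convert shmid_ds, shminfo or shm_info structures as
 * required by the command; IPC_RMID, SHM_LOCK and SHM_UNLOCK need no
 * argument conversion.
 */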
4057 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4058 {
4059     struct shmid_ds dsarg;
4060     struct shminfo shminfo;
4061     struct shm_info shm_info;
4062     abi_long ret = -TARGET_EINVAL;
4063 
4064     cmd &= 0xff;
4065 
4066     switch (cmd) {
4067     case IPC_STAT:
4068     case IPC_SET:
4069     case SHM_STAT:
4070         if (target_to_host_shmid_ds(&dsarg, buf))
4071             return -TARGET_EFAULT;
4072         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4073         if (host_to_target_shmid_ds(buf, &dsarg))
4074             return -TARGET_EFAULT;
4075         break;
4076     case IPC_INFO:
4077         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4078         if (host_to_target_shminfo(buf, &shminfo))
4079             return -TARGET_EFAULT;
4080         break;
4081     case SHM_INFO:
4082         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4083         if (host_to_target_shm_info(buf, &shm_info))
4084             return -TARGET_EFAULT;
4085         break;
4086     case IPC_RMID:
4087     case SHM_LOCK:
4088     case SHM_UNLOCK:
4089         ret = get_errno(shmctl(shmid, cmd, NULL));
4090         break;
4091     }
4092 
4093     return ret;
4094 }
4095 
4096 #ifndef TARGET_FORCE_SHMLBA
4097 /* For most architectures, SHMLBA is the same as the page size;
4098  * some architectures have larger values, in which case they should
4099  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4100  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4101  * and defining its own value for SHMLBA.
4102  *
4103  * The kernel also permits SHMLBA to be set by the architecture to a
4104  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4105  * this means that addresses are rounded to the large size if
4106  * SHM_RND is set but addresses not aligned to that size are not rejected
4107  * as long as they are at least page-aligned. Since the only architecture
4108  * which uses this is ia64, this code doesn't provide for that oddity.
4109  */
4110 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4111 {
4112     return TARGET_PAGE_SIZE;
4113 }
4114 #endif
4115 
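/* Emulate shmat(2).  The segment size is queried with IPC_STAT so the guest
 * page flags can be updated and the mapping recorded in shm_regions[].  If
 * the guest supplies no address, a suitable range is found with
 * mmap_find_vma(), honouring both the host SHMLBA and the target's.
 */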
4116 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4117                                  int shmid, abi_ulong shmaddr, int shmflg)
4118 {
4119     abi_long raddr;
4120     void *host_raddr;
4121     struct shmid_ds shm_info;
4122     int i, ret;
4123     abi_ulong shmlba;
4124 
4125     /* find out the length of the shared memory segment */
4126     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4127     if (is_error(ret)) {
4128         /* can't get length, bail out */
4129         return ret;
4130     }
4131 
4132     shmlba = target_shmlba(cpu_env);
4133 
4134     if (shmaddr & (shmlba - 1)) {
4135         if (shmflg & SHM_RND) {
4136             shmaddr &= ~(shmlba - 1);
4137         } else {
4138             return -TARGET_EINVAL;
4139         }
4140     }
4141     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4142         return -TARGET_EINVAL;
4143     }
4144 
4145     mmap_lock();
4146 
4147     if (shmaddr)
4148         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4149     else {
4150         abi_ulong mmap_start;
4151 
4152         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4153         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4154 
4155         if (mmap_start == -1) {
4156             errno = ENOMEM;
4157             host_raddr = (void *)-1;
4158         } else
4159             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4160     }
4161 
4162     if (host_raddr == (void *)-1) {
4163         mmap_unlock();
4164         return get_errno((long)host_raddr);
4165     }
4166     raddr = h2g((unsigned long)host_raddr);
4167 
4168     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4169                    PAGE_VALID | PAGE_READ |
4170                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4171 
4172     for (i = 0; i < N_SHM_REGIONS; i++) {
4173         if (!shm_regions[i].in_use) {
4174             shm_regions[i].in_use = true;
4175             shm_regions[i].start = raddr;
4176             shm_regions[i].size = shm_info.shm_segsz;
4177             break;
4178         }
4179     }
4180 
4181     mmap_unlock();
4182     return raddr;
4183 
4184 }
4185 
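/* Emulate shmdt(2): drop the shm_regions[] entry for this address, clear
 * the guest page flags for the segment and detach the host mapping.
 */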
4186 static inline abi_long do_shmdt(abi_ulong shmaddr)
4187 {
4188     int i;
4189     abi_long rv;
4190 
4191     mmap_lock();
4192 
4193     for (i = 0; i < N_SHM_REGIONS; ++i) {
4194         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4195             shm_regions[i].in_use = false;
4196             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4197             break;
4198         }
4199     }
4200     rv = get_errno(shmdt(g2h(shmaddr)));
4201 
4202     mmap_unlock();
4203 
4204     return rv;
4205 }
4206 
4207 #ifdef TARGET_NR_ipc
4208 /* ??? This only works with linear mappings.  */
4209 /* do_ipc() must return target values and target errnos. */
4210 static abi_long do_ipc(CPUArchState *cpu_env,
4211                        unsigned int call, abi_long first,
4212                        abi_long second, abi_long third,
4213                        abi_long ptr, abi_long fifth)
4214 {
4215     int version;
4216     abi_long ret = 0;
4217 
4218     version = call >> 16;
4219     call &= 0xffff;
4220 
4221     switch (call) {
4222     case IPCOP_semop:
4223         ret = do_semop(first, ptr, second);
4224         break;
4225 
4226     case IPCOP_semget:
4227         ret = get_errno(semget(first, second, third));
4228         break;
4229 
4230     case IPCOP_semctl: {
4231         /* The semun argument to semctl is passed by value, so dereference the
4232          * ptr argument. */
4233         abi_ulong atptr;
4234         get_user_ual(atptr, ptr);
4235         ret = do_semctl(first, second, third, atptr);
4236         break;
4237     }
4238 
4239     case IPCOP_msgget:
4240         ret = get_errno(msgget(first, second));
4241         break;
4242 
4243     case IPCOP_msgsnd:
4244         ret = do_msgsnd(first, ptr, second, third);
4245         break;
4246 
4247     case IPCOP_msgctl:
4248         ret = do_msgctl(first, second, ptr);
4249         break;
4250 
4251     case IPCOP_msgrcv:
4252         switch (version) {
4253         case 0:
4254             {
4255                 struct target_ipc_kludge {
4256                     abi_long msgp;
4257                     abi_long msgtyp;
4258                 } *tmp;
4259 
4260                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4261                     ret = -TARGET_EFAULT;
4262                     break;
4263                 }
4264 
4265                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4266 
4267                 unlock_user_struct(tmp, ptr, 0);
4268                 break;
4269             }
4270         default:
4271             ret = do_msgrcv(first, ptr, second, fifth, third);
4272         }
4273         break;
4274 
4275     case IPCOP_shmat:
4276         switch (version) {
4277         default:
4278         {
4279             abi_ulong raddr;
4280             raddr = do_shmat(cpu_env, first, ptr, second);
4281             if (is_error(raddr))
4282                 return get_errno(raddr);
4283             if (put_user_ual(raddr, third))
4284                 return -TARGET_EFAULT;
4285             break;
4286         }
4287         case 1:
4288             ret = -TARGET_EINVAL;
4289             break;
4290         }
4291         break;
4292     case IPCOP_shmdt:
4293         ret = do_shmdt(ptr);
4294         break;
4295 
4296     case IPCOP_shmget:
4297         /* IPC_* flag values are the same on all Linux platforms */
4298         ret = get_errno(shmget(first, second, third));
4299         break;
4300 
4301     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4302     case IPCOP_shmctl:
4303         ret = do_shmctl(first, second, ptr);
4304         break;
4305     default:
4306         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4307         ret = -TARGET_ENOSYS;
4308         break;
4309     }
4310     return ret;
4311 }
4312 #endif
4313 
4314 /* kernel structure types definitions */
4315 
4316 #define STRUCT(name, ...) STRUCT_ ## name,
4317 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4318 enum {
4319 #include "syscall_types.h"
4320 STRUCT_MAX
4321 };
4322 #undef STRUCT
4323 #undef STRUCT_SPECIAL
4324 
4325 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4326 #define STRUCT_SPECIAL(name)
4327 #include "syscall_types.h"
4328 #undef STRUCT
4329 #undef STRUCT_SPECIAL
4330 
4331 typedef struct IOCTLEntry IOCTLEntry;
4332 
4333 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4334                              int fd, int cmd, abi_long arg);
4335 
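/* One entry of the ioctl translation table: target and host command
 * numbers, a printable name, IOC_R/IOC_W access flags, an optional custom
 * handler and the thunk argument-type description used for automatic
 * structure conversion.
 */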
4336 struct IOCTLEntry {
4337     int target_cmd;
4338     unsigned int host_cmd;
4339     const char *name;
4340     int access;
4341     do_ioctl_fn *do_ioctl;
4342     const argtype arg_type[5];
4343 };
4344 
4345 #define IOC_R 0x0001
4346 #define IOC_W 0x0002
4347 #define IOC_RW (IOC_R | IOC_W)
4348 
4349 #define MAX_STRUCT_SIZE 4096
4350 
4351 #ifdef CONFIG_FIEMAP
4352 /* So fiemap access checks don't overflow on 32 bit systems.
4353  * This is very slightly smaller than the limit imposed by
4354  * the underlying kernel.
4355  */
4356 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4357                             / sizeof(struct fiemap_extent))
4358 
4359 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4360                                        int fd, int cmd, abi_long arg)
4361 {
4362     /* The parameter for this ioctl is a struct fiemap followed
4363      * by an array of struct fiemap_extent whose size is set
4364      * in fiemap->fm_extent_count. The array is filled in by the
4365      * ioctl.
4366      */
4367     int target_size_in, target_size_out;
4368     struct fiemap *fm;
4369     const argtype *arg_type = ie->arg_type;
4370     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4371     void *argptr, *p;
4372     abi_long ret;
4373     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4374     uint32_t outbufsz;
4375     int free_fm = 0;
4376 
4377     assert(arg_type[0] == TYPE_PTR);
4378     assert(ie->access == IOC_RW);
4379     arg_type++;
4380     target_size_in = thunk_type_size(arg_type, 0);
4381     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4382     if (!argptr) {
4383         return -TARGET_EFAULT;
4384     }
4385     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4386     unlock_user(argptr, arg, 0);
4387     fm = (struct fiemap *)buf_temp;
4388     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4389         return -TARGET_EINVAL;
4390     }
4391 
4392     outbufsz = sizeof (*fm) +
4393         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4394 
4395     if (outbufsz > MAX_STRUCT_SIZE) {
4396         /* We can't fit all the extents into the fixed size buffer.
4397          * Allocate one that is large enough and use it instead.
4398          */
4399         fm = g_try_malloc(outbufsz);
4400         if (!fm) {
4401             return -TARGET_ENOMEM;
4402         }
4403         memcpy(fm, buf_temp, sizeof(struct fiemap));
4404         free_fm = 1;
4405     }
4406     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4407     if (!is_error(ret)) {
4408         target_size_out = target_size_in;
4409         /* An extent_count of 0 means we were only counting the extents
4410          * so there are no structs to copy
4411          */
4412         if (fm->fm_extent_count != 0) {
4413             target_size_out += fm->fm_mapped_extents * extent_size;
4414         }
4415         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4416         if (!argptr) {
4417             ret = -TARGET_EFAULT;
4418         } else {
4419             /* Convert the struct fiemap */
4420             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4421             if (fm->fm_extent_count != 0) {
4422                 p = argptr + target_size_in;
4423                 /* ...and then all the struct fiemap_extents */
4424                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4425                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4426                                   THUNK_TARGET);
4427                     p += extent_size;
4428                 }
4429             }
4430             unlock_user(argptr, arg, target_size_out);
4431         }
4432     }
4433     if (free_fm) {
4434         g_free(fm);
4435     }
4436     return ret;
4437 }
4438 #endif
4439 
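/* SIOCGIFCONF needs a custom handler: struct ifconf carries a guest pointer
 * to an array of struct ifreq, so the buffer is sized in target terms and
 * each entry is converted individually rather than through the generic
 * thunk code.
 */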
4440 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4441                                 int fd, int cmd, abi_long arg)
4442 {
4443     const argtype *arg_type = ie->arg_type;
4444     int target_size;
4445     void *argptr;
4446     int ret;
4447     struct ifconf *host_ifconf;
4448     uint32_t outbufsz;
4449     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4450     int target_ifreq_size;
4451     int nb_ifreq;
4452     int free_buf = 0;
4453     int i;
4454     int target_ifc_len;
4455     abi_long target_ifc_buf;
4456     int host_ifc_len;
4457     char *host_ifc_buf;
4458 
4459     assert(arg_type[0] == TYPE_PTR);
4460     assert(ie->access == IOC_RW);
4461 
4462     arg_type++;
4463     target_size = thunk_type_size(arg_type, 0);
4464 
4465     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4466     if (!argptr)
4467         return -TARGET_EFAULT;
4468     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4469     unlock_user(argptr, arg, 0);
4470 
4471     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4472     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4473     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4474 
4475     if (target_ifc_buf != 0) {
4476         target_ifc_len = host_ifconf->ifc_len;
4477         nb_ifreq = target_ifc_len / target_ifreq_size;
4478         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4479 
4480         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4481         if (outbufsz > MAX_STRUCT_SIZE) {
4482             /*
4483              * We can't fit all the ifreq entries into the fixed-size buffer.
4484              * Allocate one that is large enough and use it instead.
4485              */
4486             host_ifconf = malloc(outbufsz);
4487             if (!host_ifconf) {
4488                 return -TARGET_ENOMEM;
4489             }
4490             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4491             free_buf = 1;
4492         }
4493         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4494 
4495         host_ifconf->ifc_len = host_ifc_len;
4496     } else {
4497         host_ifc_buf = NULL;
4498     }
4499     host_ifconf->ifc_buf = host_ifc_buf;
4500 
4501     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4502     if (!is_error(ret)) {
4503         /* convert host ifc_len to target ifc_len */
4504 
4505         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4506         target_ifc_len = nb_ifreq * target_ifreq_size;
4507         host_ifconf->ifc_len = target_ifc_len;
4508 
4509         /* restore target ifc_buf */
4510 
4511         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4512 
4513         /* copy struct ifconf to target user */
4514 
4515         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4516         if (!argptr)
4517             return -TARGET_EFAULT;
4518         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4519         unlock_user(argptr, arg, target_size);
4520 
4521         if (target_ifc_buf != 0) {
4522             /* copy ifreq[] to target user */
4523             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4524             for (i = 0; i < nb_ifreq ; i++) {
4525                 thunk_convert(argptr + i * target_ifreq_size,
4526                               host_ifc_buf + i * sizeof(struct ifreq),
4527                               ifreq_arg_type, THUNK_TARGET);
4528             }
4529             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4530         }
4531     }
4532 
4533     if (free_buf) {
4534         free(host_ifconf);
4535     }
4536 
4537     return ret;
4538 }
4539 
4540 #if defined(CONFIG_USBFS)
4541 #if HOST_LONG_BITS > 64
4542 #error USBDEVFS thunks do not support >64 bit hosts yet.
4543 #endif
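/* A submitted URB together with the guest addresses it came from: host_urb
 * is what the kernel sees, while target_urb_adr and target_buf_adr let the
 * reap/discard handlers map results back to the guest's usbdevfs_urb and
 * data buffer.
 */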
4544 struct live_urb {
4545     uint64_t target_urb_adr;
4546     uint64_t target_buf_adr;
4547     char *target_buf_ptr;
4548     struct usbdevfs_urb host_urb;
4549 };
4550 
4551 static GHashTable *usbdevfs_urb_hashtable(void)
4552 {
4553     static GHashTable *urb_hashtable;
4554 
4555     if (!urb_hashtable) {
4556         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4557     }
4558     return urb_hashtable;
4559 }
4560 
4561 static void urb_hashtable_insert(struct live_urb *urb)
4562 {
4563     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4564     g_hash_table_insert(urb_hashtable, urb, urb);
4565 }
4566 
4567 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4568 {
4569     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4570     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4571 }
4572 
4573 static void urb_hashtable_remove(struct live_urb *urb)
4574 {
4575     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4576     g_hash_table_remove(urb_hashtable, urb);
4577 }
4578 
4579 static abi_long
4580 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4581                           int fd, int cmd, abi_long arg)
4582 {
4583     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4584     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4585     struct live_urb *lurb;
4586     void *argptr;
4587     uint64_t hurb;
4588     int target_size;
4589     uintptr_t target_urb_adr;
4590     abi_long ret;
4591 
4592     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4593 
4594     memset(buf_temp, 0, sizeof(uint64_t));
4595     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4596     if (is_error(ret)) {
4597         return ret;
4598     }
4599 
4600     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4601     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4602     if (!lurb->target_urb_adr) {
4603         return -TARGET_EFAULT;
4604     }
4605     urb_hashtable_remove(lurb);
4606     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4607         lurb->host_urb.buffer_length);
4608     lurb->target_buf_ptr = NULL;
4609 
4610     /* restore the guest buffer pointer */
4611     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4612 
4613     /* update the guest urb struct */
4614     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4615     if (!argptr) {
4616         g_free(lurb);
4617         return -TARGET_EFAULT;
4618     }
4619     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4620     unlock_user(argptr, lurb->target_urb_adr, target_size);
4621 
4622     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4623     /* write back the urb handle */
4624     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4625     if (!argptr) {
4626         g_free(lurb);
4627         return -TARGET_EFAULT;
4628     }
4629 
4630     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4631     target_urb_adr = lurb->target_urb_adr;
4632     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4633     unlock_user(argptr, arg, target_size);
4634 
4635     g_free(lurb);
4636     return ret;
4637 }
4638 
4639 static abi_long
4640 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4641                              uint8_t *buf_temp __attribute__((unused)),
4642                              int fd, int cmd, abi_long arg)
4643 {
4644     struct live_urb *lurb;
4645 
4646     /* map target address back to host URB with metadata. */
4647     lurb = urb_hashtable_lookup(arg);
4648     if (!lurb) {
4649         return -TARGET_EFAULT;
4650     }
4651     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4652 }
4653 
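/*
 * USBDEVFS_SUBMITURB: wrap the guest urb in a struct live_urb so the host
 * urb handed to the kernel has a stable host address, lock the guest data
 * buffer for the lifetime of the transfer, and remember the guest urb and
 * buffer addresses so the reap handler can undo the translation.
 */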
4654 static abi_long
4655 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4656                             int fd, int cmd, abi_long arg)
4657 {
4658     const argtype *arg_type = ie->arg_type;
4659     int target_size;
4660     abi_long ret;
4661     void *argptr;
4662     int rw_dir;
4663     struct live_urb *lurb;
4664 
4665     /*
4666      * Each submitted URB needs to map to a unique ID for the
4667      * kernel, and that unique ID needs to be a pointer to
4668      * host memory, so we allocate a live_urb wrapper for each
4669      * URB.  Isochronous transfers have a variable-length struct.
4670      */
4671     arg_type++;
4672     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4673 
4674     /* construct host copy of urb and metadata */
4675     lurb = g_try_malloc0(sizeof(struct live_urb));
4676     if (!lurb) {
4677         return -TARGET_ENOMEM;
4678     }
4679 
4680     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4681     if (!argptr) {
4682         g_free(lurb);
4683         return -TARGET_EFAULT;
4684     }
4685     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4686     unlock_user(argptr, arg, 0);
4687 
4688     lurb->target_urb_adr = arg;
4689     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4690 
4691     /* buffer space used depends on endpoint type so lock the entire buffer */
4692     /* control type urbs should check the buffer contents for true direction */
4693     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4694     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4695         lurb->host_urb.buffer_length, 1);
4696     if (lurb->target_buf_ptr == NULL) {
4697         g_free(lurb);
4698         return -TARGET_EFAULT;
4699     }
4700 
4701     /* update buffer pointer in host copy */
4702     lurb->host_urb.buffer = lurb->target_buf_ptr;
4703 
4704     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4705     if (is_error(ret)) {
4706         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4707         g_free(lurb);
4708     } else {
4709         urb_hashtable_insert(lurb);
4710     }
4711 
4712     return ret;
4713 }
4714 #endif /* CONFIG_USBFS */
4715 
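/*
 * Device-mapper ioctls carry a variable-sized payload after struct dm_ioctl,
 * which does not fit in buf_temp (MAX_STRUCT_SIZE).  Allocate a larger
 * scratch buffer, convert the command-specific input payload, issue the
 * ioctl, then convert the command-specific output payload back into the
 * guest buffer and finally write the updated dm_ioctl header back.
 */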
4716 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4717                             int cmd, abi_long arg)
4718 {
4719     void *argptr;
4720     struct dm_ioctl *host_dm;
4721     abi_long guest_data;
4722     uint32_t guest_data_size;
4723     int target_size;
4724     const argtype *arg_type = ie->arg_type;
4725     abi_long ret;
4726     void *big_buf = NULL;
4727     char *host_data;
4728 
4729     arg_type++;
4730     target_size = thunk_type_size(arg_type, 0);
4731     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4732     if (!argptr) {
4733         ret = -TARGET_EFAULT;
4734         goto out;
4735     }
4736     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4737     unlock_user(argptr, arg, 0);
4738 
4739     /* buf_temp is too small, so fetch things into a bigger buffer */
4740     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4741     memcpy(big_buf, buf_temp, target_size);
4742     buf_temp = big_buf;
4743     host_dm = big_buf;
4744 
4745     guest_data = arg + host_dm->data_start;
4746     if ((guest_data - arg) < 0) {
4747         ret = -TARGET_EINVAL;
4748         goto out;
4749     }
4750     guest_data_size = host_dm->data_size - host_dm->data_start;
4751     host_data = (char*)host_dm + host_dm->data_start;
4752 
4753     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4754     if (!argptr) {
4755         ret = -TARGET_EFAULT;
4756         goto out;
4757     }
4758 
4759     switch (ie->host_cmd) {
4760     case DM_REMOVE_ALL:
4761     case DM_LIST_DEVICES:
4762     case DM_DEV_CREATE:
4763     case DM_DEV_REMOVE:
4764     case DM_DEV_SUSPEND:
4765     case DM_DEV_STATUS:
4766     case DM_DEV_WAIT:
4767     case DM_TABLE_STATUS:
4768     case DM_TABLE_CLEAR:
4769     case DM_TABLE_DEPS:
4770     case DM_LIST_VERSIONS:
4771         /* no input data */
4772         break;
4773     case DM_DEV_RENAME:
4774     case DM_DEV_SET_GEOMETRY:
4775         /* data contains only strings */
4776         memcpy(host_data, argptr, guest_data_size);
4777         break;
4778     case DM_TARGET_MSG:
4779         memcpy(host_data, argptr, guest_data_size);
4780         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4781         break;
4782     case DM_TABLE_LOAD:
4783     {
4784         void *gspec = argptr;
4785         void *cur_data = host_data;
4786         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4787         int spec_size = thunk_type_size(arg_type, 0);
4788         int i;
4789 
4790         for (i = 0; i < host_dm->target_count; i++) {
4791             struct dm_target_spec *spec = cur_data;
4792             uint32_t next;
4793             int slen;
4794 
4795             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4796             slen = strlen((char*)gspec + spec_size) + 1;
4797             next = spec->next;
4798             spec->next = sizeof(*spec) + slen;
4799             strcpy((char*)&spec[1], gspec + spec_size);
4800             gspec += next;
4801             cur_data += spec->next;
4802         }
4803         break;
4804     }
4805     default:
4806         ret = -TARGET_EINVAL;
4807         unlock_user(argptr, guest_data, 0);
4808         goto out;
4809     }
4810     unlock_user(argptr, guest_data, 0);
4811 
4812     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4813     if (!is_error(ret)) {
4814         guest_data = arg + host_dm->data_start;
4815         guest_data_size = host_dm->data_size - host_dm->data_start;
4816         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4817         switch (ie->host_cmd) {
4818         case DM_REMOVE_ALL:
4819         case DM_DEV_CREATE:
4820         case DM_DEV_REMOVE:
4821         case DM_DEV_RENAME:
4822         case DM_DEV_SUSPEND:
4823         case DM_DEV_STATUS:
4824         case DM_TABLE_LOAD:
4825         case DM_TABLE_CLEAR:
4826         case DM_TARGET_MSG:
4827         case DM_DEV_SET_GEOMETRY:
4828             /* no return data */
4829             break;
4830         case DM_LIST_DEVICES:
4831         {
4832             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4833             uint32_t remaining_data = guest_data_size;
4834             void *cur_data = argptr;
4835             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4836             int nl_size = 12; /* can't use thunk_size due to alignment */
4837 
4838             while (1) {
4839                 uint32_t next = nl->next;
4840                 if (next) {
4841                     nl->next = nl_size + (strlen(nl->name) + 1);
4842                 }
4843                 if (remaining_data < nl->next) {
4844                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4845                     break;
4846                 }
4847                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4848                 strcpy(cur_data + nl_size, nl->name);
4849                 cur_data += nl->next;
4850                 remaining_data -= nl->next;
4851                 if (!next) {
4852                     break;
4853                 }
4854                 nl = (void*)nl + next;
4855             }
4856             break;
4857         }
4858         case DM_DEV_WAIT:
4859         case DM_TABLE_STATUS:
4860         {
4861             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4862             void *cur_data = argptr;
4863             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4864             int spec_size = thunk_type_size(arg_type, 0);
4865             int i;
4866 
4867             for (i = 0; i < host_dm->target_count; i++) {
4868                 uint32_t next = spec->next;
4869                 int slen = strlen((char*)&spec[1]) + 1;
4870                 spec->next = (cur_data - argptr) + spec_size + slen;
4871                 if (guest_data_size < spec->next) {
4872                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4873                     break;
4874                 }
4875                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4876                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4877                 cur_data = argptr + spec->next;
4878                 spec = (void*)host_dm + host_dm->data_start + next;
4879             }
4880             break;
4881         }
4882         case DM_TABLE_DEPS:
4883         {
4884             void *hdata = (void*)host_dm + host_dm->data_start;
4885             int count = *(uint32_t*)hdata;
4886             uint64_t *hdev = hdata + 8;
4887             uint64_t *gdev = argptr + 8;
4888             int i;
4889 
4890             *(uint32_t*)argptr = tswap32(count);
4891             for (i = 0; i < count; i++) {
4892                 *gdev = tswap64(*hdev);
4893                 gdev++;
4894                 hdev++;
4895             }
4896             break;
4897         }
4898         case DM_LIST_VERSIONS:
4899         {
4900             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4901             uint32_t remaining_data = guest_data_size;
4902             void *cur_data = argptr;
4903             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4904             int vers_size = thunk_type_size(arg_type, 0);
4905 
4906             while (1) {
4907                 uint32_t next = vers->next;
4908                 if (next) {
4909                     vers->next = vers_size + (strlen(vers->name) + 1);
4910                 }
4911                 if (remaining_data < vers->next) {
4912                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4913                     break;
4914                 }
4915                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4916                 strcpy(cur_data + vers_size, vers->name);
4917                 cur_data += vers->next;
4918                 remaining_data -= vers->next;
4919                 if (!next) {
4920                     break;
4921                 }
4922                 vers = (void*)vers + next;
4923             }
4924             break;
4925         }
4926         default:
4927             unlock_user(argptr, guest_data, 0);
4928             ret = -TARGET_EINVAL;
4929             goto out;
4930         }
4931         unlock_user(argptr, guest_data, guest_data_size);
4932 
4933         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4934         if (!argptr) {
4935             ret = -TARGET_EFAULT;
4936             goto out;
4937         }
4938         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4939         unlock_user(argptr, arg, target_size);
4940     }
4941 out:
4942     g_free(big_buf);
4943     return ret;
4944 }
4945 
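/*
 * BLKPG: struct blkpg_ioctl_arg embeds a pointer to a struct blkpg_partition.
 * Convert the outer struct, then fetch and convert the partition descriptor
 * it points to, and swizzle host_blkpg->data to the host-side copy before
 * issuing the host ioctl.
 */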
4946 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4947                                int cmd, abi_long arg)
4948 {
4949     void *argptr;
4950     int target_size;
4951     const argtype *arg_type = ie->arg_type;
4952     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4953     abi_long ret;
4954 
4955     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4956     struct blkpg_partition host_part;
4957 
4958     /* Read and convert blkpg */
4959     arg_type++;
4960     target_size = thunk_type_size(arg_type, 0);
4961     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4962     if (!argptr) {
4963         ret = -TARGET_EFAULT;
4964         goto out;
4965     }
4966     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4967     unlock_user(argptr, arg, 0);
4968 
4969     switch (host_blkpg->op) {
4970     case BLKPG_ADD_PARTITION:
4971     case BLKPG_DEL_PARTITION:
4972         /* payload is struct blkpg_partition */
4973         break;
4974     default:
4975         /* Unknown opcode */
4976         ret = -TARGET_EINVAL;
4977         goto out;
4978     }
4979 
4980     /* Read and convert blkpg->data */
4981     arg = (abi_long)(uintptr_t)host_blkpg->data;
4982     target_size = thunk_type_size(part_arg_type, 0);
4983     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4984     if (!argptr) {
4985         ret = -TARGET_EFAULT;
4986         goto out;
4987     }
4988     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4989     unlock_user(argptr, arg, 0);
4990 
4991     /* Swizzle the data pointer to our local copy and call! */
4992     host_blkpg->data = &host_part;
4993     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4994 
4995 out:
4996     return ret;
4997 }
4998 
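/*
 * Routing table ioctls (SIOCADDRT/SIOCDELRT): struct rtentry contains an
 * rt_dev pointer to a device-name string that the generic thunk machinery
 * cannot convert.  Walk the structure field by field, locking the guest
 * string in host memory for the duration of the ioctl and unlocking it
 * afterwards.
 */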
4999 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5000                                 int fd, int cmd, abi_long arg)
5001 {
5002     const argtype *arg_type = ie->arg_type;
5003     const StructEntry *se;
5004     const argtype *field_types;
5005     const int *dst_offsets, *src_offsets;
5006     int target_size;
5007     void *argptr;
5008     abi_ulong *target_rt_dev_ptr = NULL;
5009     unsigned long *host_rt_dev_ptr = NULL;
5010     abi_long ret;
5011     int i;
5012 
5013     assert(ie->access == IOC_W);
5014     assert(*arg_type == TYPE_PTR);
5015     arg_type++;
5016     assert(*arg_type == TYPE_STRUCT);
5017     target_size = thunk_type_size(arg_type, 0);
5018     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5019     if (!argptr) {
5020         return -TARGET_EFAULT;
5021     }
5022     arg_type++;
5023     assert(*arg_type == (int)STRUCT_rtentry);
5024     se = struct_entries + *arg_type++;
5025     assert(se->convert[0] == NULL);
5026     /* convert struct here to be able to catch rt_dev string */
5027     field_types = se->field_types;
5028     dst_offsets = se->field_offsets[THUNK_HOST];
5029     src_offsets = se->field_offsets[THUNK_TARGET];
5030     for (i = 0; i < se->nb_fields; i++) {
5031         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5032             assert(*field_types == TYPE_PTRVOID);
5033             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5034             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5035             if (*target_rt_dev_ptr != 0) {
5036                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5037                                                   tswapal(*target_rt_dev_ptr));
5038                 if (!*host_rt_dev_ptr) {
5039                     unlock_user(argptr, arg, 0);
5040                     return -TARGET_EFAULT;
5041                 }
5042             } else {
5043                 *host_rt_dev_ptr = 0;
5044             }
5045             field_types++;
5046             continue;
5047         }
5048         field_types = thunk_convert(buf_temp + dst_offsets[i],
5049                                     argptr + src_offsets[i],
5050                                     field_types, THUNK_HOST);
5051     }
5052     unlock_user(argptr, arg, 0);
5053 
5054     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5055 
5056     assert(host_rt_dev_ptr != NULL);
5057     assert(target_rt_dev_ptr != NULL);
5058     if (*host_rt_dev_ptr != 0) {
5059         unlock_user((void *)*host_rt_dev_ptr,
5060                     *target_rt_dev_ptr, 0);
5061     }
5062     return ret;
5063 }
5064 
5065 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5066                                      int fd, int cmd, abi_long arg)
5067 {
5068     int sig = target_to_host_signal(arg);
5069     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5070 }
5071 
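/*
 * SIOCGSTAMP/SIOCGSTAMPNS: fetch the last-packet timestamp from the host and
 * copy it out in either the old timeval/timespec layout or the explicit
 * 64-bit layout, depending on which target command the guest used.
 */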
5072 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5073                                     int fd, int cmd, abi_long arg)
5074 {
5075     struct timeval tv;
5076     abi_long ret;
5077 
5078     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5079     if (is_error(ret)) {
5080         return ret;
5081     }
5082 
5083     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5084         if (copy_to_user_timeval(arg, &tv)) {
5085             return -TARGET_EFAULT;
5086         }
5087     } else {
5088         if (copy_to_user_timeval64(arg, &tv)) {
5089             return -TARGET_EFAULT;
5090         }
5091     }
5092 
5093     return ret;
5094 }
5095 
5096 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5097                                       int fd, int cmd, abi_long arg)
5098 {
5099     struct timespec ts;
5100     abi_long ret;
5101 
5102     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5103     if (is_error(ret)) {
5104         return ret;
5105     }
5106 
5107     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5108         if (host_to_target_timespec(arg, &ts)) {
5109             return -TARGET_EFAULT;
5110         }
5111     } else {
5112         if (host_to_target_timespec64(arg, &ts)) {
5113             return -TARGET_EFAULT;
5114         }
5115     }
5116 
5117     return ret;
5118 }
5119 
5120 #ifdef TIOCGPTPEER
5121 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5122                                      int fd, int cmd, abi_long arg)
5123 {
5124     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5125     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5126 }
5127 #endif
5128 
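/*
 * The ioctl table is generated from ioctls.h: IOCTL() entries are converted
 * generically from their argtype description, IOCTL_SPECIAL() entries
 * dispatch to a custom do_fn such as the helpers above, and IOCTL_IGNORE()
 * entries are recognised but have no host counterpart.
 */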
5129 static IOCTLEntry ioctl_entries[] = {
5130 #define IOCTL(cmd, access, ...) \
5131     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5132 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5133     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5134 #define IOCTL_IGNORE(cmd) \
5135     { TARGET_ ## cmd, 0, #cmd },
5136 #include "ioctls.h"
5137     { 0, 0, },
5138 };
5139 
5140 /* ??? Implement proper locking for ioctls.  */
5141 /* do_ioctl() must return target values and target errnos. */
5142 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5143 {
5144     const IOCTLEntry *ie;
5145     const argtype *arg_type;
5146     abi_long ret;
5147     uint8_t buf_temp[MAX_STRUCT_SIZE];
5148     int target_size;
5149     void *argptr;
5150 
5151     ie = ioctl_entries;
5152     for(;;) {
5153         if (ie->target_cmd == 0) {
5154             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5155             return -TARGET_ENOSYS;
5156         }
5157         if (ie->target_cmd == cmd)
5158             break;
5159         ie++;
5160     }
5161     arg_type = ie->arg_type;
5162     if (ie->do_ioctl) {
5163         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5164     } else if (!ie->host_cmd) {
5165         /* Some architectures define BSD ioctls in their headers
5166            that are not implemented in Linux.  */
5167         return -TARGET_ENOSYS;
5168     }
5169 
5170     switch(arg_type[0]) {
5171     case TYPE_NULL:
5172         /* no argument */
5173         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5174         break;
5175     case TYPE_PTRVOID:
5176     case TYPE_INT:
5177         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5178         break;
5179     case TYPE_PTR:
5180         arg_type++;
5181         target_size = thunk_type_size(arg_type, 0);
5182         switch(ie->access) {
5183         case IOC_R:
5184             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5185             if (!is_error(ret)) {
5186                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5187                 if (!argptr)
5188                     return -TARGET_EFAULT;
5189                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5190                 unlock_user(argptr, arg, target_size);
5191             }
5192             break;
5193         case IOC_W:
5194             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5195             if (!argptr)
5196                 return -TARGET_EFAULT;
5197             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5198             unlock_user(argptr, arg, 0);
5199             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5200             break;
5201         default:
5202         case IOC_RW:
5203             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5204             if (!argptr)
5205                 return -TARGET_EFAULT;
5206             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5207             unlock_user(argptr, arg, 0);
5208             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5209             if (!is_error(ret)) {
5210                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5211                 if (!argptr)
5212                     return -TARGET_EFAULT;
5213                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5214                 unlock_user(argptr, arg, target_size);
5215             }
5216             break;
5217         }
5218         break;
5219     default:
5220         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5221                  (long)cmd, arg_type[0]);
5222         ret = -TARGET_ENOSYS;
5223         break;
5224     }
5225     return ret;
5226 }
5227 
5228 static const bitmask_transtbl iflag_tbl[] = {
5229         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5230         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5231         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5232         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5233         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5234         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5235         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5236         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5237         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5238         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5239         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5240         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5241         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5242         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5243         { 0, 0, 0, 0 }
5244 };
5245 
5246 static const bitmask_transtbl oflag_tbl[] = {
5247 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5248 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5249 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5250 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5251 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5252 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5253 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5254 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5255 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5256 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5257 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5258 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5259 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5260 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5261 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5262 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5263 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5264 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5265 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5266 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5267 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5268 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5269 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5270 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5271 	{ 0, 0, 0, 0 }
5272 };
5273 
5274 static const bitmask_transtbl cflag_tbl[] = {
5275 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5276 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5277 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5278 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5279 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5280 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5281 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5282 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5283 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5284 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5285 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5286 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5287 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5288 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5289 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5290 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5291 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5292 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5293 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5294 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5295 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5296 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5297 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5298 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5299 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5300 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5301 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5302 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5303 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5304 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5305 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5306 	{ 0, 0, 0, 0 }
5307 };
5308 
5309 static const bitmask_transtbl lflag_tbl[] = {
5310 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5311 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5312 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5313 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5314 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5315 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5316 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5317 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5318 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5319 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5320 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5321 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5322 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5323 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5324 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5325 	{ 0, 0, 0, 0 }
5326 };
5327 
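/*
 * termios conversion: the flag words are translated through the bitmask
 * tables above, while the control characters are copied slot by slot because
 * the TARGET_V* indices do not necessarily match the host V* indices.
 */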
5328 static void target_to_host_termios (void *dst, const void *src)
5329 {
5330     struct host_termios *host = dst;
5331     const struct target_termios *target = src;
5332 
5333     host->c_iflag =
5334         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5335     host->c_oflag =
5336         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5337     host->c_cflag =
5338         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5339     host->c_lflag =
5340         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5341     host->c_line = target->c_line;
5342 
5343     memset(host->c_cc, 0, sizeof(host->c_cc));
5344     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5345     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5346     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5347     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5348     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5349     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5350     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5351     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5352     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5353     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5354     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5355     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5356     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5357     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5358     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5359     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5360     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5361 }
5362 
5363 static void host_to_target_termios (void *dst, const void *src)
5364 {
5365     struct target_termios *target = dst;
5366     const struct host_termios *host = src;
5367 
5368     target->c_iflag =
5369         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5370     target->c_oflag =
5371         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5372     target->c_cflag =
5373         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5374     target->c_lflag =
5375         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5376     target->c_line = host->c_line;
5377 
5378     memset(target->c_cc, 0, sizeof(target->c_cc));
5379     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5380     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5381     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5382     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5383     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5384     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5385     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5386     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5387     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5388     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5389     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5390     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5391     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5392     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5393     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5394     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5395     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5396 }
5397 
5398 static const StructEntry struct_termios_def = {
5399     .convert = { host_to_target_termios, target_to_host_termios },
5400     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5401     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5402 };
5403 
5404 static bitmask_transtbl mmap_flags_tbl[] = {
5405     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5406     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5407     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5408     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5409       MAP_ANONYMOUS, MAP_ANONYMOUS },
5410     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5411       MAP_GROWSDOWN, MAP_GROWSDOWN },
5412     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5413       MAP_DENYWRITE, MAP_DENYWRITE },
5414     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5415       MAP_EXECUTABLE, MAP_EXECUTABLE },
5416     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5417     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5418       MAP_NORESERVE, MAP_NORESERVE },
5419     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5420     /* MAP_STACK had been ignored by the kernel for quite some time.
5421        Recognize it for the target insofar as we do not want to pass
5422        it through to the host.  */
5423     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5424     { 0, 0, 0, 0 }
5425 };
5426 
5427 #if defined(TARGET_I386)
5428 
5429 /* NOTE: there is really one LDT for all the threads */
5430 static uint8_t *ldt_table;
5431 
5432 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5433 {
5434     int size;
5435     void *p;
5436 
5437     if (!ldt_table)
5438         return 0;
5439     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5440     if (size > bytecount)
5441         size = bytecount;
5442     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5443     if (!p)
5444         return -TARGET_EFAULT;
5445     /* ??? Should this be byteswapped?  */
5446     memcpy(p, ldt_table, size);
5447     unlock_user(p, ptr, size);
5448     return size;
5449 }
5450 
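/*
 * modify_ldt(write): validate the guest descriptor, lazily allocate the
 * guest-visible LDT via target_mmap() on first use, and pack the descriptor
 * into the two 32-bit words of the x86 LDT entry format (same encoding as
 * the Linux kernel uses).
 */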
5451 /* XXX: add locking support */
5452 static abi_long write_ldt(CPUX86State *env,
5453                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5454 {
5455     struct target_modify_ldt_ldt_s ldt_info;
5456     struct target_modify_ldt_ldt_s *target_ldt_info;
5457     int seg_32bit, contents, read_exec_only, limit_in_pages;
5458     int seg_not_present, useable, lm;
5459     uint32_t *lp, entry_1, entry_2;
5460 
5461     if (bytecount != sizeof(ldt_info))
5462         return -TARGET_EINVAL;
5463     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5464         return -TARGET_EFAULT;
5465     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5466     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5467     ldt_info.limit = tswap32(target_ldt_info->limit);
5468     ldt_info.flags = tswap32(target_ldt_info->flags);
5469     unlock_user_struct(target_ldt_info, ptr, 0);
5470 
5471     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5472         return -TARGET_EINVAL;
5473     seg_32bit = ldt_info.flags & 1;
5474     contents = (ldt_info.flags >> 1) & 3;
5475     read_exec_only = (ldt_info.flags >> 3) & 1;
5476     limit_in_pages = (ldt_info.flags >> 4) & 1;
5477     seg_not_present = (ldt_info.flags >> 5) & 1;
5478     useable = (ldt_info.flags >> 6) & 1;
5479 #ifdef TARGET_ABI32
5480     lm = 0;
5481 #else
5482     lm = (ldt_info.flags >> 7) & 1;
5483 #endif
5484     if (contents == 3) {
5485         if (oldmode)
5486             return -TARGET_EINVAL;
5487         if (seg_not_present == 0)
5488             return -TARGET_EINVAL;
5489     }
5490     /* allocate the LDT */
5491     if (!ldt_table) {
5492         env->ldt.base = target_mmap(0,
5493                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5494                                     PROT_READ|PROT_WRITE,
5495                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5496         if (env->ldt.base == -1)
5497             return -TARGET_ENOMEM;
5498         memset(g2h(env->ldt.base), 0,
5499                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5500         env->ldt.limit = 0xffff;
5501         ldt_table = g2h(env->ldt.base);
5502     }
5503 
5504     /* NOTE: same code as Linux kernel */
5505     /* Allow LDTs to be cleared by the user. */
5506     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5507         if (oldmode ||
5508             (contents == 0		&&
5509              read_exec_only == 1	&&
5510              seg_32bit == 0		&&
5511              limit_in_pages == 0	&&
5512              seg_not_present == 1	&&
5513              useable == 0 )) {
5514             entry_1 = 0;
5515             entry_2 = 0;
5516             goto install;
5517         }
5518     }
5519 
5520     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5521         (ldt_info.limit & 0x0ffff);
5522     entry_2 = (ldt_info.base_addr & 0xff000000) |
5523         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5524         (ldt_info.limit & 0xf0000) |
5525         ((read_exec_only ^ 1) << 9) |
5526         (contents << 10) |
5527         ((seg_not_present ^ 1) << 15) |
5528         (seg_32bit << 22) |
5529         (limit_in_pages << 23) |
5530         (lm << 21) |
5531         0x7000;
5532     if (!oldmode)
5533         entry_2 |= (useable << 20);
5534 
5535     /* Install the new entry ...  */
5536 install:
5537     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5538     lp[0] = tswap32(entry_1);
5539     lp[1] = tswap32(entry_2);
5540     return 0;
5541 }
5542 
5543 /* specific and weird i386 syscalls */
5544 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5545                               unsigned long bytecount)
5546 {
5547     abi_long ret;
5548 
5549     switch (func) {
5550     case 0:
5551         ret = read_ldt(ptr, bytecount);
5552         break;
5553     case 1:
5554         ret = write_ldt(env, ptr, bytecount, 1);
5555         break;
5556     case 0x11:
5557         ret = write_ldt(env, ptr, bytecount, 0);
5558         break;
5559     default:
5560         ret = -TARGET_ENOSYS;
5561         break;
5562     }
5563     return ret;
5564 }
5565 
5566 #if defined(TARGET_I386) && defined(TARGET_ABI32)
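/*
 * set_thread_area/get_thread_area: operate on the GDT TLS slots rather than
 * the LDT.  An entry_number of -1 asks us to pick the first free TLS entry
 * and report the chosen index back to the guest.
 */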
5567 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5568 {
5569     uint64_t *gdt_table = g2h(env->gdt.base);
5570     struct target_modify_ldt_ldt_s ldt_info;
5571     struct target_modify_ldt_ldt_s *target_ldt_info;
5572     int seg_32bit, contents, read_exec_only, limit_in_pages;
5573     int seg_not_present, useable, lm;
5574     uint32_t *lp, entry_1, entry_2;
5575     int i;
5576 
5577     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5578     if (!target_ldt_info)
5579         return -TARGET_EFAULT;
5580     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5581     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5582     ldt_info.limit = tswap32(target_ldt_info->limit);
5583     ldt_info.flags = tswap32(target_ldt_info->flags);
5584     if (ldt_info.entry_number == -1) {
5585         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5586             if (gdt_table[i] == 0) {
5587                 ldt_info.entry_number = i;
5588                 target_ldt_info->entry_number = tswap32(i);
5589                 break;
5590             }
5591         }
5592     }
5593     unlock_user_struct(target_ldt_info, ptr, 1);
5594 
5595     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5596         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5597            return -TARGET_EINVAL;
5598     seg_32bit = ldt_info.flags & 1;
5599     contents = (ldt_info.flags >> 1) & 3;
5600     read_exec_only = (ldt_info.flags >> 3) & 1;
5601     limit_in_pages = (ldt_info.flags >> 4) & 1;
5602     seg_not_present = (ldt_info.flags >> 5) & 1;
5603     useable = (ldt_info.flags >> 6) & 1;
5604 #ifdef TARGET_ABI32
5605     lm = 0;
5606 #else
5607     lm = (ldt_info.flags >> 7) & 1;
5608 #endif
5609 
5610     if (contents == 3) {
5611         if (seg_not_present == 0)
5612             return -TARGET_EINVAL;
5613     }
5614 
5615     /* NOTE: same code as Linux kernel */
5616     /* Allow LDTs to be cleared by the user. */
5617     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5618         if ((contents == 0             &&
5619              read_exec_only == 1       &&
5620              seg_32bit == 0            &&
5621              limit_in_pages == 0       &&
5622              seg_not_present == 1      &&
5623              useable == 0 )) {
5624             entry_1 = 0;
5625             entry_2 = 0;
5626             goto install;
5627         }
5628     }
5629 
5630     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5631         (ldt_info.limit & 0x0ffff);
5632     entry_2 = (ldt_info.base_addr & 0xff000000) |
5633         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5634         (ldt_info.limit & 0xf0000) |
5635         ((read_exec_only ^ 1) << 9) |
5636         (contents << 10) |
5637         ((seg_not_present ^ 1) << 15) |
5638         (seg_32bit << 22) |
5639         (limit_in_pages << 23) |
5640         (useable << 20) |
5641         (lm << 21) |
5642         0x7000;
5643 
5644     /* Install the new entry ...  */
5645 install:
5646     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5647     lp[0] = tswap32(entry_1);
5648     lp[1] = tswap32(entry_2);
5649     return 0;
5650 }
5651 
5652 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5653 {
5654     struct target_modify_ldt_ldt_s *target_ldt_info;
5655     uint64_t *gdt_table = g2h(env->gdt.base);
5656     uint32_t base_addr, limit, flags;
5657     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5658     int seg_not_present, useable, lm;
5659     uint32_t *lp, entry_1, entry_2;
5660 
5661     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5662     if (!target_ldt_info)
5663         return -TARGET_EFAULT;
5664     idx = tswap32(target_ldt_info->entry_number);
5665     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5666         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5667         unlock_user_struct(target_ldt_info, ptr, 1);
5668         return -TARGET_EINVAL;
5669     }
5670     lp = (uint32_t *)(gdt_table + idx);
5671     entry_1 = tswap32(lp[0]);
5672     entry_2 = tswap32(lp[1]);
5673 
5674     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5675     contents = (entry_2 >> 10) & 3;
5676     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5677     seg_32bit = (entry_2 >> 22) & 1;
5678     limit_in_pages = (entry_2 >> 23) & 1;
5679     useable = (entry_2 >> 20) & 1;
5680 #ifdef TARGET_ABI32
5681     lm = 0;
5682 #else
5683     lm = (entry_2 >> 21) & 1;
5684 #endif
5685     flags = (seg_32bit << 0) | (contents << 1) |
5686         (read_exec_only << 3) | (limit_in_pages << 4) |
5687         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5688     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5689     base_addr = (entry_1 >> 16) |
5690         (entry_2 & 0xff000000) |
5691         ((entry_2 & 0xff) << 16);
5692     target_ldt_info->base_addr = tswapal(base_addr);
5693     target_ldt_info->limit = tswap32(limit);
5694     target_ldt_info->flags = tswap32(flags);
5695     unlock_user_struct(target_ldt_info, ptr, 1);
5696     return 0;
5697 }
5698 #endif /* TARGET_I386 && TARGET_ABI32 */
5699 
5700 #ifndef TARGET_ABI32
5701 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5702 {
5703     abi_long ret = 0;
5704     abi_ulong val;
5705     int idx;
5706 
5707     switch(code) {
5708     case TARGET_ARCH_SET_GS:
5709     case TARGET_ARCH_SET_FS:
5710         if (code == TARGET_ARCH_SET_GS)
5711             idx = R_GS;
5712         else
5713             idx = R_FS;
5714         cpu_x86_load_seg(env, idx, 0);
5715         env->segs[idx].base = addr;
5716         break;
5717     case TARGET_ARCH_GET_GS:
5718     case TARGET_ARCH_GET_FS:
5719         if (code == TARGET_ARCH_GET_GS)
5720             idx = R_GS;
5721         else
5722             idx = R_FS;
5723         val = env->segs[idx].base;
5724         if (put_user(val, addr, abi_ulong))
5725             ret = -TARGET_EFAULT;
5726         break;
5727     default:
5728         ret = -TARGET_EINVAL;
5729         break;
5730     }
5731     return ret;
5732 }
5733 #endif
5734 
5735 #endif /* defined(TARGET_I386) */
5736 
5737 #define NEW_STACK_SIZE 0x40000
5738 
5739 
5740 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5741 typedef struct {
5742     CPUArchState *env;
5743     pthread_mutex_t mutex;
5744     pthread_cond_t cond;
5745     pthread_t thread;
5746     uint32_t tid;
5747     abi_ulong child_tidptr;
5748     abi_ulong parent_tidptr;
5749     sigset_t sigmask;
5750 } new_thread_info;
5751 
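/*
 * Thread entry point for do_fork() with CLONE_VM: register the new thread
 * with RCU and TCG, publish the TID to the requested locations, restore the
 * signal mask, wake the parent, then wait on clone_lock so the parent can
 * finish setting up the TLS state before we enter cpu_loop().
 */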
5752 static void *clone_func(void *arg)
5753 {
5754     new_thread_info *info = arg;
5755     CPUArchState *env;
5756     CPUState *cpu;
5757     TaskState *ts;
5758 
5759     rcu_register_thread();
5760     tcg_register_thread();
5761     env = info->env;
5762     cpu = env_cpu(env);
5763     thread_cpu = cpu;
5764     ts = (TaskState *)cpu->opaque;
5765     info->tid = sys_gettid();
5766     task_settid(ts);
5767     if (info->child_tidptr)
5768         put_user_u32(info->tid, info->child_tidptr);
5769     if (info->parent_tidptr)
5770         put_user_u32(info->tid, info->parent_tidptr);
5771     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5772     /* Enable signals.  */
5773     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5774     /* Signal to the parent that we're ready.  */
5775     pthread_mutex_lock(&info->mutex);
5776     pthread_cond_broadcast(&info->cond);
5777     pthread_mutex_unlock(&info->mutex);
5778     /* Wait until the parent has finished initializing the tls state.  */
5779     pthread_mutex_lock(&clone_lock);
5780     pthread_mutex_unlock(&clone_lock);
5781     cpu_loop(env);
5782     /* never exits */
5783     return NULL;
5784 }
5785 
5786 /* do_fork() must return host values and target errnos (unlike most
5787    do_*() functions). */
5788 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5789                    abi_ulong parent_tidptr, target_ulong newtls,
5790                    abi_ulong child_tidptr)
5791 {
5792     CPUState *cpu = env_cpu(env);
5793     int ret;
5794     TaskState *ts;
5795     CPUState *new_cpu;
5796     CPUArchState *new_env;
5797     sigset_t sigmask;
5798 
5799     flags &= ~CLONE_IGNORED_FLAGS;
5800 
5801     /* Emulate vfork() with fork() */
5802     if (flags & CLONE_VFORK)
5803         flags &= ~(CLONE_VFORK | CLONE_VM);
5804 
5805     if (flags & CLONE_VM) {
5806         TaskState *parent_ts = (TaskState *)cpu->opaque;
5807         new_thread_info info;
5808         pthread_attr_t attr;
5809 
5810         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5811             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5812             return -TARGET_EINVAL;
5813         }
5814 
5815         ts = g_new0(TaskState, 1);
5816         init_task_state(ts);
5817 
5818         /* Grab a mutex so that thread setup appears atomic.  */
5819         pthread_mutex_lock(&clone_lock);
5820 
5821         /* we create a new CPU instance. */
5822         new_env = cpu_copy(env);
5823         /* Init regs that differ from the parent.  */
5824         cpu_clone_regs_child(new_env, newsp, flags);
5825         cpu_clone_regs_parent(env, flags);
5826         new_cpu = env_cpu(new_env);
5827         new_cpu->opaque = ts;
5828         ts->bprm = parent_ts->bprm;
5829         ts->info = parent_ts->info;
5830         ts->signal_mask = parent_ts->signal_mask;
5831 
5832         if (flags & CLONE_CHILD_CLEARTID) {
5833             ts->child_tidptr = child_tidptr;
5834         }
5835 
5836         if (flags & CLONE_SETTLS) {
5837             cpu_set_tls (new_env, newtls);
5838         }
5839 
5840         memset(&info, 0, sizeof(info));
5841         pthread_mutex_init(&info.mutex, NULL);
5842         pthread_mutex_lock(&info.mutex);
5843         pthread_cond_init(&info.cond, NULL);
5844         info.env = new_env;
5845         if (flags & CLONE_CHILD_SETTID) {
5846             info.child_tidptr = child_tidptr;
5847         }
5848         if (flags & CLONE_PARENT_SETTID) {
5849             info.parent_tidptr = parent_tidptr;
5850         }
5851 
5852         ret = pthread_attr_init(&attr);
5853         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5854         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5855         /* It is not safe to deliver signals until the child has finished
5856            initializing, so temporarily block all signals.  */
5857         sigfillset(&sigmask);
5858         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5859         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5860 
5861         /* If this is our first additional thread, we need to ensure we
5862          * generate code for parallel execution and flush old translations.
5863          */
5864         if (!parallel_cpus) {
5865             parallel_cpus = true;
5866             tb_flush(cpu);
5867         }
5868 
5869         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5870         /* TODO: Free new CPU state if thread creation failed.  */
5871 
5872         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5873         pthread_attr_destroy(&attr);
5874         if (ret == 0) {
5875             /* Wait for the child to initialize.  */
5876             pthread_cond_wait(&info.cond, &info.mutex);
5877             ret = info.tid;
5878         } else {
5879             ret = -1;
5880         }
5881         pthread_mutex_unlock(&info.mutex);
5882         pthread_cond_destroy(&info.cond);
5883         pthread_mutex_destroy(&info.mutex);
5884         pthread_mutex_unlock(&clone_lock);
5885     } else {
5886         /* if no CLONE_VM, we consider it is a fork */
5887         if (flags & CLONE_INVALID_FORK_FLAGS) {
5888             return -TARGET_EINVAL;
5889         }
5890 
5891         /* We can't support custom termination signals */
5892         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5893             return -TARGET_EINVAL;
5894         }
5895 
5896         if (block_signals()) {
5897             return -TARGET_ERESTARTSYS;
5898         }
5899 
5900         fork_start();
5901         ret = fork();
5902         if (ret == 0) {
5903             /* Child Process.  */
5904             cpu_clone_regs_child(env, newsp, flags);
5905             fork_end(1);
5906             /* There is a race condition here.  The parent process could
5907                theoretically read the TID in the child process before the child
5908                tid is set.  This would require using either ptrace
5909                (not implemented) or having *_tidptr point at a shared memory
5910                mapping.  We can't repeat the spinlock hack used above because
5911                the child process gets its own copy of the lock.  */
5912             if (flags & CLONE_CHILD_SETTID)
5913                 put_user_u32(sys_gettid(), child_tidptr);
5914             if (flags & CLONE_PARENT_SETTID)
5915                 put_user_u32(sys_gettid(), parent_tidptr);
5916             ts = (TaskState *)cpu->opaque;
5917             if (flags & CLONE_SETTLS)
5918                 cpu_set_tls (env, newtls);
5919             if (flags & CLONE_CHILD_CLEARTID)
5920                 ts->child_tidptr = child_tidptr;
5921         } else {
5922             cpu_clone_regs_parent(env, flags);
5923             fork_end(0);
5924         }
5925     }
5926     return ret;
5927 }
5928 
5929 /* Warning: doesn't handle Linux-specific flags... */
5930 static int target_to_host_fcntl_cmd(int cmd)
5931 {
5932     int ret;
5933 
5934     switch(cmd) {
5935     case TARGET_F_DUPFD:
5936     case TARGET_F_GETFD:
5937     case TARGET_F_SETFD:
5938     case TARGET_F_GETFL:
5939     case TARGET_F_SETFL:
5940         ret = cmd;
5941         break;
5942     case TARGET_F_GETLK:
5943         ret = F_GETLK64;
5944         break;
5945     case TARGET_F_SETLK:
5946         ret = F_SETLK64;
5947         break;
5948     case TARGET_F_SETLKW:
5949         ret = F_SETLKW64;
5950         break;
5951     case TARGET_F_GETOWN:
5952         ret = F_GETOWN;
5953         break;
5954     case TARGET_F_SETOWN:
5955         ret = F_SETOWN;
5956         break;
5957     case TARGET_F_GETSIG:
5958         ret = F_GETSIG;
5959         break;
5960     case TARGET_F_SETSIG:
5961         ret = F_SETSIG;
5962         break;
5963 #if TARGET_ABI_BITS == 32
5964     case TARGET_F_GETLK64:
5965         ret = F_GETLK64;
5966         break;
5967     case TARGET_F_SETLK64:
5968         ret = F_SETLK64;
5969         break;
5970     case TARGET_F_SETLKW64:
5971         ret = F_SETLKW64;
5972         break;
5973 #endif
5974     case TARGET_F_SETLEASE:
5975         ret = F_SETLEASE;
5976         break;
5977     case TARGET_F_GETLEASE:
5978         ret = F_GETLEASE;
5979         break;
5980 #ifdef F_DUPFD_CLOEXEC
5981     case TARGET_F_DUPFD_CLOEXEC:
5982         ret = F_DUPFD_CLOEXEC;
5983         break;
5984 #endif
5985     case TARGET_F_NOTIFY:
5986         ret = F_NOTIFY;
5987         break;
5988 #ifdef F_GETOWN_EX
5989     case TARGET_F_GETOWN_EX:
5990         ret = F_GETOWN_EX;
5991         break;
5992 #endif
5993 #ifdef F_SETOWN_EX
5994     case TARGET_F_SETOWN_EX:
5995         ret = F_SETOWN_EX;
5996         break;
5997 #endif
5998 #ifdef F_SETPIPE_SZ
5999     case TARGET_F_SETPIPE_SZ:
6000         ret = F_SETPIPE_SZ;
6001         break;
6002     case TARGET_F_GETPIPE_SZ:
6003         ret = F_GETPIPE_SZ;
6004         break;
6005 #endif
6006     default:
6007         ret = -TARGET_EINVAL;
6008         break;
6009     }
6010 
6011 #if defined(__powerpc64__)
6012     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6013      * 14, which are not supported by the kernel.  The glibc fcntl call
6014      * actually adjusts them to 5, 6 and 7 before making the syscall().
6015      * Since we make the syscall directly, adjust to what the kernel supports.
6016      */
6017     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6018         ret -= F_GETLK64 - 5;
6019     }
6020 #endif
6021 
6022     return ret;
6023 }
6024 
6025 #define FLOCK_TRANSTBL \
6026     switch (type) { \
6027     TRANSTBL_CONVERT(F_RDLCK); \
6028     TRANSTBL_CONVERT(F_WRLCK); \
6029     TRANSTBL_CONVERT(F_UNLCK); \
6030     TRANSTBL_CONVERT(F_EXLCK); \
6031     TRANSTBL_CONVERT(F_SHLCK); \
6032     }
6033 
6034 static int target_to_host_flock(int type)
6035 {
6036 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6037     FLOCK_TRANSTBL
6038 #undef  TRANSTBL_CONVERT
6039     return -TARGET_EINVAL;
6040 }
6041 
6042 static int host_to_target_flock(int type)
6043 {
6044 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6045     FLOCK_TRANSTBL
6046 #undef  TRANSTBL_CONVERT
6047     /* If we don't know how to convert the value coming
6048      * from the host, copy it to the target field as-is.
6049      */
6050     return type;
6051 }
6052 
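/*
 * fcntl lock conversion: the host side always uses struct flock64 so that
 * 64-bit file offsets are preserved; the copy helpers below translate
 * between it and the various target flock layouts.
 */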
6053 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6054                                             abi_ulong target_flock_addr)
6055 {
6056     struct target_flock *target_fl;
6057     int l_type;
6058 
6059     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6060         return -TARGET_EFAULT;
6061     }
6062 
6063     __get_user(l_type, &target_fl->l_type);
6064     l_type = target_to_host_flock(l_type);
6065     if (l_type < 0) {
6066         return l_type;
6067     }
6068     fl->l_type = l_type;
6069     __get_user(fl->l_whence, &target_fl->l_whence);
6070     __get_user(fl->l_start, &target_fl->l_start);
6071     __get_user(fl->l_len, &target_fl->l_len);
6072     __get_user(fl->l_pid, &target_fl->l_pid);
6073     unlock_user_struct(target_fl, target_flock_addr, 0);
6074     return 0;
6075 }
6076 
6077 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6078                                           const struct flock64 *fl)
6079 {
6080     struct target_flock *target_fl;
6081     short l_type;
6082 
6083     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6084         return -TARGET_EFAULT;
6085     }
6086 
6087     l_type = host_to_target_flock(fl->l_type);
6088     __put_user(l_type, &target_fl->l_type);
6089     __put_user(fl->l_whence, &target_fl->l_whence);
6090     __put_user(fl->l_start, &target_fl->l_start);
6091     __put_user(fl->l_len, &target_fl->l_len);
6092     __put_user(fl->l_pid, &target_fl->l_pid);
6093     unlock_user_struct(target_fl, target_flock_addr, 1);
6094     return 0;
6095 }
6096 
6097 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6098 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6099 
6100 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6101 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6102                                                    abi_ulong target_flock_addr)
6103 {
6104     struct target_oabi_flock64 *target_fl;
6105     int l_type;
6106 
6107     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6108         return -TARGET_EFAULT;
6109     }
6110 
6111     __get_user(l_type, &target_fl->l_type);
6112     l_type = target_to_host_flock(l_type);
6113     if (l_type < 0) {
6114         return l_type;
6115     }
6116     fl->l_type = l_type;
6117     __get_user(fl->l_whence, &target_fl->l_whence);
6118     __get_user(fl->l_start, &target_fl->l_start);
6119     __get_user(fl->l_len, &target_fl->l_len);
6120     __get_user(fl->l_pid, &target_fl->l_pid);
6121     unlock_user_struct(target_fl, target_flock_addr, 0);
6122     return 0;
6123 }
6124 
6125 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6126                                                  const struct flock64 *fl)
6127 {
6128     struct target_oabi_flock64 *target_fl;
6129     short l_type;
6130 
6131     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6132         return -TARGET_EFAULT;
6133     }
6134 
6135     l_type = host_to_target_flock(fl->l_type);
6136     __put_user(l_type, &target_fl->l_type);
6137     __put_user(fl->l_whence, &target_fl->l_whence);
6138     __put_user(fl->l_start, &target_fl->l_start);
6139     __put_user(fl->l_len, &target_fl->l_len);
6140     __put_user(fl->l_pid, &target_fl->l_pid);
6141     unlock_user_struct(target_fl, target_flock_addr, 1);
6142     return 0;
6143 }
6144 #endif
6145 
6146 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6147                                               abi_ulong target_flock_addr)
6148 {
6149     struct target_flock64 *target_fl;
6150     int l_type;
6151 
6152     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6153         return -TARGET_EFAULT;
6154     }
6155 
6156     __get_user(l_type, &target_fl->l_type);
6157     l_type = target_to_host_flock(l_type);
6158     if (l_type < 0) {
6159         return l_type;
6160     }
6161     fl->l_type = l_type;
6162     __get_user(fl->l_whence, &target_fl->l_whence);
6163     __get_user(fl->l_start, &target_fl->l_start);
6164     __get_user(fl->l_len, &target_fl->l_len);
6165     __get_user(fl->l_pid, &target_fl->l_pid);
6166     unlock_user_struct(target_fl, target_flock_addr, 0);
6167     return 0;
6168 }
6169 
6170 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6171                                             const struct flock64 *fl)
6172 {
6173     struct target_flock64 *target_fl;
6174     short l_type;
6175 
6176     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6177         return -TARGET_EFAULT;
6178     }
6179 
6180     l_type = host_to_target_flock(fl->l_type);
6181     __put_user(l_type, &target_fl->l_type);
6182     __put_user(fl->l_whence, &target_fl->l_whence);
6183     __put_user(fl->l_start, &target_fl->l_start);
6184     __put_user(fl->l_len, &target_fl->l_len);
6185     __put_user(fl->l_pid, &target_fl->l_pid);
6186     unlock_user_struct(target_fl, target_flock_addr, 1);
6187     return 0;
6188 }
6189 
6190 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6191 {
6192     struct flock64 fl64;
6193 #ifdef F_GETOWN_EX
6194     struct f_owner_ex fox;
6195     struct target_f_owner_ex *target_fox;
6196 #endif
6197     abi_long ret;
6198     int host_cmd = target_to_host_fcntl_cmd(cmd);
6199 
6200     if (host_cmd == -TARGET_EINVAL)
6201         return host_cmd;
6202 
6203     switch(cmd) {
6204     case TARGET_F_GETLK:
6205         ret = copy_from_user_flock(&fl64, arg);
6206         if (ret) {
6207             return ret;
6208         }
6209         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6210         if (ret == 0) {
6211             ret = copy_to_user_flock(arg, &fl64);
6212         }
6213         break;
6214 
6215     case TARGET_F_SETLK:
6216     case TARGET_F_SETLKW:
6217         ret = copy_from_user_flock(&fl64, arg);
6218         if (ret) {
6219             return ret;
6220         }
6221         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6222         break;
6223 
6224     case TARGET_F_GETLK64:
6225         ret = copy_from_user_flock64(&fl64, arg);
6226         if (ret) {
6227             return ret;
6228         }
6229         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6230         if (ret == 0) {
6231             ret = copy_to_user_flock64(arg, &fl64);
6232         }
6233         break;
6234     case TARGET_F_SETLK64:
6235     case TARGET_F_SETLKW64:
6236         ret = copy_from_user_flock64(&fl64, arg);
6237         if (ret) {
6238             return ret;
6239         }
6240         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6241         break;
6242 
6243     case TARGET_F_GETFL:
6244         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6245         if (ret >= 0) {
6246             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6247         }
6248         break;
6249 
6250     case TARGET_F_SETFL:
6251         ret = get_errno(safe_fcntl(fd, host_cmd,
6252                                    target_to_host_bitmask(arg,
6253                                                           fcntl_flags_tbl)));
6254         break;
6255 
6256 #ifdef F_GETOWN_EX
6257     case TARGET_F_GETOWN_EX:
6258         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6259         if (ret >= 0) {
6260             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6261                 return -TARGET_EFAULT;
6262             target_fox->type = tswap32(fox.type);
6263             target_fox->pid = tswap32(fox.pid);
6264             unlock_user_struct(target_fox, arg, 1);
6265         }
6266         break;
6267 #endif
6268 
6269 #ifdef F_SETOWN_EX
6270     case TARGET_F_SETOWN_EX:
6271         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6272             return -TARGET_EFAULT;
6273         fox.type = tswap32(target_fox->type);
6274         fox.pid = tswap32(target_fox->pid);
6275         unlock_user_struct(target_fox, arg, 0);
6276         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6277         break;
6278 #endif
6279 
6280     case TARGET_F_SETOWN:
6281     case TARGET_F_GETOWN:
6282     case TARGET_F_SETSIG:
6283     case TARGET_F_GETSIG:
6284     case TARGET_F_SETLEASE:
6285     case TARGET_F_GETLEASE:
6286     case TARGET_F_SETPIPE_SZ:
6287     case TARGET_F_GETPIPE_SZ:
6288         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6289         break;
6290 
6291     default:
6292         ret = get_errno(safe_fcntl(fd, cmd, arg));
6293         break;
6294     }
6295     return ret;
6296 }
6297 
6298 #ifdef USE_UID16
6299 
6300 static inline int high2lowuid(int uid)
6301 {
6302     if (uid > 65535)
6303         return 65534;
6304     else
6305         return uid;
6306 }
6307 
6308 static inline int high2lowgid(int gid)
6309 {
6310     if (gid > 65535)
6311         return 65534;
6312     else
6313         return gid;
6314 }
6315 
6316 static inline int low2highuid(int uid)
6317 {
6318     if ((int16_t)uid == -1)
6319         return -1;
6320     else
6321         return uid;
6322 }
6323 
6324 static inline int low2highgid(int gid)
6325 {
6326     if ((int16_t)gid == -1)
6327         return -1;
6328     else
6329         return gid;
6330 }
6331 static inline int tswapid(int id)
6332 {
6333     return tswap16(id);
6334 }
6335 
6336 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6337 
6338 #else /* !USE_UID16 */
6339 static inline int high2lowuid(int uid)
6340 {
6341     return uid;
6342 }
6343 static inline int high2lowgid(int gid)
6344 {
6345     return gid;
6346 }
6347 static inline int low2highuid(int uid)
6348 {
6349     return uid;
6350 }
6351 static inline int low2highgid(int gid)
6352 {
6353     return gid;
6354 }
6355 static inline int tswapid(int id)
6356 {
6357     return tswap32(id);
6358 }
6359 
6360 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6361 
6362 #endif /* USE_UID16 */
6363 
6364 /* We must do direct syscalls for setting UID/GID, because we want to
6365  * implement the Linux system call semantics of "change only for this thread",
6366  * not the libc/POSIX semantics of "change for all threads in process".
6367  * (See http://ewontfix.com/17/ for more details.)
6368  * We use the 32-bit version of the syscalls if present; if it is not
6369  * then either the host architecture supports 32-bit UIDs natively with
6370  * the standard syscall, or the 16-bit UID is the best we can do.
6371  */
6372 #ifdef __NR_setuid32
6373 #define __NR_sys_setuid __NR_setuid32
6374 #else
6375 #define __NR_sys_setuid __NR_setuid
6376 #endif
6377 #ifdef __NR_setgid32
6378 #define __NR_sys_setgid __NR_setgid32
6379 #else
6380 #define __NR_sys_setgid __NR_setgid
6381 #endif
6382 #ifdef __NR_setresuid32
6383 #define __NR_sys_setresuid __NR_setresuid32
6384 #else
6385 #define __NR_sys_setresuid __NR_setresuid
6386 #endif
6387 #ifdef __NR_setresgid32
6388 #define __NR_sys_setresgid __NR_setresgid32
6389 #else
6390 #define __NR_sys_setresgid __NR_setresgid
6391 #endif
6392 
6393 _syscall1(int, sys_setuid, uid_t, uid)
6394 _syscall1(int, sys_setgid, gid_t, gid)
6395 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6396 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
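/*
 * A minimal usage sketch (the target_* argument names here are hypothetical,
 * not taken from this file): a 16-bit guest setresuid would widen its
 * arguments with low2highuid() so that the (uint16_t)-1 "leave unchanged"
 * sentinel keeps its meaning, and then call the raw per-thread wrapper
 * instead of the libc function:
 *
 *     ret = get_errno(sys_setresuid(low2highuid(target_ruid),
 *                                   low2highuid(target_euid),
 *                                   low2highuid(target_suid)));
 *
 * In the other direction, high2lowuid()/high2lowgid() clamp anything above
 * 65535 to 65534 so the value still fits in the guest's 16-bit ID type.
 */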
6397 
6398 void syscall_init(void)
6399 {
6400     IOCTLEntry *ie;
6401     const argtype *arg_type;
6402     int size;
6403     int i;
6404 
6405     thunk_init(STRUCT_MAX);
6406 
6407 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6408 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6409 #include "syscall_types.h"
6410 #undef STRUCT
6411 #undef STRUCT_SPECIAL
6412 
6413     /* Build target_to_host_errno_table[] from
6414      * host_to_target_errno_table[]. */
6415     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6416         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6417     }
6418 
6419     /* We patch the ioctl size if necessary.  We rely on the fact that
6420        no ioctl has all bits set to '1' in its size field. */
6421     ie = ioctl_entries;
6422     while (ie->target_cmd != 0) {
6423         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6424             TARGET_IOC_SIZEMASK) {
6425             arg_type = ie->arg_type;
6426             if (arg_type[0] != TYPE_PTR) {
6427                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6428                         ie->target_cmd);
6429                 exit(1);
6430             }
6431             arg_type++;
6432             size = thunk_type_size(arg_type, 0);
6433             ie->target_cmd = (ie->target_cmd &
6434                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6435                 (size << TARGET_IOC_SIZESHIFT);
6436         }
6437 
6438         /* automatic consistency check if same arch */
6439 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6440     (defined(__x86_64__) && defined(TARGET_X86_64))
6441         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6442             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6443                     ie->name, ie->target_cmd, ie->host_cmd);
6444         }
6445 #endif
6446         ie++;
6447     }
6448 }
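/*
 * Sketch of the size patching above (struct my_args is a made-up name): an
 * IOCTLEntry may be declared with an all-ones size field as a "fill me in"
 * marker, and syscall_init() rewrites it using the thunk size of the
 * structure the argument points to:
 *
 *     size = thunk_type_size(arg_type, 0);     // e.g. sizeof(struct my_args)
 *     cmd  = (cmd & ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
 *            | (size << TARGET_IOC_SIZESHIFT);
 *
 * This is unambiguous only because, as the comment above notes, no real ioctl
 * uses the all-ones pattern as its genuine size.
 */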
6449 
6450 #if TARGET_ABI_BITS == 32
6451 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6452 {
6453 #ifdef TARGET_WORDS_BIGENDIAN
6454     return ((uint64_t)word0 << 32) | word1;
6455 #else
6456     return ((uint64_t)word1 << 32) | word0;
6457 #endif
6458 }
6459 #else /* TARGET_ABI_BITS == 32 */
6460 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6461 {
6462     return word0;
6463 }
6464 #endif /* TARGET_ABI_BITS != 32 */
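/*
 * Worked example for the 32-bit ABI case: a guest truncate64() call passing
 * the 64-bit offset 0x123456789ULL splits it across two 32-bit register
 * words.  A big-endian target puts the high half in word0, a little-endian
 * target puts it in word1, so either way
 *
 *     target_offset64(word0, word1) == 0x123456789ULL
 *
 * Some ABIs additionally pad the argument list so that such pairs are
 * register-aligned, which is what the regpairs_aligned() checks below
 * compensate for by shifting the arguments along by one.
 */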
6465 
6466 #ifdef TARGET_NR_truncate64
6467 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6468                                          abi_long arg2,
6469                                          abi_long arg3,
6470                                          abi_long arg4)
6471 {
6472     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6473         arg2 = arg3;
6474         arg3 = arg4;
6475     }
6476     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6477 }
6478 #endif
6479 
6480 #ifdef TARGET_NR_ftruncate64
6481 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6482                                           abi_long arg2,
6483                                           abi_long arg3,
6484                                           abi_long arg4)
6485 {
6486     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6487         arg2 = arg3;
6488         arg3 = arg4;
6489     }
6490     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6491 }
6492 #endif
6493 
6494 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6495                                                  abi_ulong target_addr)
6496 {
6497     struct target_itimerspec *target_itspec;
6498 
6499     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6500         return -TARGET_EFAULT;
6501     }
6502 
6503     host_itspec->it_interval.tv_sec =
6504                             tswapal(target_itspec->it_interval.tv_sec);
6505     host_itspec->it_interval.tv_nsec =
6506                             tswapal(target_itspec->it_interval.tv_nsec);
6507     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6508     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6509 
6510     unlock_user_struct(target_itspec, target_addr, 1);
6511     return 0;
6512 }
6513 
6514 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6515                                                struct itimerspec *host_its)
6516 {
6517     struct target_itimerspec *target_itspec;
6518 
6519     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6520         return -TARGET_EFAULT;
6521     }
6522 
6523     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6524     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6525 
6526     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6527     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6528 
6529     unlock_user_struct(target_itspec, target_addr, 0);
6530     return 0;
6531 }
6532 
6533 static inline abi_long target_to_host_timex(struct timex *host_tx,
6534                                             abi_long target_addr)
6535 {
6536     struct target_timex *target_tx;
6537 
6538     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6539         return -TARGET_EFAULT;
6540     }
6541 
6542     __get_user(host_tx->modes, &target_tx->modes);
6543     __get_user(host_tx->offset, &target_tx->offset);
6544     __get_user(host_tx->freq, &target_tx->freq);
6545     __get_user(host_tx->maxerror, &target_tx->maxerror);
6546     __get_user(host_tx->esterror, &target_tx->esterror);
6547     __get_user(host_tx->status, &target_tx->status);
6548     __get_user(host_tx->constant, &target_tx->constant);
6549     __get_user(host_tx->precision, &target_tx->precision);
6550     __get_user(host_tx->tolerance, &target_tx->tolerance);
6551     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6552     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6553     __get_user(host_tx->tick, &target_tx->tick);
6554     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6555     __get_user(host_tx->jitter, &target_tx->jitter);
6556     __get_user(host_tx->shift, &target_tx->shift);
6557     __get_user(host_tx->stabil, &target_tx->stabil);
6558     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6559     __get_user(host_tx->calcnt, &target_tx->calcnt);
6560     __get_user(host_tx->errcnt, &target_tx->errcnt);
6561     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6562     __get_user(host_tx->tai, &target_tx->tai);
6563 
6564     unlock_user_struct(target_tx, target_addr, 0);
6565     return 0;
6566 }
6567 
6568 static inline abi_long host_to_target_timex(abi_long target_addr,
6569                                             struct timex *host_tx)
6570 {
6571     struct target_timex *target_tx;
6572 
6573     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6574         return -TARGET_EFAULT;
6575     }
6576 
6577     __put_user(host_tx->modes, &target_tx->modes);
6578     __put_user(host_tx->offset, &target_tx->offset);
6579     __put_user(host_tx->freq, &target_tx->freq);
6580     __put_user(host_tx->maxerror, &target_tx->maxerror);
6581     __put_user(host_tx->esterror, &target_tx->esterror);
6582     __put_user(host_tx->status, &target_tx->status);
6583     __put_user(host_tx->constant, &target_tx->constant);
6584     __put_user(host_tx->precision, &target_tx->precision);
6585     __put_user(host_tx->tolerance, &target_tx->tolerance);
6586     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6587     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6588     __put_user(host_tx->tick, &target_tx->tick);
6589     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6590     __put_user(host_tx->jitter, &target_tx->jitter);
6591     __put_user(host_tx->shift, &target_tx->shift);
6592     __put_user(host_tx->stabil, &target_tx->stabil);
6593     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6594     __put_user(host_tx->calcnt, &target_tx->calcnt);
6595     __put_user(host_tx->errcnt, &target_tx->errcnt);
6596     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6597     __put_user(host_tx->tai, &target_tx->tai);
6598 
6599     unlock_user_struct(target_tx, target_addr, 1);
6600     return 0;
6601 }
6602 
6603 
6604 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6605                                                abi_ulong target_addr)
6606 {
6607     struct target_sigevent *target_sevp;
6608 
6609     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6610         return -TARGET_EFAULT;
6611     }
6612 
6613     /* This union is awkward on 64 bit systems because it has a 32 bit
6614      * integer and a pointer in it; we follow the conversion approach
6615      * used for handling sigval types in signal.c so the guest should get
6616      * the correct value back even if we did a 64 bit byteswap and it's
6617      * using the 32 bit integer.
6618      */
6619     host_sevp->sigev_value.sival_ptr =
6620         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6621     host_sevp->sigev_signo =
6622         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6623     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6624     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6625 
6626     unlock_user_struct(target_sevp, target_addr, 1);
6627     return 0;
6628 }
6629 
6630 #if defined(TARGET_NR_mlockall)
6631 static inline int target_to_host_mlockall_arg(int arg)
6632 {
6633     int result = 0;
6634 
6635     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6636         result |= MCL_CURRENT;
6637     }
6638     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6639         result |= MCL_FUTURE;
6640     }
6641     return result;
6642 }
6643 #endif
6644 
6645 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6646      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6647      defined(TARGET_NR_newfstatat))
6648 static inline abi_long host_to_target_stat64(void *cpu_env,
6649                                              abi_ulong target_addr,
6650                                              struct stat *host_st)
6651 {
6652 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6653     if (((CPUARMState *)cpu_env)->eabi) {
6654         struct target_eabi_stat64 *target_st;
6655 
6656         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6657             return -TARGET_EFAULT;
6658         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6659         __put_user(host_st->st_dev, &target_st->st_dev);
6660         __put_user(host_st->st_ino, &target_st->st_ino);
6661 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6662         __put_user(host_st->st_ino, &target_st->__st_ino);
6663 #endif
6664         __put_user(host_st->st_mode, &target_st->st_mode);
6665         __put_user(host_st->st_nlink, &target_st->st_nlink);
6666         __put_user(host_st->st_uid, &target_st->st_uid);
6667         __put_user(host_st->st_gid, &target_st->st_gid);
6668         __put_user(host_st->st_rdev, &target_st->st_rdev);
6669         __put_user(host_st->st_size, &target_st->st_size);
6670         __put_user(host_st->st_blksize, &target_st->st_blksize);
6671         __put_user(host_st->st_blocks, &target_st->st_blocks);
6672         __put_user(host_st->st_atime, &target_st->target_st_atime);
6673         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6674         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6675 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6676         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6677         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6678         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6679 #endif
6680         unlock_user_struct(target_st, target_addr, 1);
6681     } else
6682 #endif
6683     {
6684 #if defined(TARGET_HAS_STRUCT_STAT64)
6685         struct target_stat64 *target_st;
6686 #else
6687         struct target_stat *target_st;
6688 #endif
6689 
6690         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6691             return -TARGET_EFAULT;
6692         memset(target_st, 0, sizeof(*target_st));
6693         __put_user(host_st->st_dev, &target_st->st_dev);
6694         __put_user(host_st->st_ino, &target_st->st_ino);
6695 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6696         __put_user(host_st->st_ino, &target_st->__st_ino);
6697 #endif
6698         __put_user(host_st->st_mode, &target_st->st_mode);
6699         __put_user(host_st->st_nlink, &target_st->st_nlink);
6700         __put_user(host_st->st_uid, &target_st->st_uid);
6701         __put_user(host_st->st_gid, &target_st->st_gid);
6702         __put_user(host_st->st_rdev, &target_st->st_rdev);
6703         /* XXX: better use of kernel struct */
6704         __put_user(host_st->st_size, &target_st->st_size);
6705         __put_user(host_st->st_blksize, &target_st->st_blksize);
6706         __put_user(host_st->st_blocks, &target_st->st_blocks);
6707         __put_user(host_st->st_atime, &target_st->target_st_atime);
6708         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6709         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6710 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6711         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6712         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6713         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6714 #endif
6715         unlock_user_struct(target_st, target_addr, 1);
6716     }
6717 
6718     return 0;
6719 }
6720 #endif
6721 
6722 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6723 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6724                                             abi_ulong target_addr)
6725 {
6726     struct target_statx *target_stx;
6727 
6728     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6729         return -TARGET_EFAULT;
6730     }
6731     memset(target_stx, 0, sizeof(*target_stx));
6732 
6733     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6734     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6735     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6736     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6737     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6738     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6739     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6740     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6741     __put_user(host_stx->stx_size, &target_stx->stx_size);
6742     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6743     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6744     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6745     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6746     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6747     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6748     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6749     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6750     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6751     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6752     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6753     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6754     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6755     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6756 
6757     unlock_user_struct(target_stx, target_addr, 1);
6758 
6759     return 0;
6760 }
6761 #endif
6762 
6763 
6764 /* ??? Using host futex calls even when target atomic operations
6765    are not really atomic probably breaks things.  However, implementing
6766    futexes locally would make futexes shared between multiple processes
6767    tricky.  They are probably useless in that case anyway, because guest
6768    atomic operations won't work either.  */
6769 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6770                     target_ulong uaddr2, int val3)
6771 {
6772     struct timespec ts, *pts;
6773     int base_op;
6774 
6775     /* ??? We assume FUTEX_* constants are the same on both host
6776        and target.  */
6777 #ifdef FUTEX_CMD_MASK
6778     base_op = op & FUTEX_CMD_MASK;
6779 #else
6780     base_op = op;
6781 #endif
6782     switch (base_op) {
6783     case FUTEX_WAIT:
6784     case FUTEX_WAIT_BITSET:
6785         if (timeout) {
6786             pts = &ts;
6787             target_to_host_timespec(pts, timeout);
6788         } else {
6789             pts = NULL;
6790         }
6791         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6792                          pts, NULL, val3));
6793     case FUTEX_WAKE:
6794         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6795     case FUTEX_FD:
6796         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6797     case FUTEX_REQUEUE:
6798     case FUTEX_CMP_REQUEUE:
6799     case FUTEX_WAKE_OP:
6800         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6801            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6802            But the prototype takes a `struct timespec *'; insert casts
6803            to satisfy the compiler.  We do not need to tswap TIMEOUT
6804            since it's not compared to guest memory.  */
6805         pts = (struct timespec *)(uintptr_t) timeout;
6806         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6807                                     g2h(uaddr2),
6808                                     (base_op == FUTEX_CMP_REQUEUE
6809                                      ? tswap32(val3)
6810                                      : val3)));
6811     default:
6812         return -TARGET_ENOSYS;
6813     }
6814 }
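/*
 * A note on the byte swapping above: for FUTEX_WAIT the host kernel compares
 * the 32-bit word at g2h(uaddr) -- which the guest wrote in *target* byte
 * order -- against 'val', so the value is converted first:
 *
 *     safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
 *
 * FUTEX_WAKE passes a plain wake count, and for the requeue/wake-op commands
 * the timeout slot actually carries an untyped integer, so neither of those
 * needs a swap; only FUTEX_CMP_REQUEUE's val3 is compared against guest
 * memory and is therefore swapped as well.
 */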
6815 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6816 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6817                                      abi_long handle, abi_long mount_id,
6818                                      abi_long flags)
6819 {
6820     struct file_handle *target_fh;
6821     struct file_handle *fh;
6822     int mid = 0;
6823     abi_long ret;
6824     char *name;
6825     unsigned int size, total_size;
6826 
6827     if (get_user_s32(size, handle)) {
6828         return -TARGET_EFAULT;
6829     }
6830 
6831     name = lock_user_string(pathname);
6832     if (!name) {
6833         return -TARGET_EFAULT;
6834     }
6835 
6836     total_size = sizeof(struct file_handle) + size;
6837     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6838     if (!target_fh) {
6839         unlock_user(name, pathname, 0);
6840         return -TARGET_EFAULT;
6841     }
6842 
6843     fh = g_malloc0(total_size);
6844     fh->handle_bytes = size;
6845 
6846     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6847     unlock_user(name, pathname, 0);
6848 
6849     /* man name_to_handle_at(2):
6850      * Other than the use of the handle_bytes field, the caller should treat
6851      * the file_handle structure as an opaque data type
6852      */
6853 
6854     memcpy(target_fh, fh, total_size);
6855     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6856     target_fh->handle_type = tswap32(fh->handle_type);
6857     g_free(fh);
6858     unlock_user(target_fh, handle, total_size);
6859 
6860     if (put_user_s32(mid, mount_id)) {
6861         return -TARGET_EFAULT;
6862     }
6863 
6864     return ret;
6865 
6866 }
6867 #endif
6868 
6869 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6870 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6871                                      abi_long flags)
6872 {
6873     struct file_handle *target_fh;
6874     struct file_handle *fh;
6875     unsigned int size, total_size;
6876     abi_long ret;
6877 
6878     if (get_user_s32(size, handle)) {
6879         return -TARGET_EFAULT;
6880     }
6881 
6882     total_size = sizeof(struct file_handle) + size;
6883     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6884     if (!target_fh) {
6885         return -TARGET_EFAULT;
6886     }
6887 
6888     fh = g_memdup(target_fh, total_size);
6889     fh->handle_bytes = size;
6890     fh->handle_type = tswap32(target_fh->handle_type);
6891 
6892     ret = get_errno(open_by_handle_at(mount_fd, fh,
6893                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6894 
6895     g_free(fh);
6896 
6897     unlock_user(target_fh, handle, total_size);
6898 
6899     return ret;
6900 }
6901 #endif
6902 
6903 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6904 
6905 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6906 {
6907     int host_flags;
6908     target_sigset_t *target_mask;
6909     sigset_t host_mask;
6910     abi_long ret;
6911 
6912     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6913         return -TARGET_EINVAL;
6914     }
6915     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6916         return -TARGET_EFAULT;
6917     }
6918 
6919     target_to_host_sigset(&host_mask, target_mask);
6920 
6921     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6922 
6923     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6924     if (ret >= 0) {
6925         fd_trans_register(ret, &target_signalfd_trans);
6926     }
6927 
6928     unlock_user_struct(target_mask, mask, 0);
6929 
6930     return ret;
6931 }
6932 #endif
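/*
 * Follow-up to the fd_trans_register() call above: the descriptor returned
 * by signalfd() yields host-format signalfd_siginfo records, so it is tagged
 * with target_signalfd_trans and the generic read path (see TARGET_NR_read
 * further down) converts the data before the guest sees it:
 *
 *     ret = get_errno(safe_read(fd, p, len));
 *     if (ret >= 0 && fd_trans_host_to_target_data(fd)) {
 *         ret = fd_trans_host_to_target_data(fd)(p, ret);
 *     }
 */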
6933 
6934 /* Map host to target signal numbers for the wait family of syscalls.
6935    Assume all other status bits are the same.  */
6936 int host_to_target_waitstatus(int status)
6937 {
6938     if (WIFSIGNALED(status)) {
6939         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6940     }
6941     if (WIFSTOPPED(status)) {
6942         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6943                | (status & 0xff);
6944     }
6945     return status;
6946 }
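/*
 * Example of the bit layout being preserved above: in the kernel's wait
 * status encoding the low 7 bits hold the terminating signal (a low byte of
 * 0x7f marks a stopped child, whose stop signal then sits in bits 8-15).  So
 * for a child killed by host SIGUSR1 only the signal number is rewritten:
 *
 *     status = host_to_target_signal(SIGUSR1) | (status & ~0x7f);
 *
 * while a plain exit status, whose signal bits are all zero, passes through
 * unchanged.
 */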
6947 
6948 static int open_self_cmdline(void *cpu_env, int fd)
6949 {
6950     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6951     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6952     int i;
6953 
6954     for (i = 0; i < bprm->argc; i++) {
6955         size_t len = strlen(bprm->argv[i]) + 1;
6956 
6957         if (write(fd, bprm->argv[i], len) != len) {
6958             return -1;
6959         }
6960     }
6961 
6962     return 0;
6963 }
6964 
6965 static int open_self_maps(void *cpu_env, int fd)
6966 {
6967     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6968     TaskState *ts = cpu->opaque;
6969     FILE *fp;
6970     char *line = NULL;
6971     size_t len = 0;
6972     ssize_t read;
6973 
6974     fp = fopen("/proc/self/maps", "r");
6975     if (fp == NULL) {
6976         return -1;
6977     }
6978 
6979     while ((read = getline(&line, &len, fp)) != -1) {
6980         int fields, dev_maj, dev_min, inode;
6981         uint64_t min, max, offset;
6982         char flag_r, flag_w, flag_x, flag_p;
6983         char path[512] = "";
6984         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6985                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6986                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6987 
6988         if ((fields < 10) || (fields > 11)) {
6989             continue;
6990         }
6991         if (h2g_valid(min)) {
6992             int flags = page_get_flags(h2g(min));
6993             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6994             if (page_check_range(h2g(min), max - min, flags) == -1) {
6995                 continue;
6996             }
6997             if (h2g(min) == ts->info->stack_limit) {
6998                 pstrcpy(path, sizeof(path), "      [stack]");
6999             }
7000             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7001                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7002                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7003                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7004                     path[0] ? "         " : "", path);
7005         }
7006     }
7007 
7008     free(line);
7009     fclose(fp);
7010 
7011     return 0;
7012 }
7013 
7014 static int open_self_stat(void *cpu_env, int fd)
7015 {
7016     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7017     TaskState *ts = cpu->opaque;
7018     abi_ulong start_stack = ts->info->start_stack;
7019     int i;
7020 
7021     for (i = 0; i < 44; i++) {
7022       char buf[128];
7023       int len;
7024       uint64_t val = 0;
7025 
7026       if (i == 0) {
7027         /* pid */
7028         val = getpid();
7029         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7030       } else if (i == 1) {
7031         /* app name */
7032         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7033       } else if (i == 27) {
7034         /* stack bottom */
7035         val = start_stack;
7036         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7037       } else {
7038         /* for the rest, there is MasterCard */
7039         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7040       }
7041 
7042       len = strlen(buf);
7043       if (write(fd, buf, len) != len) {
7044           return -1;
7045       }
7046     }
7047 
7048     return 0;
7049 }
7050 
7051 static int open_self_auxv(void *cpu_env, int fd)
7052 {
7053     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7054     TaskState *ts = cpu->opaque;
7055     abi_ulong auxv = ts->info->saved_auxv;
7056     abi_ulong len = ts->info->auxv_len;
7057     char *ptr;
7058 
7059     /*
7060      * The auxiliary vector is stored on the target process's stack.
7061      * Read the whole auxv vector and copy it out to the file.
7062      */
7063     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7064     if (ptr != NULL) {
7065         while (len > 0) {
7066             ssize_t r;
7067             r = write(fd, ptr, len);
7068             if (r <= 0) {
7069                 break;
7070             }
7071             len -= r;
7072             ptr += r;
7073         }
7074         lseek(fd, 0, SEEK_SET);
7075         unlock_user(ptr, auxv, len);
7076     }
7077 
7078     return 0;
7079 }
7080 
7081 static int is_proc_myself(const char *filename, const char *entry)
7082 {
7083     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7084         filename += strlen("/proc/");
7085         if (!strncmp(filename, "self/", strlen("self/"))) {
7086             filename += strlen("self/");
7087         } else if (*filename >= '1' && *filename <= '9') {
7088             char myself[80];
7089             snprintf(myself, sizeof(myself), "%d/", getpid());
7090             if (!strncmp(filename, myself, strlen(myself))) {
7091                 filename += strlen(myself);
7092             } else {
7093                 return 0;
7094             }
7095         } else {
7096             return 0;
7097         }
7098         if (!strcmp(filename, entry)) {
7099             return 1;
7100         }
7101     }
7102     return 0;
7103 }
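/*
 * Matching examples for the helper above, assuming our own pid is 1234:
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")  -> 1   (pid matches getpid())
 *     is_proc_myself("/proc/4321/maps", "maps")  -> 0   (some other process)
 *     is_proc_myself("/proc/meminfo",   "maps")  -> 0
 */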
7104 
7105 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7106     defined(TARGET_SPARC) || defined(TARGET_M68K)
7107 static int is_proc(const char *filename, const char *entry)
7108 {
7109     return strcmp(filename, entry) == 0;
7110 }
7111 #endif
7112 
7113 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7114 static int open_net_route(void *cpu_env, int fd)
7115 {
7116     FILE *fp;
7117     char *line = NULL;
7118     size_t len = 0;
7119     ssize_t read;
7120 
7121     fp = fopen("/proc/net/route", "r");
7122     if (fp == NULL) {
7123         return -1;
7124     }
7125 
7126     /* read header */
7127 
7128     read = getline(&line, &len, fp);
7129     dprintf(fd, "%s", line);
7130 
7131     /* read routes */
7132 
7133     while ((read = getline(&line, &len, fp)) != -1) {
7134         char iface[16];
7135         uint32_t dest, gw, mask;
7136         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7137         int fields;
7138 
7139         fields = sscanf(line,
7140                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7141                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7142                         &mask, &mtu, &window, &irtt);
7143         if (fields != 11) {
7144             continue;
7145         }
7146         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7147                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7148                 metric, tswap32(mask), mtu, window, irtt);
7149     }
7150 
7151     free(line);
7152     fclose(fp);
7153 
7154     return 0;
7155 }
7156 #endif
7157 
7158 #if defined(TARGET_SPARC)
7159 static int open_cpuinfo(void *cpu_env, int fd)
7160 {
7161     dprintf(fd, "type\t\t: sun4u\n");
7162     return 0;
7163 }
7164 #endif
7165 
7166 #if defined(TARGET_M68K)
7167 static int open_hardware(void *cpu_env, int fd)
7168 {
7169     dprintf(fd, "Model:\t\tqemu-m68k\n");
7170     return 0;
7171 }
7172 #endif
7173 
7174 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7175 {
7176     struct fake_open {
7177         const char *filename;
7178         int (*fill)(void *cpu_env, int fd);
7179         int (*cmp)(const char *s1, const char *s2);
7180     };
7181     const struct fake_open *fake_open;
7182     static const struct fake_open fakes[] = {
7183         { "maps", open_self_maps, is_proc_myself },
7184         { "stat", open_self_stat, is_proc_myself },
7185         { "auxv", open_self_auxv, is_proc_myself },
7186         { "cmdline", open_self_cmdline, is_proc_myself },
7187 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7188         { "/proc/net/route", open_net_route, is_proc },
7189 #endif
7190 #if defined(TARGET_SPARC)
7191         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7192 #endif
7193 #if defined(TARGET_M68K)
7194         { "/proc/hardware", open_hardware, is_proc },
7195 #endif
7196         { NULL, NULL, NULL }
7197     };
7198 
7199     if (is_proc_myself(pathname, "exe")) {
7200         int execfd = qemu_getauxval(AT_EXECFD);
7201         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7202     }
7203 
7204     for (fake_open = fakes; fake_open->filename; fake_open++) {
7205         if (fake_open->cmp(pathname, fake_open->filename)) {
7206             break;
7207         }
7208     }
7209 
7210     if (fake_open->filename) {
7211         const char *tmpdir;
7212         char filename[PATH_MAX];
7213         int fd, r;
7214 
7215         /* create temporary file to map stat to */
7216         tmpdir = getenv("TMPDIR");
7217         if (!tmpdir)
7218             tmpdir = "/tmp";
7219         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7220         fd = mkstemp(filename);
7221         if (fd < 0) {
7222             return fd;
7223         }
7224         unlink(filename);
7225 
7226         if ((r = fake_open->fill(cpu_env, fd))) {
7227             int e = errno;
7228             close(fd);
7229             errno = e;
7230             return r;
7231         }
7232         lseek(fd, 0, SEEK_SET);
7233 
7234         return fd;
7235     }
7236 
7237     return safe_openat(dirfd, path(pathname), flags, mode);
7238 }
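/*
 * Flow sketch for the faked /proc entries above: when a guest opens, say,
 * "/proc/self/stat", the path matches the fakes[] table, so rather than
 * exposing the host's file QEMU builds a throwaway file holding the guest's
 * view of the contents and returns that descriptor:
 *
 *     fd = mkstemp(filename);
 *     unlink(filename);
 *     fake_open->fill(cpu_env, fd);   // e.g. open_self_stat()
 *     lseek(fd, 0, SEEK_SET);
 *
 * Paths that match nothing in the table fall through to
 * safe_openat(dirfd, path(pathname), ...) on the real file.
 */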
7239 
7240 #define TIMER_MAGIC 0x0caf0000
7241 #define TIMER_MAGIC_MASK 0xffff0000
7242 
7243 /* Convert QEMU provided timer ID back to internal 16bit index format */
7244 static target_timer_t get_timer_id(abi_long arg)
7245 {
7246     target_timer_t timerid = arg;
7247 
7248     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7249         return -TARGET_EINVAL;
7250     }
7251 
7252     timerid &= 0xffff;
7253 
7254     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7255         return -TARGET_EINVAL;
7256     }
7257 
7258     return timerid;
7259 }
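/*
 * Example of the ID scheme above: timer IDs handed out to the guest carry
 * TIMER_MAGIC in their upper half, with the slot index below it, so
 *
 *     get_timer_id(0x0caf0003)  ->  3                 (assuming slot 3 exists)
 *     get_timer_id(0x00000003)  ->  -TARGET_EINVAL    (magic missing)
 *
 * and any index at or beyond ARRAY_SIZE(g_posix_timers) is rejected the same
 * way.
 */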
7260 
7261 static int target_to_host_cpu_mask(unsigned long *host_mask,
7262                                    size_t host_size,
7263                                    abi_ulong target_addr,
7264                                    size_t target_size)
7265 {
7266     unsigned target_bits = sizeof(abi_ulong) * 8;
7267     unsigned host_bits = sizeof(*host_mask) * 8;
7268     abi_ulong *target_mask;
7269     unsigned i, j;
7270 
7271     assert(host_size >= target_size);
7272 
7273     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7274     if (!target_mask) {
7275         return -TARGET_EFAULT;
7276     }
7277     memset(host_mask, 0, host_size);
7278 
7279     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7280         unsigned bit = i * target_bits;
7281         abi_ulong val;
7282 
7283         __get_user(val, &target_mask[i]);
7284         for (j = 0; j < target_bits; j++, bit++) {
7285             if (val & (1UL << j)) {
7286                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7287             }
7288         }
7289     }
7290 
7291     unlock_user(target_mask, target_addr, 0);
7292     return 0;
7293 }
7294 
7295 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7296                                    size_t host_size,
7297                                    abi_ulong target_addr,
7298                                    size_t target_size)
7299 {
7300     unsigned target_bits = sizeof(abi_ulong) * 8;
7301     unsigned host_bits = sizeof(*host_mask) * 8;
7302     abi_ulong *target_mask;
7303     unsigned i, j;
7304 
7305     assert(host_size >= target_size);
7306 
7307     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7308     if (!target_mask) {
7309         return -TARGET_EFAULT;
7310     }
7311 
7312     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7313         unsigned bit = i * target_bits;
7314         abi_ulong val = 0;
7315 
7316         for (j = 0; j < target_bits; j++, bit++) {
7317             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7318                 val |= 1UL << j;
7319             }
7320         }
7321         __put_user(val, &target_mask[i]);
7322     }
7323 
7324     unlock_user(target_mask, target_addr, target_size);
7325     return 0;
7326 }
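/*
 * Example of the word-size remapping above: with a 32-bit guest ABI
 * (abi_ulong is 32 bits) on a 64-bit host (unsigned long is 64 bits), guest
 * CPU 35 is bit 3 of the guest's second mask word but bit 35 of the host's
 * first word:
 *
 *     bit = 1 * 32 + 3;                          // i == 1, j == 3
 *     host_mask[bit / 64] |= 1UL << (bit % 64);  // host_mask[0] |= 1UL << 35
 *
 * host_to_target_cpu_mask() performs the same walk in reverse when copying a
 * host affinity mask back out to the guest.
 */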
7327 
7328 /* This is an internal helper for do_syscall so that it is easier
7329  * to have a single return point, so that actions, such as logging
7330  * of syscall results, can be performed.
7331  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7332  */
7333 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7334                             abi_long arg2, abi_long arg3, abi_long arg4,
7335                             abi_long arg5, abi_long arg6, abi_long arg7,
7336                             abi_long arg8)
7337 {
7338     CPUState *cpu = env_cpu(cpu_env);
7339     abi_long ret;
7340 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7341     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7342     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7343     || defined(TARGET_NR_statx)
7344     struct stat st;
7345 #endif
7346 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7347     || defined(TARGET_NR_fstatfs)
7348     struct statfs stfs;
7349 #endif
7350     void *p;
7351 
7352     switch(num) {
7353     case TARGET_NR_exit:
7354         /* In old applications this may be used to implement _exit(2).
7355            However in threaded applications it is used for thread termination,
7356            and _exit_group is used for application termination.
7357            Do thread termination if we have more than one thread.  */
7358 
7359         if (block_signals()) {
7360             return -TARGET_ERESTARTSYS;
7361         }
7362 
7363         cpu_list_lock();
7364 
7365         if (CPU_NEXT(first_cpu)) {
7366             TaskState *ts;
7367 
7368             /* Remove the CPU from the list.  */
7369             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7370 
7371             cpu_list_unlock();
7372 
7373             ts = cpu->opaque;
7374             if (ts->child_tidptr) {
7375                 put_user_u32(0, ts->child_tidptr);
7376                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7377                           NULL, NULL, 0);
7378             }
7379             thread_cpu = NULL;
7380             object_unref(OBJECT(cpu));
7381             g_free(ts);
7382             rcu_unregister_thread();
7383             pthread_exit(NULL);
7384         }
7385 
7386         cpu_list_unlock();
7387         preexit_cleanup(cpu_env, arg1);
7388         _exit(arg1);
7389         return 0; /* avoid warning */
7390     case TARGET_NR_read:
7391         if (arg2 == 0 && arg3 == 0) {
7392             return get_errno(safe_read(arg1, 0, 0));
7393         } else {
7394             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7395                 return -TARGET_EFAULT;
7396             ret = get_errno(safe_read(arg1, p, arg3));
7397             if (ret >= 0 &&
7398                 fd_trans_host_to_target_data(arg1)) {
7399                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7400             }
7401             unlock_user(p, arg2, ret);
7402         }
7403         return ret;
7404     case TARGET_NR_write:
7405         if (arg2 == 0 && arg3 == 0) {
7406             return get_errno(safe_write(arg1, 0, 0));
7407         }
7408         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7409             return -TARGET_EFAULT;
7410         if (fd_trans_target_to_host_data(arg1)) {
7411             void *copy = g_malloc(arg3);
7412             memcpy(copy, p, arg3);
7413             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7414             if (ret >= 0) {
7415                 ret = get_errno(safe_write(arg1, copy, ret));
7416             }
7417             g_free(copy);
7418         } else {
7419             ret = get_errno(safe_write(arg1, p, arg3));
7420         }
7421         unlock_user(p, arg2, 0);
7422         return ret;
7423 
7424 #ifdef TARGET_NR_open
7425     case TARGET_NR_open:
7426         if (!(p = lock_user_string(arg1)))
7427             return -TARGET_EFAULT;
7428         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7429                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7430                                   arg3));
7431         fd_trans_unregister(ret);
7432         unlock_user(p, arg1, 0);
7433         return ret;
7434 #endif
7435     case TARGET_NR_openat:
7436         if (!(p = lock_user_string(arg2)))
7437             return -TARGET_EFAULT;
7438         ret = get_errno(do_openat(cpu_env, arg1, p,
7439                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7440                                   arg4));
7441         fd_trans_unregister(ret);
7442         unlock_user(p, arg2, 0);
7443         return ret;
7444 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7445     case TARGET_NR_name_to_handle_at:
7446         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7447         return ret;
7448 #endif
7449 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7450     case TARGET_NR_open_by_handle_at:
7451         ret = do_open_by_handle_at(arg1, arg2, arg3);
7452         fd_trans_unregister(ret);
7453         return ret;
7454 #endif
7455     case TARGET_NR_close:
7456         fd_trans_unregister(arg1);
7457         return get_errno(close(arg1));
7458 
7459     case TARGET_NR_brk:
7460         return do_brk(arg1);
7461 #ifdef TARGET_NR_fork
7462     case TARGET_NR_fork:
7463         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7464 #endif
7465 #ifdef TARGET_NR_waitpid
7466     case TARGET_NR_waitpid:
7467         {
7468             int status;
7469             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7470             if (!is_error(ret) && arg2 && ret
7471                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7472                 return -TARGET_EFAULT;
7473         }
7474         return ret;
7475 #endif
7476 #ifdef TARGET_NR_waitid
7477     case TARGET_NR_waitid:
7478         {
7479             siginfo_t info;
7480             info.si_pid = 0;
7481             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7482             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7483                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7484                     return -TARGET_EFAULT;
7485                 host_to_target_siginfo(p, &info);
7486                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7487             }
7488         }
7489         return ret;
7490 #endif
7491 #ifdef TARGET_NR_creat /* not on alpha */
7492     case TARGET_NR_creat:
7493         if (!(p = lock_user_string(arg1)))
7494             return -TARGET_EFAULT;
7495         ret = get_errno(creat(p, arg2));
7496         fd_trans_unregister(ret);
7497         unlock_user(p, arg1, 0);
7498         return ret;
7499 #endif
7500 #ifdef TARGET_NR_link
7501     case TARGET_NR_link:
7502         {
7503             void * p2;
7504             p = lock_user_string(arg1);
7505             p2 = lock_user_string(arg2);
7506             if (!p || !p2)
7507                 ret = -TARGET_EFAULT;
7508             else
7509                 ret = get_errno(link(p, p2));
7510             unlock_user(p2, arg2, 0);
7511             unlock_user(p, arg1, 0);
7512         }
7513         return ret;
7514 #endif
7515 #if defined(TARGET_NR_linkat)
7516     case TARGET_NR_linkat:
7517         {
7518             void * p2 = NULL;
7519             if (!arg2 || !arg4)
7520                 return -TARGET_EFAULT;
7521             p  = lock_user_string(arg2);
7522             p2 = lock_user_string(arg4);
7523             if (!p || !p2)
7524                 ret = -TARGET_EFAULT;
7525             else
7526                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7527             unlock_user(p, arg2, 0);
7528             unlock_user(p2, arg4, 0);
7529         }
7530         return ret;
7531 #endif
7532 #ifdef TARGET_NR_unlink
7533     case TARGET_NR_unlink:
7534         if (!(p = lock_user_string(arg1)))
7535             return -TARGET_EFAULT;
7536         ret = get_errno(unlink(p));
7537         unlock_user(p, arg1, 0);
7538         return ret;
7539 #endif
7540 #if defined(TARGET_NR_unlinkat)
7541     case TARGET_NR_unlinkat:
7542         if (!(p = lock_user_string(arg2)))
7543             return -TARGET_EFAULT;
7544         ret = get_errno(unlinkat(arg1, p, arg3));
7545         unlock_user(p, arg2, 0);
7546         return ret;
7547 #endif
7548     case TARGET_NR_execve:
7549         {
7550             char **argp, **envp;
7551             int argc, envc;
7552             abi_ulong gp;
7553             abi_ulong guest_argp;
7554             abi_ulong guest_envp;
7555             abi_ulong addr;
7556             char **q;
7557             int total_size = 0;
7558 
7559             argc = 0;
7560             guest_argp = arg2;
7561             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7562                 if (get_user_ual(addr, gp))
7563                     return -TARGET_EFAULT;
7564                 if (!addr)
7565                     break;
7566                 argc++;
7567             }
7568             envc = 0;
7569             guest_envp = arg3;
7570             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7571                 if (get_user_ual(addr, gp))
7572                     return -TARGET_EFAULT;
7573                 if (!addr)
7574                     break;
7575                 envc++;
7576             }
7577 
7578             argp = g_new0(char *, argc + 1);
7579             envp = g_new0(char *, envc + 1);
7580 
7581             for (gp = guest_argp, q = argp; gp;
7582                   gp += sizeof(abi_ulong), q++) {
7583                 if (get_user_ual(addr, gp))
7584                     goto execve_efault;
7585                 if (!addr)
7586                     break;
7587                 if (!(*q = lock_user_string(addr)))
7588                     goto execve_efault;
7589                 total_size += strlen(*q) + 1;
7590             }
7591             *q = NULL;
7592 
7593             for (gp = guest_envp, q = envp; gp;
7594                   gp += sizeof(abi_ulong), q++) {
7595                 if (get_user_ual(addr, gp))
7596                     goto execve_efault;
7597                 if (!addr)
7598                     break;
7599                 if (!(*q = lock_user_string(addr)))
7600                     goto execve_efault;
7601                 total_size += strlen(*q) + 1;
7602             }
7603             *q = NULL;
7604 
7605             if (!(p = lock_user_string(arg1)))
7606                 goto execve_efault;
7607             /* Although execve() is not an interruptible syscall it is
7608              * a special case where we must use the safe_syscall wrapper:
7609              * if we allow a signal to happen before we make the host
7610              * syscall then we will 'lose' it, because at the point of
7611              * execve the process leaves QEMU's control. So we use the
7612              * safe syscall wrapper to ensure that we either take the
7613              * signal as a guest signal, or else it does not happen
7614              * before the execve completes and makes it the other
7615              * program's problem.
7616              */
7617             ret = get_errno(safe_execve(p, argp, envp));
7618             unlock_user(p, arg1, 0);
7619 
7620             goto execve_end;
7621 
7622         execve_efault:
7623             ret = -TARGET_EFAULT;
7624 
7625         execve_end:
7626             for (gp = guest_argp, q = argp; *q;
7627                   gp += sizeof(abi_ulong), q++) {
7628                 if (get_user_ual(addr, gp)
7629                     || !addr)
7630                     break;
7631                 unlock_user(*q, addr, 0);
7632             }
7633             for (gp = guest_envp, q = envp; *q;
7634                   gp += sizeof(abi_ulong), q++) {
7635                 if (get_user_ual(addr, gp)
7636                     || !addr)
7637                     break;
7638                 unlock_user(*q, addr, 0);
7639             }
7640 
7641             g_free(argp);
7642             g_free(envp);
7643         }
7644         return ret;
7645     case TARGET_NR_chdir:
7646         if (!(p = lock_user_string(arg1)))
7647             return -TARGET_EFAULT;
7648         ret = get_errno(chdir(p));
7649         unlock_user(p, arg1, 0);
7650         return ret;
7651 #ifdef TARGET_NR_time
7652     case TARGET_NR_time:
7653         {
7654             time_t host_time;
7655             ret = get_errno(time(&host_time));
7656             if (!is_error(ret)
7657                 && arg1
7658                 && put_user_sal(host_time, arg1))
7659                 return -TARGET_EFAULT;
7660         }
7661         return ret;
7662 #endif
7663 #ifdef TARGET_NR_mknod
7664     case TARGET_NR_mknod:
7665         if (!(p = lock_user_string(arg1)))
7666             return -TARGET_EFAULT;
7667         ret = get_errno(mknod(p, arg2, arg3));
7668         unlock_user(p, arg1, 0);
7669         return ret;
7670 #endif
7671 #if defined(TARGET_NR_mknodat)
7672     case TARGET_NR_mknodat:
7673         if (!(p = lock_user_string(arg2)))
7674             return -TARGET_EFAULT;
7675         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7676         unlock_user(p, arg2, 0);
7677         return ret;
7678 #endif
7679 #ifdef TARGET_NR_chmod
7680     case TARGET_NR_chmod:
7681         if (!(p = lock_user_string(arg1)))
7682             return -TARGET_EFAULT;
7683         ret = get_errno(chmod(p, arg2));
7684         unlock_user(p, arg1, 0);
7685         return ret;
7686 #endif
7687 #ifdef TARGET_NR_lseek
7688     case TARGET_NR_lseek:
7689         return get_errno(lseek(arg1, arg2, arg3));
7690 #endif
7691 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7692     /* Alpha specific */
7693     case TARGET_NR_getxpid:
7694         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7695         return get_errno(getpid());
7696 #endif
7697 #ifdef TARGET_NR_getpid
7698     case TARGET_NR_getpid:
7699         return get_errno(getpid());
7700 #endif
7701     case TARGET_NR_mount:
7702         {
7703             /* need to look at the data field */
7704             void *p2, *p3;
7705 
7706             if (arg1) {
7707                 p = lock_user_string(arg1);
7708                 if (!p) {
7709                     return -TARGET_EFAULT;
7710                 }
7711             } else {
7712                 p = NULL;
7713             }
7714 
7715             p2 = lock_user_string(arg2);
7716             if (!p2) {
7717                 if (arg1) {
7718                     unlock_user(p, arg1, 0);
7719                 }
7720                 return -TARGET_EFAULT;
7721             }
7722 
7723             if (arg3) {
7724                 p3 = lock_user_string(arg3);
7725                 if (!p3) {
7726                     if (arg1) {
7727                         unlock_user(p, arg1, 0);
7728                     }
7729                     unlock_user(p2, arg2, 0);
7730                     return -TARGET_EFAULT;
7731                 }
7732             } else {
7733                 p3 = NULL;
7734             }
7735 
7736             /* FIXME - arg5 should be locked, but it isn't clear how to
7737              * do that since it's not guaranteed to be a NULL-terminated
7738              * string.
7739              */
7740             if (!arg5) {
7741                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7742             } else {
7743                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7744             }
7745             ret = get_errno(ret);
7746 
7747             if (arg1) {
7748                 unlock_user(p, arg1, 0);
7749             }
7750             unlock_user(p2, arg2, 0);
7751             if (arg3) {
7752                 unlock_user(p3, arg3, 0);
7753             }
7754         }
7755         return ret;
7756 #ifdef TARGET_NR_umount
7757     case TARGET_NR_umount:
7758         if (!(p = lock_user_string(arg1)))
7759             return -TARGET_EFAULT;
7760         ret = get_errno(umount(p));
7761         unlock_user(p, arg1, 0);
7762         return ret;
7763 #endif
7764 #ifdef TARGET_NR_stime /* not on alpha */
7765     case TARGET_NR_stime:
7766         {
7767             struct timespec ts;
7768             ts.tv_nsec = 0;
7769             if (get_user_sal(ts.tv_sec, arg1)) {
7770                 return -TARGET_EFAULT;
7771             }
7772             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7773         }
7774 #endif
7775 #ifdef TARGET_NR_alarm /* not on alpha */
7776     case TARGET_NR_alarm:
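        /* alarm() cannot fail; its return value is the number of seconds
         * remaining on any previous alarm, so no get_errno() here. */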
7777         return alarm(arg1);
7778 #endif
7779 #ifdef TARGET_NR_pause /* not on alpha */
7780     case TARGET_NR_pause:
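        /* Suspend with the guest's signal mask until a signal arrives,
         * unless block_signals() reports one is already pending; pause()
         * always completes with EINTR once a signal has been taken. */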
7781         if (!block_signals()) {
7782             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7783         }
7784         return -TARGET_EINTR;
7785 #endif
7786 #ifdef TARGET_NR_utime
7787     case TARGET_NR_utime:
7788         {
7789             struct utimbuf tbuf, *host_tbuf;
7790             struct target_utimbuf *target_tbuf;
7791             if (arg2) {
7792                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7793                     return -TARGET_EFAULT;
7794                 tbuf.actime = tswapal(target_tbuf->actime);
7795                 tbuf.modtime = tswapal(target_tbuf->modtime);
7796                 unlock_user_struct(target_tbuf, arg2, 0);
7797                 host_tbuf = &tbuf;
7798             } else {
7799                 host_tbuf = NULL;
7800             }
7801             if (!(p = lock_user_string(arg1)))
7802                 return -TARGET_EFAULT;
7803             ret = get_errno(utime(p, host_tbuf));
7804             unlock_user(p, arg1, 0);
7805         }
7806         return ret;
7807 #endif
7808 #ifdef TARGET_NR_utimes
7809     case TARGET_NR_utimes:
7810         {
7811             struct timeval *tvp, tv[2];
7812             if (arg2) {
7813                 if (copy_from_user_timeval(&tv[0], arg2)
7814                     || copy_from_user_timeval(&tv[1],
7815                                               arg2 + sizeof(struct target_timeval)))
7816                     return -TARGET_EFAULT;
7817                 tvp = tv;
7818             } else {
7819                 tvp = NULL;
7820             }
7821             if (!(p = lock_user_string(arg1)))
7822                 return -TARGET_EFAULT;
7823             ret = get_errno(utimes(p, tvp));
7824             unlock_user(p, arg1, 0);
7825         }
7826         return ret;
7827 #endif
7828 #if defined(TARGET_NR_futimesat)
7829     case TARGET_NR_futimesat:
7830         {
7831             struct timeval *tvp, tv[2];
7832             if (arg3) {
7833                 if (copy_from_user_timeval(&tv[0], arg3)
7834                     || copy_from_user_timeval(&tv[1],
7835                                               arg3 + sizeof(struct target_timeval)))
7836                     return -TARGET_EFAULT;
7837                 tvp = tv;
7838             } else {
7839                 tvp = NULL;
7840             }
7841             if (!(p = lock_user_string(arg2))) {
7842                 return -TARGET_EFAULT;
7843             }
7844             ret = get_errno(futimesat(arg1, path(p), tvp));
7845             unlock_user(p, arg2, 0);
7846         }
7847         return ret;
7848 #endif
7849 #ifdef TARGET_NR_access
7850     case TARGET_NR_access:
7851         if (!(p = lock_user_string(arg1))) {
7852             return -TARGET_EFAULT;
7853         }
7854         ret = get_errno(access(path(p), arg2));
7855         unlock_user(p, arg1, 0);
7856         return ret;
7857 #endif
7858 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7859     case TARGET_NR_faccessat:
7860         if (!(p = lock_user_string(arg2))) {
7861             return -TARGET_EFAULT;
7862         }
7863         ret = get_errno(faccessat(arg1, p, arg3, 0));
7864         unlock_user(p, arg2, 0);
7865         return ret;
7866 #endif
7867 #ifdef TARGET_NR_nice /* not on alpha */
7868     case TARGET_NR_nice:
7869         return get_errno(nice(arg1));
7870 #endif
7871     case TARGET_NR_sync:
7872         sync();
7873         return 0;
7874 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7875     case TARGET_NR_syncfs:
7876         return get_errno(syncfs(arg1));
7877 #endif
7878     case TARGET_NR_kill:
7879         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7880 #ifdef TARGET_NR_rename
7881     case TARGET_NR_rename:
7882         {
7883             void *p2;
7884             p = lock_user_string(arg1);
7885             p2 = lock_user_string(arg2);
7886             if (!p || !p2)
7887                 ret = -TARGET_EFAULT;
7888             else
7889                 ret = get_errno(rename(p, p2));
7890             unlock_user(p2, arg2, 0);
7891             unlock_user(p, arg1, 0);
7892         }
7893         return ret;
7894 #endif
7895 #if defined(TARGET_NR_renameat)
7896     case TARGET_NR_renameat:
7897         {
7898             void *p2;
7899             p  = lock_user_string(arg2);
7900             p2 = lock_user_string(arg4);
7901             if (!p || !p2)
7902                 ret = -TARGET_EFAULT;
7903             else
7904                 ret = get_errno(renameat(arg1, p, arg3, p2));
7905             unlock_user(p2, arg4, 0);
7906             unlock_user(p, arg2, 0);
7907         }
7908         return ret;
7909 #endif
7910 #if defined(TARGET_NR_renameat2)
7911     case TARGET_NR_renameat2:
7912         {
7913             void *p2;
7914             p  = lock_user_string(arg2);
7915             p2 = lock_user_string(arg4);
7916             if (!p || !p2) {
7917                 ret = -TARGET_EFAULT;
7918             } else {
7919                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7920             }
7921             unlock_user(p2, arg4, 0);
7922             unlock_user(p, arg2, 0);
7923         }
7924         return ret;
7925 #endif
7926 #ifdef TARGET_NR_mkdir
7927     case TARGET_NR_mkdir:
7928         if (!(p = lock_user_string(arg1)))
7929             return -TARGET_EFAULT;
7930         ret = get_errno(mkdir(p, arg2));
7931         unlock_user(p, arg1, 0);
7932         return ret;
7933 #endif
7934 #if defined(TARGET_NR_mkdirat)
7935     case TARGET_NR_mkdirat:
7936         if (!(p = lock_user_string(arg2)))
7937             return -TARGET_EFAULT;
7938         ret = get_errno(mkdirat(arg1, p, arg3));
7939         unlock_user(p, arg2, 0);
7940         return ret;
7941 #endif
7942 #ifdef TARGET_NR_rmdir
7943     case TARGET_NR_rmdir:
7944         if (!(p = lock_user_string(arg1)))
7945             return -TARGET_EFAULT;
7946         ret = get_errno(rmdir(p));
7947         unlock_user(p, arg1, 0);
7948         return ret;
7949 #endif
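    /* For dup, dup2 and dup3 any fd translation state registered for the
     * source descriptor (see fd-trans.h) must be carried over to the new
     * descriptor, hence the fd_trans_dup() calls on success. */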
7950     case TARGET_NR_dup:
7951         ret = get_errno(dup(arg1));
7952         if (ret >= 0) {
7953             fd_trans_dup(arg1, ret);
7954         }
7955         return ret;
7956 #ifdef TARGET_NR_pipe
7957     case TARGET_NR_pipe:
7958         return do_pipe(cpu_env, arg1, 0, 0);
7959 #endif
7960 #ifdef TARGET_NR_pipe2
7961     case TARGET_NR_pipe2:
7962         return do_pipe(cpu_env, arg1,
7963                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7964 #endif
7965     case TARGET_NR_times:
7966         {
7967             struct target_tms *tmsp;
7968             struct tms tms;
7969             ret = get_errno(times(&tms));
7970             if (arg1) {
7971                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7972                 if (!tmsp)
7973                     return -TARGET_EFAULT;
7974                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7975                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7976                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7977                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7978             }
7979             if (!is_error(ret))
7980                 ret = host_to_target_clock_t(ret);
7981         }
7982         return ret;
7983     case TARGET_NR_acct:
7984         if (arg1 == 0) {
7985             ret = get_errno(acct(NULL));
7986         } else {
7987             if (!(p = lock_user_string(arg1))) {
7988                 return -TARGET_EFAULT;
7989             }
7990             ret = get_errno(acct(path(p)));
7991             unlock_user(p, arg1, 0);
7992         }
7993         return ret;
7994 #ifdef TARGET_NR_umount2
7995     case TARGET_NR_umount2:
7996         if (!(p = lock_user_string(arg1)))
7997             return -TARGET_EFAULT;
7998         ret = get_errno(umount2(p, arg2));
7999         unlock_user(p, arg1, 0);
8000         return ret;
8001 #endif
8002     case TARGET_NR_ioctl:
8003         return do_ioctl(arg1, arg2, arg3);
8004 #ifdef TARGET_NR_fcntl
8005     case TARGET_NR_fcntl:
8006         return do_fcntl(arg1, arg2, arg3);
8007 #endif
8008     case TARGET_NR_setpgid:
8009         return get_errno(setpgid(arg1, arg2));
8010     case TARGET_NR_umask:
8011         return get_errno(umask(arg1));
8012     case TARGET_NR_chroot:
8013         if (!(p = lock_user_string(arg1)))
8014             return -TARGET_EFAULT;
8015         ret = get_errno(chroot(p));
8016         unlock_user(p, arg1, 0);
8017         return ret;
8018 #ifdef TARGET_NR_dup2
8019     case TARGET_NR_dup2:
8020         ret = get_errno(dup2(arg1, arg2));
8021         if (ret >= 0) {
8022             fd_trans_dup(arg1, arg2);
8023         }
8024         return ret;
8025 #endif
8026 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8027     case TARGET_NR_dup3:
8028     {
8029         int host_flags;
8030 
8031         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8032             return -TARGET_EINVAL;
8033         }
8034         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8035         ret = get_errno(dup3(arg1, arg2, host_flags));
8036         if (ret >= 0) {
8037             fd_trans_dup(arg1, arg2);
8038         }
8039         return ret;
8040     }
8041 #endif
8042 #ifdef TARGET_NR_getppid /* not on alpha */
8043     case TARGET_NR_getppid:
8044         return get_errno(getppid());
8045 #endif
8046 #ifdef TARGET_NR_getpgrp
8047     case TARGET_NR_getpgrp:
8048         return get_errno(getpgrp());
8049 #endif
8050     case TARGET_NR_setsid:
8051         return get_errno(setsid());
8052 #ifdef TARGET_NR_sigaction
8053     case TARGET_NR_sigaction:
8054         {
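            /* The old (non-rt) sigaction structure is laid out differently
             * on each of these targets, so every branch below converts the
             * guest struct field by field. */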
8055 #if defined(TARGET_ALPHA)
8056             struct target_sigaction act, oact, *pact = 0;
8057             struct target_old_sigaction *old_act;
8058             if (arg2) {
8059                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8060                     return -TARGET_EFAULT;
8061                 act._sa_handler = old_act->_sa_handler;
8062                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8063                 act.sa_flags = old_act->sa_flags;
8064                 act.sa_restorer = 0;
8065                 unlock_user_struct(old_act, arg2, 0);
8066                 pact = &act;
8067             }
8068             ret = get_errno(do_sigaction(arg1, pact, &oact));
8069             if (!is_error(ret) && arg3) {
8070                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8071                     return -TARGET_EFAULT;
8072                 old_act->_sa_handler = oact._sa_handler;
8073                 old_act->sa_mask = oact.sa_mask.sig[0];
8074                 old_act->sa_flags = oact.sa_flags;
8075                 unlock_user_struct(old_act, arg3, 1);
8076             }
8077 #elif defined(TARGET_MIPS)
8078             struct target_sigaction act, oact, *pact, *old_act;
8079 
8080             if (arg2) {
8081                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8082                     return -TARGET_EFAULT;
8083                 act._sa_handler = old_act->_sa_handler;
8084                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8085                 act.sa_flags = old_act->sa_flags;
8086                 unlock_user_struct(old_act, arg2, 0);
8087                 pact = &act;
8088             } else {
8089                 pact = NULL;
8090             }
8091 
8092             ret = get_errno(do_sigaction(arg1, pact, &oact));
8093 
8094             if (!is_error(ret) && arg3) {
8095                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8096                     return -TARGET_EFAULT;
8097                 old_act->_sa_handler = oact._sa_handler;
8098                 old_act->sa_flags = oact.sa_flags;
8099                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8100                 old_act->sa_mask.sig[1] = 0;
8101                 old_act->sa_mask.sig[2] = 0;
8102                 old_act->sa_mask.sig[3] = 0;
8103                 unlock_user_struct(old_act, arg3, 1);
8104             }
8105 #else
8106             struct target_old_sigaction *old_act;
8107             struct target_sigaction act, oact, *pact;
8108             if (arg2) {
8109                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8110                     return -TARGET_EFAULT;
8111                 act._sa_handler = old_act->_sa_handler;
8112                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8113                 act.sa_flags = old_act->sa_flags;
8114                 act.sa_restorer = old_act->sa_restorer;
8115 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8116                 act.ka_restorer = 0;
8117 #endif
8118                 unlock_user_struct(old_act, arg2, 0);
8119                 pact = &act;
8120             } else {
8121                 pact = NULL;
8122             }
8123             ret = get_errno(do_sigaction(arg1, pact, &oact));
8124             if (!is_error(ret) && arg3) {
8125                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8126                     return -TARGET_EFAULT;
8127                 old_act->_sa_handler = oact._sa_handler;
8128                 old_act->sa_mask = oact.sa_mask.sig[0];
8129                 old_act->sa_flags = oact.sa_flags;
8130                 old_act->sa_restorer = oact.sa_restorer;
8131                 unlock_user_struct(old_act, arg3, 1);
8132             }
8133 #endif
8134         }
8135         return ret;
8136 #endif
8137     case TARGET_NR_rt_sigaction:
8138         {
8139 #if defined(TARGET_ALPHA)
8140             /* For Alpha and SPARC this is a 5 argument syscall, with
8141              * a 'restorer' parameter which must be copied into the
8142              * sa_restorer field of the sigaction struct.
8143              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8144              * and arg5 is the sigsetsize.
8145              * Alpha also has a separate rt_sigaction struct that it uses
8146              * here; SPARC uses the usual sigaction struct.
8147              */
8148             struct target_rt_sigaction *rt_act;
8149             struct target_sigaction act, oact, *pact = 0;
8150 
8151             if (arg4 != sizeof(target_sigset_t)) {
8152                 return -TARGET_EINVAL;
8153             }
8154             if (arg2) {
8155                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8156                     return -TARGET_EFAULT;
8157                 act._sa_handler = rt_act->_sa_handler;
8158                 act.sa_mask = rt_act->sa_mask;
8159                 act.sa_flags = rt_act->sa_flags;
8160                 act.sa_restorer = arg5;
8161                 unlock_user_struct(rt_act, arg2, 0);
8162                 pact = &act;
8163             }
8164             ret = get_errno(do_sigaction(arg1, pact, &oact));
8165             if (!is_error(ret) && arg3) {
8166                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8167                     return -TARGET_EFAULT;
8168                 rt_act->_sa_handler = oact._sa_handler;
8169                 rt_act->sa_mask = oact.sa_mask;
8170                 rt_act->sa_flags = oact.sa_flags;
8171                 unlock_user_struct(rt_act, arg3, 1);
8172             }
8173 #else
8174 #ifdef TARGET_SPARC
8175             target_ulong restorer = arg4;
8176             target_ulong sigsetsize = arg5;
8177 #else
8178             target_ulong sigsetsize = arg4;
8179 #endif
8180             struct target_sigaction *act;
8181             struct target_sigaction *oact;
8182 
8183             if (sigsetsize != sizeof(target_sigset_t)) {
8184                 return -TARGET_EINVAL;
8185             }
8186             if (arg2) {
8187                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8188                     return -TARGET_EFAULT;
8189                 }
8190 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8191                 act->ka_restorer = restorer;
8192 #endif
8193             } else {
8194                 act = NULL;
8195             }
8196             if (arg3) {
8197                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8198                     ret = -TARGET_EFAULT;
8199                     goto rt_sigaction_fail;
8200                 }
8201             } else
8202                 oact = NULL;
8203             ret = get_errno(do_sigaction(arg1, act, oact));
8204         rt_sigaction_fail:
8205             if (act)
8206                 unlock_user_struct(act, arg2, 0);
8207             if (oact)
8208                 unlock_user_struct(oact, arg3, 1);
8209 #endif
8210         }
8211         return ret;
8212 #ifdef TARGET_NR_sgetmask /* not on alpha */
8213     case TARGET_NR_sgetmask:
8214         {
8215             sigset_t cur_set;
8216             abi_ulong target_set;
8217             ret = do_sigprocmask(0, NULL, &cur_set);
8218             if (!ret) {
8219                 host_to_target_old_sigset(&target_set, &cur_set);
8220                 ret = target_set;
8221             }
8222         }
8223         return ret;
8224 #endif
8225 #ifdef TARGET_NR_ssetmask /* not on alpha */
8226     case TARGET_NR_ssetmask:
8227         {
8228             sigset_t set, oset;
8229             abi_ulong target_set = arg1;
8230             target_to_host_old_sigset(&set, &target_set);
8231             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8232             if (!ret) {
8233                 host_to_target_old_sigset(&target_set, &oset);
8234                 ret = target_set;
8235             }
8236         }
8237         return ret;
8238 #endif
8239 #ifdef TARGET_NR_sigprocmask
8240     case TARGET_NR_sigprocmask:
8241         {
8242 #if defined(TARGET_ALPHA)
8243             sigset_t set, oldset;
8244             abi_ulong mask;
8245             int how;
8246 
8247             switch (arg1) {
8248             case TARGET_SIG_BLOCK:
8249                 how = SIG_BLOCK;
8250                 break;
8251             case TARGET_SIG_UNBLOCK:
8252                 how = SIG_UNBLOCK;
8253                 break;
8254             case TARGET_SIG_SETMASK:
8255                 how = SIG_SETMASK;
8256                 break;
8257             default:
8258                 return -TARGET_EINVAL;
8259             }
8260             mask = arg2;
8261             target_to_host_old_sigset(&set, &mask);
8262 
8263             ret = do_sigprocmask(how, &set, &oldset);
8264             if (!is_error(ret)) {
8265                 host_to_target_old_sigset(&mask, &oldset);
8266                 ret = mask;
8267                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8268             }
8269 #else
8270             sigset_t set, oldset, *set_ptr;
8271             int how;
8272 
8273             if (arg2) {
8274                 switch (arg1) {
8275                 case TARGET_SIG_BLOCK:
8276                     how = SIG_BLOCK;
8277                     break;
8278                 case TARGET_SIG_UNBLOCK:
8279                     how = SIG_UNBLOCK;
8280                     break;
8281                 case TARGET_SIG_SETMASK:
8282                     how = SIG_SETMASK;
8283                     break;
8284                 default:
8285                     return -TARGET_EINVAL;
8286                 }
8287                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8288                     return -TARGET_EFAULT;
8289                 target_to_host_old_sigset(&set, p);
8290                 unlock_user(p, arg2, 0);
8291                 set_ptr = &set;
8292             } else {
8293                 how = 0;
8294                 set_ptr = NULL;
8295             }
8296             ret = do_sigprocmask(how, set_ptr, &oldset);
8297             if (!is_error(ret) && arg3) {
8298                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8299                     return -TARGET_EFAULT;
8300                 host_to_target_old_sigset(p, &oldset);
8301                 unlock_user(p, arg3, sizeof(target_sigset_t));
8302             }
8303 #endif
8304         }
8305         return ret;
8306 #endif
8307     case TARGET_NR_rt_sigprocmask:
8308         {
8309             int how = arg1;
8310             sigset_t set, oldset, *set_ptr;
8311 
8312             if (arg4 != sizeof(target_sigset_t)) {
8313                 return -TARGET_EINVAL;
8314             }
8315 
8316             if (arg2) {
8317                 switch(how) {
8318                 case TARGET_SIG_BLOCK:
8319                     how = SIG_BLOCK;
8320                     break;
8321                 case TARGET_SIG_UNBLOCK:
8322                     how = SIG_UNBLOCK;
8323                     break;
8324                 case TARGET_SIG_SETMASK:
8325                     how = SIG_SETMASK;
8326                     break;
8327                 default:
8328                     return -TARGET_EINVAL;
8329                 }
8330                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8331                     return -TARGET_EFAULT;
8332                 target_to_host_sigset(&set, p);
8333                 unlock_user(p, arg2, 0);
8334                 set_ptr = &set;
8335             } else {
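                /* With no new set, 'how' is ignored by do_sigprocmask(),
                 * so any value will do. */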
8336                 how = 0;
8337                 set_ptr = NULL;
8338             }
8339             ret = do_sigprocmask(how, set_ptr, &oldset);
8340             if (!is_error(ret) && arg3) {
8341                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8342                     return -TARGET_EFAULT;
8343                 host_to_target_sigset(p, &oldset);
8344                 unlock_user(p, arg3, sizeof(target_sigset_t));
8345             }
8346         }
8347         return ret;
8348 #ifdef TARGET_NR_sigpending
8349     case TARGET_NR_sigpending:
8350         {
8351             sigset_t set;
8352             ret = get_errno(sigpending(&set));
8353             if (!is_error(ret)) {
8354                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8355                     return -TARGET_EFAULT;
8356                 host_to_target_old_sigset(p, &set);
8357                 unlock_user(p, arg1, sizeof(target_sigset_t));
8358             }
8359         }
8360         return ret;
8361 #endif
8362     case TARGET_NR_rt_sigpending:
8363         {
8364             sigset_t set;
8365 
8366             /* Yes, this check is >, not != like most others. We follow the
8367              * kernel's logic here: it implements NR_sigpending through the
8368              * same code path, and in that case the old_sigset_t is smaller
8369              * in size.
8370              */
8371             if (arg2 > sizeof(target_sigset_t)) {
8372                 return -TARGET_EINVAL;
8373             }
8374 
8375             ret = get_errno(sigpending(&set));
8376             if (!is_error(ret)) {
8377                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8378                     return -TARGET_EFAULT;
8379                 host_to_target_sigset(p, &set);
8380                 unlock_user(p, arg1, sizeof(target_sigset_t));
8381             }
8382         }
8383         return ret;
8384 #ifdef TARGET_NR_sigsuspend
8385     case TARGET_NR_sigsuspend:
8386         {
8387             TaskState *ts = cpu->opaque;
8388 #if defined(TARGET_ALPHA)
8389             abi_ulong mask = arg1;
8390             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8391 #else
8392             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8393                 return -TARGET_EFAULT;
8394             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8395             unlock_user(p, arg1, 0);
8396 #endif
8397             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8398                                                SIGSET_T_SIZE));
8399             if (ret != -TARGET_ERESTARTSYS) {
8400                 ts->in_sigsuspend = 1;
8401             }
8402         }
8403         return ret;
8404 #endif
8405     case TARGET_NR_rt_sigsuspend:
8406         {
8407             TaskState *ts = cpu->opaque;
8408 
8409             if (arg2 != sizeof(target_sigset_t)) {
8410                 return -TARGET_EINVAL;
8411             }
8412             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8413                 return -TARGET_EFAULT;
8414             target_to_host_sigset(&ts->sigsuspend_mask, p);
8415             unlock_user(p, arg1, 0);
8416             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8417                                                SIGSET_T_SIZE));
8418             if (ret != -TARGET_ERESTARTSYS) {
8419                 ts->in_sigsuspend = 1;
8420             }
8421         }
8422         return ret;
8423     case TARGET_NR_rt_sigtimedwait:
8424         {
8425             sigset_t set;
8426             struct timespec uts, *puts;
8427             siginfo_t uinfo;
8428 
8429             if (arg4 != sizeof(target_sigset_t)) {
8430                 return -TARGET_EINVAL;
8431             }
8432 
8433             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8434                 return -TARGET_EFAULT;
8435             target_to_host_sigset(&set, p);
8436             unlock_user(p, arg1, 0);
8437             if (arg3) {
8438                 puts = &uts;
8439                 if (target_to_host_timespec(puts, arg3)) return -TARGET_EFAULT;
8440             } else {
8441                 puts = NULL;
8442             }
8443             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8444                                                  SIGSET_T_SIZE));
8445             if (!is_error(ret)) {
8446                 if (arg2) {
8447                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8448                                   0);
8449                     if (!p) {
8450                         return -TARGET_EFAULT;
8451                     }
8452                     host_to_target_siginfo(p, &uinfo);
8453                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8454                 }
8455                 ret = host_to_target_signal(ret);
8456             }
8457         }
8458         return ret;
8459     case TARGET_NR_rt_sigqueueinfo:
8460         {
8461             siginfo_t uinfo;
8462 
8463             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8464             if (!p) {
8465                 return -TARGET_EFAULT;
8466             }
8467             target_to_host_siginfo(&uinfo, p);
8468             unlock_user(p, arg3, 0);
8469             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8470         }
8471         return ret;
8472     case TARGET_NR_rt_tgsigqueueinfo:
8473         {
8474             siginfo_t uinfo;
8475 
8476             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8477             if (!p) {
8478                 return -TARGET_EFAULT;
8479             }
8480             target_to_host_siginfo(&uinfo, p);
8481             unlock_user(p, arg4, 0);
8482             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8483         }
8484         return ret;
8485 #ifdef TARGET_NR_sigreturn
8486     case TARGET_NR_sigreturn:
8487         if (block_signals()) {
8488             return -TARGET_ERESTARTSYS;
8489         }
8490         return do_sigreturn(cpu_env);
8491 #endif
8492     case TARGET_NR_rt_sigreturn:
8493         if (block_signals()) {
8494             return -TARGET_ERESTARTSYS;
8495         }
8496         return do_rt_sigreturn(cpu_env);
8497     case TARGET_NR_sethostname:
8498         if (!(p = lock_user_string(arg1)))
8499             return -TARGET_EFAULT;
8500         ret = get_errno(sethostname(p, arg2));
8501         unlock_user(p, arg1, 0);
8502         return ret;
8503 #ifdef TARGET_NR_setrlimit
8504     case TARGET_NR_setrlimit:
8505         {
8506             int resource = target_to_host_resource(arg1);
8507             struct target_rlimit *target_rlim;
8508             struct rlimit rlim;
8509             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8510                 return -TARGET_EFAULT;
8511             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8512             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8513             unlock_user_struct(target_rlim, arg2, 0);
8514             /*
8515              * If we just passed through resource limit settings for memory then
8516              * they would also apply to QEMU's own allocations, and QEMU will
8517              * crash or hang or die if its allocations fail. Ideally we would
8518              * track the guest allocations in QEMU and apply the limits ourselves.
8519              * For now, just tell the guest the call succeeded but don't actually
8520              * limit anything.
8521              */
8522             if (resource != RLIMIT_AS &&
8523                 resource != RLIMIT_DATA &&
8524                 resource != RLIMIT_STACK) {
8525                 return get_errno(setrlimit(resource, &rlim));
8526             } else {
8527                 return 0;
8528             }
8529         }
8530 #endif
8531 #ifdef TARGET_NR_getrlimit
8532     case TARGET_NR_getrlimit:
8533         {
8534             int resource = target_to_host_resource(arg1);
8535             struct target_rlimit *target_rlim;
8536             struct rlimit rlim;
8537 
8538             ret = get_errno(getrlimit(resource, &rlim));
8539             if (!is_error(ret)) {
8540                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8541                     return -TARGET_EFAULT;
8542                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8543                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8544                 unlock_user_struct(target_rlim, arg2, 1);
8545             }
8546         }
8547         return ret;
8548 #endif
8549     case TARGET_NR_getrusage:
8550         {
8551             struct rusage rusage;
8552             ret = get_errno(getrusage(arg1, &rusage));
8553             if (!is_error(ret)) {
8554                 ret = host_to_target_rusage(arg2, &rusage);
8555             }
8556         }
8557         return ret;
8558     case TARGET_NR_gettimeofday:
8559         {
8560             struct timeval tv;
8561             ret = get_errno(gettimeofday(&tv, NULL));
8562             if (!is_error(ret)) {
8563                 if (copy_to_user_timeval(arg1, &tv))
8564                     return -TARGET_EFAULT;
8565             }
8566         }
8567         return ret;
8568     case TARGET_NR_settimeofday:
8569         {
8570             struct timeval tv, *ptv = NULL;
8571             struct timezone tz, *ptz = NULL;
8572 
8573             if (arg1) {
8574                 if (copy_from_user_timeval(&tv, arg1)) {
8575                     return -TARGET_EFAULT;
8576                 }
8577                 ptv = &tv;
8578             }
8579 
8580             if (arg2) {
8581                 if (copy_from_user_timezone(&tz, arg2)) {
8582                     return -TARGET_EFAULT;
8583                 }
8584                 ptz = &tz;
8585             }
8586 
8587             return get_errno(settimeofday(ptv, ptz));
8588         }
8589 #if defined(TARGET_NR_select)
8590     case TARGET_NR_select:
8591 #if defined(TARGET_WANT_NI_OLD_SELECT)
8592         /* Some architectures used to implement old_select here
8593          * but now return -ENOSYS for it.
8594          */
8595         ret = -TARGET_ENOSYS;
8596 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8597         ret = do_old_select(arg1);
8598 #else
8599         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8600 #endif
8601         return ret;
8602 #endif
8603 #ifdef TARGET_NR_pselect6
8604     case TARGET_NR_pselect6:
8605         {
8606             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8607             fd_set rfds, wfds, efds;
8608             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8609             struct timespec ts, *ts_ptr;
8610 
8611             /*
8612              * The 6th arg is actually two args smashed together,
8613              * so we cannot use the C library.
8614              */
8615             sigset_t set;
8616             struct {
8617                 sigset_t *set;
8618                 size_t size;
8619             } sig, *sig_ptr;
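            /* This mirrors the kernel's own representation of pselect6's
             * sixth argument: a pointer to the sigset plus its size. */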
8620 
8621             abi_ulong arg_sigset, arg_sigsize, *arg7;
8622             target_sigset_t *target_sigset;
8623 
8624             n = arg1;
8625             rfd_addr = arg2;
8626             wfd_addr = arg3;
8627             efd_addr = arg4;
8628             ts_addr = arg5;
8629 
8630             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8631             if (ret) {
8632                 return ret;
8633             }
8634             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8635             if (ret) {
8636                 return ret;
8637             }
8638             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8639             if (ret) {
8640                 return ret;
8641             }
8642 
8643             /*
8644              * This takes a timespec, and not a timeval, so we cannot
8645              * use the do_select() helper ...
8646              */
8647             if (ts_addr) {
8648                 if (target_to_host_timespec(&ts, ts_addr)) {
8649                     return -TARGET_EFAULT;
8650                 }
8651                 ts_ptr = &ts;
8652             } else {
8653                 ts_ptr = NULL;
8654             }
8655 
8656             /* Extract the two packed args for the sigset */
8657             if (arg6) {
8658                 sig_ptr = &sig;
8659                 sig.size = SIGSET_T_SIZE;
8660 
8661                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8662                 if (!arg7) {
8663                     return -TARGET_EFAULT;
8664                 }
8665                 arg_sigset = tswapal(arg7[0]);
8666                 arg_sigsize = tswapal(arg7[1]);
8667                 unlock_user(arg7, arg6, 0);
8668 
8669                 if (arg_sigset) {
8670                     sig.set = &set;
8671                     if (arg_sigsize != sizeof(*target_sigset)) {
8672                         /* Like the kernel, we enforce correct size sigsets */
8673                         return -TARGET_EINVAL;
8674                     }
8675                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8676                                               sizeof(*target_sigset), 1);
8677                     if (!target_sigset) {
8678                         return -TARGET_EFAULT;
8679                     }
8680                     target_to_host_sigset(&set, target_sigset);
8681                     unlock_user(target_sigset, arg_sigset, 0);
8682                 } else {
8683                     sig.set = NULL;
8684                 }
8685             } else {
8686                 sig_ptr = NULL;
8687             }
8688 
8689             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8690                                           ts_ptr, sig_ptr));
8691 
8692             if (!is_error(ret)) {
8693                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8694                     return -TARGET_EFAULT;
8695                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8696                     return -TARGET_EFAULT;
8697                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8698                     return -TARGET_EFAULT;
8699 
8700                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8701                     return -TARGET_EFAULT;
8702             }
8703         }
8704         return ret;
8705 #endif
8706 #ifdef TARGET_NR_symlink
8707     case TARGET_NR_symlink:
8708         {
8709             void *p2;
8710             p = lock_user_string(arg1);
8711             p2 = lock_user_string(arg2);
8712             if (!p || !p2)
8713                 ret = -TARGET_EFAULT;
8714             else
8715                 ret = get_errno(symlink(p, p2));
8716             unlock_user(p2, arg2, 0);
8717             unlock_user(p, arg1, 0);
8718         }
8719         return ret;
8720 #endif
8721 #if defined(TARGET_NR_symlinkat)
8722     case TARGET_NR_symlinkat:
8723         {
8724             void *p2;
8725             p  = lock_user_string(arg1);
8726             p2 = lock_user_string(arg3);
8727             if (!p || !p2)
8728                 ret = -TARGET_EFAULT;
8729             else
8730                 ret = get_errno(symlinkat(p, arg2, p2));
8731             unlock_user(p2, arg3, 0);
8732             unlock_user(p, arg1, 0);
8733         }
8734         return ret;
8735 #endif
8736 #ifdef TARGET_NR_readlink
8737     case TARGET_NR_readlink:
8738         {
8739             void *p2;
8740             p = lock_user_string(arg1);
8741             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8742             if (!p || !p2) {
8743                 ret = -TARGET_EFAULT;
8744             } else if (!arg3) {
8745                 /* Short circuit this for the magic exe check. */
8746                 ret = -TARGET_EINVAL;
8747             } else if (is_proc_myself((const char *)p, "exe")) {
8748                 char real[PATH_MAX], *temp;
8749                 temp = realpath(exec_path, real);
8750                 /* Return value is # of bytes that we wrote to the buffer. */
8751                 if (temp == NULL) {
8752                     ret = get_errno(-1);
8753                 } else {
8754                     /* Don't worry about sign mismatch as earlier mapping
8755                      * logic would have thrown a bad address error. */
8756                     ret = MIN(strlen(real), arg3);
8757                     /* We cannot NUL terminate the string. */
8758                     memcpy(p2, real, ret);
8759                 }
8760             } else {
8761                 ret = get_errno(readlink(path(p), p2, arg3));
8762             }
8763             unlock_user(p2, arg2, ret);
8764             unlock_user(p, arg1, 0);
8765         }
8766         return ret;
8767 #endif
8768 #if defined(TARGET_NR_readlinkat)
8769     case TARGET_NR_readlinkat:
8770         {
8771             void *p2;
8772             p  = lock_user_string(arg2);
8773             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8774             if (!p || !p2) {
8775                 ret = -TARGET_EFAULT;
8776             } else if (is_proc_myself((const char *)p, "exe")) {
8777                 char real[PATH_MAX], *temp;
8778                 temp = realpath(exec_path, real);
8779                 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
8780                 snprintf((char *)p2, arg4, "%s", real);
8781             } else {
8782                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8783             }
8784             unlock_user(p2, arg3, ret);
8785             unlock_user(p, arg2, 0);
8786         }
8787         return ret;
8788 #endif
8789 #ifdef TARGET_NR_swapon
8790     case TARGET_NR_swapon:
8791         if (!(p = lock_user_string(arg1)))
8792             return -TARGET_EFAULT;
8793         ret = get_errno(swapon(p, arg2));
8794         unlock_user(p, arg1, 0);
8795         return ret;
8796 #endif
8797     case TARGET_NR_reboot:
8798         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8799             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2; ignored otherwise */
8800             p = lock_user_string(arg4);
8801             if (!p) {
8802                 return -TARGET_EFAULT;
8803             }
8804             ret = get_errno(reboot(arg1, arg2, arg3, p));
8805             unlock_user(p, arg4, 0);
8806         } else {
8807             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8808         }
8809         return ret;
8810 #ifdef TARGET_NR_mmap
8811     case TARGET_NR_mmap:
8812 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8813     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8814     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8815     || defined(TARGET_S390X)
8816         {
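            /* On these targets the old mmap syscall takes a single pointer
             * to a block of six arguments in guest memory rather than
             * passing the arguments in registers. */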
8817             abi_ulong *v;
8818             abi_ulong v1, v2, v3, v4, v5, v6;
8819             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8820                 return -TARGET_EFAULT;
8821             v1 = tswapal(v[0]);
8822             v2 = tswapal(v[1]);
8823             v3 = tswapal(v[2]);
8824             v4 = tswapal(v[3]);
8825             v5 = tswapal(v[4]);
8826             v6 = tswapal(v[5]);
8827             unlock_user(v, arg1, 0);
8828             ret = get_errno(target_mmap(v1, v2, v3,
8829                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8830                                         v5, v6));
8831         }
8832 #else
8833         ret = get_errno(target_mmap(arg1, arg2, arg3,
8834                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8835                                     arg5,
8836                                     arg6));
8837 #endif
8838         return ret;
8839 #endif
8840 #ifdef TARGET_NR_mmap2
8841     case TARGET_NR_mmap2:
8842 #ifndef MMAP_SHIFT
8843 #define MMAP_SHIFT 12
8844 #endif
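        /* The mmap2 ABI passes the file offset in units of 1 << MMAP_SHIFT
         * bytes (4096 unless the target overrides it), so convert it back
         * to a byte offset here; e.g. a guest offset of 1 means 4096. */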
8845         ret = target_mmap(arg1, arg2, arg3,
8846                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8847                           arg5, arg6 << MMAP_SHIFT);
8848         return get_errno(ret);
8849 #endif
8850     case TARGET_NR_munmap:
8851         return get_errno(target_munmap(arg1, arg2));
8852     case TARGET_NR_mprotect:
8853         {
8854             TaskState *ts = cpu->opaque;
8855             /* Special hack to detect libc making the stack executable.  */
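            /* The kernel applies a PROT_GROWSDOWN request to the whole
             * grows-down stack region; target_mprotect() has no such
             * notion, so widen the range down to the stack limit by hand. */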
8856             if ((arg3 & PROT_GROWSDOWN)
8857                 && arg1 >= ts->info->stack_limit
8858                 && arg1 <= ts->info->start_stack) {
8859                 arg3 &= ~PROT_GROWSDOWN;
8860                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8861                 arg1 = ts->info->stack_limit;
8862             }
8863         }
8864         return get_errno(target_mprotect(arg1, arg2, arg3));
8865 #ifdef TARGET_NR_mremap
8866     case TARGET_NR_mremap:
8867         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8868 #endif
8869         /* ??? msync/mlock/munlock are broken for softmmu.  */
8870 #ifdef TARGET_NR_msync
8871     case TARGET_NR_msync:
8872         return get_errno(msync(g2h(arg1), arg2, arg3));
8873 #endif
8874 #ifdef TARGET_NR_mlock
8875     case TARGET_NR_mlock:
8876         return get_errno(mlock(g2h(arg1), arg2));
8877 #endif
8878 #ifdef TARGET_NR_munlock
8879     case TARGET_NR_munlock:
8880         return get_errno(munlock(g2h(arg1), arg2));
8881 #endif
8882 #ifdef TARGET_NR_mlockall
8883     case TARGET_NR_mlockall:
8884         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8885 #endif
8886 #ifdef TARGET_NR_munlockall
8887     case TARGET_NR_munlockall:
8888         return get_errno(munlockall());
8889 #endif
8890 #ifdef TARGET_NR_truncate
8891     case TARGET_NR_truncate:
8892         if (!(p = lock_user_string(arg1)))
8893             return -TARGET_EFAULT;
8894         ret = get_errno(truncate(p, arg2));
8895         unlock_user(p, arg1, 0);
8896         return ret;
8897 #endif
8898 #ifdef TARGET_NR_ftruncate
8899     case TARGET_NR_ftruncate:
8900         return get_errno(ftruncate(arg1, arg2));
8901 #endif
8902     case TARGET_NR_fchmod:
8903         return get_errno(fchmod(arg1, arg2));
8904 #if defined(TARGET_NR_fchmodat)
8905     case TARGET_NR_fchmodat:
8906         if (!(p = lock_user_string(arg2)))
8907             return -TARGET_EFAULT;
8908         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8909         unlock_user(p, arg2, 0);
8910         return ret;
8911 #endif
8912     case TARGET_NR_getpriority:
8913         /* Note that negative values are valid for getpriority, so we must
8914            differentiate based on errno settings.  */
8915         errno = 0;
8916         ret = getpriority(arg1, arg2);
8917         if (ret == -1 && errno != 0) {
8918             return -host_to_target_errno(errno);
8919         }
8920 #ifdef TARGET_ALPHA
8921         /* Return value is the unbiased priority.  Signal no error.  */
8922         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8923 #else
8924         /* Return value is a biased priority to avoid negative numbers.  */
8925         ret = 20 - ret;
8926 #endif
8927         return ret;
8928     case TARGET_NR_setpriority:
8929         return get_errno(setpriority(arg1, arg2, arg3));
8930 #ifdef TARGET_NR_statfs
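    /* statfs and fstatfs share the conversion code at convert_statfs below;
     * statfs64 and fstatfs64 do likewise at convert_statfs64. */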
8931     case TARGET_NR_statfs:
8932         if (!(p = lock_user_string(arg1))) {
8933             return -TARGET_EFAULT;
8934         }
8935         ret = get_errno(statfs(path(p), &stfs));
8936         unlock_user(p, arg1, 0);
8937     convert_statfs:
8938         if (!is_error(ret)) {
8939             struct target_statfs *target_stfs;
8940 
8941             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8942                 return -TARGET_EFAULT;
8943             __put_user(stfs.f_type, &target_stfs->f_type);
8944             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8945             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8946             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8947             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8948             __put_user(stfs.f_files, &target_stfs->f_files);
8949             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8950             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8951             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8952             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8953             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8954 #ifdef _STATFS_F_FLAGS
8955             __put_user(stfs.f_flags, &target_stfs->f_flags);
8956 #else
8957             __put_user(0, &target_stfs->f_flags);
8958 #endif
8959             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8960             unlock_user_struct(target_stfs, arg2, 1);
8961         }
8962         return ret;
8963 #endif
8964 #ifdef TARGET_NR_fstatfs
8965     case TARGET_NR_fstatfs:
8966         ret = get_errno(fstatfs(arg1, &stfs));
8967         goto convert_statfs;
8968 #endif
8969 #ifdef TARGET_NR_statfs64
8970     case TARGET_NR_statfs64:
8971         if (!(p = lock_user_string(arg1))) {
8972             return -TARGET_EFAULT;
8973         }
8974         ret = get_errno(statfs(path(p), &stfs));
8975         unlock_user(p, arg1, 0);
8976     convert_statfs64:
8977         if (!is_error(ret)) {
8978             struct target_statfs64 *target_stfs;
8979 
8980             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8981                 return -TARGET_EFAULT;
8982             __put_user(stfs.f_type, &target_stfs->f_type);
8983             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8984             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8985             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8986             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8987             __put_user(stfs.f_files, &target_stfs->f_files);
8988             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8989             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8990             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8991             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8992             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8993             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8994             unlock_user_struct(target_stfs, arg3, 1);
8995         }
8996         return ret;
8997     case TARGET_NR_fstatfs64:
8998         ret = get_errno(fstatfs(arg1, &stfs));
8999         goto convert_statfs64;
9000 #endif
9001 #ifdef TARGET_NR_socketcall
9002     case TARGET_NR_socketcall:
9003         return do_socketcall(arg1, arg2);
9004 #endif
9005 #ifdef TARGET_NR_accept
9006     case TARGET_NR_accept:
9007         return do_accept4(arg1, arg2, arg3, 0);
9008 #endif
9009 #ifdef TARGET_NR_accept4
9010     case TARGET_NR_accept4:
9011         return do_accept4(arg1, arg2, arg3, arg4);
9012 #endif
9013 #ifdef TARGET_NR_bind
9014     case TARGET_NR_bind:
9015         return do_bind(arg1, arg2, arg3);
9016 #endif
9017 #ifdef TARGET_NR_connect
9018     case TARGET_NR_connect:
9019         return do_connect(arg1, arg2, arg3);
9020 #endif
9021 #ifdef TARGET_NR_getpeername
9022     case TARGET_NR_getpeername:
9023         return do_getpeername(arg1, arg2, arg3);
9024 #endif
9025 #ifdef TARGET_NR_getsockname
9026     case TARGET_NR_getsockname:
9027         return do_getsockname(arg1, arg2, arg3);
9028 #endif
9029 #ifdef TARGET_NR_getsockopt
9030     case TARGET_NR_getsockopt:
9031         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9032 #endif
9033 #ifdef TARGET_NR_listen
9034     case TARGET_NR_listen:
9035         return get_errno(listen(arg1, arg2));
9036 #endif
9037 #ifdef TARGET_NR_recv
9038     case TARGET_NR_recv:
9039         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9040 #endif
9041 #ifdef TARGET_NR_recvfrom
9042     case TARGET_NR_recvfrom:
9043         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9044 #endif
9045 #ifdef TARGET_NR_recvmsg
9046     case TARGET_NR_recvmsg:
9047         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9048 #endif
9049 #ifdef TARGET_NR_send
9050     case TARGET_NR_send:
9051         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9052 #endif
9053 #ifdef TARGET_NR_sendmsg
9054     case TARGET_NR_sendmsg:
9055         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9056 #endif
9057 #ifdef TARGET_NR_sendmmsg
9058     case TARGET_NR_sendmmsg:
9059         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9060     case TARGET_NR_recvmmsg:
9061         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9062 #endif
9063 #ifdef TARGET_NR_sendto
9064     case TARGET_NR_sendto:
9065         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9066 #endif
9067 #ifdef TARGET_NR_shutdown
9068     case TARGET_NR_shutdown:
9069         return get_errno(shutdown(arg1, arg2));
9070 #endif
9071 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9072     case TARGET_NR_getrandom:
9073         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9074         if (!p) {
9075             return -TARGET_EFAULT;
9076         }
9077         ret = get_errno(getrandom(p, arg2, arg3));
9078         unlock_user(p, arg1, ret);
9079         return ret;
9080 #endif
9081 #ifdef TARGET_NR_socket
9082     case TARGET_NR_socket:
9083         return do_socket(arg1, arg2, arg3);
9084 #endif
9085 #ifdef TARGET_NR_socketpair
9086     case TARGET_NR_socketpair:
9087         return do_socketpair(arg1, arg2, arg3, arg4);
9088 #endif
9089 #ifdef TARGET_NR_setsockopt
9090     case TARGET_NR_setsockopt:
9091         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9092 #endif
9093 #if defined(TARGET_NR_syslog)
9094     case TARGET_NR_syslog:
9095         {
9096             int len = arg3;
9097 
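            /* Only the READ, READ_CLEAR and READ_ALL actions use the guest
             * buffer at arg2; the other actions never dereference it and
             * sys_syslog() is called with a NULL buffer for them. */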
9098             switch (arg1) {
9099             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9100             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9101             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9102             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9103             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9104             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9105             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9106             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9107                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9108             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9109             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9110             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9111                 {
9112                     if (len < 0) {
9113                         return -TARGET_EINVAL;
9114                     }
9115                     if (len == 0) {
9116                         return 0;
9117                     }
9118                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9119                     if (!p) {
9120                         return -TARGET_EFAULT;
9121                     }
9122                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9123                     unlock_user(p, arg2, arg3);
9124                 }
9125                 return ret;
9126             default:
9127                 return -TARGET_EINVAL;
9128             }
9129         }
9130         break;
9131 #endif
9132     case TARGET_NR_setitimer:
9133         {
9134             struct itimerval value, ovalue, *pvalue;
9135 
9136             if (arg2) {
9137                 pvalue = &value;
9138                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9139                     || copy_from_user_timeval(&pvalue->it_value,
9140                                               arg2 + sizeof(struct target_timeval)))
9141                     return -TARGET_EFAULT;
9142             } else {
9143                 pvalue = NULL;
9144             }
9145             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9146             if (!is_error(ret) && arg3) {
9147                 if (copy_to_user_timeval(arg3,
9148                                          &ovalue.it_interval)
9149                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9150                                             &ovalue.it_value))
9151                     return -TARGET_EFAULT;
9152             }
9153         }
9154         return ret;
9155     case TARGET_NR_getitimer:
9156         {
9157             struct itimerval value;
9158 
9159             ret = get_errno(getitimer(arg1, &value));
9160             if (!is_error(ret) && arg2) {
9161                 if (copy_to_user_timeval(arg2,
9162                                          &value.it_interval)
9163                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9164                                             &value.it_value))
9165                     return -TARGET_EFAULT;
9166             }
9167         }
9168         return ret;
9169 #ifdef TARGET_NR_stat
9170     case TARGET_NR_stat:
9171         if (!(p = lock_user_string(arg1))) {
9172             return -TARGET_EFAULT;
9173         }
9174         ret = get_errno(stat(path(p), &st));
9175         unlock_user(p, arg1, 0);
9176         goto do_stat;
9177 #endif
9178 #ifdef TARGET_NR_lstat
9179     case TARGET_NR_lstat:
9180         if (!(p = lock_user_string(arg1))) {
9181             return -TARGET_EFAULT;
9182         }
9183         ret = get_errno(lstat(path(p), &st));
9184         unlock_user(p, arg1, 0);
9185         goto do_stat;
9186 #endif
9187 #ifdef TARGET_NR_fstat
9188     case TARGET_NR_fstat:
9189         {
9190             ret = get_errno(fstat(arg1, &st));
9191 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9192         do_stat:
9193 #endif
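                 /* stat, lstat and fstat all converge here to copy the host
                  * struct stat out into the guest's target_stat layout. */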
9194             if (!is_error(ret)) {
9195                 struct target_stat *target_st;
9196 
9197                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9198                     return -TARGET_EFAULT;
9199                 memset(target_st, 0, sizeof(*target_st));
9200                 __put_user(st.st_dev, &target_st->st_dev);
9201                 __put_user(st.st_ino, &target_st->st_ino);
9202                 __put_user(st.st_mode, &target_st->st_mode);
9203                 __put_user(st.st_uid, &target_st->st_uid);
9204                 __put_user(st.st_gid, &target_st->st_gid);
9205                 __put_user(st.st_nlink, &target_st->st_nlink);
9206                 __put_user(st.st_rdev, &target_st->st_rdev);
9207                 __put_user(st.st_size, &target_st->st_size);
9208                 __put_user(st.st_blksize, &target_st->st_blksize);
9209                 __put_user(st.st_blocks, &target_st->st_blocks);
9210                 __put_user(st.st_atime, &target_st->target_st_atime);
9211                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9212                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9213 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9214     defined(TARGET_STAT_HAVE_NSEC)
9215                 __put_user(st.st_atim.tv_nsec,
9216                            &target_st->target_st_atime_nsec);
9217                 __put_user(st.st_mtim.tv_nsec,
9218                            &target_st->target_st_mtime_nsec);
9219                 __put_user(st.st_ctim.tv_nsec,
9220                            &target_st->target_st_ctime_nsec);
9221 #endif
9222                 unlock_user_struct(target_st, arg2, 1);
9223             }
9224         }
9225         return ret;
9226 #endif
9227     case TARGET_NR_vhangup:
9228         return get_errno(vhangup());
9229 #ifdef TARGET_NR_syscall
9230     case TARGET_NR_syscall:
9231         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9232                           arg6, arg7, arg8, 0);
9233 #endif
9234     case TARGET_NR_wait4:
9235         {
9236             int status;
9237             abi_long status_ptr = arg2;
9238             struct rusage rusage, *rusage_ptr;
9239             abi_ulong target_rusage = arg4;
9240             abi_long rusage_err;
9241             if (target_rusage)
9242                 rusage_ptr = &rusage;
9243             else
9244                 rusage_ptr = NULL;
9245             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9246             if (!is_error(ret)) {
9247                 if (status_ptr && ret) {
9248                     status = host_to_target_waitstatus(status);
9249                     if (put_user_s32(status, status_ptr))
9250                         return -TARGET_EFAULT;
9251                 }
9252                 if (target_rusage) {
9253                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9254                     if (rusage_err) {
9255                         ret = rusage_err;
9256                     }
9257                 }
9258             }
9259         }
9260         return ret;
9261 #ifdef TARGET_NR_swapoff
9262     case TARGET_NR_swapoff:
9263         if (!(p = lock_user_string(arg1)))
9264             return -TARGET_EFAULT;
9265         ret = get_errno(swapoff(p));
9266         unlock_user(p, arg1, 0);
9267         return ret;
9268 #endif
9269     case TARGET_NR_sysinfo:
9270         {
9271             struct target_sysinfo *target_value;
9272             struct sysinfo value;
9273             ret = get_errno(sysinfo(&value));
9274             if (!is_error(ret) && arg1)
9275             {
9276                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9277                     return -TARGET_EFAULT;
9278                 __put_user(value.uptime, &target_value->uptime);
9279                 __put_user(value.loads[0], &target_value->loads[0]);
9280                 __put_user(value.loads[1], &target_value->loads[1]);
9281                 __put_user(value.loads[2], &target_value->loads[2]);
9282                 __put_user(value.totalram, &target_value->totalram);
9283                 __put_user(value.freeram, &target_value->freeram);
9284                 __put_user(value.sharedram, &target_value->sharedram);
9285                 __put_user(value.bufferram, &target_value->bufferram);
9286                 __put_user(value.totalswap, &target_value->totalswap);
9287                 __put_user(value.freeswap, &target_value->freeswap);
9288                 __put_user(value.procs, &target_value->procs);
9289                 __put_user(value.totalhigh, &target_value->totalhigh);
9290                 __put_user(value.freehigh, &target_value->freehigh);
9291                 __put_user(value.mem_unit, &target_value->mem_unit);
9292                 unlock_user_struct(target_value, arg1, 1);
9293             }
9294         }
9295         return ret;
9296 #ifdef TARGET_NR_ipc
9297     case TARGET_NR_ipc:
9298         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9299 #endif
9300 #ifdef TARGET_NR_semget
9301     case TARGET_NR_semget:
9302         return get_errno(semget(arg1, arg2, arg3));
9303 #endif
9304 #ifdef TARGET_NR_semop
9305     case TARGET_NR_semop:
9306         return do_semop(arg1, arg2, arg3);
9307 #endif
9308 #ifdef TARGET_NR_semctl
9309     case TARGET_NR_semctl:
9310         return do_semctl(arg1, arg2, arg3, arg4);
9311 #endif
9312 #ifdef TARGET_NR_msgctl
9313     case TARGET_NR_msgctl:
9314         return do_msgctl(arg1, arg2, arg3);
9315 #endif
9316 #ifdef TARGET_NR_msgget
9317     case TARGET_NR_msgget:
9318         return get_errno(msgget(arg1, arg2));
9319 #endif
9320 #ifdef TARGET_NR_msgrcv
9321     case TARGET_NR_msgrcv:
9322         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9323 #endif
9324 #ifdef TARGET_NR_msgsnd
9325     case TARGET_NR_msgsnd:
9326         return do_msgsnd(arg1, arg2, arg3, arg4);
9327 #endif
9328 #ifdef TARGET_NR_shmget
9329     case TARGET_NR_shmget:
9330         return get_errno(shmget(arg1, arg2, arg3));
9331 #endif
9332 #ifdef TARGET_NR_shmctl
9333     case TARGET_NR_shmctl:
9334         return do_shmctl(arg1, arg2, arg3);
9335 #endif
9336 #ifdef TARGET_NR_shmat
9337     case TARGET_NR_shmat:
9338         return do_shmat(cpu_env, arg1, arg2, arg3);
9339 #endif
9340 #ifdef TARGET_NR_shmdt
9341     case TARGET_NR_shmdt:
9342         return do_shmdt(arg1);
9343 #endif
9344     case TARGET_NR_fsync:
9345         return get_errno(fsync(arg1));
9346     case TARGET_NR_clone:
9347         /* Linux manages to have three different orderings for its
9348          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9349          * match the kernel's CONFIG_CLONE_* settings.
9350          * Microblaze is further special in that it uses a sixth
9351          * implicit argument to clone for the TLS pointer.
9352          */
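             /* Each branch below feeds do_fork() the same parameter order
              * (flags, newsp, parent_tidptr, newtls, child_tidptr); only
              * which guest argument register supplies each value differs
              * per ABI variant. */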
9353 #if defined(TARGET_MICROBLAZE)
9354         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9355 #elif defined(TARGET_CLONE_BACKWARDS)
9356         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9357 #elif defined(TARGET_CLONE_BACKWARDS2)
9358         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9359 #else
9360         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9361 #endif
9362         return ret;
9363 #ifdef __NR_exit_group
9364         /* new thread calls */
9365     case TARGET_NR_exit_group:
9366         preexit_cleanup(cpu_env, arg1);
9367         return get_errno(exit_group(arg1));
9368 #endif
9369     case TARGET_NR_setdomainname:
9370         if (!(p = lock_user_string(arg1)))
9371             return -TARGET_EFAULT;
9372         ret = get_errno(setdomainname(p, arg2));
9373         unlock_user(p, arg1, 0);
9374         return ret;
9375     case TARGET_NR_uname:
9376         /* no need to transcode because we use the linux syscall */
9377         {
9378             struct new_utsname * buf;
9379 
9380             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9381                 return -TARGET_EFAULT;
9382             ret = get_errno(sys_uname(buf));
9383             if (!is_error(ret)) {
9384                 /* Overwrite the native machine name with whatever is being
9385                    emulated. */
9386                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9387                           sizeof(buf->machine));
9388                 /* Allow the user to override the reported release.  */
9389                 if (qemu_uname_release && *qemu_uname_release) {
9390                     g_strlcpy(buf->release, qemu_uname_release,
9391                               sizeof(buf->release));
9392                 }
9393             }
9394             unlock_user_struct(buf, arg1, 1);
9395         }
9396         return ret;
9397 #ifdef TARGET_I386
9398     case TARGET_NR_modify_ldt:
9399         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9400 #if !defined(TARGET_X86_64)
9401     case TARGET_NR_vm86:
9402         return do_vm86(cpu_env, arg1, arg2);
9403 #endif
9404 #endif
9405     case TARGET_NR_adjtimex:
9406         {
9407             struct timex host_buf;
9408 
9409             if (target_to_host_timex(&host_buf, arg1) != 0) {
9410                 return -TARGET_EFAULT;
9411             }
9412             ret = get_errno(adjtimex(&host_buf));
9413             if (!is_error(ret)) {
9414                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9415                     return -TARGET_EFAULT;
9416                 }
9417             }
9418         }
9419         return ret;
9420 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9421     case TARGET_NR_clock_adjtime:
9422         {
9423             struct timex htx, *phtx = &htx;
9424 
9425             if (target_to_host_timex(phtx, arg2) != 0) {
9426                 return -TARGET_EFAULT;
9427             }
9428             ret = get_errno(clock_adjtime(arg1, phtx));
9429             if (!is_error(ret) && phtx) {
9430                 if (host_to_target_timex(arg2, phtx) != 0) {
9431                     return -TARGET_EFAULT;
9432                 }
9433             }
9434         }
9435         return ret;
9436 #endif
9437     case TARGET_NR_getpgid:
9438         return get_errno(getpgid(arg1));
9439     case TARGET_NR_fchdir:
9440         return get_errno(fchdir(arg1));
9441     case TARGET_NR_personality:
9442         return get_errno(personality(arg1));
9443 #ifdef TARGET_NR__llseek /* Not on alpha */
9444     case TARGET_NR__llseek:
9445         {
9446             int64_t res;
9447 #if !defined(__NR_llseek)
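                 /* No host _llseek syscall (e.g. on 64-bit hosts): build the
                  * 64-bit offset from the guest's high (arg2) and low (arg3)
                  * halves and use plain lseek(). */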
9448             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9449             if (res == -1) {
9450                 ret = get_errno(res);
9451             } else {
9452                 ret = 0;
9453             }
9454 #else
9455             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9456 #endif
9457             if ((ret == 0) && put_user_s64(res, arg4)) {
9458                 return -TARGET_EFAULT;
9459             }
9460         }
9461         return ret;
9462 #endif
9463 #ifdef TARGET_NR_getdents
9464     case TARGET_NR_getdents:
9465 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9466 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9467         {
9468             struct target_dirent *target_dirp;
9469             struct linux_dirent *dirp;
9470             abi_long count = arg3;
9471 
9472             dirp = g_try_malloc(count);
9473             if (!dirp) {
9474                 return -TARGET_ENOMEM;
9475             }
9476 
9477             ret = get_errno(sys_getdents(arg1, dirp, count));
9478             if (!is_error(ret)) {
9479                 struct linux_dirent *de;
9480                 struct target_dirent *tde;
9481                 int len = ret;
9482                 int reclen, treclen;
9483                 int count1, tnamelen;
9484
9485                 count1 = 0;
9486                 de = dirp;
9487                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9488                     return -TARGET_EFAULT;
9489                 tde = target_dirp;
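                     /* Walk the host records, rewriting each header into the
                      * target_dirent layout; the name bytes (and the kernel's
                      * padding after them) are copied across unchanged. */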
9490                 while (len > 0) {
9491                     reclen = de->d_reclen;
9492                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9493                     assert(tnamelen >= 0);
9494                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9495                     assert(count1 + treclen <= count);
9496                     tde->d_reclen = tswap16(treclen);
9497                     tde->d_ino = tswapal(de->d_ino);
9498                     tde->d_off = tswapal(de->d_off);
9499                     memcpy(tde->d_name, de->d_name, tnamelen);
9500                     de = (struct linux_dirent *)((char *)de + reclen);
9501                     len -= reclen;
9502                     tde = (struct target_dirent *)((char *)tde + treclen);
9503                     count1 += treclen;
9504                 }
9505                 ret = count1;
9506                 unlock_user(target_dirp, arg2, ret);
9507             }
9508             g_free(dirp);
9509         }
9510 #else
9511         {
9512             struct linux_dirent *dirp;
9513             abi_long count = arg3;
9514 
9515             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9516                 return -TARGET_EFAULT;
9517             ret = get_errno(sys_getdents(arg1, dirp, count));
9518             if (!is_error(ret)) {
9519                 struct linux_dirent *de;
9520                 int len = ret;
9521                 int reclen;
9522                 de = dirp;
9523                 while (len > 0) {
9524                     reclen = de->d_reclen;
9525                     if (reclen > len)
9526                         break;
9527                     de->d_reclen = tswap16(reclen);
9528                     tswapls(&de->d_ino);
9529                     tswapls(&de->d_off);
9530                     de = (struct linux_dirent *)((char *)de + reclen);
9531                     len -= reclen;
9532                 }
9533             }
9534             unlock_user(dirp, arg2, ret);
9535         }
9536 #endif
9537 #else
9538         /* Implement getdents in terms of getdents64 */
9539         {
9540             struct linux_dirent64 *dirp;
9541             abi_long count = arg3;
9542 
9543             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9544             if (!dirp) {
9545                 return -TARGET_EFAULT;
9546             }
9547             ret = get_errno(sys_getdents64(arg1, dirp, count));
9548             if (!is_error(ret)) {
9549                 /* Convert the dirent64 structs to target dirent.  We do this
9550                  * in-place, since we can guarantee that a target_dirent is no
9551                  * larger than a dirent64; however this means we have to be
9552                  * careful to read everything before writing in the new format.
9553                  */
9554                 struct linux_dirent64 *de;
9555                 struct target_dirent *tde;
9556                 int len = ret;
9557                 int tlen = 0;
9558 
9559                 de = dirp;
9560                 tde = (struct target_dirent *)dirp;
9561                 while (len > 0) {
9562                     int namelen, treclen;
9563                     int reclen = de->d_reclen;
9564                     uint64_t ino = de->d_ino;
9565                     int64_t off = de->d_off;
9566                     uint8_t type = de->d_type;
9567 
9568                     namelen = strlen(de->d_name);
9569                     treclen = offsetof(struct target_dirent, d_name)
9570                         + namelen + 2;
9571                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
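                         /* The +2 above reserves the name's NUL terminator
                          * plus at least one byte for d_type, which is stored
                          * in the last byte of the (aligned) record below. */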
9572 
9573                     memmove(tde->d_name, de->d_name, namelen + 1);
9574                     tde->d_ino = tswapal(ino);
9575                     tde->d_off = tswapal(off);
9576                     tde->d_reclen = tswap16(treclen);
9577                     /* The target_dirent type is in what was formerly a padding
9578                      * byte at the end of the structure:
9579                      */
9580                     *(((char *)tde) + treclen - 1) = type;
9581 
9582                     de = (struct linux_dirent64 *)((char *)de + reclen);
9583                     tde = (struct target_dirent *)((char *)tde + treclen);
9584                     len -= reclen;
9585                     tlen += treclen;
9586                 }
9587                 ret = tlen;
9588             }
9589             unlock_user(dirp, arg2, ret);
9590         }
9591 #endif
9592         return ret;
9593 #endif /* TARGET_NR_getdents */
9594 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9595     case TARGET_NR_getdents64:
9596         {
9597             struct linux_dirent64 *dirp;
9598             abi_long count = arg3;
9599             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9600                 return -TARGET_EFAULT;
9601             ret = get_errno(sys_getdents64(arg1, dirp, count));
9602             if (!is_error(ret)) {
9603                 struct linux_dirent64 *de;
9604                 int len = ret;
9605                 int reclen;
9606                 de = dirp;
9607                 while (len > 0) {
9608                     reclen = de->d_reclen;
9609                     if (reclen > len)
9610                         break;
9611                     de->d_reclen = tswap16(reclen);
9612                     tswap64s((uint64_t *)&de->d_ino);
9613                     tswap64s((uint64_t *)&de->d_off);
9614                     de = (struct linux_dirent64 *)((char *)de + reclen);
9615                     len -= reclen;
9616                 }
9617             }
9618             unlock_user(dirp, arg2, ret);
9619         }
9620         return ret;
9621 #endif /* TARGET_NR_getdents64 */
9622 #if defined(TARGET_NR__newselect)
9623     case TARGET_NR__newselect:
9624         return do_select(arg1, arg2, arg3, arg4, arg5);
9625 #endif
9626 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9627 # ifdef TARGET_NR_poll
9628     case TARGET_NR_poll:
9629 # endif
9630 # ifdef TARGET_NR_ppoll
9631     case TARGET_NR_ppoll:
9632 # endif
9633         {
9634             struct target_pollfd *target_pfd;
9635             unsigned int nfds = arg2;
9636             struct pollfd *pfd;
9637             unsigned int i;
9638 
9639             pfd = NULL;
9640             target_pfd = NULL;
9641             if (nfds) {
9642                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9643                     return -TARGET_EINVAL;
9644                 }
9645 
9646                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9647                                        sizeof(struct target_pollfd) * nfds, 1);
9648                 if (!target_pfd) {
9649                     return -TARGET_EFAULT;
9650                 }
9651 
9652                 pfd = alloca(sizeof(struct pollfd) * nfds);
9653                 for (i = 0; i < nfds; i++) {
9654                     pfd[i].fd = tswap32(target_pfd[i].fd);
9655                     pfd[i].events = tswap16(target_pfd[i].events);
9656                 }
9657             }
9658 
9659             switch (num) {
9660 # ifdef TARGET_NR_ppoll
9661             case TARGET_NR_ppoll:
9662             {
9663                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9664                 target_sigset_t *target_set;
9665                 sigset_t _set, *set = &_set;
9666 
9667                 if (arg3) {
9668                     if (target_to_host_timespec(timeout_ts, arg3)) {
9669                         unlock_user(target_pfd, arg1, 0);
9670                         return -TARGET_EFAULT;
9671                     }
9672                 } else {
9673                     timeout_ts = NULL;
9674                 }
9675 
9676                 if (arg4) {
9677                     if (arg5 != sizeof(target_sigset_t)) {
9678                         unlock_user(target_pfd, arg1, 0);
9679                         return -TARGET_EINVAL;
9680                     }
9681 
9682                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9683                     if (!target_set) {
9684                         unlock_user(target_pfd, arg1, 0);
9685                         return -TARGET_EFAULT;
9686                     }
9687                     target_to_host_sigset(set, target_set);
9688                 } else {
9689                     set = NULL;
9690                 }
9691 
9692                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9693                                            set, SIGSET_T_SIZE));
9694 
9695                 if (!is_error(ret) && arg3) {
9696                     host_to_target_timespec(arg3, timeout_ts);
9697                 }
9698                 if (arg4) {
9699                     unlock_user(target_set, arg4, 0);
9700                 }
9701                 break;
9702             }
9703 # endif
9704 # ifdef TARGET_NR_poll
9705             case TARGET_NR_poll:
9706             {
9707                 struct timespec ts, *pts;
9708 
9709                 if (arg3 >= 0) {
9710                     /* Convert ms to secs, ns */
9711                     ts.tv_sec = arg3 / 1000;
9712                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
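                         /* e.g. a 1500ms timeout becomes
                          * { tv_sec = 1, tv_nsec = 500000000 }. */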
9713                     pts = &ts;
9714                 } else {
9715                     /* A negative poll() timeout means "infinite" */
9716                     pts = NULL;
9717                 }
9718                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9719                 break;
9720             }
9721 # endif
9722             default:
9723                 g_assert_not_reached();
9724             }
9725 
9726             if (!is_error(ret)) {
9727                 for(i = 0; i < nfds; i++) {
9728                     target_pfd[i].revents = tswap16(pfd[i].revents);
9729                 }
9730             }
9731             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9732         }
9733         return ret;
9734 #endif
9735     case TARGET_NR_flock:
9736         /* NOTE: the flock constants are the same on every Linux platform,
9737            so no translation of arg2 is needed */
9738         return get_errno(safe_flock(arg1, arg2));
9739     case TARGET_NR_readv:
9740         {
9741             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9742             if (vec != NULL) {
9743                 ret = get_errno(safe_readv(arg1, vec, arg3));
9744                 unlock_iovec(vec, arg2, arg3, 1);
9745             } else {
9746                 ret = -host_to_target_errno(errno);
9747             }
9748         }
9749         return ret;
9750     case TARGET_NR_writev:
9751         {
9752             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9753             if (vec != NULL) {
9754                 ret = get_errno(safe_writev(arg1, vec, arg3));
9755                 unlock_iovec(vec, arg2, arg3, 0);
9756             } else {
9757                 ret = -host_to_target_errno(errno);
9758             }
9759         }
9760         return ret;
9761 #if defined(TARGET_NR_preadv)
9762     case TARGET_NR_preadv:
9763         {
9764             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9765             if (vec != NULL) {
9766                 unsigned long low, high;
9767 
9768                 target_to_host_low_high(arg4, arg5, &low, &high);
9769                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9770                 unlock_iovec(vec, arg2, arg3, 1);
9771             } else {
9772                 ret = -host_to_target_errno(errno);
9773            }
9774         }
9775         return ret;
9776 #endif
9777 #if defined(TARGET_NR_pwritev)
9778     case TARGET_NR_pwritev:
9779         {
9780             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9781             if (vec != NULL) {
9782                 unsigned long low, high;
9783 
9784                 target_to_host_low_high(arg4, arg5, &low, &high);
9785                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9786                 unlock_iovec(vec, arg2, arg3, 0);
9787             } else {
9788                 ret = -host_to_target_errno(errno);
9789            }
9790         }
9791         return ret;
9792 #endif
9793     case TARGET_NR_getsid:
9794         return get_errno(getsid(arg1));
9795 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9796     case TARGET_NR_fdatasync:
9797         return get_errno(fdatasync(arg1));
9798 #endif
9799 #ifdef TARGET_NR__sysctl
9800     case TARGET_NR__sysctl:
9801         /* We don't implement this, but ENOTDIR is always a safe
9802            return value. */
9803         return -TARGET_ENOTDIR;
9804 #endif
9805     case TARGET_NR_sched_getaffinity:
9806         {
9807             unsigned int mask_size;
9808             unsigned long *mask;
9809 
9810             /*
9811              * sched_getaffinity needs multiples of ulong, so we need to take
9812              * care of mismatches between target ulong and host ulong sizes.
9813              */
9814             if (arg2 & (sizeof(abi_ulong) - 1)) {
9815                 return -TARGET_EINVAL;
9816             }
9817             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
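                 /* e.g. a 32-bit guest asking for 4 bytes on a 64-bit host gets
                  * mask_size rounded up to 8, so the host kernel (which requires
                  * a multiple of sizeof(unsigned long)) accepts the request. */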
9818 
9819             mask = alloca(mask_size);
9820             memset(mask, 0, mask_size);
9821             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9822 
9823             if (!is_error(ret)) {
9824                 if (ret > arg2) {
9825                     /* More data was returned than will fit in the caller's buffer.
9826                      * This only happens if sizeof(abi_long) < sizeof(long)
9827                      * and the caller passed us a buffer holding an odd number
9828                      * of abi_longs. If the host kernel is actually using the
9829                      * extra 4 bytes then fail EINVAL; otherwise we can just
9830                      * ignore them and only copy the interesting part.
9831                      */
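                         /* e.g. arg2 == 12 on a 64-bit host: mask_size is 16, so
                          * the kernel may report 16 bytes; if the host has at most
                          * 96 (12 * 8) CPUs configured, nothing lives in the extra
                          * 4 bytes and we can safely claim 12. */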
9832                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9833                     if (numcpus > arg2 * 8) {
9834                         return -TARGET_EINVAL;
9835                     }
9836                     ret = arg2;
9837                 }
9838 
9839                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9840                     return -TARGET_EFAULT;
9841                 }
9842             }
9843         }
9844         return ret;
9845     case TARGET_NR_sched_setaffinity:
9846         {
9847             unsigned int mask_size;
9848             unsigned long *mask;
9849 
9850             /*
9851              * sched_setaffinity needs multiples of ulong, so we need to take
9852              * care of mismatches between target ulong and host ulong sizes.
9853              */
9854             if (arg2 & (sizeof(abi_ulong) - 1)) {
9855                 return -TARGET_EINVAL;
9856             }
9857             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9858             mask = alloca(mask_size);
9859 
9860             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9861             if (ret) {
9862                 return ret;
9863             }
9864 
9865             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9866         }
9867     case TARGET_NR_getcpu:
9868         {
9869             unsigned cpu, node;
9870             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9871                                        arg2 ? &node : NULL,
9872                                        NULL));
9873             if (is_error(ret)) {
9874                 return ret;
9875             }
9876             if (arg1 && put_user_u32(cpu, arg1)) {
9877                 return -TARGET_EFAULT;
9878             }
9879             if (arg2 && put_user_u32(node, arg2)) {
9880                 return -TARGET_EFAULT;
9881             }
9882         }
9883         return ret;
9884     case TARGET_NR_sched_setparam:
9885         {
9886             struct sched_param *target_schp;
9887             struct sched_param schp;
9888 
9889             if (arg2 == 0) {
9890                 return -TARGET_EINVAL;
9891             }
9892             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9893                 return -TARGET_EFAULT;
9894             schp.sched_priority = tswap32(target_schp->sched_priority);
9895             unlock_user_struct(target_schp, arg2, 0);
9896             return get_errno(sched_setparam(arg1, &schp));
9897         }
9898     case TARGET_NR_sched_getparam:
9899         {
9900             struct sched_param *target_schp;
9901             struct sched_param schp;
9902 
9903             if (arg2 == 0) {
9904                 return -TARGET_EINVAL;
9905             }
9906             ret = get_errno(sched_getparam(arg1, &schp));
9907             if (!is_error(ret)) {
9908                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9909                     return -TARGET_EFAULT;
9910                 target_schp->sched_priority = tswap32(schp.sched_priority);
9911                 unlock_user_struct(target_schp, arg2, 1);
9912             }
9913         }
9914         return ret;
9915     case TARGET_NR_sched_setscheduler:
9916         {
9917             struct sched_param *target_schp;
9918             struct sched_param schp;
9919             if (arg3 == 0) {
9920                 return -TARGET_EINVAL;
9921             }
9922             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9923                 return -TARGET_EFAULT;
9924             schp.sched_priority = tswap32(target_schp->sched_priority);
9925             unlock_user_struct(target_schp, arg3, 0);
9926             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9927         }
9928     case TARGET_NR_sched_getscheduler:
9929         return get_errno(sched_getscheduler(arg1));
9930     case TARGET_NR_sched_yield:
9931         return get_errno(sched_yield());
9932     case TARGET_NR_sched_get_priority_max:
9933         return get_errno(sched_get_priority_max(arg1));
9934     case TARGET_NR_sched_get_priority_min:
9935         return get_errno(sched_get_priority_min(arg1));
9936     case TARGET_NR_sched_rr_get_interval:
9937         {
9938             struct timespec ts;
9939             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9940             if (!is_error(ret)) {
9941                 ret = host_to_target_timespec(arg2, &ts);
9942             }
9943         }
9944         return ret;
9945     case TARGET_NR_nanosleep:
9946         {
9947             struct timespec req, rem;
9948             target_to_host_timespec(&req, arg1);
9949             ret = get_errno(safe_nanosleep(&req, &rem));
9950             if (is_error(ret) && arg2) {
9951                 host_to_target_timespec(arg2, &rem);
9952             }
9953         }
9954         return ret;
9955     case TARGET_NR_prctl:
9956         switch (arg1) {
9957         case PR_GET_PDEATHSIG:
9958         {
9959             int deathsig;
9960             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9961             if (!is_error(ret) && arg2
9962                 && put_user_ual(deathsig, arg2)) {
9963                 return -TARGET_EFAULT;
9964             }
9965             return ret;
9966         }
9967 #ifdef PR_GET_NAME
9968         case PR_GET_NAME:
9969         {
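                 /* 16 bytes matches the kernel's TASK_COMM_LEN, including the
                  * trailing NUL. */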
9970             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9971             if (!name) {
9972                 return -TARGET_EFAULT;
9973             }
9974             ret = get_errno(prctl(arg1, (unsigned long)name,
9975                                   arg3, arg4, arg5));
9976             unlock_user(name, arg2, 16);
9977             return ret;
9978         }
9979         case PR_SET_NAME:
9980         {
9981             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9982             if (!name) {
9983                 return -TARGET_EFAULT;
9984             }
9985             ret = get_errno(prctl(arg1, (unsigned long)name,
9986                                   arg3, arg4, arg5));
9987             unlock_user(name, arg2, 0);
9988             return ret;
9989         }
9990 #endif
9991 #ifdef TARGET_MIPS
9992         case TARGET_PR_GET_FP_MODE:
9993         {
9994             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9995             ret = 0;
9996             if (env->CP0_Status & (1 << CP0St_FR)) {
9997                 ret |= TARGET_PR_FP_MODE_FR;
9998             }
9999             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10000                 ret |= TARGET_PR_FP_MODE_FRE;
10001             }
10002             return ret;
10003         }
10004         case TARGET_PR_SET_FP_MODE:
10005         {
10006             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10007             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10008             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10009             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10010             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10011 
10012             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10013                                             TARGET_PR_FP_MODE_FRE;
10014 
10015             /* If nothing to change, return right away, successfully.  */
10016             if (old_fr == new_fr && old_fre == new_fre) {
10017                 return 0;
10018             }
10019             /* Check the value is valid */
10020             if (arg2 & ~known_bits) {
10021                 return -TARGET_EOPNOTSUPP;
10022             }
10023             /* Setting FRE without FR is not supported.  */
10024             if (new_fre && !new_fr) {
10025                 return -TARGET_EOPNOTSUPP;
10026             }
10027             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10028                 /* FR1 is not supported */
10029                 return -TARGET_EOPNOTSUPP;
10030             }
10031             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10032                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10033                 /* cannot set FR=0 */
10034                 return -TARGET_EOPNOTSUPP;
10035             }
10036             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10037                 /* Cannot set FRE=1 */
10038                 return -TARGET_EOPNOTSUPP;
10039             }
10040 
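                  /* In the FR=0 model a 64-bit FP value is split across an
                   * even/odd register pair, while with FR=1 it occupies the
                   * even register alone; repack the halves so register
                   * contents survive the mode switch. */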
10041             int i;
10042             fpr_t *fpr = env->active_fpu.fpr;
10043             for (i = 0; i < 32 ; i += 2) {
10044                 if (!old_fr && new_fr) {
10045                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10046                 } else if (old_fr && !new_fr) {
10047                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10048                 }
10049             }
10050 
10051             if (new_fr) {
10052                 env->CP0_Status |= (1 << CP0St_FR);
10053                 env->hflags |= MIPS_HFLAG_F64;
10054             } else {
10055                 env->CP0_Status &= ~(1 << CP0St_FR);
10056                 env->hflags &= ~MIPS_HFLAG_F64;
10057             }
10058             if (new_fre) {
10059                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10060                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10061                     env->hflags |= MIPS_HFLAG_FRE;
10062                 }
10063             } else {
10064                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10065                 env->hflags &= ~MIPS_HFLAG_FRE;
10066             }
10067 
10068             return 0;
10069         }
10070 #endif /* MIPS */
10071 #ifdef TARGET_AARCH64
10072         case TARGET_PR_SVE_SET_VL:
10073             /*
10074              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10075              * PR_SVE_VL_INHERIT.  Note the kernel definition
10076              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10077              * even though the current architectural maximum is VQ=16.
10078              */
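                  /* arg2 is the requested vector length in bytes; vq below is
                   * that length in 128-bit quadwords, clamped to this CPU's
                   * maximum, and the prctl returns the VL actually set. */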
10079             ret = -TARGET_EINVAL;
10080             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10081                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10082                 CPUARMState *env = cpu_env;
10083                 ARMCPU *cpu = env_archcpu(env);
10084                 uint32_t vq, old_vq;
10085 
10086                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10087                 vq = MAX(arg2 / 16, 1);
10088                 vq = MIN(vq, cpu->sve_max_vq);
10089 
10090                 if (vq < old_vq) {
10091                     aarch64_sve_narrow_vq(env, vq);
10092                 }
10093                 env->vfp.zcr_el[1] = vq - 1;
10094                 arm_rebuild_hflags(env);
10095                 ret = vq * 16;
10096             }
10097             return ret;
10098         case TARGET_PR_SVE_GET_VL:
10099             ret = -TARGET_EINVAL;
10100             {
10101                 ARMCPU *cpu = env_archcpu(cpu_env);
10102                 if (cpu_isar_feature(aa64_sve, cpu)) {
10103                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10104                 }
10105             }
10106             return ret;
10107         case TARGET_PR_PAC_RESET_KEYS:
10108             {
10109                 CPUARMState *env = cpu_env;
10110                 ARMCPU *cpu = env_archcpu(env);
10111 
10112                 if (arg3 || arg4 || arg5) {
10113                     return -TARGET_EINVAL;
10114                 }
10115                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10116                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10117                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10118                                TARGET_PR_PAC_APGAKEY);
10119                     int ret = 0;
10120                     Error *err = NULL;
10121 
10122                     if (arg2 == 0) {
10123                         arg2 = all;
10124                     } else if (arg2 & ~all) {
10125                         return -TARGET_EINVAL;
10126                     }
10127                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10128                         ret |= qemu_guest_getrandom(&env->keys.apia,
10129                                                     sizeof(ARMPACKey), &err);
10130                     }
10131                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10132                         ret |= qemu_guest_getrandom(&env->keys.apib,
10133                                                     sizeof(ARMPACKey), &err);
10134                     }
10135                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10136                         ret |= qemu_guest_getrandom(&env->keys.apda,
10137                                                     sizeof(ARMPACKey), &err);
10138                     }
10139                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10140                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10141                                                     sizeof(ARMPACKey), &err);
10142                     }
10143                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10144                         ret |= qemu_guest_getrandom(&env->keys.apga,
10145                                                     sizeof(ARMPACKey), &err);
10146                     }
10147                     if (ret != 0) {
10148                         /*
10149                          * Some unknown failure in the crypto.  The best
10150                          * we can do is log it and fail the syscall.
10151                          * The real syscall cannot fail this way.
10152                          */
10153                         qemu_log_mask(LOG_UNIMP,
10154                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10155                                       error_get_pretty(err));
10156                         error_free(err);
10157                         return -TARGET_EIO;
10158                     }
10159                     return 0;
10160                 }
10161             }
10162             return -TARGET_EINVAL;
10163 #endif /* AARCH64 */
10164         case PR_GET_SECCOMP:
10165         case PR_SET_SECCOMP:
10166             /* Disable seccomp to prevent the target from disabling syscalls
10167              * we need. */
10168             return -TARGET_EINVAL;
10169         default:
10170             /* Most prctl options have no pointer arguments */
10171             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10172         }
10173         break;
10174 #ifdef TARGET_NR_arch_prctl
10175     case TARGET_NR_arch_prctl:
10176 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10177         return do_arch_prctl(cpu_env, arg1, arg2);
10178 #else
10179 #error unreachable
10180 #endif
10181 #endif
10182 #ifdef TARGET_NR_pread64
10183     case TARGET_NR_pread64:
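              /* Some 32-bit ABIs pass 64-bit values in aligned register pairs,
               * which shifts the offset halves up by one argument slot; the
               * offset itself is reassembled by target_offset64() below (the
               * same adjustment is made for pwrite64). */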
10184         if (regpairs_aligned(cpu_env, num)) {
10185             arg4 = arg5;
10186             arg5 = arg6;
10187         }
10188         if (arg2 == 0 && arg3 == 0) {
10189             /* Special-case NULL buffer and zero length, which should succeed */
10190             p = 0;
10191         } else {
10192             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10193             if (!p) {
10194                 return -TARGET_EFAULT;
10195             }
10196         }
10197         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10198         unlock_user(p, arg2, ret);
10199         return ret;
10200     case TARGET_NR_pwrite64:
10201         if (regpairs_aligned(cpu_env, num)) {
10202             arg4 = arg5;
10203             arg5 = arg6;
10204         }
10205         if (arg2 == 0 && arg3 == 0) {
10206             /* Special-case NULL buffer and zero length, which should succeed */
10207             p = 0;
10208         } else {
10209             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10210             if (!p) {
10211                 return -TARGET_EFAULT;
10212             }
10213         }
10214         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10215         unlock_user(p, arg2, 0);
10216         return ret;
10217 #endif
10218     case TARGET_NR_getcwd:
10219         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10220             return -TARGET_EFAULT;
10221         ret = get_errno(sys_getcwd1(p, arg2));
10222         unlock_user(p, arg1, ret);
10223         return ret;
10224     case TARGET_NR_capget:
10225     case TARGET_NR_capset:
10226     {
10227         struct target_user_cap_header *target_header;
10228         struct target_user_cap_data *target_data = NULL;
10229         struct __user_cap_header_struct header;
10230         struct __user_cap_data_struct data[2];
10231         struct __user_cap_data_struct *dataptr = NULL;
10232         int i, target_datalen;
10233         int data_items = 1;
10234 
10235         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10236             return -TARGET_EFAULT;
10237         }
10238         header.version = tswap32(target_header->version);
10239         header.pid = tswap32(target_header->pid);
10240 
10241         if (header.version != _LINUX_CAPABILITY_VERSION) {
10242             /* Version 2 and up take a pointer to two user_data structs */
10243             data_items = 2;
10244         }
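              /* _LINUX_CAPABILITY_VERSION here is the v1 ABI; the v2 and v3
               * ABIs describe 64-bit capability sets using two 32-bit data
               * structs each. */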
10245 
10246         target_datalen = sizeof(*target_data) * data_items;
10247 
10248         if (arg2) {
10249             if (num == TARGET_NR_capget) {
10250                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10251             } else {
10252                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10253             }
10254             if (!target_data) {
10255                 unlock_user_struct(target_header, arg1, 0);
10256                 return -TARGET_EFAULT;
10257             }
10258 
10259             if (num == TARGET_NR_capset) {
10260                 for (i = 0; i < data_items; i++) {
10261                     data[i].effective = tswap32(target_data[i].effective);
10262                     data[i].permitted = tswap32(target_data[i].permitted);
10263                     data[i].inheritable = tswap32(target_data[i].inheritable);
10264                 }
10265             }
10266 
10267             dataptr = data;
10268         }
10269 
10270         if (num == TARGET_NR_capget) {
10271             ret = get_errno(capget(&header, dataptr));
10272         } else {
10273             ret = get_errno(capset(&header, dataptr));
10274         }
10275 
10276         /* The kernel always updates version for both capget and capset */
10277         target_header->version = tswap32(header.version);
10278         unlock_user_struct(target_header, arg1, 1);
10279 
10280         if (arg2) {
10281             if (num == TARGET_NR_capget) {
10282                 for (i = 0; i < data_items; i++) {
10283                     target_data[i].effective = tswap32(data[i].effective);
10284                     target_data[i].permitted = tswap32(data[i].permitted);
10285                     target_data[i].inheritable = tswap32(data[i].inheritable);
10286                 }
10287                 unlock_user(target_data, arg2, target_datalen);
10288             } else {
10289                 unlock_user(target_data, arg2, 0);
10290             }
10291         }
10292         return ret;
10293     }
10294     case TARGET_NR_sigaltstack:
10295         return do_sigaltstack(arg1, arg2,
10296                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10297 
10298 #ifdef CONFIG_SENDFILE
10299 #ifdef TARGET_NR_sendfile
10300     case TARGET_NR_sendfile:
10301     {
10302         off_t *offp = NULL;
10303         off_t off;
10304         if (arg3) {
10305             ret = get_user_sal(off, arg3);
10306             if (is_error(ret)) {
10307                 return ret;
10308             }
10309             offp = &off;
10310         }
10311         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10312         if (!is_error(ret) && arg3) {
10313             abi_long ret2 = put_user_sal(off, arg3);
10314             if (is_error(ret2)) {
10315                 ret = ret2;
10316             }
10317         }
10318         return ret;
10319     }
10320 #endif
10321 #ifdef TARGET_NR_sendfile64
10322     case TARGET_NR_sendfile64:
10323     {
10324         off_t *offp = NULL;
10325         off_t off;
10326         if (arg3) {
10327             ret = get_user_s64(off, arg3);
10328             if (is_error(ret)) {
10329                 return ret;
10330             }
10331             offp = &off;
10332         }
10333         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10334         if (!is_error(ret) && arg3) {
10335             abi_long ret2 = put_user_s64(off, arg3);
10336             if (is_error(ret2)) {
10337                 ret = ret2;
10338             }
10339         }
10340         return ret;
10341     }
10342 #endif
10343 #endif
10344 #ifdef TARGET_NR_vfork
10345     case TARGET_NR_vfork:
10346         return get_errno(do_fork(cpu_env,
10347                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10348                          0, 0, 0, 0));
10349 #endif
10350 #ifdef TARGET_NR_ugetrlimit
10351     case TARGET_NR_ugetrlimit:
10352     {
10353         struct rlimit rlim;
10354         int resource = target_to_host_resource(arg1);
10355         ret = get_errno(getrlimit(resource, &rlim));
10356         if (!is_error(ret)) {
10357             struct target_rlimit *target_rlim;
10358             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10359                 return -TARGET_EFAULT;
10360             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10361             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10362             unlock_user_struct(target_rlim, arg2, 1);
10363         }
10364         return ret;
10365     }
10366 #endif
10367 #ifdef TARGET_NR_truncate64
10368     case TARGET_NR_truncate64:
10369         if (!(p = lock_user_string(arg1)))
10370             return -TARGET_EFAULT;
10371         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10372         unlock_user(p, arg1, 0);
10373         return ret;
10374 #endif
10375 #ifdef TARGET_NR_ftruncate64
10376     case TARGET_NR_ftruncate64:
10377         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10378 #endif
10379 #ifdef TARGET_NR_stat64
10380     case TARGET_NR_stat64:
10381         if (!(p = lock_user_string(arg1))) {
10382             return -TARGET_EFAULT;
10383         }
10384         ret = get_errno(stat(path(p), &st));
10385         unlock_user(p, arg1, 0);
10386         if (!is_error(ret))
10387             ret = host_to_target_stat64(cpu_env, arg2, &st);
10388         return ret;
10389 #endif
10390 #ifdef TARGET_NR_lstat64
10391     case TARGET_NR_lstat64:
10392         if (!(p = lock_user_string(arg1))) {
10393             return -TARGET_EFAULT;
10394         }
10395         ret = get_errno(lstat(path(p), &st));
10396         unlock_user(p, arg1, 0);
10397         if (!is_error(ret))
10398             ret = host_to_target_stat64(cpu_env, arg2, &st);
10399         return ret;
10400 #endif
10401 #ifdef TARGET_NR_fstat64
10402     case TARGET_NR_fstat64:
10403         ret = get_errno(fstat(arg1, &st));
10404         if (!is_error(ret))
10405             ret = host_to_target_stat64(cpu_env, arg2, &st);
10406         return ret;
10407 #endif
10408 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10409 #ifdef TARGET_NR_fstatat64
10410     case TARGET_NR_fstatat64:
10411 #endif
10412 #ifdef TARGET_NR_newfstatat
10413     case TARGET_NR_newfstatat:
10414 #endif
10415         if (!(p = lock_user_string(arg2))) {
10416             return -TARGET_EFAULT;
10417         }
10418         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10419         unlock_user(p, arg2, 0);
10420         if (!is_error(ret))
10421             ret = host_to_target_stat64(cpu_env, arg3, &st);
10422         return ret;
10423 #endif
10424 #if defined(TARGET_NR_statx)
10425     case TARGET_NR_statx:
10426         {
10427             struct target_statx *target_stx;
10428             int dirfd = arg1;
10429             int flags = arg3;
10430 
10431             p = lock_user_string(arg2);
10432             if (p == NULL) {
10433                 return -TARGET_EFAULT;
10434             }
10435 #if defined(__NR_statx)
10436             {
10437                 /*
10438                  * It is assumed that struct statx is architecture independent.
10439                  */
10440                 struct target_statx host_stx;
10441                 int mask = arg4;
10442 
10443                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10444                 if (!is_error(ret)) {
10445                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10446                         unlock_user(p, arg2, 0);
10447                         return -TARGET_EFAULT;
10448                     }
10449                 }
10450 
10451                 if (ret != -TARGET_ENOSYS) {
10452                     unlock_user(p, arg2, 0);
10453                     return ret;
10454                 }
10455             }
10456 #endif
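                  /* Fall back to fstatat() and fill in the statx fields that a
                   * plain struct stat can provide; the remaining fields stay
                   * zeroed from the memset() below. */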
10457             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10458             unlock_user(p, arg2, 0);
10459 
10460             if (!is_error(ret)) {
10461                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10462                     return -TARGET_EFAULT;
10463                 }
10464                 memset(target_stx, 0, sizeof(*target_stx));
10465                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10466                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10467                 __put_user(st.st_ino, &target_stx->stx_ino);
10468                 __put_user(st.st_mode, &target_stx->stx_mode);
10469                 __put_user(st.st_uid, &target_stx->stx_uid);
10470                 __put_user(st.st_gid, &target_stx->stx_gid);
10471                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10472                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10473                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10474                 __put_user(st.st_size, &target_stx->stx_size);
10475                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10476                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10477                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10478                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10479                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10480                 unlock_user_struct(target_stx, arg5, 1);
10481             }
10482         }
10483         return ret;
10484 #endif
10485 #ifdef TARGET_NR_lchown
10486     case TARGET_NR_lchown:
10487         if (!(p = lock_user_string(arg1)))
10488             return -TARGET_EFAULT;
10489         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10490         unlock_user(p, arg1, 0);
10491         return ret;
10492 #endif
10493 #ifdef TARGET_NR_getuid
10494     case TARGET_NR_getuid:
10495         return get_errno(high2lowuid(getuid()));
10496 #endif
10497 #ifdef TARGET_NR_getgid
10498     case TARGET_NR_getgid:
10499         return get_errno(high2lowgid(getgid()));
10500 #endif
10501 #ifdef TARGET_NR_geteuid
10502     case TARGET_NR_geteuid:
10503         return get_errno(high2lowuid(geteuid()));
10504 #endif
10505 #ifdef TARGET_NR_getegid
10506     case TARGET_NR_getegid:
10507         return get_errno(high2lowgid(getegid()));
10508 #endif
10509     case TARGET_NR_setreuid:
10510         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10511     case TARGET_NR_setregid:
10512         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10513     case TARGET_NR_getgroups:
10514         {
10515             int gidsetsize = arg1;
10516             target_id *target_grouplist;
10517             gid_t *grouplist;
10518             int i;
10519 
10520             grouplist = alloca(gidsetsize * sizeof(gid_t));
10521             ret = get_errno(getgroups(gidsetsize, grouplist));
10522             if (gidsetsize == 0)
10523                 return ret;
10524             if (!is_error(ret)) {
10525                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10526                 if (!target_grouplist)
10527                     return -TARGET_EFAULT;
10528                 for(i = 0;i < ret; i++)
10529                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10530                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10531             }
10532         }
10533         return ret;
10534     case TARGET_NR_setgroups:
10535         {
10536             int gidsetsize = arg1;
10537             target_id *target_grouplist;
10538             gid_t *grouplist = NULL;
10539             int i;
10540             if (gidsetsize) {
10541                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10542                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10543                 if (!target_grouplist) {
10544                     return -TARGET_EFAULT;
10545                 }
10546                 for (i = 0; i < gidsetsize; i++) {
10547                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10548                 }
10549                 unlock_user(target_grouplist, arg2, 0);
10550             }
10551             return get_errno(setgroups(gidsetsize, grouplist));
10552         }
10553     case TARGET_NR_fchown:
10554         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10555 #if defined(TARGET_NR_fchownat)
10556     case TARGET_NR_fchownat:
10557         if (!(p = lock_user_string(arg2)))
10558             return -TARGET_EFAULT;
10559         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10560                                  low2highgid(arg4), arg5));
10561         unlock_user(p, arg2, 0);
10562         return ret;
10563 #endif
10564 #ifdef TARGET_NR_setresuid
10565     case TARGET_NR_setresuid:
10566         return get_errno(sys_setresuid(low2highuid(arg1),
10567                                        low2highuid(arg2),
10568                                        low2highuid(arg3)));
10569 #endif
10570 #ifdef TARGET_NR_getresuid
10571     case TARGET_NR_getresuid:
10572         {
10573             uid_t ruid, euid, suid;
10574             ret = get_errno(getresuid(&ruid, &euid, &suid));
10575             if (!is_error(ret)) {
10576                 if (put_user_id(high2lowuid(ruid), arg1)
10577                     || put_user_id(high2lowuid(euid), arg2)
10578                     || put_user_id(high2lowuid(suid), arg3))
10579                     return -TARGET_EFAULT;
10580             }
10581         }
10582         return ret;
10583 #endif
10584 #ifdef TARGET_NR_setresgid
10585     case TARGET_NR_setresgid:
10586         return get_errno(sys_setresgid(low2highgid(arg1),
10587                                        low2highgid(arg2),
10588                                        low2highgid(arg3)));
10589 #endif
10590 #ifdef TARGET_NR_getresgid
10591     case TARGET_NR_getresgid:
10592         {
10593             gid_t rgid, egid, sgid;
10594             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10595             if (!is_error(ret)) {
10596                 if (put_user_id(high2lowgid(rgid), arg1)
10597                     || put_user_id(high2lowgid(egid), arg2)
10598                     || put_user_id(high2lowgid(sgid), arg3))
10599                     return -TARGET_EFAULT;
10600             }
10601         }
10602         return ret;
10603 #endif
10604 #ifdef TARGET_NR_chown
10605     case TARGET_NR_chown:
10606         if (!(p = lock_user_string(arg1)))
10607             return -TARGET_EFAULT;
10608         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10609         unlock_user(p, arg1, 0);
10610         return ret;
10611 #endif
10612     case TARGET_NR_setuid:
10613         return get_errno(sys_setuid(low2highuid(arg1)));
10614     case TARGET_NR_setgid:
10615         return get_errno(sys_setgid(low2highgid(arg1)));
10616     case TARGET_NR_setfsuid:
10617         return get_errno(setfsuid(arg1));
10618     case TARGET_NR_setfsgid:
10619         return get_errno(setfsgid(arg1));
10620 
10621 #ifdef TARGET_NR_lchown32
10622     case TARGET_NR_lchown32:
10623         if (!(p = lock_user_string(arg1)))
10624             return -TARGET_EFAULT;
10625         ret = get_errno(lchown(p, arg2, arg3));
10626         unlock_user(p, arg1, 0);
10627         return ret;
10628 #endif
10629 #ifdef TARGET_NR_getuid32
10630     case TARGET_NR_getuid32:
10631         return get_errno(getuid());
10632 #endif
10633 
10634 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10635    /* Alpha specific */
10636     case TARGET_NR_getxuid:
10637         {
10638             uid_t euid;
10639             euid = geteuid();
10640             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10641         }
10642         return get_errno(getuid());
10643 #endif
10644 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10645    /* Alpha specific */
10646     case TARGET_NR_getxgid:
10647         {
10648             gid_t egid;
10649             egid = getegid();
10650             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10651         }
10652         return get_errno(getgid());
10653 #endif
10654 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10655     /* Alpha specific */
10656     case TARGET_NR_osf_getsysinfo:
10657         ret = -TARGET_EOPNOTSUPP;
10658         switch (arg1) {
10659           case TARGET_GSI_IEEE_FP_CONTROL:
10660             {
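                      /*
                       * The guest-visible software completion register keeps
                       * only the trap-enable and mapping bits; the accumulated
                       * exception status lives in the hardware FPCR (see the
                       * SSI_IEEE_FP_CONTROL handler below), so fold the FPCR
                       * status bits back into the value returned to the guest.
                       */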
10661                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10662                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10663 
10664                 swcr &= ~SWCR_STATUS_MASK;
10665                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10666 
10667                 if (put_user_u64 (swcr, arg2))
10668                         return -TARGET_EFAULT;
10669                 ret = 0;
10670             }
10671             break;
10672 
10673           /* case GSI_IEEE_STATE_AT_SIGNAL:
10674              -- Not implemented in linux kernel.
10675              case GSI_UACPROC:
10676              -- Retrieves current unaligned access state; not much used.
10677              case GSI_PROC_TYPE:
10678              -- Retrieves implver information; surely not used.
10679              case GSI_GET_HWRPB:
10680              -- Grabs a copy of the HWRPB; surely not used.
10681           */
10682         }
10683         return ret;
10684 #endif
10685 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10686     /* Alpha specific */
10687     case TARGET_NR_osf_setsysinfo:
10688         ret = -TARGET_EOPNOTSUPP;
10689         switch (arg1) {
10690           case TARGET_SSI_IEEE_FP_CONTROL:
10691             {
10692                 uint64_t swcr, fpcr;
10693 
10694                 if (get_user_u64 (swcr, arg2)) {
10695                     return -TARGET_EFAULT;
10696                 }
10697 
10698                 /*
10699                  * The kernel calls swcr_update_status to update the
10700                  * status bits from the fpcr at every point that it
10701                  * could be queried.  Therefore, we store the status
10702                  * bits only in FPCR.
10703                  */
10704                 ((CPUAlphaState *)cpu_env)->swcr
10705                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10706 
10707                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10708                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10709                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10710                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10711                 ret = 0;
10712             }
10713             break;
10714 
10715           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10716             {
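                      /*
                       * The guest passes a set of exception status bits to
                       * raise.  Bits that are not already pending in the FPCR
                       * and whose trap-enable bits are set in the software
                       * completion register are delivered as SIGFPE, with
                       * si_code taken from the last matching check below.
                       */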
10717                 uint64_t exc, fpcr, fex;
10718 
10719                 if (get_user_u64(exc, arg2)) {
10720                     return -TARGET_EFAULT;
10721                 }
10722                 exc &= SWCR_STATUS_MASK;
10723                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10724 
10725                 /* Old exceptions are not signaled.  */
10726                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10727                 fex = exc & ~fex;
10728                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10729                 fex &= ((CPUArchState *)cpu_env)->swcr;
10730 
10731                 /* Update the hardware fpcr.  */
10732                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10733                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10734 
10735                 if (fex) {
10736                     int si_code = TARGET_FPE_FLTUNK;
10737                     target_siginfo_t info;
10738 
10739                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10740                         si_code = TARGET_FPE_FLTUND;
10741                     }
10742                     if (fex & SWCR_TRAP_ENABLE_INE) {
10743                         si_code = TARGET_FPE_FLTRES;
10744                     }
10745                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10746                         si_code = TARGET_FPE_FLTUND;
10747                     }
10748                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10749                         si_code = TARGET_FPE_FLTOVF;
10750                     }
10751                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10752                         si_code = TARGET_FPE_FLTDIV;
10753                     }
10754                     if (fex & SWCR_TRAP_ENABLE_INV) {
10755                         si_code = TARGET_FPE_FLTINV;
10756                     }
10757 
10758                     info.si_signo = SIGFPE;
10759                     info.si_errno = 0;
10760                     info.si_code = si_code;
10761                     info._sifields._sigfault._addr
10762                         = ((CPUArchState *)cpu_env)->pc;
10763                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10764                                  QEMU_SI_FAULT, &info);
10765                 }
10766                 ret = 0;
10767             }
10768             break;
10769 
10770           /* case SSI_NVPAIRS:
10771              -- Used with SSIN_UACPROC to enable unaligned accesses.
10772              case SSI_IEEE_STATE_AT_SIGNAL:
10773              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10774              -- Not implemented in linux kernel
10775           */
10776         }
10777         return ret;
10778 #endif
10779 #ifdef TARGET_NR_osf_sigprocmask
10780     /* Alpha specific.  */
10781     case TARGET_NR_osf_sigprocmask:
10782         {
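                  /*
                   * OSF/1 sigprocmask passes the mask by value and returns the
                   * old mask as the syscall result rather than through a
                   * pointer, so convert via the old-style sigset layout and
                   * hand the previous mask back in the return value.
                   */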
10783             abi_ulong mask;
10784             int how;
10785             sigset_t set, oldset;
10786 
10787             switch(arg1) {
10788             case TARGET_SIG_BLOCK:
10789                 how = SIG_BLOCK;
10790                 break;
10791             case TARGET_SIG_UNBLOCK:
10792                 how = SIG_UNBLOCK;
10793                 break;
10794             case TARGET_SIG_SETMASK:
10795                 how = SIG_SETMASK;
10796                 break;
10797             default:
10798                 return -TARGET_EINVAL;
10799             }
10800             mask = arg2;
10801             target_to_host_old_sigset(&set, &mask);
10802             ret = do_sigprocmask(how, &set, &oldset);
10803             if (!ret) {
10804                 host_to_target_old_sigset(&mask, &oldset);
10805                 ret = mask;
10806             }
10807         }
10808         return ret;
10809 #endif
10810 
10811 #ifdef TARGET_NR_getgid32
10812     case TARGET_NR_getgid32:
10813         return get_errno(getgid());
10814 #endif
10815 #ifdef TARGET_NR_geteuid32
10816     case TARGET_NR_geteuid32:
10817         return get_errno(geteuid());
10818 #endif
10819 #ifdef TARGET_NR_getegid32
10820     case TARGET_NR_getegid32:
10821         return get_errno(getegid());
10822 #endif
10823 #ifdef TARGET_NR_setreuid32
10824     case TARGET_NR_setreuid32:
10825         return get_errno(setreuid(arg1, arg2));
10826 #endif
10827 #ifdef TARGET_NR_setregid32
10828     case TARGET_NR_setregid32:
10829         return get_errno(setregid(arg1, arg2));
10830 #endif
10831 #ifdef TARGET_NR_getgroups32
10832     case TARGET_NR_getgroups32:
10833         {
10834             int gidsetsize = arg1;
10835             uint32_t *target_grouplist;
10836             gid_t *grouplist;
10837             int i;
10838 
10839             grouplist = alloca(gidsetsize * sizeof(gid_t));
10840             ret = get_errno(getgroups(gidsetsize, grouplist));
10841             if (gidsetsize == 0)
10842                 return ret;
10843             if (!is_error(ret)) {
10844                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10845                 if (!target_grouplist) {
10846                     return -TARGET_EFAULT;
10847                 }
10848                 for (i = 0; i < ret; i++)
10849                     target_grouplist[i] = tswap32(grouplist[i]);
10850                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10851             }
10852         }
10853         return ret;
10854 #endif
10855 #ifdef TARGET_NR_setgroups32
10856     case TARGET_NR_setgroups32:
10857         {
10858             int gidsetsize = arg1;
10859             uint32_t *target_grouplist;
10860             gid_t *grouplist;
10861             int i;
10862 
10863             grouplist = alloca(gidsetsize * sizeof(gid_t));
10864             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10865             if (!target_grouplist) {
10866                 return -TARGET_EFAULT;
10867             }
10868             for (i = 0; i < gidsetsize; i++)
10869                 grouplist[i] = tswap32(target_grouplist[i]);
10870             unlock_user(target_grouplist, arg2, 0);
10871             return get_errno(setgroups(gidsetsize, grouplist));
10872         }
10873 #endif
10874 #ifdef TARGET_NR_fchown32
10875     case TARGET_NR_fchown32:
10876         return get_errno(fchown(arg1, arg2, arg3));
10877 #endif
10878 #ifdef TARGET_NR_setresuid32
10879     case TARGET_NR_setresuid32:
10880         return get_errno(sys_setresuid(arg1, arg2, arg3));
10881 #endif
10882 #ifdef TARGET_NR_getresuid32
10883     case TARGET_NR_getresuid32:
10884         {
10885             uid_t ruid, euid, suid;
10886             ret = get_errno(getresuid(&ruid, &euid, &suid));
10887             if (!is_error(ret)) {
10888                 if (put_user_u32(ruid, arg1)
10889                     || put_user_u32(euid, arg2)
10890                     || put_user_u32(suid, arg3))
10891                     return -TARGET_EFAULT;
10892             }
10893         }
10894         return ret;
10895 #endif
10896 #ifdef TARGET_NR_setresgid32
10897     case TARGET_NR_setresgid32:
10898         return get_errno(sys_setresgid(arg1, arg2, arg3));
10899 #endif
10900 #ifdef TARGET_NR_getresgid32
10901     case TARGET_NR_getresgid32:
10902         {
10903             gid_t rgid, egid, sgid;
10904             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10905             if (!is_error(ret)) {
10906                 if (put_user_u32(rgid, arg1)
10907                     || put_user_u32(egid, arg2)
10908                     || put_user_u32(sgid, arg3))
10909                     return -TARGET_EFAULT;
10910             }
10911         }
10912         return ret;
10913 #endif
10914 #ifdef TARGET_NR_chown32
10915     case TARGET_NR_chown32:
10916         if (!(p = lock_user_string(arg1)))
10917             return -TARGET_EFAULT;
10918         ret = get_errno(chown(p, arg2, arg3));
10919         unlock_user(p, arg1, 0);
10920         return ret;
10921 #endif
10922 #ifdef TARGET_NR_setuid32
10923     case TARGET_NR_setuid32:
10924         return get_errno(sys_setuid(arg1));
10925 #endif
10926 #ifdef TARGET_NR_setgid32
10927     case TARGET_NR_setgid32:
10928         return get_errno(sys_setgid(arg1));
10929 #endif
10930 #ifdef TARGET_NR_setfsuid32
10931     case TARGET_NR_setfsuid32:
10932         return get_errno(setfsuid(arg1));
10933 #endif
10934 #ifdef TARGET_NR_setfsgid32
10935     case TARGET_NR_setfsgid32:
10936         return get_errno(setfsgid(arg1));
10937 #endif
10938 #ifdef TARGET_NR_mincore
10939     case TARGET_NR_mincore:
10940         {
10941             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10942             if (!a) {
10943                 return -TARGET_ENOMEM;
10944             }
10945             p = lock_user_string(arg3);
10946             if (!p) {
10947                 ret = -TARGET_EFAULT;
10948             } else {
10949                 ret = get_errno(mincore(a, arg2, p));
10950                 unlock_user(p, arg3, ret);
10951             }
10952             unlock_user(a, arg1, 0);
10953         }
10954         return ret;
10955 #endif
10956 #ifdef TARGET_NR_arm_fadvise64_64
10957     case TARGET_NR_arm_fadvise64_64:
10958         /* arm_fadvise64_64 looks like fadvise64_64 but
10959          * with different argument order: fd, advice, offset, len
10960          * rather than the usual fd, offset, len, advice.
10961          * Note that offset and len are both 64-bit so appear as
10962          * pairs of 32-bit registers.
10963          */
10964         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10965                             target_offset64(arg5, arg6), arg2);
10966         return -host_to_target_errno(ret);
10967 #endif
10968 
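      /*
       * On 32-bit ABIs, 64-bit syscall arguments arrive as pairs of 32-bit
       * registers.  Some ABIs require such a pair to start on an even-numbered
       * register, inserting a padding argument first; regpairs_aligned()
       * reports when that happened so the arguments can be shifted down before
       * the halves are recombined with target_offset64().
       */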
10969 #if TARGET_ABI_BITS == 32
10970 
10971 #ifdef TARGET_NR_fadvise64_64
10972     case TARGET_NR_fadvise64_64:
10973 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10974         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10975         ret = arg2;
10976         arg2 = arg3;
10977         arg3 = arg4;
10978         arg4 = arg5;
10979         arg5 = arg6;
10980         arg6 = ret;
10981 #else
10982         /* 6 args: fd, offset (high, low), len (high, low), advice */
10983         if (regpairs_aligned(cpu_env, num)) {
10984             /* offset is in (3,4), len in (5,6) and advice in 7 */
10985             arg2 = arg3;
10986             arg3 = arg4;
10987             arg4 = arg5;
10988             arg5 = arg6;
10989             arg6 = arg7;
10990         }
10991 #endif
10992         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10993                             target_offset64(arg4, arg5), arg6);
10994         return -host_to_target_errno(ret);
10995 #endif
10996 
10997 #ifdef TARGET_NR_fadvise64
10998     case TARGET_NR_fadvise64:
10999         /* 5 args: fd, offset (high, low), len, advice */
11000         if (regpairs_aligned(cpu_env, num)) {
11001             /* offset is in (3,4), len in 5 and advice in 6 */
11002             arg2 = arg3;
11003             arg3 = arg4;
11004             arg4 = arg5;
11005             arg5 = arg6;
11006         }
11007         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11008         return -host_to_target_errno(ret);
11009 #endif
11010 
11011 #else /* not a 32-bit ABI */
11012 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11013 #ifdef TARGET_NR_fadvise64_64
11014     case TARGET_NR_fadvise64_64:
11015 #endif
11016 #ifdef TARGET_NR_fadvise64
11017     case TARGET_NR_fadvise64:
11018 #endif
11019 #ifdef TARGET_S390X
11020         switch (arg4) {
11021         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11022         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11023         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11024         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11025         default: break;
11026         }
11027 #endif
11028         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11029 #endif
11030 #endif /* end of 64-bit ABI fadvise handling */
11031 
11032 #ifdef TARGET_NR_madvise
11033     case TARGET_NR_madvise:
11034         /* A straight passthrough may not be safe because qemu sometimes
11035            turns private file-backed mappings into anonymous mappings.
11036            This will break MADV_DONTNEED.
11037            This is a hint, so ignoring and returning success is ok.  */
11038         return 0;
11039 #endif
11040 #if TARGET_ABI_BITS == 32
11041     case TARGET_NR_fcntl64:
11042     {
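              /*
               * fcntl64 on 32-bit targets uses struct flock64.  ARM OABI lays
               * the structure out differently from EABI, so pick the matching
               * copy helpers; F_GETLK64/F_SETLK64/F_SETLKW64 are converted
               * here and every other command is forwarded to do_fcntl().
               */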
11043         int cmd;
11044         struct flock64 fl;
11045         from_flock64_fn *copyfrom = copy_from_user_flock64;
11046         to_flock64_fn *copyto = copy_to_user_flock64;
11047 
11048 #ifdef TARGET_ARM
11049         if (!((CPUARMState *)cpu_env)->eabi) {
11050             copyfrom = copy_from_user_oabi_flock64;
11051             copyto = copy_to_user_oabi_flock64;
11052         }
11053 #endif
11054 
11055         cmd = target_to_host_fcntl_cmd(arg2);
11056         if (cmd == -TARGET_EINVAL) {
11057             return cmd;
11058         }
11059 
11060         switch(arg2) {
11061         case TARGET_F_GETLK64:
11062             ret = copyfrom(&fl, arg3);
11063             if (ret) {
11064                 break;
11065             }
11066             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11067             if (ret == 0) {
11068                 ret = copyto(arg3, &fl);
11069             }
11070             break;
11071 
11072         case TARGET_F_SETLK64:
11073         case TARGET_F_SETLKW64:
11074             ret = copyfrom(&fl, arg3);
11075             if (ret) {
11076                 break;
11077             }
11078             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11079             break;
11080         default:
11081             ret = do_fcntl(arg1, arg2, arg3);
11082             break;
11083         }
11084         return ret;
11085     }
11086 #endif
11087 #ifdef TARGET_NR_cacheflush
11088     case TARGET_NR_cacheflush:
11089         /* self-modifying code is handled automatically, so nothing needed */
11090         return 0;
11091 #endif
11092 #ifdef TARGET_NR_getpagesize
11093     case TARGET_NR_getpagesize:
11094         return TARGET_PAGE_SIZE;
11095 #endif
11096     case TARGET_NR_gettid:
11097         return get_errno(sys_gettid());
11098 #ifdef TARGET_NR_readahead
11099     case TARGET_NR_readahead:
11100 #if TARGET_ABI_BITS == 32
11101         if (regpairs_aligned(cpu_env, num)) {
11102             arg2 = arg3;
11103             arg3 = arg4;
11104             arg4 = arg5;
11105         }
11106         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11107 #else
11108         ret = get_errno(readahead(arg1, arg2, arg3));
11109 #endif
11110         return ret;
11111 #endif
11112 #ifdef CONFIG_ATTR
11113 #ifdef TARGET_NR_setxattr
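          /*
           * The extended attribute syscalls move opaque byte buffers, so these
           * wrappers only lock and unlock the guest name/value/list buffers
           * around the host call; a zero buffer pointer (size query) is passed
           * through unchanged.
           */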
11114     case TARGET_NR_listxattr:
11115     case TARGET_NR_llistxattr:
11116     {
11117         void *p, *b = 0;
11118         if (arg2) {
11119             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11120             if (!b) {
11121                 return -TARGET_EFAULT;
11122             }
11123         }
11124         p = lock_user_string(arg1);
11125         if (p) {
11126             if (num == TARGET_NR_listxattr) {
11127                 ret = get_errno(listxattr(p, b, arg3));
11128             } else {
11129                 ret = get_errno(llistxattr(p, b, arg3));
11130             }
11131         } else {
11132             ret = -TARGET_EFAULT;
11133         }
11134         unlock_user(p, arg1, 0);
11135         unlock_user(b, arg2, arg3);
11136         return ret;
11137     }
11138     case TARGET_NR_flistxattr:
11139     {
11140         void *b = 0;
11141         if (arg2) {
11142             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11143             if (!b) {
11144                 return -TARGET_EFAULT;
11145             }
11146         }
11147         ret = get_errno(flistxattr(arg1, b, arg3));
11148         unlock_user(b, arg2, arg3);
11149         return ret;
11150     }
11151     case TARGET_NR_setxattr:
11152     case TARGET_NR_lsetxattr:
11153         {
11154             void *p, *n, *v = 0;
11155             if (arg3) {
11156                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11157                 if (!v) {
11158                     return -TARGET_EFAULT;
11159                 }
11160             }
11161             p = lock_user_string(arg1);
11162             n = lock_user_string(arg2);
11163             if (p && n) {
11164                 if (num == TARGET_NR_setxattr) {
11165                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11166                 } else {
11167                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11168                 }
11169             } else {
11170                 ret = -TARGET_EFAULT;
11171             }
11172             unlock_user(p, arg1, 0);
11173             unlock_user(n, arg2, 0);
11174             unlock_user(v, arg3, 0);
11175         }
11176         return ret;
11177     case TARGET_NR_fsetxattr:
11178         {
11179             void *n, *v = 0;
11180             if (arg3) {
11181                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11182                 if (!v) {
11183                     return -TARGET_EFAULT;
11184                 }
11185             }
11186             n = lock_user_string(arg2);
11187             if (n) {
11188                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11189             } else {
11190                 ret = -TARGET_EFAULT;
11191             }
11192             unlock_user(n, arg2, 0);
11193             unlock_user(v, arg3, 0);
11194         }
11195         return ret;
11196     case TARGET_NR_getxattr:
11197     case TARGET_NR_lgetxattr:
11198         {
11199             void *p, *n, *v = 0;
11200             if (arg3) {
11201                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11202                 if (!v) {
11203                     return -TARGET_EFAULT;
11204                 }
11205             }
11206             p = lock_user_string(arg1);
11207             n = lock_user_string(arg2);
11208             if (p && n) {
11209                 if (num == TARGET_NR_getxattr) {
11210                     ret = get_errno(getxattr(p, n, v, arg4));
11211                 } else {
11212                     ret = get_errno(lgetxattr(p, n, v, arg4));
11213                 }
11214             } else {
11215                 ret = -TARGET_EFAULT;
11216             }
11217             unlock_user(p, arg1, 0);
11218             unlock_user(n, arg2, 0);
11219             unlock_user(v, arg3, arg4);
11220         }
11221         return ret;
11222     case TARGET_NR_fgetxattr:
11223         {
11224             void *n, *v = 0;
11225             if (arg3) {
11226                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11227                 if (!v) {
11228                     return -TARGET_EFAULT;
11229                 }
11230             }
11231             n = lock_user_string(arg2);
11232             if (n) {
11233                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11234             } else {
11235                 ret = -TARGET_EFAULT;
11236             }
11237             unlock_user(n, arg2, 0);
11238             unlock_user(v, arg3, arg4);
11239         }
11240         return ret;
11241     case TARGET_NR_removexattr:
11242     case TARGET_NR_lremovexattr:
11243         {
11244             void *p, *n;
11245             p = lock_user_string(arg1);
11246             n = lock_user_string(arg2);
11247             if (p && n) {
11248                 if (num == TARGET_NR_removexattr) {
11249                     ret = get_errno(removexattr(p, n));
11250                 } else {
11251                     ret = get_errno(lremovexattr(p, n));
11252                 }
11253             } else {
11254                 ret = -TARGET_EFAULT;
11255             }
11256             unlock_user(p, arg1, 0);
11257             unlock_user(n, arg2, 0);
11258         }
11259         return ret;
11260     case TARGET_NR_fremovexattr:
11261         {
11262             void *n;
11263             n = lock_user_string(arg2);
11264             if (n) {
11265                 ret = get_errno(fremovexattr(arg1, n));
11266             } else {
11267                 ret = -TARGET_EFAULT;
11268             }
11269             unlock_user(n, arg2, 0);
11270         }
11271         return ret;
11272 #endif
11273 #endif /* CONFIG_ATTR */
11274 #ifdef TARGET_NR_set_thread_area
11275     case TARGET_NR_set_thread_area:
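            /*
             * Each architecture stores the TLS pointer in its own place: the
             * MIPS UserLocal CP0 register, the CRIS PID special register, an
             * i386 GDT entry via do_set_thread_area(), or the per-task
             * tp_value on m68k.
             */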
11276 #if defined(TARGET_MIPS)
11277       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11278       return 0;
11279 #elif defined(TARGET_CRIS)
11280       if (arg1 & 0xff)
11281           ret = -TARGET_EINVAL;
11282       else {
11283           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11284           ret = 0;
11285       }
11286       return ret;
11287 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11288       return do_set_thread_area(cpu_env, arg1);
11289 #elif defined(TARGET_M68K)
11290       {
11291           TaskState *ts = cpu->opaque;
11292           ts->tp_value = arg1;
11293           return 0;
11294       }
11295 #else
11296       return -TARGET_ENOSYS;
11297 #endif
11298 #endif
11299 #ifdef TARGET_NR_get_thread_area
11300     case TARGET_NR_get_thread_area:
11301 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11302         return do_get_thread_area(cpu_env, arg1);
11303 #elif defined(TARGET_M68K)
11304         {
11305             TaskState *ts = cpu->opaque;
11306             return ts->tp_value;
11307         }
11308 #else
11309         return -TARGET_ENOSYS;
11310 #endif
11311 #endif
11312 #ifdef TARGET_NR_getdomainname
11313     case TARGET_NR_getdomainname:
11314         return -TARGET_ENOSYS;
11315 #endif
11316 
11317 #ifdef TARGET_NR_clock_settime
11318     case TARGET_NR_clock_settime:
11319     {
11320         struct timespec ts;
11321 
11322         ret = target_to_host_timespec(&ts, arg2);
11323         if (!is_error(ret)) {
11324             ret = get_errno(clock_settime(arg1, &ts));
11325         }
11326         return ret;
11327     }
11328 #endif
11329 #ifdef TARGET_NR_clock_gettime
11330     case TARGET_NR_clock_gettime:
11331     {
11332         struct timespec ts;
11333         ret = get_errno(clock_gettime(arg1, &ts));
11334         if (!is_error(ret)) {
11335             ret = host_to_target_timespec(arg2, &ts);
11336         }
11337         return ret;
11338     }
11339 #endif
11340 #ifdef TARGET_NR_clock_getres
11341     case TARGET_NR_clock_getres:
11342     {
11343         struct timespec ts;
11344         ret = get_errno(clock_getres(arg1, &ts));
11345         if (!is_error(ret)) {
11346             host_to_target_timespec(arg2, &ts);
11347         }
11348         return ret;
11349     }
11350 #endif
11351 #ifdef TARGET_NR_clock_nanosleep
11352     case TARGET_NR_clock_nanosleep:
11353     {
11354         struct timespec ts;
11355         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
11356         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11357                                              &ts, arg4 ? &ts : NULL));
11358         if (arg4)
11359             host_to_target_timespec(arg4, &ts);
11360 
11361 #if defined(TARGET_PPC)
11362         /* clock_nanosleep is odd in that it returns positive errno values.
11363          * On PPC, CR0 bit 3 should be set in such a situation. */
11364         if (ret && ret != -TARGET_ERESTARTSYS) {
11365             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11366         }
11367 #endif
11368         return ret;
11369     }
11370 #endif
11371 
11372 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11373     case TARGET_NR_set_tid_address:
11374         return get_errno(set_tid_address((int *)g2h(arg1)));
11375 #endif
11376 
11377     case TARGET_NR_tkill:
11378         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11379 
11380     case TARGET_NR_tgkill:
11381         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11382                          target_to_host_signal(arg3)));
11383 
11384 #ifdef TARGET_NR_set_robust_list
11385     case TARGET_NR_set_robust_list:
11386     case TARGET_NR_get_robust_list:
11387         /* The ABI for supporting robust futexes has userspace pass
11388          * the kernel a pointer to a linked list which is updated by
11389          * userspace after the syscall; the list is walked by the kernel
11390          * when the thread exits. Since the linked list in QEMU guest
11391          * memory isn't a valid linked list for the host and we have
11392          * no way to reliably intercept the thread-death event, we can't
11393          * support these. Silently return ENOSYS so that guest userspace
11394          * falls back to a non-robust futex implementation (which should
11395          * be OK except in the corner case of the guest crashing while
11396          * holding a mutex that is shared with another process via
11397          * shared memory).
11398          */
11399         return -TARGET_ENOSYS;
11400 #endif
11401 
11402 #if defined(TARGET_NR_utimensat)
11403     case TARGET_NR_utimensat:
11404         {
11405             struct timespec *tsp, ts[2];
11406             if (!arg3) {
11407                 tsp = NULL;
11408             } else {
11409                 target_to_host_timespec(ts, arg3);
11410                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11411                 tsp = ts;
11412             }
11413             if (!arg2)
11414                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11415             else {
11416                 if (!(p = lock_user_string(arg2))) {
11417                     return -TARGET_EFAULT;
11418                 }
11419                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11420                 unlock_user(p, arg2, 0);
11421             }
11422         }
11423         return ret;
11424 #endif
11425     case TARGET_NR_futex:
11426         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
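          /*
           * inotify descriptors get an fd_trans hook (target_inotify_trans) so
           * that struct inotify_event records read by the guest are converted
           * to the target's layout and byte order.
           */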
11427 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11428     case TARGET_NR_inotify_init:
11429         ret = get_errno(sys_inotify_init());
11430         if (ret >= 0) {
11431             fd_trans_register(ret, &target_inotify_trans);
11432         }
11433         return ret;
11434 #endif
11435 #ifdef CONFIG_INOTIFY1
11436 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11437     case TARGET_NR_inotify_init1:
11438         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11439                                           fcntl_flags_tbl)));
11440         if (ret >= 0) {
11441             fd_trans_register(ret, &target_inotify_trans);
11442         }
11443         return ret;
11444 #endif
11445 #endif
11446 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11447     case TARGET_NR_inotify_add_watch:
11448         p = lock_user_string(arg2);
11449         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11450         unlock_user(p, arg2, 0);
11451         return ret;
11452 #endif
11453 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11454     case TARGET_NR_inotify_rm_watch:
11455         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11456 #endif
11457 
11458 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11459     case TARGET_NR_mq_open:
11460         {
11461             struct mq_attr posix_mq_attr;
11462             struct mq_attr *pposix_mq_attr;
11463             int host_flags;
11464 
11465             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11466             pposix_mq_attr = NULL;
11467             if (arg4) {
11468                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11469                     return -TARGET_EFAULT;
11470                 }
11471                 pposix_mq_attr = &posix_mq_attr;
11472             }
11473             p = lock_user_string(arg1 - 1);
11474             if (!p) {
11475                 return -TARGET_EFAULT;
11476             }
11477             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11478             unlock_user (p, arg1, 0);
11479         }
11480         return ret;
11481 
11482     case TARGET_NR_mq_unlink:
11483         p = lock_user_string(arg1 - 1);
11484         if (!p) {
11485             return -TARGET_EFAULT;
11486         }
11487         ret = get_errno(mq_unlink(p));
11488         unlock_user (p, arg1, 0);
11489         return ret;
11490 
11491     case TARGET_NR_mq_timedsend:
11492         {
11493             struct timespec ts;
11494 
11495             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11496             if (arg5 != 0) {
11497                 target_to_host_timespec(&ts, arg5);
11498                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11499                 host_to_target_timespec(arg5, &ts);
11500             } else {
11501                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11502             }
11503             unlock_user (p, arg2, arg3);
11504         }
11505         return ret;
11506 
11507     case TARGET_NR_mq_timedreceive:
11508         {
11509             struct timespec ts;
11510             unsigned int prio;
11511 
11512             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11513             if (arg5 != 0) {
11514                 target_to_host_timespec(&ts, arg5);
11515                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11516                                                      &prio, &ts));
11517                 host_to_target_timespec(arg5, &ts);
11518             } else {
11519                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11520                                                      &prio, NULL));
11521             }
11522             unlock_user (p, arg2, arg3);
11523             if (arg4 != 0)
11524                 put_user_u32(prio, arg4);
11525         }
11526         return ret;
11527 
11528     /* Not implemented for now... */
11529 /*     case TARGET_NR_mq_notify: */
11530 /*         break; */
11531 
11532     case TARGET_NR_mq_getsetattr:
11533         {
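                  /*
                   * mq_getsetattr maps onto mq_setattr()/mq_getattr(): a
                   * non-zero arg2 supplies new attributes (mq_setattr also
                   * returns the old ones), otherwise the current attributes
                   * are queried, and the result is copied back only when the
                   * caller passed arg3.
                   */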
11534             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11535             ret = 0;
11536             if (arg2 != 0) {
11537                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11538                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11539                                            &posix_mq_attr_out));
11540             } else if (arg3 != 0) {
11541                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11542             }
11543             if (ret == 0 && arg3 != 0) {
11544                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11545             }
11546         }
11547         return ret;
11548 #endif
11549 
11550 #ifdef CONFIG_SPLICE
11551 #ifdef TARGET_NR_tee
11552     case TARGET_NR_tee:
11553         {
11554             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11555         }
11556         return ret;
11557 #endif
11558 #ifdef TARGET_NR_splice
11559     case TARGET_NR_splice:
11560         {
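                  /*
                   * The optional 64-bit in/out offsets are copied from guest
                   * memory, passed to the host splice() by pointer, and
                   * written back so the guest observes the updated offsets.
                   */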
11561             loff_t loff_in, loff_out;
11562             loff_t *ploff_in = NULL, *ploff_out = NULL;
11563             if (arg2) {
11564                 if (get_user_u64(loff_in, arg2)) {
11565                     return -TARGET_EFAULT;
11566                 }
11567                 ploff_in = &loff_in;
11568             }
11569             if (arg4) {
11570                 if (get_user_u64(loff_out, arg4)) {
11571                     return -TARGET_EFAULT;
11572                 }
11573                 ploff_out = &loff_out;
11574             }
11575             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11576             if (arg2) {
11577                 if (put_user_u64(loff_in, arg2)) {
11578                     return -TARGET_EFAULT;
11579                 }
11580             }
11581             if (arg4) {
11582                 if (put_user_u64(loff_out, arg4)) {
11583                     return -TARGET_EFAULT;
11584                 }
11585             }
11586         }
11587         return ret;
11588 #endif
11589 #ifdef TARGET_NR_vmsplice
11590     case TARGET_NR_vmsplice:
11591         {
11592             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11593             if (vec != NULL) {
11594                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11595                 unlock_iovec(vec, arg2, arg3, 0);
11596             } else {
11597                 ret = -host_to_target_errno(errno);
11598             }
11599         }
11600         return ret;
11601 #endif
11602 #endif /* CONFIG_SPLICE */
11603 #ifdef CONFIG_EVENTFD
11604 #if defined(TARGET_NR_eventfd)
11605     case TARGET_NR_eventfd:
11606         ret = get_errno(eventfd(arg1, 0));
11607         if (ret >= 0) {
11608             fd_trans_register(ret, &target_eventfd_trans);
11609         }
11610         return ret;
11611 #endif
11612 #if defined(TARGET_NR_eventfd2)
11613     case TARGET_NR_eventfd2:
11614     {
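              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share values with
               * O_NONBLOCK/O_CLOEXEC, which differ between target ABIs, so
               * translate those two bits by hand; any other flag bits are
               * passed through unchanged.
               */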
11615         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11616         if (arg2 & TARGET_O_NONBLOCK) {
11617             host_flags |= O_NONBLOCK;
11618         }
11619         if (arg2 & TARGET_O_CLOEXEC) {
11620             host_flags |= O_CLOEXEC;
11621         }
11622         ret = get_errno(eventfd(arg1, host_flags));
11623         if (ret >= 0) {
11624             fd_trans_register(ret, &target_eventfd_trans);
11625         }
11626         return ret;
11627     }
11628 #endif
11629 #endif /* CONFIG_EVENTFD  */
11630 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11631     case TARGET_NR_fallocate:
11632 #if TARGET_ABI_BITS == 32
11633         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11634                                   target_offset64(arg5, arg6)));
11635 #else
11636         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11637 #endif
11638         return ret;
11639 #endif
11640 #if defined(CONFIG_SYNC_FILE_RANGE)
11641 #if defined(TARGET_NR_sync_file_range)
11642     case TARGET_NR_sync_file_range:
11643 #if TARGET_ABI_BITS == 32
11644 #if defined(TARGET_MIPS)
11645         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11646                                         target_offset64(arg5, arg6), arg7));
11647 #else
11648         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11649                                         target_offset64(arg4, arg5), arg6));
11650 #endif /* !TARGET_MIPS */
11651 #else
11652         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11653 #endif
11654         return ret;
11655 #endif
11656 #if defined(TARGET_NR_sync_file_range2)
11657     case TARGET_NR_sync_file_range2:
11658         /* This is like sync_file_range but the arguments are reordered */
11659 #if TARGET_ABI_BITS == 32
11660         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11661                                         target_offset64(arg5, arg6), arg2));
11662 #else
11663         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11664 #endif
11665         return ret;
11666 #endif
11667 #endif
11668 #if defined(TARGET_NR_signalfd4)
11669     case TARGET_NR_signalfd4:
11670         return do_signalfd4(arg1, arg2, arg4);
11671 #endif
11672 #if defined(TARGET_NR_signalfd)
11673     case TARGET_NR_signalfd:
11674         return do_signalfd4(arg1, arg2, 0);
11675 #endif
11676 #if defined(CONFIG_EPOLL)
11677 #if defined(TARGET_NR_epoll_create)
11678     case TARGET_NR_epoll_create:
11679         return get_errno(epoll_create(arg1));
11680 #endif
11681 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11682     case TARGET_NR_epoll_create1:
11683         return get_errno(epoll_create1(arg1));
11684 #endif
11685 #if defined(TARGET_NR_epoll_ctl)
11686     case TARGET_NR_epoll_ctl:
11687     {
11688         struct epoll_event ep;
11689         struct epoll_event *epp = 0;
11690         if (arg4) {
11691             struct target_epoll_event *target_ep;
11692             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11693                 return -TARGET_EFAULT;
11694             }
11695             ep.events = tswap32(target_ep->events);
11696             /* The epoll_data_t union is just opaque data to the kernel,
11697              * so we transfer all 64 bits across and need not worry what
11698              * actual data type it is.
11699              */
11700             ep.data.u64 = tswap64(target_ep->data.u64);
11701             unlock_user_struct(target_ep, arg4, 0);
11702             epp = &ep;
11703         }
11704         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11705     }
11706 #endif
11707 
11708 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11709 #if defined(TARGET_NR_epoll_wait)
11710     case TARGET_NR_epoll_wait:
11711 #endif
11712 #if defined(TARGET_NR_epoll_pwait)
11713     case TARGET_NR_epoll_pwait:
11714 #endif
11715     {
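              /*
               * epoll_wait and epoll_pwait are both funnelled through
               * safe_epoll_pwait().  The event array is bounce-buffered
               * because the target's struct epoll_event may differ in layout
               * and byte order, and for epoll_pwait the guest sigset is
               * converted to a host sigset_t and passed with SIGSET_T_SIZE.
               */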
11716         struct target_epoll_event *target_ep;
11717         struct epoll_event *ep;
11718         int epfd = arg1;
11719         int maxevents = arg3;
11720         int timeout = arg4;
11721 
11722         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11723             return -TARGET_EINVAL;
11724         }
11725 
11726         target_ep = lock_user(VERIFY_WRITE, arg2,
11727                               maxevents * sizeof(struct target_epoll_event), 1);
11728         if (!target_ep) {
11729             return -TARGET_EFAULT;
11730         }
11731 
11732         ep = g_try_new(struct epoll_event, maxevents);
11733         if (!ep) {
11734             unlock_user(target_ep, arg2, 0);
11735             return -TARGET_ENOMEM;
11736         }
11737 
11738         switch (num) {
11739 #if defined(TARGET_NR_epoll_pwait)
11740         case TARGET_NR_epoll_pwait:
11741         {
11742             target_sigset_t *target_set;
11743             sigset_t _set, *set = &_set;
11744 
11745             if (arg5) {
11746                 if (arg6 != sizeof(target_sigset_t)) {
11747                     ret = -TARGET_EINVAL;
11748                     break;
11749                 }
11750 
11751                 target_set = lock_user(VERIFY_READ, arg5,
11752                                        sizeof(target_sigset_t), 1);
11753                 if (!target_set) {
11754                     ret = -TARGET_EFAULT;
11755                     break;
11756                 }
11757                 target_to_host_sigset(set, target_set);
11758                 unlock_user(target_set, arg5, 0);
11759             } else {
11760                 set = NULL;
11761             }
11762 
11763             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11764                                              set, SIGSET_T_SIZE));
11765             break;
11766         }
11767 #endif
11768 #if defined(TARGET_NR_epoll_wait)
11769         case TARGET_NR_epoll_wait:
11770             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11771                                              NULL, 0));
11772             break;
11773 #endif
11774         default:
11775             ret = -TARGET_ENOSYS;
11776         }
11777         if (!is_error(ret)) {
11778             int i;
11779             for (i = 0; i < ret; i++) {
11780                 target_ep[i].events = tswap32(ep[i].events);
11781                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11782             }
11783             unlock_user(target_ep, arg2,
11784                         ret * sizeof(struct target_epoll_event));
11785         } else {
11786             unlock_user(target_ep, arg2, 0);
11787         }
11788         g_free(ep);
11789         return ret;
11790     }
11791 #endif
11792 #endif
11793 #ifdef TARGET_NR_prlimit64
11794     case TARGET_NR_prlimit64:
11795     {
11796         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
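              /*
               * target_rlimit64 has a fixed 64-bit layout, so the new and old
               * limits only need byte-swapping; the resource number is mapped
               * with target_to_host_resource().
               */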
11797         struct target_rlimit64 *target_rnew, *target_rold;
11798         struct host_rlimit64 rnew, rold, *rnewp = 0;
11799         int resource = target_to_host_resource(arg2);
11800         if (arg3) {
11801             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11802                 return -TARGET_EFAULT;
11803             }
11804             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11805             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11806             unlock_user_struct(target_rnew, arg3, 0);
11807             rnewp = &rnew;
11808         }
11809 
11810         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11811         if (!is_error(ret) && arg4) {
11812             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11813                 return -TARGET_EFAULT;
11814             }
11815             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11816             target_rold->rlim_max = tswap64(rold.rlim_max);
11817             unlock_user_struct(target_rold, arg4, 1);
11818         }
11819         return ret;
11820     }
11821 #endif
11822 #ifdef TARGET_NR_gethostname
11823     case TARGET_NR_gethostname:
11824     {
11825         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11826         if (name) {
11827             ret = get_errno(gethostname(name, arg2));
11828             unlock_user(name, arg1, arg2);
11829         } else {
11830             ret = -TARGET_EFAULT;
11831         }
11832         return ret;
11833     }
11834 #endif
11835 #ifdef TARGET_NR_atomic_cmpxchg_32
11836     case TARGET_NR_atomic_cmpxchg_32:
11837     {
11838         /* should use start_exclusive from main.c */
11839         abi_ulong mem_value;
11840         if (get_user_u32(mem_value, arg6)) {
11841             target_siginfo_t info;
11842             info.si_signo = SIGSEGV;
11843             info.si_errno = 0;
11844             info.si_code = TARGET_SEGV_MAPERR;
11845             info._sifields._sigfault._addr = arg6;
11846             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11847                          QEMU_SI_FAULT, &info);
11848             /* don't fall through and compare an uninitialised mem_value */
11849             return 0xdeadbeef;
11850         }
11851         if (mem_value == arg2)
11852             put_user_u32(arg1, arg6);
11853         return mem_value;
11854     }
11855 #endif
11856 #ifdef TARGET_NR_atomic_barrier
11857     case TARGET_NR_atomic_barrier:
11858         /* Like the kernel implementation and the
11859            qemu arm barrier, no-op this? */
11860         return 0;
11861 #endif
11862 
11863 #ifdef TARGET_NR_timer_create
11864     case TARGET_NR_timer_create:
11865     {
11866         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
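              /*
               * Host timers are tracked in the g_posix_timers[] array.  The id
               * handed back to the guest is the array index tagged with
               * TIMER_MAGIC; get_timer_id(), used by the other timer_*
               * syscalls, validates the tag and recovers the index.
               */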
11867 
11868         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11869 
11870         int clkid = arg1;
11871         int timer_index = next_free_host_timer();
11872 
11873         if (timer_index < 0) {
11874             ret = -TARGET_EAGAIN;
11875         } else {
11876             timer_t *phtimer = g_posix_timers + timer_index;
11877 
11878             if (arg2) {
11879                 phost_sevp = &host_sevp;
11880                 ret = target_to_host_sigevent(phost_sevp, arg2);
11881                 if (ret != 0) {
11882                     return ret;
11883                 }
11884             }
11885 
11886             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11887             if (ret) {
11888                 phtimer = NULL;
11889             } else {
11890                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11891                     return -TARGET_EFAULT;
11892                 }
11893             }
11894         }
11895         return ret;
11896     }
11897 #endif
11898 
11899 #ifdef TARGET_NR_timer_settime
11900     case TARGET_NR_timer_settime:
11901     {
11902         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11903          * struct itimerspec * old_value */
11904         target_timer_t timerid = get_timer_id(arg1);
11905 
11906         if (timerid < 0) {
11907             ret = timerid;
11908         } else if (arg3 == 0) {
11909             ret = -TARGET_EINVAL;
11910         } else {
11911             timer_t htimer = g_posix_timers[timerid];
11912             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11913 
11914             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11915                 return -TARGET_EFAULT;
11916             }
11917             ret = get_errno(
11918                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11919             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11920                 return -TARGET_EFAULT;
11921             }
11922         }
11923         return ret;
11924     }
11925 #endif
11926 
11927 #ifdef TARGET_NR_timer_gettime
11928     case TARGET_NR_timer_gettime:
11929     {
11930         /* args: timer_t timerid, struct itimerspec *curr_value */
11931         target_timer_t timerid = get_timer_id(arg1);
11932 
11933         if (timerid < 0) {
11934             ret = timerid;
11935         } else if (!arg2) {
11936             ret = -TARGET_EFAULT;
11937         } else {
11938             timer_t htimer = g_posix_timers[timerid];
11939             struct itimerspec hspec;
11940             ret = get_errno(timer_gettime(htimer, &hspec));
11941 
11942             if (host_to_target_itimerspec(arg2, &hspec)) {
11943                 ret = -TARGET_EFAULT;
11944             }
11945         }
11946         return ret;
11947     }
11948 #endif
11949 
11950 #ifdef TARGET_NR_timer_getoverrun
11951     case TARGET_NR_timer_getoverrun:
11952     {
11953         /* args: timer_t timerid */
11954         target_timer_t timerid = get_timer_id(arg1);
11955 
11956         if (timerid < 0) {
11957             ret = timerid;
11958         } else {
11959             timer_t htimer = g_posix_timers[timerid];
11960             ret = get_errno(timer_getoverrun(htimer));
11961         }
11962         return ret;
11963     }
11964 #endif
11965 
11966 #ifdef TARGET_NR_timer_delete
11967     case TARGET_NR_timer_delete:
11968     {
11969         /* args: timer_t timerid */
11970         target_timer_t timerid = get_timer_id(arg1);
11971 
11972         if (timerid < 0) {
11973             ret = timerid;
11974         } else {
11975             timer_t htimer = g_posix_timers[timerid];
11976             ret = get_errno(timer_delete(htimer));
11977             g_posix_timers[timerid] = 0;
11978         }
11979         return ret;
11980     }
11981 #endif
11982 
11983 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11984     case TARGET_NR_timerfd_create:
11985         return get_errno(timerfd_create(arg1,
11986                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11987 #endif
11988 
11989 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11990     case TARGET_NR_timerfd_gettime:
11991         {
11992             struct itimerspec its_curr;
11993 
11994             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11995 
11996             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11997                 return -TARGET_EFAULT;
11998             }
11999         }
12000         return ret;
12001 #endif
12002 
12003 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12004     case TARGET_NR_timerfd_settime:
12005         {
12006             struct itimerspec its_new, its_old, *p_new;
12007 
12008             if (arg3) {
12009                 if (target_to_host_itimerspec(&its_new, arg3)) {
12010                     return -TARGET_EFAULT;
12011                 }
12012                 p_new = &its_new;
12013             } else {
12014                 p_new = NULL;
12015             }
12016 
12017             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12018 
12019             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12020                 return -TARGET_EFAULT;
12021             }
12022         }
12023         return ret;
12024 #endif
12025 
12026 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12027     case TARGET_NR_ioprio_get:
12028         return get_errno(ioprio_get(arg1, arg2));
12029 #endif
12030 
12031 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12032     case TARGET_NR_ioprio_set:
12033         return get_errno(ioprio_set(arg1, arg2, arg3));
12034 #endif
12035 
12036 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12037     case TARGET_NR_setns:
12038         return get_errno(setns(arg1, arg2));
12039 #endif
12040 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12041     case TARGET_NR_unshare:
12042         return get_errno(unshare(arg1));
12043 #endif
12044 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12045     case TARGET_NR_kcmp:
12046         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12047 #endif
12048 #ifdef TARGET_NR_swapcontext
12049     case TARGET_NR_swapcontext:
12050         /* PowerPC specific.  */
12051         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12052 #endif
12053 #ifdef TARGET_NR_memfd_create
12054     case TARGET_NR_memfd_create:
12055         p = lock_user_string(arg1);
12056         if (!p) {
12057             return -TARGET_EFAULT;
12058         }
12059         ret = get_errno(memfd_create(p, arg2));
12060         fd_trans_unregister(ret);
12061         unlock_user(p, arg1, 0);
12062         return ret;
12063 #endif
12064 
12065     default:
12066         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12067         return -TARGET_ENOSYS;
12068     }
12069     return ret;
12070 }
12071 
12072 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12073                     abi_long arg2, abi_long arg3, abi_long arg4,
12074                     abi_long arg5, abi_long arg6, abi_long arg7,
12075                     abi_long arg8)
12076 {
12077     CPUState *cpu = env_cpu(cpu_env);
12078     abi_long ret;
12079 
12080 #ifdef DEBUG_ERESTARTSYS
12081     /* Debug-only code for exercising the syscall-restart code paths
12082      * in the per-architecture cpu main loops: restart every syscall
12083      * the guest makes once before letting it through.
12084      */
12085     {
12086         static bool flag;
12087         flag = !flag;
12088         if (flag) {
12089             return -TARGET_ERESTARTSYS;
12090         }
12091     }
12092 #endif
12093 
12094     record_syscall_start(cpu, num, arg1,
12095                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12096 
12097     if (unlikely(do_strace)) {
12098         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12099         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12100                           arg5, arg6, arg7, arg8);
12101         print_syscall_ret(num, ret);
12102     } else {
12103         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12104                           arg5, arg6, arg7, arg8);
12105     }
12106 
12107     record_syscall_return(cpu, num, ret);
12108     return ret;
12109 }
12110