xref: /openbmc/qemu/linux-user/syscall.c (revision 979a8902)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include "linux_loop.h"
115 #include "uname.h"
116 
117 #include "qemu.h"
118 #include "qemu/guest-random.h"
119 #include "user/syscall-trace.h"
120 #include "qapi/error.h"
121 #include "fd-trans.h"
122 #include "tcg/tcg.h"
123 
124 #ifndef CLONE_IO
125 #define CLONE_IO                0x80000000      /* Clone io context */
126 #endif
127 
128 /* We can't directly call the host clone syscall, because this will
129  * badly confuse libc (breaking mutexes, for example). So we must
130  * divide clone flags into:
131  *  * flag combinations that look like pthread_create()
132  *  * flag combinations that look like fork()
133  *  * flags we can implement within QEMU itself
134  *  * flags we can't support and will return an error for
135  */
136 /* For thread creation, all these flags must be present; for
137  * fork, none must be present.
138  */
139 #define CLONE_THREAD_FLAGS                              \
140     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
141      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
142 
143 /* These flags are ignored:
144  * CLONE_DETACHED is now ignored by the kernel;
145  * CLONE_IO is just an optimisation hint to the I/O scheduler
146  */
147 #define CLONE_IGNORED_FLAGS                     \
148     (CLONE_DETACHED | CLONE_IO)
149 
150 /* Flags for fork which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_FORK_FLAGS               \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
154 
155 /* Flags for thread creation which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
159 
160 #define CLONE_INVALID_FORK_FLAGS                                        \
161     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
162 
163 #define CLONE_INVALID_THREAD_FLAGS                                      \
164     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
165        CLONE_IGNORED_FLAGS))
166 
167 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
168  * have almost all been allocated. We cannot support any of
169  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
170  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
171  * The checks against the invalid thread masks above will catch these.
172  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
173  */
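/*
 * As an illustration (assuming a glibc-style NPTL thread library): a
 * typical pthread_create() issues clone() with roughly
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the CLONE_INVALID_THREAD_FLAGS
 * check; a plain fork() passes only exit-signal bits covered by CSIGNAL.
 */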
174 
175 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
176  * once. This exercises the codepaths for restart.
177  */
178 //#define DEBUG_ERESTARTSYS
179 
180 //#include <linux/msdos_fs.h>
181 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
182 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
183 
184 #undef _syscall0
185 #undef _syscall1
186 #undef _syscall2
187 #undef _syscall3
188 #undef _syscall4
189 #undef _syscall5
190 #undef _syscall6
191 
192 #define _syscall0(type,name)		\
193 static type name (void)			\
194 {					\
195 	return syscall(__NR_##name);	\
196 }
197 
198 #define _syscall1(type,name,type1,arg1)		\
199 static type name (type1 arg1)			\
200 {						\
201 	return syscall(__NR_##name, arg1);	\
202 }
203 
204 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
205 static type name (type1 arg1,type2 arg2)		\
206 {							\
207 	return syscall(__NR_##name, arg1, arg2);	\
208 }
209 
210 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
211 static type name (type1 arg1,type2 arg2,type3 arg3)		\
212 {								\
213 	return syscall(__NR_##name, arg1, arg2, arg3);		\
214 }
215 
216 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
218 {										\
219 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
220 }
221 
222 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5)							\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
227 }
228 
229 
230 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
231 		  type5,arg5,type6,arg6)					\
232 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
233                   type6 arg6)							\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
236 }
237 
238 
239 #define __NR_sys_uname __NR_uname
240 #define __NR_sys_getcwd1 __NR_getcwd
241 #define __NR_sys_getdents __NR_getdents
242 #define __NR_sys_getdents64 __NR_getdents64
243 #define __NR_sys_getpriority __NR_getpriority
244 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
245 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
246 #define __NR_sys_syslog __NR_syslog
247 #define __NR_sys_futex __NR_futex
248 #define __NR_sys_inotify_init __NR_inotify_init
249 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
250 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
251 #define __NR_sys_statx __NR_statx
252 
253 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
254 #define __NR__llseek __NR_lseek
255 #endif
256 
257 /* Newer kernel ports have llseek() instead of _llseek() */
258 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
259 #define TARGET_NR__llseek TARGET_NR_llseek
260 #endif
261 
262 #define __NR_sys_gettid __NR_gettid
263 _syscall0(int, sys_gettid)
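/*
 * For example, the _syscall0() invocation above expands (roughly) to:
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * and since __NR_sys_gettid is defined to __NR_gettid just before it, the
 * wrapper issues the raw host gettid syscall directly.
 */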
264 
265 /* For the 64-bit guest on 32-bit host case we must emulate
266  * getdents using getdents64, because otherwise the host
267  * might hand us back more dirent records than we can fit
268  * into the guest buffer after structure format conversion.
269  * Otherwise we emulate getdents with the host getdents, if the host has it.
270  */
271 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
272 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #endif
274 
275 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
276 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
277 #endif
278 #if (defined(TARGET_NR_getdents) && \
279       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
280     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
281 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
282 #endif
283 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
284 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
285           loff_t *, res, uint, wh);
286 #endif
287 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
288 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
289           siginfo_t *, uinfo)
290 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
291 #ifdef __NR_exit_group
292 _syscall1(int,exit_group,int,error_code)
293 #endif
294 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
295 _syscall1(int,set_tid_address,int *,tidptr)
296 #endif
297 #if defined(TARGET_NR_futex) && defined(__NR_futex)
298 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
299           const struct timespec *,timeout,int *,uaddr2,int,val3)
300 #endif
301 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
302 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
305 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
306           unsigned long *, user_mask_ptr);
307 #define __NR_sys_getcpu __NR_getcpu
308 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
309 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
310           void *, arg);
311 _syscall2(int, capget, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 _syscall2(int, capset, struct __user_cap_header_struct *, header,
314           struct __user_cap_data_struct *, data);
315 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
316 _syscall2(int, ioprio_get, int, which, int, who)
317 #endif
318 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
319 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
320 #endif
321 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
322 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
323 #endif
324 
325 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
326 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
327           unsigned long, idx1, unsigned long, idx2)
328 #endif
329 
330 /*
331  * It is assumed that struct statx is architecture independent.
332  */
333 #if defined(TARGET_NR_statx) && defined(__NR_statx)
334 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
335           unsigned int, mask, struct target_statx *, statxbuf)
336 #endif
337 
338 static bitmask_transtbl fcntl_flags_tbl[] = {
339   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
340   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
341   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
342   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
343   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
344   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
345   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
346   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
347   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
348   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
349   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
350   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
351   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
352 #if defined(O_DIRECT)
353   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
354 #endif
355 #if defined(O_NOATIME)
356   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
357 #endif
358 #if defined(O_CLOEXEC)
359   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
360 #endif
361 #if defined(O_PATH)
362   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
363 #endif
364 #if defined(O_TMPFILE)
365   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
366 #endif
367   /* Don't terminate the list prematurely on 64-bit host+guest.  */
368 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
369   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
370 #endif
371   { 0, 0, 0, 0 }
372 };
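/*
 * Each row is { target_mask, target_bits, host_mask, host_bits }.  The
 * table is consumed by the target_to_host_bitmask()/host_to_target_bitmask()
 * helpers (defined elsewhere in the linux-user code), which, for every row
 * whose masked bits match, set the corresponding bits on the other side.
 * For example, a guest open() flag word of TARGET_O_CREAT | TARGET_O_NONBLOCK
 * is translated to the host's O_CREAT | O_NONBLOCK before the host syscall
 * is made.
 */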
373 
374 static int sys_getcwd1(char *buf, size_t size)
375 {
376   if (getcwd(buf, size) == NULL) {
377       /* getcwd() sets errno */
378       return (-1);
379   }
380   return strlen(buf)+1;
381 }
382 
383 #ifdef TARGET_NR_utimensat
384 #if defined(__NR_utimensat)
385 #define __NR_sys_utimensat __NR_utimensat
386 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
387           const struct timespec *,tsp,int,flags)
388 #else
389 static int sys_utimensat(int dirfd, const char *pathname,
390                          const struct timespec times[2], int flags)
391 {
392     errno = ENOSYS;
393     return -1;
394 }
395 #endif
396 #endif /* TARGET_NR_utimensat */
397 
398 #ifdef TARGET_NR_renameat2
399 #if defined(__NR_renameat2)
400 #define __NR_sys_renameat2 __NR_renameat2
401 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
402           const char *, new, unsigned int, flags)
403 #else
404 static int sys_renameat2(int oldfd, const char *old,
405                          int newfd, const char *new, int flags)
406 {
407     if (flags == 0) {
408         return renameat(oldfd, old, newfd, new);
409     }
410     errno = ENOSYS;
411     return -1;
412 }
413 #endif
414 #endif /* TARGET_NR_renameat2 */
415 
416 #ifdef CONFIG_INOTIFY
417 #include <sys/inotify.h>
418 
419 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
420 static int sys_inotify_init(void)
421 {
422   return (inotify_init());
423 }
424 #endif
425 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
426 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
427 {
428   return (inotify_add_watch(fd, pathname, mask));
429 }
430 #endif
431 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
432 static int sys_inotify_rm_watch(int fd, int32_t wd)
433 {
434   return (inotify_rm_watch(fd, wd));
435 }
436 #endif
437 #ifdef CONFIG_INOTIFY1
438 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
439 static int sys_inotify_init1(int flags)
440 {
441   return (inotify_init1(flags));
442 }
443 #endif
444 #endif
445 #else
446 /* Userspace can usually survive runtime without inotify */
447 #undef TARGET_NR_inotify_init
448 #undef TARGET_NR_inotify_init1
449 #undef TARGET_NR_inotify_add_watch
450 #undef TARGET_NR_inotify_rm_watch
451 #endif /* CONFIG_INOTIFY  */
452 
453 #if defined(TARGET_NR_prlimit64)
454 #ifndef __NR_prlimit64
455 # define __NR_prlimit64 -1
456 #endif
457 #define __NR_sys_prlimit64 __NR_prlimit64
458 /* The glibc rlimit structure may not be the one used by the underlying syscall */
459 struct host_rlimit64 {
460     uint64_t rlim_cur;
461     uint64_t rlim_max;
462 };
463 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
464           const struct host_rlimit64 *, new_limit,
465           struct host_rlimit64 *, old_limit)
466 #endif
467 
468 
469 #if defined(TARGET_NR_timer_create)
470 /* Maximum of 32 active POSIX timers allowed at any one time. */
471 static timer_t g_posix_timers[32] = { 0, };
472 
473 static inline int next_free_host_timer(void)
474 {
475     int k;
476     /* FIXME: Does finding the next free slot require a lock? */
477     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
478         if (g_posix_timers[k] == 0) {
479             g_posix_timers[k] = (timer_t) 1;
480             return k;
481         }
482     }
483     return -1;
484 }
485 #endif
486 
487 /* ARM EABI and MIPS expect 64bit types to be aligned on even register pairs */
488 #ifdef TARGET_ARM
489 static inline int regpairs_aligned(void *cpu_env, int num)
490 {
491     return ((((CPUARMState *)cpu_env)->eabi) == 1);
492 }
493 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
494 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
495 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
496 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
497  * of registers which translates to the same as ARM/MIPS, because we start with
498  * r3 as arg1 */
499 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
500 #elif defined(TARGET_SH4)
501 /* SH4 doesn't align register pairs, except for p{read,write}64 */
502 static inline int regpairs_aligned(void *cpu_env, int num)
503 {
504     switch (num) {
505     case TARGET_NR_pread64:
506     case TARGET_NR_pwrite64:
507         return 1;
508 
509     default:
510         return 0;
511     }
512 }
513 #elif defined(TARGET_XTENSA)
514 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
515 #else
516 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
517 #endif
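/*
 * Example: on 32-bit ARM EABI, the 64-bit offset argument of pread64 must
 * occupy an even/odd register pair, so the kernel ABI inserts a padding
 * argument before it.  When regpairs_aligned() returns 1, the syscall
 * dispatcher below skips that padding slot and reassembles the 64-bit value
 * from the following pair of guest registers.
 */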
518 
519 #define ERRNO_TABLE_SIZE 1200
520 
521 /* target_to_host_errno_table[] is initialized from
522  * host_to_target_errno_table[] in syscall_init(). */
523 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
524 };
525 
526 /*
527  * This list is the union of errno values overridden in asm-<arch>/errno.h
528  * minus the errnos that are not actually generic to all archs.
529  */
530 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
531     [EAGAIN]		= TARGET_EAGAIN,
532     [EIDRM]		= TARGET_EIDRM,
533     [ECHRNG]		= TARGET_ECHRNG,
534     [EL2NSYNC]		= TARGET_EL2NSYNC,
535     [EL3HLT]		= TARGET_EL3HLT,
536     [EL3RST]		= TARGET_EL3RST,
537     [ELNRNG]		= TARGET_ELNRNG,
538     [EUNATCH]		= TARGET_EUNATCH,
539     [ENOCSI]		= TARGET_ENOCSI,
540     [EL2HLT]		= TARGET_EL2HLT,
541     [EDEADLK]		= TARGET_EDEADLK,
542     [ENOLCK]		= TARGET_ENOLCK,
543     [EBADE]		= TARGET_EBADE,
544     [EBADR]		= TARGET_EBADR,
545     [EXFULL]		= TARGET_EXFULL,
546     [ENOANO]		= TARGET_ENOANO,
547     [EBADRQC]		= TARGET_EBADRQC,
548     [EBADSLT]		= TARGET_EBADSLT,
549     [EBFONT]		= TARGET_EBFONT,
550     [ENOSTR]		= TARGET_ENOSTR,
551     [ENODATA]		= TARGET_ENODATA,
552     [ETIME]		= TARGET_ETIME,
553     [ENOSR]		= TARGET_ENOSR,
554     [ENONET]		= TARGET_ENONET,
555     [ENOPKG]		= TARGET_ENOPKG,
556     [EREMOTE]		= TARGET_EREMOTE,
557     [ENOLINK]		= TARGET_ENOLINK,
558     [EADV]		= TARGET_EADV,
559     [ESRMNT]		= TARGET_ESRMNT,
560     [ECOMM]		= TARGET_ECOMM,
561     [EPROTO]		= TARGET_EPROTO,
562     [EDOTDOT]		= TARGET_EDOTDOT,
563     [EMULTIHOP]		= TARGET_EMULTIHOP,
564     [EBADMSG]		= TARGET_EBADMSG,
565     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
566     [EOVERFLOW]		= TARGET_EOVERFLOW,
567     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
568     [EBADFD]		= TARGET_EBADFD,
569     [EREMCHG]		= TARGET_EREMCHG,
570     [ELIBACC]		= TARGET_ELIBACC,
571     [ELIBBAD]		= TARGET_ELIBBAD,
572     [ELIBSCN]		= TARGET_ELIBSCN,
573     [ELIBMAX]		= TARGET_ELIBMAX,
574     [ELIBEXEC]		= TARGET_ELIBEXEC,
575     [EILSEQ]		= TARGET_EILSEQ,
576     [ENOSYS]		= TARGET_ENOSYS,
577     [ELOOP]		= TARGET_ELOOP,
578     [ERESTART]		= TARGET_ERESTART,
579     [ESTRPIPE]		= TARGET_ESTRPIPE,
580     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
581     [EUSERS]		= TARGET_EUSERS,
582     [ENOTSOCK]		= TARGET_ENOTSOCK,
583     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
584     [EMSGSIZE]		= TARGET_EMSGSIZE,
585     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
586     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
587     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
588     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
589     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
590     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
591     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
592     [EADDRINUSE]	= TARGET_EADDRINUSE,
593     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
594     [ENETDOWN]		= TARGET_ENETDOWN,
595     [ENETUNREACH]	= TARGET_ENETUNREACH,
596     [ENETRESET]		= TARGET_ENETRESET,
597     [ECONNABORTED]	= TARGET_ECONNABORTED,
598     [ECONNRESET]	= TARGET_ECONNRESET,
599     [ENOBUFS]		= TARGET_ENOBUFS,
600     [EISCONN]		= TARGET_EISCONN,
601     [ENOTCONN]		= TARGET_ENOTCONN,
602     [EUCLEAN]		= TARGET_EUCLEAN,
603     [ENOTNAM]		= TARGET_ENOTNAM,
604     [ENAVAIL]		= TARGET_ENAVAIL,
605     [EISNAM]		= TARGET_EISNAM,
606     [EREMOTEIO]		= TARGET_EREMOTEIO,
607     [EDQUOT]            = TARGET_EDQUOT,
608     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
609     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
610     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
611     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
612     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
613     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
614     [EALREADY]		= TARGET_EALREADY,
615     [EINPROGRESS]	= TARGET_EINPROGRESS,
616     [ESTALE]		= TARGET_ESTALE,
617     [ECANCELED]		= TARGET_ECANCELED,
618     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
619     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
620 #ifdef ENOKEY
621     [ENOKEY]		= TARGET_ENOKEY,
622 #endif
623 #ifdef EKEYEXPIRED
624     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
625 #endif
626 #ifdef EKEYREVOKED
627     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
628 #endif
629 #ifdef EKEYREJECTED
630     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
631 #endif
632 #ifdef EOWNERDEAD
633     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
634 #endif
635 #ifdef ENOTRECOVERABLE
636     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
637 #endif
638 #ifdef ENOMSG
639     [ENOMSG]            = TARGET_ENOMSG,
640 #endif
641 #ifdef ERFKILL
642     [ERFKILL]           = TARGET_ERFKILL,
643 #endif
644 #ifdef EHWPOISON
645     [EHWPOISON]         = TARGET_EHWPOISON,
646 #endif
647 };
648 
649 static inline int host_to_target_errno(int err)
650 {
651     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652         host_to_target_errno_table[err]) {
653         return host_to_target_errno_table[err];
654     }
655     return err;
656 }
657 
658 static inline int target_to_host_errno(int err)
659 {
660     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
661         target_to_host_errno_table[err]) {
662         return target_to_host_errno_table[err];
663     }
664     return err;
665 }
666 
667 static inline abi_long get_errno(abi_long ret)
668 {
669     if (ret == -1)
670         return -host_to_target_errno(errno);
671     else
672         return ret;
673 }
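/*
 * Example: if a host openat() fails with errno == ENOSYS, get_errno(-1)
 * yields -TARGET_ENOSYS, which is the value the guest expects to see as the
 * syscall return; successful results are passed through unchanged.
 */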
674 
675 const char *target_strerror(int err)
676 {
677     if (err == TARGET_ERESTARTSYS) {
678         return "To be restarted";
679     }
680     if (err == TARGET_QEMU_ESIGRETURN) {
681         return "Successful exit from sigreturn";
682     }
683 
684     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
685         return NULL;
686     }
687     return strerror(target_to_host_errno(err));
688 }
689 
690 #define safe_syscall0(type, name) \
691 static type safe_##name(void) \
692 { \
693     return safe_syscall(__NR_##name); \
694 }
695 
696 #define safe_syscall1(type, name, type1, arg1) \
697 static type safe_##name(type1 arg1) \
698 { \
699     return safe_syscall(__NR_##name, arg1); \
700 }
701 
702 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
703 static type safe_##name(type1 arg1, type2 arg2) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2); \
706 }
707 
708 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
710 { \
711     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
712 }
713 
714 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
715     type4, arg4) \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
717 { \
718     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
719 }
720 
721 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
722     type4, arg4, type5, arg5) \
723 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
724     type5 arg5) \
725 { \
726     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
727 }
728 
729 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
730     type4, arg4, type5, arg5, type6, arg6) \
731 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
732     type5 arg5, type6 arg6) \
733 { \
734     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
735 }
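/*
 * As with the _syscallN() family above, these macros generate one static
 * wrapper per syscall; for instance safe_syscall2(int, kill, ...) below
 * expands (roughly) to:
 *
 *   static int safe_kill(pid_t pid, int sig)
 *   {
 *       return safe_syscall(__NR_kill, pid, sig);
 *   }
 *
 * The difference is that safe_syscall() (provided elsewhere in the
 * linux-user code) is used instead of the libc syscall() wrapper, so that
 * guest signals arriving while the thread is blocked in the host syscall
 * can be handled without races.
 */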
736 
737 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
738 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
739 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
740               int, flags, mode_t, mode)
741 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
742               struct rusage *, rusage)
743 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
744               int, options, struct rusage *, rusage)
745 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
746 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
747               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
748 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
749               struct timespec *, tsp, const sigset_t *, sigmask,
750               size_t, sigsetsize)
751 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
752               int, maxevents, int, timeout, const sigset_t *, sigmask,
753               size_t, sigsetsize)
754 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
755               const struct timespec *,timeout,int *,uaddr2,int,val3)
756 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
757 safe_syscall2(int, kill, pid_t, pid, int, sig)
758 safe_syscall2(int, tkill, int, tid, int, sig)
759 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
760 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
761 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
762 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
763               unsigned long, pos_l, unsigned long, pos_h)
764 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
765               unsigned long, pos_l, unsigned long, pos_h)
766 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
767               socklen_t, addrlen)
768 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
769               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
770 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
771               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
772 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
773 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
774 safe_syscall2(int, flock, int, fd, int, operation)
775 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
776               const struct timespec *, uts, size_t, sigsetsize)
777 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
778               int, flags)
779 safe_syscall2(int, nanosleep, const struct timespec *, req,
780               struct timespec *, rem)
781 #ifdef TARGET_NR_clock_nanosleep
782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
783               const struct timespec *, req, struct timespec *, rem)
784 #endif
785 #ifdef __NR_ipc
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 #endif
789 #ifdef __NR_msgsnd
790 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
791               int, flags)
792 #endif
793 #ifdef __NR_msgrcv
794 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
795               long, msgtype, int, flags)
796 #endif
797 #ifdef __NR_semtimedop
798 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
799               unsigned, nsops, const struct timespec *, timeout)
800 #endif
801 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
802 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
803               size_t, len, unsigned, prio, const struct timespec *, timeout)
804 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
805               size_t, len, unsigned *, prio, const struct timespec *, timeout)
806 #endif
807 /* We do ioctl like this rather than via safe_syscall3 to preserve the
808  * "third argument might be integer or pointer or not present" behaviour of
809  * the libc function.
810  */
811 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
812 /* Similarly for fcntl. Note that callers must always:
813  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
814  *  use the flock64 struct rather than unsuffixed flock
815  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
816  */
817 #ifdef __NR_fcntl64
818 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
819 #else
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
821 #endif
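/*
 * For example, a guest F_GETLK request ends up as
 * safe_fcntl(fd, F_GETLK64, &fl) with a host struct flock64, so file
 * locking uses 64-bit offsets regardless of the host word size.
 */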
822 
823 static inline int host_to_target_sock_type(int host_type)
824 {
825     int target_type;
826 
827     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
828     case SOCK_DGRAM:
829         target_type = TARGET_SOCK_DGRAM;
830         break;
831     case SOCK_STREAM:
832         target_type = TARGET_SOCK_STREAM;
833         break;
834     default:
835         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
836         break;
837     }
838 
839 #if defined(SOCK_CLOEXEC)
840     if (host_type & SOCK_CLOEXEC) {
841         target_type |= TARGET_SOCK_CLOEXEC;
842     }
843 #endif
844 
845 #if defined(SOCK_NONBLOCK)
846     if (host_type & SOCK_NONBLOCK) {
847         target_type |= TARGET_SOCK_NONBLOCK;
848     }
849 #endif
850 
851     return target_type;
852 }
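/*
 * Example: a host value of SOCK_STREAM | SOCK_CLOEXEC is reported to the
 * guest as TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC; unrecognised base
 * types are passed through unchanged.
 */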
853 
854 static abi_ulong target_brk;
855 static abi_ulong target_original_brk;
856 static abi_ulong brk_page;
857 
858 void target_set_brk(abi_ulong new_brk)
859 {
860     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
861     brk_page = HOST_PAGE_ALIGN(target_brk);
862 }
863 
864 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
865 #define DEBUGF_BRK(message, args...)
866 
867 /* do_brk() must return target values and target errnos. */
868 abi_long do_brk(abi_ulong new_brk)
869 {
870     abi_long mapped_addr;
871     abi_ulong new_alloc_size;
872 
873     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
874 
875     if (!new_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
877         return target_brk;
878     }
879     if (new_brk < target_original_brk) {
880         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
881                    target_brk);
882         return target_brk;
883     }
884 
885     /* If the new brk is less than the highest page reserved to the
886      * target heap allocation, set it and we're almost done...  */
887     if (new_brk <= brk_page) {
888         /* Heap contents are initialized to zero, as for anonymous
889          * mapped pages.  */
890         if (new_brk > target_brk) {
891             memset(g2h(target_brk), 0, new_brk - target_brk);
892         }
893         target_brk = new_brk;
894         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
895         return target_brk;
896     }
897 
898     /* We need to allocate more memory after the brk... Note that
899      * we don't use MAP_FIXED because that will map over the top of
900      * any existing mapping (like the one with the host libc or qemu
901      * itself); instead we treat "mapped but at wrong address" as
902      * a failure and unmap again.
903      */
904     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
905     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
906                                         PROT_READ|PROT_WRITE,
907                                         MAP_ANON|MAP_PRIVATE, 0, 0));
908 
909     if (mapped_addr == brk_page) {
910         /* Heap contents are initialized to zero, as for anonymous
911          * mapped pages.  Technically the new pages are already
912          * initialized to zero since they *are* anonymous mapped
913          * pages, however we have to take care with the contents that
914          * come from the remaining part of the previous page: it may
915      * contain garbage data due to a previous heap usage (grown
916          * then shrunken).  */
917         memset(g2h(target_brk), 0, brk_page - target_brk);
918 
919         target_brk = new_brk;
920         brk_page = HOST_PAGE_ALIGN(target_brk);
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
922             target_brk);
923         return target_brk;
924     } else if (mapped_addr != -1) {
925         /* Mapped but at wrong address, meaning there wasn't actually
926          * enough space for this brk.
927          */
928         target_munmap(mapped_addr, new_alloc_size);
929         mapped_addr = -1;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
931     }
932     else {
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
934     }
935 
936 #if defined(TARGET_ALPHA)
937     /* We (partially) emulate OSF/1 on Alpha, which requires we
938        return a proper errno, not an unchanged brk value.  */
939     return -TARGET_ENOMEM;
940 #endif
941     /* For everything else, return the previous break. */
942     return target_brk;
943 }
944 
945 static inline abi_long copy_from_user_fdset(fd_set *fds,
946                                             abi_ulong target_fds_addr,
947                                             int n)
948 {
949     int i, nw, j, k;
950     abi_ulong b, *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_READ,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  1)))
957         return -TARGET_EFAULT;
958 
959     FD_ZERO(fds);
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         /* grab the abi_ulong */
963         __get_user(b, &target_fds[i]);
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             /* check the bit inside the abi_ulong */
966             if ((b >> j) & 1)
967                 FD_SET(k, fds);
968             k++;
969         }
970     }
971 
972     unlock_user(target_fds, target_fds_addr, 0);
973 
974     return 0;
975 }
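/*
 * Example: with TARGET_ABI_BITS == 32 and n == 40, nw is 2, so two
 * abi_ulong words are read from guest memory; bit j of word i corresponds
 * to file descriptor i * 32 + j in the resulting host fd_set.
 */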
976 
977 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
978                                                  abi_ulong target_fds_addr,
979                                                  int n)
980 {
981     if (target_fds_addr) {
982         if (copy_from_user_fdset(fds, target_fds_addr, n))
983             return -TARGET_EFAULT;
984         *fds_ptr = fds;
985     } else {
986         *fds_ptr = NULL;
987     }
988     return 0;
989 }
990 
991 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
992                                           const fd_set *fds,
993                                           int n)
994 {
995     int i, nw, j, k;
996     abi_long v;
997     abi_ulong *target_fds;
998 
999     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1000     if (!(target_fds = lock_user(VERIFY_WRITE,
1001                                  target_fds_addr,
1002                                  sizeof(abi_ulong) * nw,
1003                                  0)))
1004         return -TARGET_EFAULT;
1005 
1006     k = 0;
1007     for (i = 0; i < nw; i++) {
1008         v = 0;
1009         for (j = 0; j < TARGET_ABI_BITS; j++) {
1010             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1011             k++;
1012         }
1013         __put_user(v, &target_fds[i]);
1014     }
1015 
1016     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1017 
1018     return 0;
1019 }
1020 
1021 #if defined(__alpha__)
1022 #define HOST_HZ 1024
1023 #else
1024 #define HOST_HZ 100
1025 #endif
1026 
1027 static inline abi_long host_to_target_clock_t(long ticks)
1028 {
1029 #if HOST_HZ == TARGET_HZ
1030     return ticks;
1031 #else
1032     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1033 #endif
1034 }
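/*
 * Example: on an Alpha host (HOST_HZ == 1024) running a guest whose
 * TARGET_HZ is 100, a host value of 2048 ticks is reported to the guest as
 * 2048 * 100 / 1024 = 200 ticks.
 */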
1035 
1036 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1037                                              const struct rusage *rusage)
1038 {
1039     struct target_rusage *target_rusage;
1040 
1041     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1042         return -TARGET_EFAULT;
1043     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1044     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1045     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1046     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1047     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1048     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1049     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1050     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1051     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1052     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1053     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1054     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1055     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1056     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1057     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1058     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1059     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1060     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1061     unlock_user_struct(target_rusage, target_addr, 1);
1062 
1063     return 0;
1064 }
1065 
1066 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1067 {
1068     abi_ulong target_rlim_swap;
1069     rlim_t result;
1070 
1071     target_rlim_swap = tswapal(target_rlim);
1072     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1073         return RLIM_INFINITY;
1074 
1075     result = target_rlim_swap;
1076     if (target_rlim_swap != (rlim_t)result)
1077         return RLIM_INFINITY;
1078 
1079     return result;
1080 }
1081 
1082 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1083 {
1084     abi_ulong target_rlim_swap;
1085     abi_ulong result;
1086 
1087     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1088         target_rlim_swap = TARGET_RLIM_INFINITY;
1089     else
1090         target_rlim_swap = rlim;
1091     result = tswapal(target_rlim_swap);
1092 
1093     return result;
1094 }
1095 
1096 static inline int target_to_host_resource(int code)
1097 {
1098     switch (code) {
1099     case TARGET_RLIMIT_AS:
1100         return RLIMIT_AS;
1101     case TARGET_RLIMIT_CORE:
1102         return RLIMIT_CORE;
1103     case TARGET_RLIMIT_CPU:
1104         return RLIMIT_CPU;
1105     case TARGET_RLIMIT_DATA:
1106         return RLIMIT_DATA;
1107     case TARGET_RLIMIT_FSIZE:
1108         return RLIMIT_FSIZE;
1109     case TARGET_RLIMIT_LOCKS:
1110         return RLIMIT_LOCKS;
1111     case TARGET_RLIMIT_MEMLOCK:
1112         return RLIMIT_MEMLOCK;
1113     case TARGET_RLIMIT_MSGQUEUE:
1114         return RLIMIT_MSGQUEUE;
1115     case TARGET_RLIMIT_NICE:
1116         return RLIMIT_NICE;
1117     case TARGET_RLIMIT_NOFILE:
1118         return RLIMIT_NOFILE;
1119     case TARGET_RLIMIT_NPROC:
1120         return RLIMIT_NPROC;
1121     case TARGET_RLIMIT_RSS:
1122         return RLIMIT_RSS;
1123     case TARGET_RLIMIT_RTPRIO:
1124         return RLIMIT_RTPRIO;
1125     case TARGET_RLIMIT_SIGPENDING:
1126         return RLIMIT_SIGPENDING;
1127     case TARGET_RLIMIT_STACK:
1128         return RLIMIT_STACK;
1129     default:
1130         return code;
1131     }
1132 }
1133 
1134 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1135                                               abi_ulong target_tv_addr)
1136 {
1137     struct target_timeval *target_tv;
1138 
1139     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1140         return -TARGET_EFAULT;
1141     }
1142 
1143     __get_user(tv->tv_sec, &target_tv->tv_sec);
1144     __get_user(tv->tv_usec, &target_tv->tv_usec);
1145 
1146     unlock_user_struct(target_tv, target_tv_addr, 0);
1147 
1148     return 0;
1149 }
1150 
1151 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1152                                             const struct timeval *tv)
1153 {
1154     struct target_timeval *target_tv;
1155 
1156     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1157         return -TARGET_EFAULT;
1158     }
1159 
1160     __put_user(tv->tv_sec, &target_tv->tv_sec);
1161     __put_user(tv->tv_usec, &target_tv->tv_usec);
1162 
1163     unlock_user_struct(target_tv, target_tv_addr, 1);
1164 
1165     return 0;
1166 }
1167 
1168 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1169                                              const struct timeval *tv)
1170 {
1171     struct target__kernel_sock_timeval *target_tv;
1172 
1173     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __put_user(tv->tv_sec, &target_tv->tv_sec);
1178     __put_user(tv->tv_usec, &target_tv->tv_usec);
1179 
1180     unlock_user_struct(target_tv, target_tv_addr, 1);
1181 
1182     return 0;
1183 }
1184 
1185 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1186                                                abi_ulong target_addr)
1187 {
1188     struct target_timespec *target_ts;
1189 
1190     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1191         return -TARGET_EFAULT;
1192     }
1193     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1194     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1195     unlock_user_struct(target_ts, target_addr, 0);
1196     return 0;
1197 }
1198 
1199 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1200                                                struct timespec *host_ts)
1201 {
1202     struct target_timespec *target_ts;
1203 
1204     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1205         return -TARGET_EFAULT;
1206     }
1207     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1208     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1209     unlock_user_struct(target_ts, target_addr, 1);
1210     return 0;
1211 }
1212 
1213 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1214                                                  struct timespec *host_ts)
1215 {
1216     struct target__kernel_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     unlock_user_struct(target_ts, target_addr, 1);
1224     return 0;
1225 }
1226 
1227 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1228                                                abi_ulong target_tz_addr)
1229 {
1230     struct target_timezone *target_tz;
1231 
1232     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1233         return -TARGET_EFAULT;
1234     }
1235 
1236     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1237     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1238 
1239     unlock_user_struct(target_tz, target_tz_addr, 0);
1240 
1241     return 0;
1242 }
1243 
1244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1245 #include <mqueue.h>
1246 
1247 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1248                                               abi_ulong target_mq_attr_addr)
1249 {
1250     struct target_mq_attr *target_mq_attr;
1251 
1252     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1253                           target_mq_attr_addr, 1))
1254         return -TARGET_EFAULT;
1255 
1256     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1257     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1258     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1259     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1260 
1261     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1262 
1263     return 0;
1264 }
1265 
1266 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1267                                             const struct mq_attr *attr)
1268 {
1269     struct target_mq_attr *target_mq_attr;
1270 
1271     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1272                           target_mq_attr_addr, 0))
1273         return -TARGET_EFAULT;
1274 
1275     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1276     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1277     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1278     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1279 
1280     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1281 
1282     return 0;
1283 }
1284 #endif
1285 
1286 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1287 /* do_select() must return target values and target errnos. */
1288 static abi_long do_select(int n,
1289                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1290                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1291 {
1292     fd_set rfds, wfds, efds;
1293     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1294     struct timeval tv;
1295     struct timespec ts, *ts_ptr;
1296     abi_long ret;
1297 
1298     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1299     if (ret) {
1300         return ret;
1301     }
1302     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1303     if (ret) {
1304         return ret;
1305     }
1306     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1307     if (ret) {
1308         return ret;
1309     }
1310 
1311     if (target_tv_addr) {
1312         if (copy_from_user_timeval(&tv, target_tv_addr))
1313             return -TARGET_EFAULT;
1314         ts.tv_sec = tv.tv_sec;
1315         ts.tv_nsec = tv.tv_usec * 1000;
1316         ts_ptr = &ts;
1317     } else {
1318         ts_ptr = NULL;
1319     }
1320 
1321     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1322                                   ts_ptr, NULL));
1323 
1324     if (!is_error(ret)) {
1325         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1326             return -TARGET_EFAULT;
1327         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1328             return -TARGET_EFAULT;
1329         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1330             return -TARGET_EFAULT;
1331 
1332         if (target_tv_addr) {
1333             tv.tv_sec = ts.tv_sec;
1334             tv.tv_usec = ts.tv_nsec / 1000;
1335             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1336                 return -TARGET_EFAULT;
1337             }
1338         }
1339     }
1340 
1341     return ret;
1342 }
1343 
1344 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1345 static abi_long do_old_select(abi_ulong arg1)
1346 {
1347     struct target_sel_arg_struct *sel;
1348     abi_ulong inp, outp, exp, tvp;
1349     long nsel;
1350 
1351     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1352         return -TARGET_EFAULT;
1353     }
1354 
1355     nsel = tswapal(sel->n);
1356     inp = tswapal(sel->inp);
1357     outp = tswapal(sel->outp);
1358     exp = tswapal(sel->exp);
1359     tvp = tswapal(sel->tvp);
1360 
1361     unlock_user_struct(sel, arg1, 0);
1362 
1363     return do_select(nsel, inp, outp, exp, tvp);
1364 }
1365 #endif
1366 #endif
1367 
1368 static abi_long do_pipe2(int host_pipe[], int flags)
1369 {
1370 #ifdef CONFIG_PIPE2
1371     return pipe2(host_pipe, flags);
1372 #else
1373     return -ENOSYS;
1374 #endif
1375 }
1376 
1377 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1378                         int flags, int is_pipe2)
1379 {
1380     int host_pipe[2];
1381     abi_long ret;
1382     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1383 
1384     if (is_error(ret))
1385         return get_errno(ret);
1386 
1387     /* Several targets have special calling conventions for the original
1388        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1389     if (!is_pipe2) {
1390 #if defined(TARGET_ALPHA)
1391         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1392         return host_pipe[0];
1393 #elif defined(TARGET_MIPS)
1394         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1395         return host_pipe[0];
1396 #elif defined(TARGET_SH4)
1397         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1398         return host_pipe[0];
1399 #elif defined(TARGET_SPARC)
1400         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1401         return host_pipe[0];
1402 #endif
1403     }
1404 
1405     if (put_user_s32(host_pipe[0], pipedes)
1406         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1407         return -TARGET_EFAULT;
1408     return get_errno(ret);
1409 }
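/*
 * For example, the MIPS kernel returns the two descriptors from pipe() in
 * registers $v0 and $v1; the code above mirrors that by storing
 * host_pipe[1] in gpr[3] ($v1) and returning host_pipe[0] as the normal
 * syscall result.  Alpha, SH4 and SPARC use analogous two-register
 * conventions, handled by the other branches.
 */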
1410 
1411 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1412                                               abi_ulong target_addr,
1413                                               socklen_t len)
1414 {
1415     struct target_ip_mreqn *target_smreqn;
1416 
1417     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1418     if (!target_smreqn)
1419         return -TARGET_EFAULT;
1420     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1421     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1422     if (len == sizeof(struct target_ip_mreqn))
1423         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1424     unlock_user(target_smreqn, target_addr, 0);
1425 
1426     return 0;
1427 }
1428 
1429 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1430                                                abi_ulong target_addr,
1431                                                socklen_t len)
1432 {
1433     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1434     sa_family_t sa_family;
1435     struct target_sockaddr *target_saddr;
1436 
1437     if (fd_trans_target_to_host_addr(fd)) {
1438         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1439     }
1440 
1441     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1442     if (!target_saddr)
1443         return -TARGET_EFAULT;
1444 
1445     sa_family = tswap16(target_saddr->sa_family);
1446 
1447     /* Oops. The caller might send an incomplete sun_path; sun_path
1448      * must be terminated by \0 (see the manual page), but
1449      * unfortunately it is quite common to specify sockaddr_un
1450      * length as "strlen(x->sun_path)" while it should be
1451      * "strlen(...) + 1". We'll fix that here if needed.
1452      * Linux kernel has a similar feature.
1453      */
1454 
1455     if (sa_family == AF_UNIX) {
1456         if (len < unix_maxlen && len > 0) {
1457             char *cp = (char*)target_saddr;
1458 
1459             if ( cp[len-1] && !cp[len] )
1460                 len++;
1461         }
1462         if (len > unix_maxlen)
1463             len = unix_maxlen;
1464     }
1465 
1466     memcpy(addr, target_saddr, len);
1467     addr->sa_family = sa_family;
1468     if (sa_family == AF_NETLINK) {
1469         struct sockaddr_nl *nladdr;
1470 
1471         nladdr = (struct sockaddr_nl *)addr;
1472         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1473         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1474     } else if (sa_family == AF_PACKET) {
1475         struct target_sockaddr_ll *lladdr;
1476 
1477         lladdr = (struct target_sockaddr_ll *)addr;
1478         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1479         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1480     }
1481     unlock_user(target_saddr, target_addr, 0);
1482 
1483     return 0;
1484 }
1485 
1486 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1487                                                struct sockaddr *addr,
1488                                                socklen_t len)
1489 {
1490     struct target_sockaddr *target_saddr;
1491 
1492     if (len == 0) {
1493         return 0;
1494     }
1495     assert(addr);
1496 
1497     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1498     if (!target_saddr)
1499         return -TARGET_EFAULT;
1500     memcpy(target_saddr, addr, len);
1501     if (len >= offsetof(struct target_sockaddr, sa_family) +
1502         sizeof(target_saddr->sa_family)) {
1503         target_saddr->sa_family = tswap16(addr->sa_family);
1504     }
1505     if (addr->sa_family == AF_NETLINK &&
1506         len >= sizeof(struct target_sockaddr_nl)) {
1507         struct target_sockaddr_nl *target_nl =
1508                (struct target_sockaddr_nl *)target_saddr;
1509         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1510         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1511     } else if (addr->sa_family == AF_PACKET) {
1512         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1513         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1514         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1515     } else if (addr->sa_family == AF_INET6 &&
1516                len >= sizeof(struct target_sockaddr_in6)) {
1517         struct target_sockaddr_in6 *target_in6 =
1518                (struct target_sockaddr_in6 *)target_saddr;
1519         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1520     }
1521     unlock_user(target_saddr, target_addr, len);
1522 
1523     return 0;
1524 }
1525 
1526 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1527                                            struct target_msghdr *target_msgh)
1528 {
1529     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1530     abi_long msg_controllen;
1531     abi_ulong target_cmsg_addr;
1532     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1533     socklen_t space = 0;
1534 
1535     msg_controllen = tswapal(target_msgh->msg_controllen);
1536     if (msg_controllen < sizeof (struct target_cmsghdr))
1537         goto the_end;
1538     target_cmsg_addr = tswapal(target_msgh->msg_control);
1539     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1540     target_cmsg_start = target_cmsg;
1541     if (!target_cmsg)
1542         return -TARGET_EFAULT;
1543 
1544     while (cmsg && target_cmsg) {
1545         void *data = CMSG_DATA(cmsg);
1546         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1547 
1548         int len = tswapal(target_cmsg->cmsg_len)
1549             - sizeof(struct target_cmsghdr);
1550 
1551         space += CMSG_SPACE(len);
1552         if (space > msgh->msg_controllen) {
1553             space -= CMSG_SPACE(len);
1554             /* This is a QEMU bug, since we allocated the payload
1555              * area ourselves (unlike overflow in host-to-target
1556              * conversion, which is just the guest giving us a buffer
1557              * that's too small). It can't happen for the payload types
1558              * we currently support; if it becomes an issue in future
1559              * we would need to improve our allocation strategy to
1560              * something more intelligent than "twice the size of the
1561              * target buffer we're reading from".
1562              */
1563             gemu_log("Host cmsg overflow\n");
1564             break;
1565         }
1566 
1567         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1568             cmsg->cmsg_level = SOL_SOCKET;
1569         } else {
1570             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1571         }
1572         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1573         cmsg->cmsg_len = CMSG_LEN(len);
1574 
1575         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1576             int *fd = (int *)data;
1577             int *target_fd = (int *)target_data;
1578             int i, numfds = len / sizeof(int);
1579 
1580             for (i = 0; i < numfds; i++) {
1581                 __get_user(fd[i], target_fd + i);
1582             }
1583         } else if (cmsg->cmsg_level == SOL_SOCKET
1584                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1585             struct ucred *cred = (struct ucred *)data;
1586             struct target_ucred *target_cred =
1587                 (struct target_ucred *)target_data;
1588 
1589             __get_user(cred->pid, &target_cred->pid);
1590             __get_user(cred->uid, &target_cred->uid);
1591             __get_user(cred->gid, &target_cred->gid);
1592         } else {
1593             gemu_log("Unsupported ancillary data: %d/%d\n",
1594                                         cmsg->cmsg_level, cmsg->cmsg_type);
1595             memcpy(data, target_data, len);
1596         }
1597 
1598         cmsg = CMSG_NXTHDR(msgh, cmsg);
1599         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1600                                          target_cmsg_start);
1601     }
1602     unlock_user(target_cmsg, target_cmsg_addr, 0);
1603  the_end:
1604     msgh->msg_controllen = space;
1605     return 0;
1606 }
1607 
1608 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1609                                            struct msghdr *msgh)
1610 {
1611     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1612     abi_long msg_controllen;
1613     abi_ulong target_cmsg_addr;
1614     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1615     socklen_t space = 0;
1616 
1617     msg_controllen = tswapal(target_msgh->msg_controllen);
1618     if (msg_controllen < sizeof (struct target_cmsghdr))
1619         goto the_end;
1620     target_cmsg_addr = tswapal(target_msgh->msg_control);
1621     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1622     target_cmsg_start = target_cmsg;
1623     if (!target_cmsg)
1624         return -TARGET_EFAULT;
1625 
1626     while (cmsg && target_cmsg) {
1627         void *data = CMSG_DATA(cmsg);
1628         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1629 
1630         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1631         int tgt_len, tgt_space;
1632 
1633         /* We never copy a half-header but may copy half-data;
1634          * this is Linux's behaviour in put_cmsg(). Note that
1635          * truncation here is a guest problem (which we report
1636          * to the guest via the CTRUNC bit), unlike truncation
1637          * in target_to_host_cmsg, which is a QEMU bug.
1638          */
1639         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1640             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1641             break;
1642         }
1643 
1644         if (cmsg->cmsg_level == SOL_SOCKET) {
1645             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1646         } else {
1647             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1648         }
1649         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1650 
1651         /* Payload types which need a different size of payload on
1652          * the target must adjust tgt_len here.
1653          */
1654         tgt_len = len;
1655         switch (cmsg->cmsg_level) {
1656         case SOL_SOCKET:
1657             switch (cmsg->cmsg_type) {
1658             case SO_TIMESTAMP:
1659                 tgt_len = sizeof(struct target_timeval);
1660                 break;
1661             default:
1662                 break;
1663             }
1664             break;
1665         default:
1666             break;
1667         }
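        /* For example (sizes are host/target dependent, so this is only
         * illustrative): a 64-bit host's struct timeval is 16 bytes,
         * while a 32-bit guest's struct target_timeval is 8 bytes, so
         * SO_TIMESTAMP payloads shrink from len == 16 to tgt_len == 8.
         */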
1668 
1669         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1670             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1671             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1672         }
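        /* Illustrative truncation case: if the guest left control space
         * for only TARGET_CMSG_LEN(2) bytes but the (possibly resized)
         * payload is 4 bytes, tgt_len is clamped to 2 here, the copy
         * below writes just those 2 bytes, and the guest sees MSG_CTRUNC
         * in msg_flags.
         */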
1673 
1674         /* We must now copy-and-convert len bytes of payload
1675          * into tgt_len bytes of destination space. Bear in mind
1676          * that in both source and destination we may be dealing
1677          * with a truncated value!
1678          */
1679         switch (cmsg->cmsg_level) {
1680         case SOL_SOCKET:
1681             switch (cmsg->cmsg_type) {
1682             case SCM_RIGHTS:
1683             {
1684                 int *fd = (int *)data;
1685                 int *target_fd = (int *)target_data;
1686                 int i, numfds = tgt_len / sizeof(int);
1687 
1688                 for (i = 0; i < numfds; i++) {
1689                     __put_user(fd[i], target_fd + i);
1690                 }
1691                 break;
1692             }
1693             case SO_TIMESTAMP:
1694             {
1695                 struct timeval *tv = (struct timeval *)data;
1696                 struct target_timeval *target_tv =
1697                     (struct target_timeval *)target_data;
1698 
1699                 if (len != sizeof(struct timeval) ||
1700                     tgt_len != sizeof(struct target_timeval)) {
1701                     goto unimplemented;
1702                 }
1703 
1704                 /* copy struct timeval to target */
1705                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1706                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1707                 break;
1708             }
1709             case SCM_CREDENTIALS:
1710             {
1711                 struct ucred *cred = (struct ucred *)data;
1712                 struct target_ucred *target_cred =
1713                     (struct target_ucred *)target_data;
1714 
1715                 __put_user(cred->pid, &target_cred->pid);
1716                 __put_user(cred->uid, &target_cred->uid);
1717                 __put_user(cred->gid, &target_cred->gid);
1718                 break;
1719             }
1720             default:
1721                 goto unimplemented;
1722             }
1723             break;
1724 
1725         case SOL_IP:
1726             switch (cmsg->cmsg_type) {
1727             case IP_TTL:
1728             {
1729                 uint32_t *v = (uint32_t *)data;
1730                 uint32_t *t_int = (uint32_t *)target_data;
1731 
1732                 if (len != sizeof(uint32_t) ||
1733                     tgt_len != sizeof(uint32_t)) {
1734                     goto unimplemented;
1735                 }
1736                 __put_user(*v, t_int);
1737                 break;
1738             }
1739             case IP_RECVERR:
1740             {
1741                 struct errhdr_t {
1742                    struct sock_extended_err ee;
1743                    struct sockaddr_in offender;
1744                 };
1745                 struct errhdr_t *errh = (struct errhdr_t *)data;
1746                 struct errhdr_t *target_errh =
1747                     (struct errhdr_t *)target_data;
1748 
1749                 if (len != sizeof(struct errhdr_t) ||
1750                     tgt_len != sizeof(struct errhdr_t)) {
1751                     goto unimplemented;
1752                 }
1753                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1754                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1755                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1756                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1757                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1758                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1759                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1760                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1761                     (void *) &errh->offender, sizeof(errh->offender));
1762                 break;
1763             }
1764             default:
1765                 goto unimplemented;
1766             }
1767             break;
1768 
1769         case SOL_IPV6:
1770             switch (cmsg->cmsg_type) {
1771             case IPV6_HOPLIMIT:
1772             {
1773                 uint32_t *v = (uint32_t *)data;
1774                 uint32_t *t_int = (uint32_t *)target_data;
1775 
1776                 if (len != sizeof(uint32_t) ||
1777                     tgt_len != sizeof(uint32_t)) {
1778                     goto unimplemented;
1779                 }
1780                 __put_user(*v, t_int);
1781                 break;
1782             }
1783             case IPV6_RECVERR:
1784             {
1785                 struct errhdr6_t {
1786                    struct sock_extended_err ee;
1787                    struct sockaddr_in6 offender;
1788                 };
1789                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1790                 struct errhdr6_t *target_errh =
1791                     (struct errhdr6_t *)target_data;
1792 
1793                 if (len != sizeof(struct errhdr6_t) ||
1794                     tgt_len != sizeof(struct errhdr6_t)) {
1795                     goto unimplemented;
1796                 }
1797                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1798                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1799                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1800                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1801                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1802                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1803                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1804                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1805                     (void *) &errh->offender, sizeof(errh->offender));
1806                 break;
1807             }
1808             default:
1809                 goto unimplemented;
1810             }
1811             break;
1812 
1813         default:
1814         unimplemented:
1815             gemu_log("Unsupported ancillary data: %d/%d\n",
1816                                         cmsg->cmsg_level, cmsg->cmsg_type);
1817             memcpy(target_data, data, MIN(len, tgt_len));
1818             if (tgt_len > len) {
1819                 memset(target_data + len, 0, tgt_len - len);
1820             }
1821         }
1822 
1823         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1824         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1825         if (msg_controllen < tgt_space) {
1826             tgt_space = msg_controllen;
1827         }
1828         msg_controllen -= tgt_space;
1829         space += tgt_space;
1830         cmsg = CMSG_NXTHDR(msgh, cmsg);
1831         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1832                                          target_cmsg_start);
1833     }
1834     unlock_user(target_cmsg, target_cmsg_addr, space);
1835  the_end:
1836     target_msgh->msg_controllen = tswapal(space);
1837     return 0;
1838 }
1839 
1840 /* do_setsockopt() must return target values and target errnos. */
1841 static abi_long do_setsockopt(int sockfd, int level, int optname,
1842                               abi_ulong optval_addr, socklen_t optlen)
1843 {
1844     abi_long ret;
1845     int val;
1846     struct ip_mreqn *ip_mreq;
1847     struct ip_mreq_source *ip_mreq_source;
1848 
1849     switch(level) {
1850     case SOL_TCP:
1851         /* TCP options all take an 'int' value.  */
1852         if (optlen < sizeof(uint32_t))
1853             return -TARGET_EINVAL;
1854 
1855         if (get_user_u32(val, optval_addr))
1856             return -TARGET_EFAULT;
1857         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1858         break;
1859     case SOL_IP:
1860         switch(optname) {
1861         case IP_TOS:
1862         case IP_TTL:
1863         case IP_HDRINCL:
1864         case IP_ROUTER_ALERT:
1865         case IP_RECVOPTS:
1866         case IP_RETOPTS:
1867         case IP_PKTINFO:
1868         case IP_MTU_DISCOVER:
1869         case IP_RECVERR:
1870         case IP_RECVTTL:
1871         case IP_RECVTOS:
1872 #ifdef IP_FREEBIND
1873         case IP_FREEBIND:
1874 #endif
1875         case IP_MULTICAST_TTL:
1876         case IP_MULTICAST_LOOP:
1877             val = 0;
1878             if (optlen >= sizeof(uint32_t)) {
1879                 if (get_user_u32(val, optval_addr))
1880                     return -TARGET_EFAULT;
1881             } else if (optlen >= 1) {
1882                 if (get_user_u8(val, optval_addr))
1883                     return -TARGET_EFAULT;
1884             }
1885             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1886             break;
1887         case IP_ADD_MEMBERSHIP:
1888         case IP_DROP_MEMBERSHIP:
1889             if (optlen < sizeof (struct target_ip_mreq) ||
1890                 optlen > sizeof (struct target_ip_mreqn))
1891                 return -TARGET_EINVAL;
1892 
1893             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1894             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1895             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1896             break;
1897 
1898         case IP_BLOCK_SOURCE:
1899         case IP_UNBLOCK_SOURCE:
1900         case IP_ADD_SOURCE_MEMBERSHIP:
1901         case IP_DROP_SOURCE_MEMBERSHIP:
1902             if (optlen != sizeof (struct target_ip_mreq_source))
1903                 return -TARGET_EINVAL;
1904 
1905             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1906             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1907             unlock_user(ip_mreq_source, optval_addr, 0);
1908             break;
1909 
1910         default:
1911             goto unimplemented;
1912         }
1913         break;
1914     case SOL_IPV6:
1915         switch (optname) {
1916         case IPV6_MTU_DISCOVER:
1917         case IPV6_MTU:
1918         case IPV6_V6ONLY:
1919         case IPV6_RECVPKTINFO:
1920         case IPV6_UNICAST_HOPS:
1921         case IPV6_MULTICAST_HOPS:
1922         case IPV6_MULTICAST_LOOP:
1923         case IPV6_RECVERR:
1924         case IPV6_RECVHOPLIMIT:
1925         case IPV6_2292HOPLIMIT:
1926         case IPV6_CHECKSUM:
1927         case IPV6_ADDRFORM:
1928         case IPV6_2292PKTINFO:
1929         case IPV6_RECVTCLASS:
1930         case IPV6_RECVRTHDR:
1931         case IPV6_2292RTHDR:
1932         case IPV6_RECVHOPOPTS:
1933         case IPV6_2292HOPOPTS:
1934         case IPV6_RECVDSTOPTS:
1935         case IPV6_2292DSTOPTS:
1936         case IPV6_TCLASS:
1937 #ifdef IPV6_RECVPATHMTU
1938         case IPV6_RECVPATHMTU:
1939 #endif
1940 #ifdef IPV6_TRANSPARENT
1941         case IPV6_TRANSPARENT:
1942 #endif
1943 #ifdef IPV6_FREEBIND
1944         case IPV6_FREEBIND:
1945 #endif
1946 #ifdef IPV6_RECVORIGDSTADDR
1947         case IPV6_RECVORIGDSTADDR:
1948 #endif
1949             val = 0;
1950             if (optlen < sizeof(uint32_t)) {
1951                 return -TARGET_EINVAL;
1952             }
1953             if (get_user_u32(val, optval_addr)) {
1954                 return -TARGET_EFAULT;
1955             }
1956             ret = get_errno(setsockopt(sockfd, level, optname,
1957                                        &val, sizeof(val)));
1958             break;
1959         case IPV6_PKTINFO:
1960         {
1961             struct in6_pktinfo pki;
1962 
1963             if (optlen < sizeof(pki)) {
1964                 return -TARGET_EINVAL;
1965             }
1966 
1967             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1968                 return -TARGET_EFAULT;
1969             }
1970 
1971             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1972 
1973             ret = get_errno(setsockopt(sockfd, level, optname,
1974                                        &pki, sizeof(pki)));
1975             break;
1976         }
1977         case IPV6_ADD_MEMBERSHIP:
1978         case IPV6_DROP_MEMBERSHIP:
1979         {
1980             struct ipv6_mreq ipv6mreq;
1981 
1982             if (optlen < sizeof(ipv6mreq)) {
1983                 return -TARGET_EINVAL;
1984             }
1985 
1986             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1987                 return -TARGET_EFAULT;
1988             }
1989 
1990             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1991 
1992             ret = get_errno(setsockopt(sockfd, level, optname,
1993                                        &ipv6mreq, sizeof(ipv6mreq)));
1994             break;
1995         }
1996         default:
1997             goto unimplemented;
1998         }
1999         break;
2000     case SOL_ICMPV6:
2001         switch (optname) {
2002         case ICMPV6_FILTER:
2003         {
2004             struct icmp6_filter icmp6f;
2005 
2006             if (optlen > sizeof(icmp6f)) {
2007                 optlen = sizeof(icmp6f);
2008             }
2009 
2010             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2011                 return -TARGET_EFAULT;
2012             }
2013 
2014             for (val = 0; val < 8; val++) {
2015                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2016             }
2017 
2018             ret = get_errno(setsockopt(sockfd, level, optname,
2019                                        &icmp6f, optlen));
2020             break;
2021         }
2022         default:
2023             goto unimplemented;
2024         }
2025         break;
2026     case SOL_RAW:
2027         switch (optname) {
2028         case ICMP_FILTER:
2029         case IPV6_CHECKSUM:
2030             /* These take a u32 value.  */
2031             if (optlen < sizeof(uint32_t)) {
2032                 return -TARGET_EINVAL;
2033             }
2034 
2035             if (get_user_u32(val, optval_addr)) {
2036                 return -TARGET_EFAULT;
2037             }
2038             ret = get_errno(setsockopt(sockfd, level, optname,
2039                                        &val, sizeof(val)));
2040             break;
2041 
2042         default:
2043             goto unimplemented;
2044         }
2045         break;
2046 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2047     case SOL_ALG:
2048         switch (optname) {
2049         case ALG_SET_KEY:
2050         {
2051             char *alg_key = g_try_malloc(optlen);
2052 
2053             if (!alg_key) {
2054                 return -TARGET_ENOMEM;
2055             }
2056             if (copy_from_user(alg_key, optval_addr, optlen)) {
2057                 g_free(alg_key);
2058                 return -TARGET_EFAULT;
2059             }
2060             ret = get_errno(setsockopt(sockfd, level, optname,
2061                                        alg_key, optlen));
2062             g_free(alg_key);
2063             break;
2064         }
2065         case ALG_SET_AEAD_AUTHSIZE:
2066         {
2067             ret = get_errno(setsockopt(sockfd, level, optname,
2068                                        NULL, optlen));
2069             break;
2070         }
2071         default:
2072             goto unimplemented;
2073         }
2074         break;
2075 #endif
2076     case TARGET_SOL_SOCKET:
2077         switch (optname) {
2078         case TARGET_SO_RCVTIMEO:
2079         {
2080                 struct timeval tv;
2081 
2082                 optname = SO_RCVTIMEO;
2083 
2084 set_timeout:
2085                 if (optlen != sizeof(struct target_timeval)) {
2086                     return -TARGET_EINVAL;
2087                 }
2088 
2089                 if (copy_from_user_timeval(&tv, optval_addr)) {
2090                     return -TARGET_EFAULT;
2091                 }
2092 
2093                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2094                                 &tv, sizeof(tv)));
2095                 return ret;
2096         }
2097         case TARGET_SO_SNDTIMEO:
2098                 optname = SO_SNDTIMEO;
2099                 goto set_timeout;
2100         case TARGET_SO_ATTACH_FILTER:
2101         {
2102                 struct target_sock_fprog *tfprog;
2103                 struct target_sock_filter *tfilter;
2104                 struct sock_fprog fprog;
2105                 struct sock_filter *filter;
2106                 int i;
2107 
2108                 if (optlen != sizeof(*tfprog)) {
2109                     return -TARGET_EINVAL;
2110                 }
2111                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2112                     return -TARGET_EFAULT;
2113                 }
2114                 if (!lock_user_struct(VERIFY_READ, tfilter,
2115                                       tswapal(tfprog->filter), 0)) {
2116                     unlock_user_struct(tfprog, optval_addr, 1);
2117                     return -TARGET_EFAULT;
2118                 }
2119 
2120                 fprog.len = tswap16(tfprog->len);
2121                 filter = g_try_new(struct sock_filter, fprog.len);
2122                 if (filter == NULL) {
2123                     unlock_user_struct(tfilter, tfprog->filter, 1);
2124                     unlock_user_struct(tfprog, optval_addr, 1);
2125                     return -TARGET_ENOMEM;
2126                 }
2127                 for (i = 0; i < fprog.len; i++) {
2128                     filter[i].code = tswap16(tfilter[i].code);
2129                     filter[i].jt = tfilter[i].jt;
2130                     filter[i].jf = tfilter[i].jf;
2131                     filter[i].k = tswap32(tfilter[i].k);
2132                 }
2133                 fprog.filter = filter;
2134 
2135                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2136                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2137                 g_free(filter);
2138 
2139                 unlock_user_struct(tfilter, tfprog->filter, 1);
2140                 unlock_user_struct(tfprog, optval_addr, 1);
2141                 return ret;
2142         }
2143         case TARGET_SO_BINDTODEVICE:
2144         {
2145                 char *dev_ifname, *addr_ifname;
2146
2147                 if (optlen > IFNAMSIZ - 1) {
2148                     optlen = IFNAMSIZ - 1;
2149                 }
2150                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2151                 if (!dev_ifname) {
2152                     return -TARGET_EFAULT;
2153                 }
2154                 optname = SO_BINDTODEVICE;
2155                 addr_ifname = alloca(IFNAMSIZ);
2156                 memcpy(addr_ifname, dev_ifname, optlen);
2157                 addr_ifname[optlen] = 0;
2158                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2159                                            addr_ifname, optlen));
2160                 unlock_user(dev_ifname, optval_addr, 0);
2161                 return ret;
2162         }
2163         case TARGET_SO_LINGER:
2164         {
2165                 struct linger lg;
2166                 struct target_linger *tlg;
2167 
2168                 if (optlen != sizeof(struct target_linger)) {
2169                     return -TARGET_EINVAL;
2170                 }
2171                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2172                     return -TARGET_EFAULT;
2173                 }
2174                 __get_user(lg.l_onoff, &tlg->l_onoff);
2175                 __get_user(lg.l_linger, &tlg->l_linger);
2176                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2177                                 &lg, sizeof(lg)));
2178                 unlock_user_struct(tlg, optval_addr, 0);
2179                 return ret;
2180         }
2181             /* Options with 'int' argument.  */
2182         case TARGET_SO_DEBUG:
2183                 optname = SO_DEBUG;
2184                 break;
2185         case TARGET_SO_REUSEADDR:
2186                 optname = SO_REUSEADDR;
2187                 break;
2188 #ifdef SO_REUSEPORT
2189         case TARGET_SO_REUSEPORT:
2190                 optname = SO_REUSEPORT;
2191                 break;
2192 #endif
2193         case TARGET_SO_TYPE:
2194                 optname = SO_TYPE;
2195                 break;
2196         case TARGET_SO_ERROR:
2197                 optname = SO_ERROR;
2198                 break;
2199         case TARGET_SO_DONTROUTE:
2200                 optname = SO_DONTROUTE;
2201                 break;
2202         case TARGET_SO_BROADCAST:
2203                 optname = SO_BROADCAST;
2204                 break;
2205         case TARGET_SO_SNDBUF:
2206                 optname = SO_SNDBUF;
2207                 break;
2208         case TARGET_SO_SNDBUFFORCE:
2209                 optname = SO_SNDBUFFORCE;
2210                 break;
2211         case TARGET_SO_RCVBUF:
2212                 optname = SO_RCVBUF;
2213                 break;
2214         case TARGET_SO_RCVBUFFORCE:
2215                 optname = SO_RCVBUFFORCE;
2216                 break;
2217         case TARGET_SO_KEEPALIVE:
2218                 optname = SO_KEEPALIVE;
2219                 break;
2220         case TARGET_SO_OOBINLINE:
2221                 optname = SO_OOBINLINE;
2222                 break;
2223         case TARGET_SO_NO_CHECK:
2224                 optname = SO_NO_CHECK;
2225                 break;
2226         case TARGET_SO_PRIORITY:
2227                 optname = SO_PRIORITY;
2228                 break;
2229 #ifdef SO_BSDCOMPAT
2230         case TARGET_SO_BSDCOMPAT:
2231                 optname = SO_BSDCOMPAT;
2232                 break;
2233 #endif
2234         case TARGET_SO_PASSCRED:
2235                 optname = SO_PASSCRED;
2236                 break;
2237         case TARGET_SO_PASSSEC:
2238                 optname = SO_PASSSEC;
2239                 break;
2240         case TARGET_SO_TIMESTAMP:
2241                 optname = SO_TIMESTAMP;
2242                 break;
2243         case TARGET_SO_RCVLOWAT:
2244                 optname = SO_RCVLOWAT;
2245                 break;
2246         default:
2247             goto unimplemented;
2248         }
2249         if (optlen < sizeof(uint32_t))
2250             return -TARGET_EINVAL;
2251
2252         if (get_user_u32(val, optval_addr))
2253             return -TARGET_EFAULT;
2254         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2255         break;
2256 #ifdef SOL_NETLINK
2257     case SOL_NETLINK:
2258         switch (optname) {
2259         case NETLINK_PKTINFO:
2260         case NETLINK_ADD_MEMBERSHIP:
2261         case NETLINK_DROP_MEMBERSHIP:
2262         case NETLINK_BROADCAST_ERROR:
2263         case NETLINK_NO_ENOBUFS:
2264 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2265         case NETLINK_LISTEN_ALL_NSID:
2266         case NETLINK_CAP_ACK:
2267 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2268 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2269         case NETLINK_EXT_ACK:
2270 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2271 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2272         case NETLINK_GET_STRICT_CHK:
2273 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2274             break;
2275         default:
2276             goto unimplemented;
2277         }
2278         val = 0;
2279         if (optlen < sizeof(uint32_t)) {
2280             return -TARGET_EINVAL;
2281         }
2282         if (get_user_u32(val, optval_addr)) {
2283             return -TARGET_EFAULT;
2284         }
2285         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2286                                    sizeof(val)));
2287         break;
2288 #endif /* SOL_NETLINK */
2289     default:
2290     unimplemented:
2291         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2292         ret = -TARGET_ENOPROTOOPT;
2293     }
2294     return ret;
2295 }
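/* Illustrative guest-side call (hypothetical program, not part of this
 * file): setting a receive timeout such as
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * reaches the TARGET_SO_RCVTIMEO branch above, where the guest's
 * target_timeval is converted with copy_from_user_timeval() before the
 * host setsockopt() is issued with a native struct timeval.
 */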
2296 
2297 /* do_getsockopt() must return target values and target errnos. */
2298 static abi_long do_getsockopt(int sockfd, int level, int optname,
2299                               abi_ulong optval_addr, abi_ulong optlen)
2300 {
2301     abi_long ret;
2302     int len, val;
2303     socklen_t lv;
2304 
2305     switch(level) {
2306     case TARGET_SOL_SOCKET:
2307         level = SOL_SOCKET;
2308         switch (optname) {
2309         /* These don't just return a single integer */
2310         case TARGET_SO_RCVTIMEO:
2311         case TARGET_SO_SNDTIMEO:
2312         case TARGET_SO_PEERNAME:
2313             goto unimplemented;
2314         case TARGET_SO_PEERCRED: {
2315             struct ucred cr;
2316             socklen_t crlen;
2317             struct target_ucred *tcr;
2318 
2319             if (get_user_u32(len, optlen)) {
2320                 return -TARGET_EFAULT;
2321             }
2322             if (len < 0) {
2323                 return -TARGET_EINVAL;
2324             }
2325 
2326             crlen = sizeof(cr);
2327             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2328                                        &cr, &crlen));
2329             if (ret < 0) {
2330                 return ret;
2331             }
2332             if (len > crlen) {
2333                 len = crlen;
2334             }
2335             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2336                 return -TARGET_EFAULT;
2337             }
2338             __put_user(cr.pid, &tcr->pid);
2339             __put_user(cr.uid, &tcr->uid);
2340             __put_user(cr.gid, &tcr->gid);
2341             unlock_user_struct(tcr, optval_addr, 1);
2342             if (put_user_u32(len, optlen)) {
2343                 return -TARGET_EFAULT;
2344             }
2345             break;
2346         }
2347         case TARGET_SO_LINGER:
2348         {
2349             struct linger lg;
2350             socklen_t lglen;
2351             struct target_linger *tlg;
2352 
2353             if (get_user_u32(len, optlen)) {
2354                 return -TARGET_EFAULT;
2355             }
2356             if (len < 0) {
2357                 return -TARGET_EINVAL;
2358             }
2359 
2360             lglen = sizeof(lg);
2361             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2362                                        &lg, &lglen));
2363             if (ret < 0) {
2364                 return ret;
2365             }
2366             if (len > lglen) {
2367                 len = lglen;
2368             }
2369             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2370                 return -TARGET_EFAULT;
2371             }
2372             __put_user(lg.l_onoff, &tlg->l_onoff);
2373             __put_user(lg.l_linger, &tlg->l_linger);
2374             unlock_user_struct(tlg, optval_addr, 1);
2375             if (put_user_u32(len, optlen)) {
2376                 return -TARGET_EFAULT;
2377             }
2378             break;
2379         }
2380         /* Options with 'int' argument.  */
2381         case TARGET_SO_DEBUG:
2382             optname = SO_DEBUG;
2383             goto int_case;
2384         case TARGET_SO_REUSEADDR:
2385             optname = SO_REUSEADDR;
2386             goto int_case;
2387 #ifdef SO_REUSEPORT
2388         case TARGET_SO_REUSEPORT:
2389             optname = SO_REUSEPORT;
2390             goto int_case;
2391 #endif
2392         case TARGET_SO_TYPE:
2393             optname = SO_TYPE;
2394             goto int_case;
2395         case TARGET_SO_ERROR:
2396             optname = SO_ERROR;
2397             goto int_case;
2398         case TARGET_SO_DONTROUTE:
2399             optname = SO_DONTROUTE;
2400             goto int_case;
2401         case TARGET_SO_BROADCAST:
2402             optname = SO_BROADCAST;
2403             goto int_case;
2404         case TARGET_SO_SNDBUF:
2405             optname = SO_SNDBUF;
2406             goto int_case;
2407         case TARGET_SO_RCVBUF:
2408             optname = SO_RCVBUF;
2409             goto int_case;
2410         case TARGET_SO_KEEPALIVE:
2411             optname = SO_KEEPALIVE;
2412             goto int_case;
2413         case TARGET_SO_OOBINLINE:
2414             optname = SO_OOBINLINE;
2415             goto int_case;
2416         case TARGET_SO_NO_CHECK:
2417             optname = SO_NO_CHECK;
2418             goto int_case;
2419         case TARGET_SO_PRIORITY:
2420             optname = SO_PRIORITY;
2421             goto int_case;
2422 #ifdef SO_BSDCOMPAT
2423         case TARGET_SO_BSDCOMPAT:
2424             optname = SO_BSDCOMPAT;
2425             goto int_case;
2426 #endif
2427         case TARGET_SO_PASSCRED:
2428             optname = SO_PASSCRED;
2429             goto int_case;
2430         case TARGET_SO_TIMESTAMP:
2431             optname = SO_TIMESTAMP;
2432             goto int_case;
2433         case TARGET_SO_RCVLOWAT:
2434             optname = SO_RCVLOWAT;
2435             goto int_case;
2436         case TARGET_SO_ACCEPTCONN:
2437             optname = SO_ACCEPTCONN;
2438             goto int_case;
2439         default:
2440             goto int_case;
2441         }
2442         break;
2443     case SOL_TCP:
2444         /* TCP options all take an 'int' value.  */
2445     int_case:
2446         if (get_user_u32(len, optlen))
2447             return -TARGET_EFAULT;
2448         if (len < 0)
2449             return -TARGET_EINVAL;
2450         lv = sizeof(lv);
2451         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2452         if (ret < 0)
2453             return ret;
2454         if (optname == SO_TYPE) {
2455             val = host_to_target_sock_type(val);
2456         }
2457         if (len > lv)
2458             len = lv;
2459         if (len == 4) {
2460             if (put_user_u32(val, optval_addr))
2461                 return -TARGET_EFAULT;
2462         } else {
2463             if (put_user_u8(val, optval_addr))
2464                 return -TARGET_EFAULT;
2465         }
2466         if (put_user_u32(len, optlen))
2467             return -TARGET_EFAULT;
2468         break;
2469     case SOL_IP:
2470         switch(optname) {
2471         case IP_TOS:
2472         case IP_TTL:
2473         case IP_HDRINCL:
2474         case IP_ROUTER_ALERT:
2475         case IP_RECVOPTS:
2476         case IP_RETOPTS:
2477         case IP_PKTINFO:
2478         case IP_MTU_DISCOVER:
2479         case IP_RECVERR:
2480         case IP_RECVTOS:
2481 #ifdef IP_FREEBIND
2482         case IP_FREEBIND:
2483 #endif
2484         case IP_MULTICAST_TTL:
2485         case IP_MULTICAST_LOOP:
2486             if (get_user_u32(len, optlen))
2487                 return -TARGET_EFAULT;
2488             if (len < 0)
2489                 return -TARGET_EINVAL;
2490             lv = sizeof(lv);
2491             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2492             if (ret < 0)
2493                 return ret;
2494             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2495                 len = 1;
2496                 if (put_user_u32(len, optlen)
2497                     || put_user_u8(val, optval_addr))
2498                     return -TARGET_EFAULT;
2499             } else {
2500                 if (len > sizeof(int))
2501                     len = sizeof(int);
2502                 if (put_user_u32(len, optlen)
2503                     || put_user_u32(val, optval_addr))
2504                     return -TARGET_EFAULT;
2505             }
2506             break;
2507         default:
2508             ret = -TARGET_ENOPROTOOPT;
2509             break;
2510         }
2511         break;
2512     case SOL_IPV6:
2513         switch (optname) {
2514         case IPV6_MTU_DISCOVER:
2515         case IPV6_MTU:
2516         case IPV6_V6ONLY:
2517         case IPV6_RECVPKTINFO:
2518         case IPV6_UNICAST_HOPS:
2519         case IPV6_MULTICAST_HOPS:
2520         case IPV6_MULTICAST_LOOP:
2521         case IPV6_RECVERR:
2522         case IPV6_RECVHOPLIMIT:
2523         case IPV6_2292HOPLIMIT:
2524         case IPV6_CHECKSUM:
2525         case IPV6_ADDRFORM:
2526         case IPV6_2292PKTINFO:
2527         case IPV6_RECVTCLASS:
2528         case IPV6_RECVRTHDR:
2529         case IPV6_2292RTHDR:
2530         case IPV6_RECVHOPOPTS:
2531         case IPV6_2292HOPOPTS:
2532         case IPV6_RECVDSTOPTS:
2533         case IPV6_2292DSTOPTS:
2534         case IPV6_TCLASS:
2535 #ifdef IPV6_RECVPATHMTU
2536         case IPV6_RECVPATHMTU:
2537 #endif
2538 #ifdef IPV6_TRANSPARENT
2539         case IPV6_TRANSPARENT:
2540 #endif
2541 #ifdef IPV6_FREEBIND
2542         case IPV6_FREEBIND:
2543 #endif
2544 #ifdef IPV6_RECVORIGDSTADDR
2545         case IPV6_RECVORIGDSTADDR:
2546 #endif
2547             if (get_user_u32(len, optlen))
2548                 return -TARGET_EFAULT;
2549             if (len < 0)
2550                 return -TARGET_EINVAL;
2551             lv = sizeof(lv);
2552             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2553             if (ret < 0)
2554                 return ret;
2555             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2556                 len = 1;
2557                 if (put_user_u32(len, optlen)
2558                     || put_user_u8(val, optval_addr))
2559                     return -TARGET_EFAULT;
2560             } else {
2561                 if (len > sizeof(int))
2562                     len = sizeof(int);
2563                 if (put_user_u32(len, optlen)
2564                     || put_user_u32(val, optval_addr))
2565                     return -TARGET_EFAULT;
2566             }
2567             break;
2568         default:
2569             ret = -TARGET_ENOPROTOOPT;
2570             break;
2571         }
2572         break;
2573 #ifdef SOL_NETLINK
2574     case SOL_NETLINK:
2575         switch (optname) {
2576         case NETLINK_PKTINFO:
2577         case NETLINK_BROADCAST_ERROR:
2578         case NETLINK_NO_ENOBUFS:
2579 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2580         case NETLINK_LISTEN_ALL_NSID:
2581         case NETLINK_CAP_ACK:
2582 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2583 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2584         case NETLINK_EXT_ACK:
2585 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2586 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2587         case NETLINK_GET_STRICT_CHK:
2588 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2589             if (get_user_u32(len, optlen)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             if (len != sizeof(val)) {
2593                 return -TARGET_EINVAL;
2594             }
2595             lv = len;
2596             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2597             if (ret < 0) {
2598                 return ret;
2599             }
2600             if (put_user_u32(lv, optlen)
2601                 || put_user_u32(val, optval_addr)) {
2602                 return -TARGET_EFAULT;
2603             }
2604             break;
2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2606         case NETLINK_LIST_MEMBERSHIPS:
2607         {
2608             uint32_t *results;
2609             int i;
2610             if (get_user_u32(len, optlen)) {
2611                 return -TARGET_EFAULT;
2612             }
2613             if (len < 0) {
2614                 return -TARGET_EINVAL;
2615             }
2616             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2617             if (!results) {
2618                 return -TARGET_EFAULT;
2619             }
2620             lv = len;
2621             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2622             if (ret < 0) {
2623                 unlock_user(results, optval_addr, 0);
2624                 return ret;
2625             }
2626             /* Swap host endianness to target endianness. */
2627             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2628                 results[i] = tswap32(results[i]);
2629             }
2630             if (put_user_u32(lv, optlen)) {
2631                 return -TARGET_EFAULT;
2632             }
2633             unlock_user(results, optval_addr, 0);
2634             break;
2635         }
2636 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2637         default:
2638             goto unimplemented;
2639         }
2640         break;
2641 #endif /* SOL_NETLINK */
2642     default:
2643     unimplemented:
2644         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2645                  level, optname);
2646         ret = -TARGET_EOPNOTSUPP;
2647         break;
2648     }
2649     return ret;
2650 }
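/* Illustrative guest-side call (hypothetical program, not part of this
 * file): checking the outcome of a non-blocking connect with
 *
 *     int err;
 *     socklen_t len = sizeof(err);
 *     getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * is routed through TARGET_SO_ERROR -> int_case above: the host value is
 * fetched with getsockopt() and written back to the guest in target byte
 * order, together with the updated length.
 */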
2651 
2652 /* Convert a target low/high pair representing a file offset into the host
2653  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2654  * as the kernel doesn't handle them either.
2655  */
2656 static void target_to_host_low_high(abi_ulong tlow,
2657                                     abi_ulong thigh,
2658                                     unsigned long *hlow,
2659                                     unsigned long *hhigh)
2660 {
2661     uint64_t off = tlow |
2662         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2663         TARGET_LONG_BITS / 2;
2664 
2665     *hlow = off;
2666     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2667 }
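/* Worked example (illustrative): for a 32-bit target on a 64-bit host,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef; *hlow then receives the full value and
 * *hhigh becomes 0.  On a 32-bit host, *hlow would get 0x89abcdef and
 * *hhigh 0x01234567.  The shifts are split in two halves so they remain
 * well defined (no shift by the full word width) when TARGET_LONG_BITS
 * or HOST_LONG_BITS is 64.
 */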
2668 
2669 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2670                                 abi_ulong count, int copy)
2671 {
2672     struct target_iovec *target_vec;
2673     struct iovec *vec;
2674     abi_ulong total_len, max_len;
2675     int i;
2676     int err = 0;
2677     bool bad_address = false;
2678 
2679     if (count == 0) {
2680         errno = 0;
2681         return NULL;
2682     }
2683     if (count > IOV_MAX) {
2684         errno = EINVAL;
2685         return NULL;
2686     }
2687 
2688     vec = g_try_new0(struct iovec, count);
2689     if (vec == NULL) {
2690         errno = ENOMEM;
2691         return NULL;
2692     }
2693 
2694     target_vec = lock_user(VERIFY_READ, target_addr,
2695                            count * sizeof(struct target_iovec), 1);
2696     if (target_vec == NULL) {
2697         err = EFAULT;
2698         goto fail2;
2699     }
2700 
2701     /* ??? If host page size > target page size, this will result in a
2702        value larger than what we can actually support.  */
2703     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2704     total_len = 0;
2705 
2706     for (i = 0; i < count; i++) {
2707         abi_ulong base = tswapal(target_vec[i].iov_base);
2708         abi_long len = tswapal(target_vec[i].iov_len);
2709 
2710         if (len < 0) {
2711             err = EINVAL;
2712             goto fail;
2713         } else if (len == 0) {
2714             /* Zero length pointer is ignored.  */
2715             vec[i].iov_base = 0;
2716         } else {
2717             vec[i].iov_base = lock_user(type, base, len, copy);
2718             /* If the first buffer pointer is bad, this is a fault.  But
2719              * subsequent bad buffers will result in a partial write; this
2720              * is realized by filling the vector with null pointers and
2721              * zero lengths. */
2722             if (!vec[i].iov_base) {
2723                 if (i == 0) {
2724                     err = EFAULT;
2725                     goto fail;
2726                 } else {
2727                     bad_address = true;
2728                 }
2729             }
2730             if (bad_address) {
2731                 len = 0;
2732             }
2733             if (len > max_len - total_len) {
2734                 len = max_len - total_len;
2735             }
2736         }
2737         vec[i].iov_len = len;
2738         total_len += len;
2739     }
2740 
2741     unlock_user(target_vec, target_addr, 0);
2742     return vec;
2743 
2744  fail:
2745     while (--i >= 0) {
2746         if (tswapal(target_vec[i].iov_len) > 0) {
2747             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2748         }
2749     }
2750     unlock_user(target_vec, target_addr, 0);
2751  fail2:
2752     g_free(vec);
2753     errno = err;
2754     return NULL;
2755 }
2756 
2757 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2758                          abi_ulong count, int copy)
2759 {
2760     struct target_iovec *target_vec;
2761     int i;
2762 
2763     target_vec = lock_user(VERIFY_READ, target_addr,
2764                            count * sizeof(struct target_iovec), 1);
2765     if (target_vec) {
2766         for (i = 0; i < count; i++) {
2767             abi_ulong base = tswapal(target_vec[i].iov_base);
2768             abi_long len = tswapal(target_vec[i].iov_len);
2769             if (len < 0) {
2770                 break;
2771             }
2772             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2773         }
2774         unlock_user(target_vec, target_addr, 0);
2775     }
2776 
2777     g_free(vec);
2778 }
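/* Sketch of the typical lock_iovec()/unlock_iovec() pairing (illustrative
 * only, error handling trimmed; assumes the safe_writev() wrapper that
 * this file generates via safe_syscall3()):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_vec_addr, count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(safe_writev(fd, vec, count));
 *     unlock_iovec(vec, target_vec_addr, count, 0);
 *
 * For reads, VERIFY_WRITE is used and unlock_iovec() is called with
 * copy == 1 so the data is copied back to the guest.
 */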
2779 
2780 static inline int target_to_host_sock_type(int *type)
2781 {
2782     int host_type = 0;
2783     int target_type = *type;
2784 
2785     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2786     case TARGET_SOCK_DGRAM:
2787         host_type = SOCK_DGRAM;
2788         break;
2789     case TARGET_SOCK_STREAM:
2790         host_type = SOCK_STREAM;
2791         break;
2792     default:
2793         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2794         break;
2795     }
2796     if (target_type & TARGET_SOCK_CLOEXEC) {
2797 #if defined(SOCK_CLOEXEC)
2798         host_type |= SOCK_CLOEXEC;
2799 #else
2800         return -TARGET_EINVAL;
2801 #endif
2802     }
2803     if (target_type & TARGET_SOCK_NONBLOCK) {
2804 #if defined(SOCK_NONBLOCK)
2805         host_type |= SOCK_NONBLOCK;
2806 #elif !defined(O_NONBLOCK)
2807         return -TARGET_EINVAL;
2808 #endif
2809     }
2810     *type = host_type;
2811     return 0;
2812 }
2813 
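/* Example of why the remap above is needed (illustrative): on most
 * architectures TARGET_SOCK_STREAM and SOCK_STREAM are both 1, but MIPS
 * guests use different numeric values for SOCK_STREAM/SOCK_DGRAM, so a
 * guest asking for a "stream" socket must be translated before the host
 * socket(2) call.  TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK are likewise
 * target-specific bits mapped onto the host's SOCK_CLOEXEC/SOCK_NONBLOCK
 * where those exist.
 */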
2814 /* Try to emulate socket type flags after socket creation.  */
2815 static int sock_flags_fixup(int fd, int target_type)
2816 {
2817 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2818     if (target_type & TARGET_SOCK_NONBLOCK) {
2819         int flags = fcntl(fd, F_GETFL);
2820         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2821             close(fd);
2822             return -TARGET_EINVAL;
2823         }
2824     }
2825 #endif
2826     return fd;
2827 }
2828 
2829 /* do_socket() must return target values and target errnos. */
2830 static abi_long do_socket(int domain, int type, int protocol)
2831 {
2832     int target_type = type;
2833     int ret;
2834 
2835     ret = target_to_host_sock_type(&type);
2836     if (ret) {
2837         return ret;
2838     }
2839 
2840     if (domain == PF_NETLINK && !(
2841 #ifdef CONFIG_RTNETLINK
2842          protocol == NETLINK_ROUTE ||
2843 #endif
2844          protocol == NETLINK_KOBJECT_UEVENT ||
2845          protocol == NETLINK_AUDIT)) {
2846         return -EPFNOSUPPORT;
2847     }
2848 
2849     if (domain == AF_PACKET ||
2850         (domain == AF_INET && type == SOCK_PACKET)) {
2851         protocol = tswap16(protocol);
2852     }
2853 
2854     ret = get_errno(socket(domain, type, protocol));
2855     if (ret >= 0) {
2856         ret = sock_flags_fixup(ret, target_type);
2857         if (type == SOCK_PACKET) {
2858             /* Handle an obsolete case:
2859              * if the socket type is SOCK_PACKET, bind by name.
2860              */
2861             fd_trans_register(ret, &target_packet_trans);
2862         } else if (domain == PF_NETLINK) {
2863             switch (protocol) {
2864 #ifdef CONFIG_RTNETLINK
2865             case NETLINK_ROUTE:
2866                 fd_trans_register(ret, &target_netlink_route_trans);
2867                 break;
2868 #endif
2869             case NETLINK_KOBJECT_UEVENT:
2870                 /* nothing to do: messages are strings */
2871                 break;
2872             case NETLINK_AUDIT:
2873                 fd_trans_register(ret, &target_netlink_audit_trans);
2874                 break;
2875             default:
2876                 g_assert_not_reached();
2877             }
2878         }
2879     }
2880     return ret;
2881 }
2882 
2883 /* do_bind() must return target values and target errnos. */
2884 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2885                         socklen_t addrlen)
2886 {
2887     void *addr;
2888     abi_long ret;
2889 
2890     if ((int)addrlen < 0) {
2891         return -TARGET_EINVAL;
2892     }
2893 
2894     addr = alloca(addrlen+1);
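    /* The extra byte gives target_to_host_sockaddr() room for the '\0'
     * terminator it may append when fixing up an AF_UNIX sun_path (see
     * the length adjustment in that helper).
     */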
2895 
2896     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2897     if (ret)
2898         return ret;
2899 
2900     return get_errno(bind(sockfd, addr, addrlen));
2901 }
2902 
2903 /* do_connect() must return target values and target errnos. */
2904 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2905                            socklen_t addrlen)
2906 {
2907     void *addr;
2908     abi_long ret;
2909 
2910     if ((int)addrlen < 0) {
2911         return -TARGET_EINVAL;
2912     }
2913 
2914     addr = alloca(addrlen+1);
2915 
2916     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2917     if (ret)
2918         return ret;
2919 
2920     return get_errno(safe_connect(sockfd, addr, addrlen));
2921 }
2922 
2923 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2924 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2925                                       int flags, int send)
2926 {
2927     abi_long ret, len;
2928     struct msghdr msg;
2929     abi_ulong count;
2930     struct iovec *vec;
2931     abi_ulong target_vec;
2932 
2933     if (msgp->msg_name) {
2934         msg.msg_namelen = tswap32(msgp->msg_namelen);
2935         msg.msg_name = alloca(msg.msg_namelen+1);
2936         ret = target_to_host_sockaddr(fd, msg.msg_name,
2937                                       tswapal(msgp->msg_name),
2938                                       msg.msg_namelen);
2939         if (ret == -TARGET_EFAULT) {
2940             /* For connected sockets msg_name and msg_namelen must
2941              * be ignored, so returning EFAULT immediately is wrong.
2942              * Instead, pass a bad msg_name to the host kernel, and
2943              * let it decide whether to return EFAULT or not.
2944              */
2945             msg.msg_name = (void *)-1;
2946         } else if (ret) {
2947             goto out2;
2948         }
2949     } else {
2950         msg.msg_name = NULL;
2951         msg.msg_namelen = 0;
2952     }
2953     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2954     msg.msg_control = alloca(msg.msg_controllen);
2955     memset(msg.msg_control, 0, msg.msg_controllen);
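    /* Doubling the guest's msg_controllen implements the "twice the size
     * of the target buffer" allocation strategy mentioned in
     * target_to_host_cmsg(): it leaves headroom for control payloads that
     * are larger on the host than on the target.
     */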
2956 
2957     msg.msg_flags = tswap32(msgp->msg_flags);
2958 
2959     count = tswapal(msgp->msg_iovlen);
2960     target_vec = tswapal(msgp->msg_iov);
2961 
2962     if (count > IOV_MAX) {
2963         /* sendmsg/recvmsg return a different errno for this condition than
2964          * readv/writev do, so we must catch it here before lock_iovec() does.
2965          */
2966         ret = -TARGET_EMSGSIZE;
2967         goto out2;
2968     }
2969 
2970     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2971                      target_vec, count, send);
2972     if (vec == NULL) {
2973         ret = -host_to_target_errno(errno);
2974         goto out2;
2975     }
2976     msg.msg_iovlen = count;
2977     msg.msg_iov = vec;
2978 
2979     if (send) {
2980         if (fd_trans_target_to_host_data(fd)) {
2981             void *host_msg;
2982 
2983             host_msg = g_malloc(msg.msg_iov->iov_len);
2984             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2985             ret = fd_trans_target_to_host_data(fd)(host_msg,
2986                                                    msg.msg_iov->iov_len);
2987             if (ret >= 0) {
2988                 msg.msg_iov->iov_base = host_msg;
2989                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2990             }
2991             g_free(host_msg);
2992         } else {
2993             ret = target_to_host_cmsg(&msg, msgp);
2994             if (ret == 0) {
2995                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2996             }
2997         }
2998     } else {
2999         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3000         if (!is_error(ret)) {
3001             len = ret;
3002             if (fd_trans_host_to_target_data(fd)) {
3003                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3004                                                MIN(msg.msg_iov->iov_len, len));
3005             } else {
3006                 ret = host_to_target_cmsg(msgp, &msg);
3007             }
3008             if (!is_error(ret)) {
3009                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3010                 msgp->msg_flags = tswap32(msg.msg_flags);
3011                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3012                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3013                                     msg.msg_name, msg.msg_namelen);
3014                     if (ret) {
3015                         goto out;
3016                     }
3017                 }
3018 
3019                 ret = len;
3020             }
3021         }
3022     }
3023 
3024 out:
3025     unlock_iovec(vec, target_vec, count, !send);
3026 out2:
3027     return ret;
3028 }
3029 
3030 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3031                                int flags, int send)
3032 {
3033     abi_long ret;
3034     struct target_msghdr *msgp;
3035 
3036     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3037                           msgp,
3038                           target_msg,
3039                           send ? 1 : 0)) {
3040         return -TARGET_EFAULT;
3041     }
3042     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3043     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3044     return ret;
3045 }
3046 
3047 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3048  * so it might not have this *mmsg-specific flag either.
3049  */
3050 #ifndef MSG_WAITFORONE
3051 #define MSG_WAITFORONE 0x10000
3052 #endif
3053 
3054 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3055                                 unsigned int vlen, unsigned int flags,
3056                                 int send)
3057 {
3058     struct target_mmsghdr *mmsgp;
3059     abi_long ret = 0;
3060     int i;
3061 
3062     if (vlen > UIO_MAXIOV) {
3063         vlen = UIO_MAXIOV;
3064     }
3065 
3066     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3067     if (!mmsgp) {
3068         return -TARGET_EFAULT;
3069     }
3070 
3071     for (i = 0; i < vlen; i++) {
3072         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3073         if (is_error(ret)) {
3074             break;
3075         }
3076         mmsgp[i].msg_len = tswap32(ret);
3077         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3078         if (flags & MSG_WAITFORONE) {
3079             flags |= MSG_DONTWAIT;
3080         }
3081     }
3082 
3083     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3084 
3085     /* Return the number of datagrams sent or received if we handled
3086      * any at all; otherwise return the error.
3087      */
3088     if (i) {
3089         return i;
3090     }
3091     return ret;
3092 }
3093 
3094 /* do_accept4() must return target values and target errnos. */
3095 static abi_long do_accept4(int fd, abi_ulong target_addr,
3096                            abi_ulong target_addrlen_addr, int flags)
3097 {
3098     socklen_t addrlen, ret_addrlen;
3099     void *addr;
3100     abi_long ret;
3101     int host_flags;
3102 
3103     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3104 
3105     if (target_addr == 0) {
3106         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3107     }
3108 
3109     /* linux returns EINVAL if addrlen pointer is invalid */
3110     if (get_user_u32(addrlen, target_addrlen_addr))
3111         return -TARGET_EINVAL;
3112 
3113     if ((int)addrlen < 0) {
3114         return -TARGET_EINVAL;
3115     }
3116 
3117     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3118         return -TARGET_EINVAL;
3119 
3120     addr = alloca(addrlen);
3121 
3122     ret_addrlen = addrlen;
3123     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3124     if (!is_error(ret)) {
3125         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3126         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3127             ret = -TARGET_EFAULT;
3128         }
3129     }
3130     return ret;
3131 }
3132 
3133 /* do_getpeername() must return target values and target errnos. */
3134 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3135                                abi_ulong target_addrlen_addr)
3136 {
3137     socklen_t addrlen, ret_addrlen;
3138     void *addr;
3139     abi_long ret;
3140 
3141     if (get_user_u32(addrlen, target_addrlen_addr))
3142         return -TARGET_EFAULT;
3143 
3144     if ((int)addrlen < 0) {
3145         return -TARGET_EINVAL;
3146     }
3147 
3148     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3149         return -TARGET_EFAULT;
3150 
3151     addr = alloca(addrlen);
3152 
3153     ret_addrlen = addrlen;
3154     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3155     if (!is_error(ret)) {
3156         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3157         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3158             ret = -TARGET_EFAULT;
3159         }
3160     }
3161     return ret;
3162 }
3163 
3164 /* do_getsockname() must return target values and target errnos. */
3165 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3166                                abi_ulong target_addrlen_addr)
3167 {
3168     socklen_t addrlen, ret_addrlen;
3169     void *addr;
3170     abi_long ret;
3171 
3172     if (get_user_u32(addrlen, target_addrlen_addr))
3173         return -TARGET_EFAULT;
3174 
3175     if ((int)addrlen < 0) {
3176         return -TARGET_EINVAL;
3177     }
3178 
3179     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3180         return -TARGET_EFAULT;
3181 
3182     addr = alloca(addrlen);
3183 
3184     ret_addrlen = addrlen;
3185     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3186     if (!is_error(ret)) {
3187         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3188         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3189             ret = -TARGET_EFAULT;
3190         }
3191     }
3192     return ret;
3193 }
3194 
3195 /* do_socketpair() must return target values and target errnos. */
3196 static abi_long do_socketpair(int domain, int type, int protocol,
3197                               abi_ulong target_tab_addr)
3198 {
3199     int tab[2];
3200     abi_long ret;
3201 
3202     target_to_host_sock_type(&type);
3203 
3204     ret = get_errno(socketpair(domain, type, protocol, tab));
3205     if (!is_error(ret)) {
3206         if (put_user_s32(tab[0], target_tab_addr)
3207             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3208             ret = -TARGET_EFAULT;
3209     }
3210     return ret;
3211 }
3212 
3213 /* do_sendto() must return target values and target errnos. */
3214 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3215                           abi_ulong target_addr, socklen_t addrlen)
3216 {
3217     void *addr;
3218     void *host_msg;
3219     void *copy_msg = NULL;
3220     abi_long ret;
3221 
3222     if ((int)addrlen < 0) {
3223         return -TARGET_EINVAL;
3224     }
3225 
3226     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3227     if (!host_msg)
3228         return -TARGET_EFAULT;
3229     if (fd_trans_target_to_host_data(fd)) {
3230         copy_msg = host_msg;
3231         host_msg = g_malloc(len);
3232         memcpy(host_msg, copy_msg, len);
3233         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3234         if (ret < 0) {
3235             goto fail;
3236         }
3237     }
3238     if (target_addr) {
3239         addr = alloca(addrlen+1);
3240         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3241         if (ret) {
3242             goto fail;
3243         }
3244         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3245     } else {
3246         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3247     }
3248 fail:
3249     if (copy_msg) {
3250         g_free(host_msg);
3251         host_msg = copy_msg;
3252     }
3253     unlock_user(host_msg, msg, 0);
3254     return ret;
3255 }
3256 
3257 /* do_recvfrom() must return target values and target errnos. */
3258 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3259                             abi_ulong target_addr,
3260                             abi_ulong target_addrlen)
3261 {
3262     socklen_t addrlen, ret_addrlen;
3263     void *addr;
3264     void *host_msg;
3265     abi_long ret;
3266 
3267     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3268     if (!host_msg)
3269         return -TARGET_EFAULT;
3270     if (target_addr) {
3271         if (get_user_u32(addrlen, target_addrlen)) {
3272             ret = -TARGET_EFAULT;
3273             goto fail;
3274         }
3275         if ((int)addrlen < 0) {
3276             ret = -TARGET_EINVAL;
3277             goto fail;
3278         }
3279         addr = alloca(addrlen);
3280         ret_addrlen = addrlen;
3281         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3282                                       addr, &ret_addrlen));
3283     } else {
3284         addr = NULL; /* To keep compiler quiet.  */
3285         addrlen = 0; /* To keep compiler quiet.  */
3286         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3287     }
3288     if (!is_error(ret)) {
3289         if (fd_trans_host_to_target_data(fd)) {
3290             abi_long trans;
3291             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3292             if (is_error(trans)) {
3293                 ret = trans;
3294                 goto fail;
3295             }
3296         }
3297         if (target_addr) {
3298             host_to_target_sockaddr(target_addr, addr,
3299                                     MIN(addrlen, ret_addrlen));
3300             if (put_user_u32(ret_addrlen, target_addrlen)) {
3301                 ret = -TARGET_EFAULT;
3302                 goto fail;
3303             }
3304         }
3305         unlock_user(host_msg, msg, len);
3306     } else {
3307 fail:
3308         unlock_user(host_msg, msg, 0);
3309     }
3310     return ret;
3311 }
3312 
3313 #ifdef TARGET_NR_socketcall
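/*
 * Some targets (e.g. 32-bit x86) funnel all socket operations through the
 * single socketcall() syscall: the guest passes an operation number and a
 * pointer to an argument vector.  For example, a guest socket(AF_INET,
 * SOCK_STREAM, 0) arrives here as num == TARGET_SYS_SOCKET with three
 * abi_long values at vptr, which are unpacked below and forwarded to
 * do_socket().
 */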
3314 /* do_socketcall() must return target values and target errnos. */
3315 static abi_long do_socketcall(int num, abi_ulong vptr)
3316 {
3317     static const unsigned nargs[] = { /* number of arguments per operation */
3318         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3319         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3320         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3321         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3322         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3323         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3324         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3325         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3326         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3327         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3328         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3329         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3330         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3331         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3332         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3333         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3334         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3335         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3336         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3337         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3338     };
3339     abi_long a[6]; /* max 6 args */
3340     unsigned i;
3341 
3342     /* check the range of the first argument num */
3343     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3344     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3345         return -TARGET_EINVAL;
3346     }
3347     /* ensure we have space for args */
3348     if (nargs[num] > ARRAY_SIZE(a)) {
3349         return -TARGET_EINVAL;
3350     }
3351     /* collect the arguments in a[] according to nargs[] */
3352     for (i = 0; i < nargs[num]; ++i) {
3353         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3354             return -TARGET_EFAULT;
3355         }
3356     }
3357     /* now when we have the args, invoke the appropriate underlying function */
3358     switch (num) {
3359     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3360         return do_socket(a[0], a[1], a[2]);
3361     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3362         return do_bind(a[0], a[1], a[2]);
3363     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3364         return do_connect(a[0], a[1], a[2]);
3365     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3366         return get_errno(listen(a[0], a[1]));
3367     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3368         return do_accept4(a[0], a[1], a[2], 0);
3369     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3370         return do_getsockname(a[0], a[1], a[2]);
3371     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3372         return do_getpeername(a[0], a[1], a[2]);
3373     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3374         return do_socketpair(a[0], a[1], a[2], a[3]);
3375     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3376         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3377     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3378         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3379     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3380         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3381     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3382         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3383     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3384         return get_errno(shutdown(a[0], a[1]));
3385     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3386         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3387     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3388         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3389     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3390         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3391     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3392         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3393     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3394         return do_accept4(a[0], a[1], a[2], a[3]);
3395     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3396         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3397     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3398         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3399     default:
3400         gemu_log("Unsupported socketcall: %d\n", num);
3401         return -TARGET_EINVAL;
3402     }
3403 }
3404 #endif
3405 
3406 #define N_SHM_REGIONS 32
3407 
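/*
 * Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * start address and size of each attached segment here so that do_shmdt()
 * can later clear the corresponding page flags.  Only N_SHM_REGIONS
 * segments are tracked at a time.
 */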
3408 static struct shm_region {
3409     abi_ulong start;
3410     abi_ulong size;
3411     bool in_use;
3412 } shm_regions[N_SHM_REGIONS];
3413 
3414 #ifndef TARGET_SEMID64_DS
3415 /* asm-generic version of this struct */
3416 struct target_semid64_ds
3417 {
3418   struct target_ipc_perm sem_perm;
3419   abi_ulong sem_otime;
3420 #if TARGET_ABI_BITS == 32
3421   abi_ulong __unused1;
3422 #endif
3423   abi_ulong sem_ctime;
3424 #if TARGET_ABI_BITS == 32
3425   abi_ulong __unused2;
3426 #endif
3427   abi_ulong sem_nsems;
3428   abi_ulong __unused3;
3429   abi_ulong __unused4;
3430 };
3431 #endif
3432 
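/*
 * Convert the struct ipc_perm that sits at the start of the target IPC
 * descriptor.  Field widths differ per target: mode and __seq are 32 bits
 * wide on Alpha, MIPS and PPC but only 16 bits elsewhere, hence the
 * tswap32()/tswap16() split below.
 */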
3433 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3434                                                abi_ulong target_addr)
3435 {
3436     struct target_ipc_perm *target_ip;
3437     struct target_semid64_ds *target_sd;
3438 
3439     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3440         return -TARGET_EFAULT;
3441     target_ip = &(target_sd->sem_perm);
3442     host_ip->__key = tswap32(target_ip->__key);
3443     host_ip->uid = tswap32(target_ip->uid);
3444     host_ip->gid = tswap32(target_ip->gid);
3445     host_ip->cuid = tswap32(target_ip->cuid);
3446     host_ip->cgid = tswap32(target_ip->cgid);
3447 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3448     host_ip->mode = tswap32(target_ip->mode);
3449 #else
3450     host_ip->mode = tswap16(target_ip->mode);
3451 #endif
3452 #if defined(TARGET_PPC)
3453     host_ip->__seq = tswap32(target_ip->__seq);
3454 #else
3455     host_ip->__seq = tswap16(target_ip->__seq);
3456 #endif
3457     unlock_user_struct(target_sd, target_addr, 0);
3458     return 0;
3459 }
3460 
3461 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3462                                                struct ipc_perm *host_ip)
3463 {
3464     struct target_ipc_perm *target_ip;
3465     struct target_semid64_ds *target_sd;
3466 
3467     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3468         return -TARGET_EFAULT;
3469     target_ip = &(target_sd->sem_perm);
3470     target_ip->__key = tswap32(host_ip->__key);
3471     target_ip->uid = tswap32(host_ip->uid);
3472     target_ip->gid = tswap32(host_ip->gid);
3473     target_ip->cuid = tswap32(host_ip->cuid);
3474     target_ip->cgid = tswap32(host_ip->cgid);
3475 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3476     target_ip->mode = tswap32(host_ip->mode);
3477 #else
3478     target_ip->mode = tswap16(host_ip->mode);
3479 #endif
3480 #if defined(TARGET_PPC)
3481     target_ip->__seq = tswap32(host_ip->__seq);
3482 #else
3483     target_ip->__seq = tswap16(host_ip->__seq);
3484 #endif
3485     unlock_user_struct(target_sd, target_addr, 1);
3486     return 0;
3487 }
3488 
3489 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3490                                                abi_ulong target_addr)
3491 {
3492     struct target_semid64_ds *target_sd;
3493 
3494     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3495         return -TARGET_EFAULT;
3496     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3497         return -TARGET_EFAULT;
3498     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3499     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3500     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3501     unlock_user_struct(target_sd, target_addr, 0);
3502     return 0;
3503 }
3504 
3505 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3506                                                struct semid_ds *host_sd)
3507 {
3508     struct target_semid64_ds *target_sd;
3509 
3510     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3511         return -TARGET_EFAULT;
3512     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3513         return -TARGET_EFAULT;
3514     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3515     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3516     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3517     unlock_user_struct(target_sd, target_addr, 1);
3518     return 0;
3519 }
3520 
3521 struct target_seminfo {
3522     int semmap;
3523     int semmni;
3524     int semmns;
3525     int semmnu;
3526     int semmsl;
3527     int semopm;
3528     int semume;
3529     int semusz;
3530     int semvmx;
3531     int semaem;
3532 };
3533 
3534 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3535                                               struct seminfo *host_seminfo)
3536 {
3537     struct target_seminfo *target_seminfo;
3538     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3539         return -TARGET_EFAULT;
3540     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3541     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3542     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3543     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3544     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3545     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3546     __put_user(host_seminfo->semume, &target_seminfo->semume);
3547     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3548     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3549     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3550     unlock_user_struct(target_seminfo, target_addr, 1);
3551     return 0;
3552 }
3553 
3554 union semun {
3555     int val;
3556     struct semid_ds *buf;
3557     unsigned short *array;
3558     struct seminfo *__buf;
3559 };
3560 
3561 union target_semun {
3562     int val;
3563     abi_ulong buf;
3564     abi_ulong array;
3565     abi_ulong __buf;
3566 };
3567 
3568 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3569                                                abi_ulong target_addr)
3570 {
3571     int nsems;
3572     unsigned short *array;
3573     union semun semun;
3574     struct semid_ds semid_ds;
3575     int i, ret;
3576 
3577     semun.buf = &semid_ds;
3578 
3579     ret = semctl(semid, 0, IPC_STAT, semun);
3580     if (ret == -1)
3581         return get_errno(ret);
3582 
3583     nsems = semid_ds.sem_nsems;
3584 
3585     *host_array = g_try_new(unsigned short, nsems);
3586     if (!*host_array) {
3587         return -TARGET_ENOMEM;
3588     }
3589     array = lock_user(VERIFY_READ, target_addr,
3590                       nsems*sizeof(unsigned short), 1);
3591     if (!array) {
3592         g_free(*host_array);
3593         return -TARGET_EFAULT;
3594     }
3595 
3596     for (i = 0; i < nsems; i++) {
3597         __get_user((*host_array)[i], &array[i]);
3598     }
3599     unlock_user(array, target_addr, 0);
3600 
3601     return 0;
3602 }
3603 
3604 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3605                                                unsigned short **host_array)
3606 {
3607     int nsems;
3608     unsigned short *array;
3609     union semun semun;
3610     struct semid_ds semid_ds;
3611     int i, ret;
3612 
3613     semun.buf = &semid_ds;
3614 
3615     ret = semctl(semid, 0, IPC_STAT, semun);
3616     if (ret == -1)
3617         return get_errno(ret);
3618 
3619     nsems = semid_ds.sem_nsems;
3620 
3621     array = lock_user(VERIFY_WRITE, target_addr,
3622                       nsems*sizeof(unsigned short), 0);
3623     if (!array)
3624         return -TARGET_EFAULT;
3625 
3626     for (i = 0; i < nsems; i++) {
3627         __put_user((*host_array)[i], &array[i]);
3628     }
3629     g_free(*host_array);
3630     unlock_user(array, target_addr, 1);
3631 
3632     return 0;
3633 }
3634 
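/*
 * Dispatch the semctl() commands we support.  The target semun argument is
 * an immediate value for SETVAL and a guest address otherwise; depending on
 * the command the buf/array/__buf members are converted to host format
 * before the call and converted back afterwards.
 */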
3635 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3636                                  abi_ulong target_arg)
3637 {
3638     union target_semun target_su = { .buf = target_arg };
3639     union semun arg;
3640     struct semid_ds dsarg;
3641     unsigned short *array = NULL;
3642     struct seminfo seminfo;
3643     abi_long ret = -TARGET_EINVAL;
3644     abi_long err;
3645     cmd &= 0xff;
3646 
3647     switch (cmd) {
3648     case GETVAL:
3649     case SETVAL:
3650         /* In 64 bit cross-endian situations, we will erroneously pick up
3651          * the wrong half of the union for the "val" element.  To rectify
3652          * this, the entire 8-byte structure is byteswapped, followed by
3653          * a swap of the 4 byte val field. In other cases, the data is
3654          * already in proper host byte order. */
3655         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3656             target_su.buf = tswapal(target_su.buf);
3657             arg.val = tswap32(target_su.val);
3658         } else {
3659             arg.val = target_su.val;
3660         }
3661         ret = get_errno(semctl(semid, semnum, cmd, arg));
3662         break;
3663     case GETALL:
3664     case SETALL:
3665         err = target_to_host_semarray(semid, &array, target_su.array);
3666         if (err)
3667             return err;
3668         arg.array = array;
3669         ret = get_errno(semctl(semid, semnum, cmd, arg));
3670         err = host_to_target_semarray(semid, target_su.array, &array);
3671         if (err)
3672             return err;
3673         break;
3674     case IPC_STAT:
3675     case IPC_SET:
3676     case SEM_STAT:
3677         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3678         if (err)
3679             return err;
3680         arg.buf = &dsarg;
3681         ret = get_errno(semctl(semid, semnum, cmd, arg));
3682         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3683         if (err)
3684             return err;
3685         break;
3686     case IPC_INFO:
3687     case SEM_INFO:
3688         arg.__buf = &seminfo;
3689         ret = get_errno(semctl(semid, semnum, cmd, arg));
3690         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3691         if (err)
3692             return err;
3693         break;
3694     case IPC_RMID:
3695     case GETPID:
3696     case GETNCNT:
3697     case GETZCNT:
3698         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3699         break;
3700     }
3701 
3702     return ret;
3703 }
3704 
3705 struct target_sembuf {
3706     unsigned short sem_num;
3707     short sem_op;
3708     short sem_flg;
3709 };
3710 
3711 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3712                                              abi_ulong target_addr,
3713                                              unsigned nsops)
3714 {
3715     struct target_sembuf *target_sembuf;
3716     int i;
3717 
3718     target_sembuf = lock_user(VERIFY_READ, target_addr,
3719                               nsops*sizeof(struct target_sembuf), 1);
3720     if (!target_sembuf)
3721         return -TARGET_EFAULT;
3722 
3723     for (i = 0; i < nsops; i++) {
3724         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3725         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3726         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3727     }
3728 
3729     unlock_user(target_sembuf, target_addr, 0);
3730 
3731     return 0;
3732 }
3733 
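/*
 * semop() is implemented via semtimedop() with a NULL timeout where the
 * host provides __NR_semtimedop, falling back to the multiplexed ipc()
 * syscall otherwise.
 */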
3734 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3735 {
3736     struct sembuf sops[nsops];
3737     abi_long ret;
3738 
3739     if (target_to_host_sembuf(sops, ptr, nsops))
3740         return -TARGET_EFAULT;
3741 
3742     ret = -TARGET_ENOSYS;
3743 #ifdef __NR_semtimedop
3744     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3745 #endif
3746 #ifdef __NR_ipc
3747     if (ret == -TARGET_ENOSYS) {
3748         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3749     }
3750 #endif
3751     return ret;
3752 }
3753 
3754 struct target_msqid_ds
3755 {
3756     struct target_ipc_perm msg_perm;
3757     abi_ulong msg_stime;
3758 #if TARGET_ABI_BITS == 32
3759     abi_ulong __unused1;
3760 #endif
3761     abi_ulong msg_rtime;
3762 #if TARGET_ABI_BITS == 32
3763     abi_ulong __unused2;
3764 #endif
3765     abi_ulong msg_ctime;
3766 #if TARGET_ABI_BITS == 32
3767     abi_ulong __unused3;
3768 #endif
3769     abi_ulong __msg_cbytes;
3770     abi_ulong msg_qnum;
3771     abi_ulong msg_qbytes;
3772     abi_ulong msg_lspid;
3773     abi_ulong msg_lrpid;
3774     abi_ulong __unused4;
3775     abi_ulong __unused5;
3776 };
3777 
3778 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3779                                                abi_ulong target_addr)
3780 {
3781     struct target_msqid_ds *target_md;
3782 
3783     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3784         return -TARGET_EFAULT;
3785     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3786         return -TARGET_EFAULT;
3787     host_md->msg_stime = tswapal(target_md->msg_stime);
3788     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3789     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3790     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3791     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3792     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3793     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3794     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3795     unlock_user_struct(target_md, target_addr, 0);
3796     return 0;
3797 }
3798 
3799 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3800                                                struct msqid_ds *host_md)
3801 {
3802     struct target_msqid_ds *target_md;
3803 
3804     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3805         return -TARGET_EFAULT;
3806     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3807         return -TARGET_EFAULT;
3808     target_md->msg_stime = tswapal(host_md->msg_stime);
3809     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3810     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3811     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3812     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3813     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3814     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3815     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3816     unlock_user_struct(target_md, target_addr, 1);
3817     return 0;
3818 }
3819 
3820 struct target_msginfo {
3821     int msgpool;
3822     int msgmap;
3823     int msgmax;
3824     int msgmnb;
3825     int msgmni;
3826     int msgssz;
3827     int msgtql;
3828     unsigned short int msgseg;
3829 };
3830 
3831 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3832                                               struct msginfo *host_msginfo)
3833 {
3834     struct target_msginfo *target_msginfo;
3835     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3836         return -TARGET_EFAULT;
3837     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3838     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3839     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3840     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3841     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3842     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3843     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3844     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3845     unlock_user_struct(target_msginfo, target_addr, 1);
3846     return 0;
3847 }
3848 
3849 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3850 {
3851     struct msqid_ds dsarg;
3852     struct msginfo msginfo;
3853     abi_long ret = -TARGET_EINVAL;
3854 
3855     cmd &= 0xff;
3856 
3857     switch (cmd) {
3858     case IPC_STAT:
3859     case IPC_SET:
3860     case MSG_STAT:
3861         if (target_to_host_msqid_ds(&dsarg,ptr))
3862             return -TARGET_EFAULT;
3863         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3864         if (host_to_target_msqid_ds(ptr,&dsarg))
3865             return -TARGET_EFAULT;
3866         break;
3867     case IPC_RMID:
3868         ret = get_errno(msgctl(msgid, cmd, NULL));
3869         break;
3870     case IPC_INFO:
3871     case MSG_INFO:
3872         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3873         if (host_to_target_msginfo(ptr, &msginfo))
3874             return -TARGET_EFAULT;
3875         break;
3876     }
3877 
3878     return ret;
3879 }
3880 
3881 struct target_msgbuf {
3882     abi_long mtype;
3883     char mtext[1];
3884 };
3885 
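/*
 * msgsnd(): the target msgbuf starts with an abi_long mtype while the host
 * msgbuf starts with a long, so the message is copied into a freshly
 * allocated host buffer with the converted type before being sent, via
 * __NR_msgsnd where available or the ipc() multiplexer otherwise.
 */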
3886 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3887                                  ssize_t msgsz, int msgflg)
3888 {
3889     struct target_msgbuf *target_mb;
3890     struct msgbuf *host_mb;
3891     abi_long ret = 0;
3892 
3893     if (msgsz < 0) {
3894         return -TARGET_EINVAL;
3895     }
3896 
3897     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3898         return -TARGET_EFAULT;
3899     host_mb = g_try_malloc(msgsz + sizeof(long));
3900     if (!host_mb) {
3901         unlock_user_struct(target_mb, msgp, 0);
3902         return -TARGET_ENOMEM;
3903     }
3904     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3905     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3906     ret = -TARGET_ENOSYS;
3907 #ifdef __NR_msgsnd
3908     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3909 #endif
3910 #ifdef __NR_ipc
3911     if (ret == -TARGET_ENOSYS) {
3912         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3913                                  host_mb, 0));
3914     }
3915 #endif
3916     g_free(host_mb);
3917     unlock_user_struct(target_mb, msgp, 0);
3918 
3919     return ret;
3920 }
3921 
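/*
 * msgrcv(): receive into a host msgbuf first, then copy the converted
 * mtype and up to 'ret' bytes of mtext back into the guest buffer.
 */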
3922 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3923                                  ssize_t msgsz, abi_long msgtyp,
3924                                  int msgflg)
3925 {
3926     struct target_msgbuf *target_mb;
3927     char *target_mtext;
3928     struct msgbuf *host_mb;
3929     abi_long ret = 0;
3930 
3931     if (msgsz < 0) {
3932         return -TARGET_EINVAL;
3933     }
3934 
3935     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3936         return -TARGET_EFAULT;
3937 
3938     host_mb = g_try_malloc(msgsz + sizeof(long));
3939     if (!host_mb) {
3940         ret = -TARGET_ENOMEM;
3941         goto end;
3942     }
3943     ret = -TARGET_ENOSYS;
3944 #ifdef __NR_msgrcv
3945     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3946 #endif
3947 #ifdef __NR_ipc
3948     if (ret == -TARGET_ENOSYS) {
3949         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3950                         msgflg, host_mb, msgtyp));
3951     }
3952 #endif
3953 
3954     if (ret > 0) {
3955         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3956         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3957         if (!target_mtext) {
3958             ret = -TARGET_EFAULT;
3959             goto end;
3960         }
3961         memcpy(target_mb->mtext, host_mb->mtext, ret);
3962         unlock_user(target_mtext, target_mtext_addr, ret);
3963     }
3964 
3965     target_mb->mtype = tswapal(host_mb->mtype);
3966 
3967 end:
3968     if (target_mb)
3969         unlock_user_struct(target_mb, msgp, 1);
3970     g_free(host_mb);
3971     return ret;
3972 }
3973 
3974 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3975                                                abi_ulong target_addr)
3976 {
3977     struct target_shmid_ds *target_sd;
3978 
3979     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3980         return -TARGET_EFAULT;
3981     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3982         return -TARGET_EFAULT;
3983     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3984     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3985     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3986     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3987     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3988     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3989     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3990     unlock_user_struct(target_sd, target_addr, 0);
3991     return 0;
3992 }
3993 
3994 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3995                                                struct shmid_ds *host_sd)
3996 {
3997     struct target_shmid_ds *target_sd;
3998 
3999     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4000         return -TARGET_EFAULT;
4001     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4002         return -TARGET_EFAULT;
4003     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4004     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4005     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4006     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4007     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4008     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4009     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4010     unlock_user_struct(target_sd, target_addr, 1);
4011     return 0;
4012 }
4013 
4014 struct target_shminfo {
4015     abi_ulong shmmax;
4016     abi_ulong shmmin;
4017     abi_ulong shmmni;
4018     abi_ulong shmseg;
4019     abi_ulong shmall;
4020 };
4021 
4022 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4023                                               struct shminfo *host_shminfo)
4024 {
4025     struct target_shminfo *target_shminfo;
4026     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4027         return -TARGET_EFAULT;
4028     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4029     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4030     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4031     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4032     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4033     unlock_user_struct(target_shminfo, target_addr, 1);
4034     return 0;
4035 }
4036 
4037 struct target_shm_info {
4038     int used_ids;
4039     abi_ulong shm_tot;
4040     abi_ulong shm_rss;
4041     abi_ulong shm_swp;
4042     abi_ulong swap_attempts;
4043     abi_ulong swap_successes;
4044 };
4045 
4046 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4047                                                struct shm_info *host_shm_info)
4048 {
4049     struct target_shm_info *target_shm_info;
4050     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4051         return -TARGET_EFAULT;
4052     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4053     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4054     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4055     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4056     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4057     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4058     unlock_user_struct(target_shm_info, target_addr, 1);
4059     return 0;
4060 }
4061 
4062 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4063 {
4064     struct shmid_ds dsarg;
4065     struct shminfo shminfo;
4066     struct shm_info shm_info;
4067     abi_long ret = -TARGET_EINVAL;
4068 
4069     cmd &= 0xff;
4070 
4071     switch(cmd) {
4072     case IPC_STAT:
4073     case IPC_SET:
4074     case SHM_STAT:
4075         if (target_to_host_shmid_ds(&dsarg, buf))
4076             return -TARGET_EFAULT;
4077         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4078         if (host_to_target_shmid_ds(buf, &dsarg))
4079             return -TARGET_EFAULT;
4080         break;
4081     case IPC_INFO:
4082         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4083         if (host_to_target_shminfo(buf, &shminfo))
4084             return -TARGET_EFAULT;
4085         break;
4086     case SHM_INFO:
4087         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4088         if (host_to_target_shm_info(buf, &shm_info))
4089             return -TARGET_EFAULT;
4090         break;
4091     case IPC_RMID:
4092     case SHM_LOCK:
4093     case SHM_UNLOCK:
4094         ret = get_errno(shmctl(shmid, cmd, NULL));
4095         break;
4096     }
4097 
4098     return ret;
4099 }
4100 
4101 #ifndef TARGET_FORCE_SHMLBA
4102 /* For most architectures, SHMLBA is the same as the page size;
4103  * some architectures have larger values, in which case they should
4104  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4105  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4106  * and defining its own value for SHMLBA.
4107  *
4108  * The kernel also permits SHMLBA to be set by the architecture to a
4109  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4110  * this means that addresses are rounded to the large size if
4111  * SHM_RND is set but addresses not aligned to that size are not rejected
4112  * as long as they are at least page-aligned. Since the only architecture
4113  * which uses this is ia64, this code doesn't provide for that oddity.
4114  */
4115 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4116 {
4117     return TARGET_PAGE_SIZE;
4118 }
4119 #endif
4120 
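/*
 * Attach a SysV shared memory segment into the guest address space.  The
 * requested guest address is checked (and optionally rounded down) against
 * the target SHMLBA; when no address is given, a hole is found with
 * mmap_find_vma() honouring the larger of host and target SHMLBA.  The
 * resulting mapping is recorded in shm_regions[] so do_shmdt() can undo
 * the page flags later.
 */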
4121 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4122                                  int shmid, abi_ulong shmaddr, int shmflg)
4123 {
4124     abi_long raddr;
4125     void *host_raddr;
4126     struct shmid_ds shm_info;
4127     int i, ret;
4128     abi_ulong shmlba;
4129 
4130     /* find out the length of the shared memory segment */
4131     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4132     if (is_error(ret)) {
4133         /* can't get length, bail out */
4134         return ret;
4135     }
4136 
4137     shmlba = target_shmlba(cpu_env);
4138 
4139     if (shmaddr & (shmlba - 1)) {
4140         if (shmflg & SHM_RND) {
4141             shmaddr &= ~(shmlba - 1);
4142         } else {
4143             return -TARGET_EINVAL;
4144         }
4145     }
4146     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4147         return -TARGET_EINVAL;
4148     }
4149 
4150     mmap_lock();
4151 
4152     if (shmaddr)
4153         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4154     else {
4155         abi_ulong mmap_start;
4156 
4157         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4158         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4159 
4160         if (mmap_start == -1) {
4161             errno = ENOMEM;
4162             host_raddr = (void *)-1;
4163         } else
4164             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4165     }
4166 
4167     if (host_raddr == (void *)-1) {
4168         mmap_unlock();
4169         return get_errno((long)host_raddr);
4170     }
4171     raddr = h2g((unsigned long)host_raddr);
4172 
4173     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4174                    PAGE_VALID | PAGE_READ |
4175                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4176 
4177     for (i = 0; i < N_SHM_REGIONS; i++) {
4178         if (!shm_regions[i].in_use) {
4179             shm_regions[i].in_use = true;
4180             shm_regions[i].start = raddr;
4181             shm_regions[i].size = shm_info.shm_segsz;
4182             break;
4183         }
4184     }
4185 
4186     mmap_unlock();
4187     return raddr;
4188 
4189 }
4190 
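/*
 * Detach a segment previously attached with do_shmat(): clear the guest
 * page flags recorded for it and release its shm_regions[] slot before
 * calling the host shmdt().
 */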
4191 static inline abi_long do_shmdt(abi_ulong shmaddr)
4192 {
4193     int i;
4194     abi_long rv;
4195 
4196     mmap_lock();
4197 
4198     for (i = 0; i < N_SHM_REGIONS; ++i) {
4199         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4200             shm_regions[i].in_use = false;
4201             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4202             break;
4203         }
4204     }
4205     rv = get_errno(shmdt(g2h(shmaddr)));
4206 
4207     mmap_unlock();
4208 
4209     return rv;
4210 }
4211 
4212 #ifdef TARGET_NR_ipc
4213 /* ??? This only works with linear mappings.  */
4214 /* do_ipc() must return target values and target errnos. */
4215 static abi_long do_ipc(CPUArchState *cpu_env,
4216                        unsigned int call, abi_long first,
4217                        abi_long second, abi_long third,
4218                        abi_long ptr, abi_long fifth)
4219 {
4220     int version;
4221     abi_long ret = 0;
4222 
4223     version = call >> 16;
4224     call &= 0xffff;
4225 
4226     switch (call) {
4227     case IPCOP_semop:
4228         ret = do_semop(first, ptr, second);
4229         break;
4230 
4231     case IPCOP_semget:
4232         ret = get_errno(semget(first, second, third));
4233         break;
4234 
4235     case IPCOP_semctl: {
4236         /* The semun argument to semctl is passed by value, so dereference the
4237          * ptr argument. */
4238         abi_ulong atptr;
4239         get_user_ual(atptr, ptr);
4240         ret = do_semctl(first, second, third, atptr);
4241         break;
4242     }
4243 
4244     case IPCOP_msgget:
4245         ret = get_errno(msgget(first, second));
4246         break;
4247 
4248     case IPCOP_msgsnd:
4249         ret = do_msgsnd(first, ptr, second, third);
4250         break;
4251 
4252     case IPCOP_msgctl:
4253         ret = do_msgctl(first, second, ptr);
4254         break;
4255 
4256     case IPCOP_msgrcv:
4257         switch (version) {
4258         case 0:
4259             {
4260                 struct target_ipc_kludge {
4261                     abi_long msgp;
4262                     abi_long msgtyp;
4263                 } *tmp;
4264 
4265                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4266                     ret = -TARGET_EFAULT;
4267                     break;
4268                 }
4269 
4270                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4271 
4272                 unlock_user_struct(tmp, ptr, 0);
4273                 break;
4274             }
4275         default:
4276             ret = do_msgrcv(first, ptr, second, fifth, third);
4277         }
4278         break;
4279 
4280     case IPCOP_shmat:
4281         switch (version) {
4282         default:
4283         {
4284             abi_ulong raddr;
4285             raddr = do_shmat(cpu_env, first, ptr, second);
4286             if (is_error(raddr))
4287                 return get_errno(raddr);
4288             if (put_user_ual(raddr, third))
4289                 return -TARGET_EFAULT;
4290             break;
4291         }
4292         case 1:
4293             ret = -TARGET_EINVAL;
4294             break;
4295         }
4296         break;
4297     case IPCOP_shmdt:
4298         ret = do_shmdt(ptr);
4299         break;
4300 
4301     case IPCOP_shmget:
4302         /* IPC_* flag values are the same on all linux platforms */
4303         ret = get_errno(shmget(first, second, third));
4304         break;
4305 
4306         /* IPC_* and SHM_* command values are the same on all linux platforms */
4307     case IPCOP_shmctl:
4308         ret = do_shmctl(first, second, ptr);
4309         break;
4310     default:
4311         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4312         ret = -TARGET_ENOSYS;
4313         break;
4314     }
4315     return ret;
4316 }
4317 #endif
4318 
4319 /* kernel structure types definitions */
4320 
4321 #define STRUCT(name, ...) STRUCT_ ## name,
4322 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4323 enum {
4324 #include "syscall_types.h"
4325 STRUCT_MAX
4326 };
4327 #undef STRUCT
4328 #undef STRUCT_SPECIAL
4329 
4330 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4331 #define STRUCT_SPECIAL(name)
4332 #include "syscall_types.h"
4333 #undef STRUCT
4334 #undef STRUCT_SPECIAL
4335 
4336 typedef struct IOCTLEntry IOCTLEntry;
4337 
4338 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4339                              int fd, int cmd, abi_long arg);
4340 
4341 struct IOCTLEntry {
4342     int target_cmd;
4343     unsigned int host_cmd;
4344     const char *name;
4345     int access;
4346     do_ioctl_fn *do_ioctl;
4347     const argtype arg_type[5];
4348 };
4349 
4350 #define IOC_R 0x0001
4351 #define IOC_W 0x0002
4352 #define IOC_RW (IOC_R | IOC_W)
4353 
4354 #define MAX_STRUCT_SIZE 4096
4355 
4356 #ifdef CONFIG_FIEMAP
4357 /* So fiemap access checks don't overflow on 32 bit systems.
4358  * This is very slightly smaller than the limit imposed by
4359  * the underlying kernel.
4360  */
4361 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4362                             / sizeof(struct fiemap_extent))
4363 
4364 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4365                                        int fd, int cmd, abi_long arg)
4366 {
4367     /* The parameter for this ioctl is a struct fiemap followed
4368      * by an array of struct fiemap_extent whose size is set
4369      * in fiemap->fm_extent_count. The array is filled in by the
4370      * ioctl.
4371      */
4372     int target_size_in, target_size_out;
4373     struct fiemap *fm;
4374     const argtype *arg_type = ie->arg_type;
4375     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4376     void *argptr, *p;
4377     abi_long ret;
4378     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4379     uint32_t outbufsz;
4380     int free_fm = 0;
4381 
4382     assert(arg_type[0] == TYPE_PTR);
4383     assert(ie->access == IOC_RW);
4384     arg_type++;
4385     target_size_in = thunk_type_size(arg_type, 0);
4386     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4387     if (!argptr) {
4388         return -TARGET_EFAULT;
4389     }
4390     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4391     unlock_user(argptr, arg, 0);
4392     fm = (struct fiemap *)buf_temp;
4393     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4394         return -TARGET_EINVAL;
4395     }
4396 
4397     outbufsz = sizeof (*fm) +
4398         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4399 
4400     if (outbufsz > MAX_STRUCT_SIZE) {
4401         /* We can't fit all the extents into the fixed size buffer.
4402          * Allocate one that is large enough and use it instead.
4403          */
4404         fm = g_try_malloc(outbufsz);
4405         if (!fm) {
4406             return -TARGET_ENOMEM;
4407         }
4408         memcpy(fm, buf_temp, sizeof(struct fiemap));
4409         free_fm = 1;
4410     }
4411     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4412     if (!is_error(ret)) {
4413         target_size_out = target_size_in;
4414         /* An extent_count of 0 means we were only counting the extents
4415          * so there are no structs to copy
4416          */
4417         if (fm->fm_extent_count != 0) {
4418             target_size_out += fm->fm_mapped_extents * extent_size;
4419         }
4420         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4421         if (!argptr) {
4422             ret = -TARGET_EFAULT;
4423         } else {
4424             /* Convert the struct fiemap */
4425             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4426             if (fm->fm_extent_count != 0) {
4427                 p = argptr + target_size_in;
4428                 /* ...and then all the struct fiemap_extents */
4429                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4430                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4431                                   THUNK_TARGET);
4432                     p += extent_size;
4433                 }
4434             }
4435             unlock_user(argptr, arg, target_size_out);
4436         }
4437     }
4438     if (free_fm) {
4439         g_free(fm);
4440     }
4441     return ret;
4442 }
4443 #endif
4444 
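/*
 * SIOCGIFCONF: the target and host struct ifreq may differ in size, so the
 * target's ifc_len is rescaled to a host byte count before the ioctl and
 * back to a target byte count afterwards, and each returned ifreq is
 * converted individually into the guest's ifc_buf.
 */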
4445 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4446                                 int fd, int cmd, abi_long arg)
4447 {
4448     const argtype *arg_type = ie->arg_type;
4449     int target_size;
4450     void *argptr;
4451     int ret;
4452     struct ifconf *host_ifconf;
4453     uint32_t outbufsz;
4454     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4455     int target_ifreq_size;
4456     int nb_ifreq;
4457     int free_buf = 0;
4458     int i;
4459     int target_ifc_len;
4460     abi_long target_ifc_buf;
4461     int host_ifc_len;
4462     char *host_ifc_buf;
4463 
4464     assert(arg_type[0] == TYPE_PTR);
4465     assert(ie->access == IOC_RW);
4466 
4467     arg_type++;
4468     target_size = thunk_type_size(arg_type, 0);
4469 
4470     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4471     if (!argptr)
4472         return -TARGET_EFAULT;
4473     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4474     unlock_user(argptr, arg, 0);
4475 
4476     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4477     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4478     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4479 
4480     if (target_ifc_buf != 0) {
4481         target_ifc_len = host_ifconf->ifc_len;
4482         nb_ifreq = target_ifc_len / target_ifreq_size;
4483         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4484 
4485         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4486         if (outbufsz > MAX_STRUCT_SIZE) {
4487             /*
4488              * We can't fit all the ifreq entries into the fixed size buffer.
4489              * Allocate one that is large enough and use it instead.
4490              */
4491             host_ifconf = malloc(outbufsz);
4492             if (!host_ifconf) {
4493                 return -TARGET_ENOMEM;
4494             }
4495             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4496             free_buf = 1;
4497         }
4498         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4499 
4500         host_ifconf->ifc_len = host_ifc_len;
4501     } else {
4502       host_ifc_buf = NULL;
4503     }
4504     host_ifconf->ifc_buf = host_ifc_buf;
4505 
4506     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4507     if (!is_error(ret)) {
4508 	/* convert host ifc_len to target ifc_len */
4509 
4510         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4511         target_ifc_len = nb_ifreq * target_ifreq_size;
4512         host_ifconf->ifc_len = target_ifc_len;
4513 
4514 	/* restore target ifc_buf */
4515 
4516         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4517 
4518 	/* copy struct ifconf to target user */
4519 
4520         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4521         if (!argptr)
4522             return -TARGET_EFAULT;
4523         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4524         unlock_user(argptr, arg, target_size);
4525 
4526         if (target_ifc_buf != 0) {
4527             /* copy ifreq[] to target user */
4528             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4529             for (i = 0; i < nb_ifreq ; i++) {
4530                 thunk_convert(argptr + i * target_ifreq_size,
4531                               host_ifc_buf + i * sizeof(struct ifreq),
4532                               ifreq_arg_type, THUNK_TARGET);
4533             }
4534             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4535         }
4536     }
4537 
4538     if (free_buf) {
4539         free(host_ifconf);
4540     }
4541 
4542     return ret;
4543 }
4544 
4545 #if defined(CONFIG_USBFS)
4546 #if HOST_LONG_BITS > 64
4547 #error USBDEVFS thunks do not support >64 bit hosts yet.
4548 #endif
4549 struct live_urb {
4550     uint64_t target_urb_adr;
4551     uint64_t target_buf_adr;
4552     char *target_buf_ptr;
4553     struct usbdevfs_urb host_urb;
4554 };
4555 
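/*
 * Submitted USB URBs are wrapped in a live_urb that remembers the guest
 * urb and buffer addresses alongside the host copy handed to the kernel.
 * A hash table keyed on the guest urb address lets USBDEVFS_DISCARDURB
 * and USBDEVFS_REAPURB* recover that wrapper later.
 */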
4556 static GHashTable *usbdevfs_urb_hashtable(void)
4557 {
4558     static GHashTable *urb_hashtable;
4559 
4560     if (!urb_hashtable) {
4561         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4562     }
4563     return urb_hashtable;
4564 }
4565 
4566 static void urb_hashtable_insert(struct live_urb *urb)
4567 {
4568     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4569     g_hash_table_insert(urb_hashtable, urb, urb);
4570 }
4571 
4572 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4573 {
4574     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4575     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4576 }
4577 
4578 static void urb_hashtable_remove(struct live_urb *urb)
4579 {
4580     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4581     g_hash_table_remove(urb_hashtable, urb);
4582 }
4583 
4584 static abi_long
4585 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4586                           int fd, int cmd, abi_long arg)
4587 {
4588     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4589     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4590     struct live_urb *lurb;
4591     void *argptr;
4592     uint64_t hurb;
4593     int target_size;
4594     uintptr_t target_urb_adr;
4595     abi_long ret;
4596 
4597     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4598 
4599     memset(buf_temp, 0, sizeof(uint64_t));
4600     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4601     if (is_error(ret)) {
4602         return ret;
4603     }
4604 
4605     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4606     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4607     if (!lurb->target_urb_adr) {
4608         return -TARGET_EFAULT;
4609     }
4610     urb_hashtable_remove(lurb);
4611     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4612         lurb->host_urb.buffer_length);
4613     lurb->target_buf_ptr = NULL;
4614 
4615     /* restore the guest buffer pointer */
4616     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4617 
4618     /* update the guest urb struct */
4619     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4620     if (!argptr) {
4621         g_free(lurb);
4622         return -TARGET_EFAULT;
4623     }
4624     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4625     unlock_user(argptr, lurb->target_urb_adr, target_size);
4626 
4627     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4628     /* write back the urb handle */
4629     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4630     if (!argptr) {
4631         g_free(lurb);
4632         return -TARGET_EFAULT;
4633     }
4634 
4635     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4636     target_urb_adr = lurb->target_urb_adr;
4637     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4638     unlock_user(argptr, arg, target_size);
4639 
4640     g_free(lurb);
4641     return ret;
4642 }
4643 
4644 static abi_long
4645 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4646                              uint8_t *buf_temp __attribute__((unused)),
4647                              int fd, int cmd, abi_long arg)
4648 {
4649     struct live_urb *lurb;
4650 
4651     /* map target address back to host URB with metadata. */
4652     lurb = urb_hashtable_lookup(arg);
4653     if (!lurb) {
4654         return -TARGET_EFAULT;
4655     }
4656     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4657 }
4658 
4659 static abi_long
4660 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4661                             int fd, int cmd, abi_long arg)
4662 {
4663     const argtype *arg_type = ie->arg_type;
4664     int target_size;
4665     abi_long ret;
4666     void *argptr;
4667     int rw_dir;
4668     struct live_urb *lurb;
4669 
4670     /*
4671      * Each submitted URB needs to map to a unique ID for the
4672      * kernel, and that unique ID needs to be a pointer to
4673      * host memory.  Hence, we need to malloc for each URB.
4674      * Isochronous transfers have a variable-length struct.
4675      */
4676     arg_type++;
4677     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4678 
4679     /* construct host copy of urb and metadata */
4680     lurb = g_try_malloc0(sizeof(struct live_urb));
4681     if (!lurb) {
4682         return -TARGET_ENOMEM;
4683     }
4684 
4685     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4686     if (!argptr) {
4687         g_free(lurb);
4688         return -TARGET_EFAULT;
4689     }
4690     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4691     unlock_user(argptr, arg, 0);
4692 
4693     lurb->target_urb_adr = arg;
4694     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4695 
4696     /* buffer space used depends on endpoint type so lock the entire buffer */
4697     /* control type urbs should check the buffer contents for true direction */
4698     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4699     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4700         lurb->host_urb.buffer_length, 1);
4701     if (lurb->target_buf_ptr == NULL) {
4702         g_free(lurb);
4703         return -TARGET_EFAULT;
4704     }
4705 
4706     /* update buffer pointer in host copy */
4707     lurb->host_urb.buffer = lurb->target_buf_ptr;
4708 
4709     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4710     if (is_error(ret)) {
4711         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4712         g_free(lurb);
4713     } else {
4714         urb_hashtable_insert(lurb);
4715     }
4716 
4717     return ret;
4718 }
4719 #endif /* CONFIG_USBFS */
4720 
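/*
 * Device-mapper ioctls share a fixed struct dm_ioctl header followed by a
 * variable-sized payload located at data_start/data_size.  buf_temp may be
 * too small for the payload, so the whole request is staged in a separately
 * allocated big_buf, and the payload is converted per-command in each
 * direction around the host ioctl.
 */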
4721 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4722                             int cmd, abi_long arg)
4723 {
4724     void *argptr;
4725     struct dm_ioctl *host_dm;
4726     abi_long guest_data;
4727     uint32_t guest_data_size;
4728     int target_size;
4729     const argtype *arg_type = ie->arg_type;
4730     abi_long ret;
4731     void *big_buf = NULL;
4732     char *host_data;
4733 
4734     arg_type++;
4735     target_size = thunk_type_size(arg_type, 0);
4736     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4737     if (!argptr) {
4738         ret = -TARGET_EFAULT;
4739         goto out;
4740     }
4741     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4742     unlock_user(argptr, arg, 0);
4743 
4744     /* buf_temp is too small, so fetch things into a bigger buffer */
4745     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4746     memcpy(big_buf, buf_temp, target_size);
4747     buf_temp = big_buf;
4748     host_dm = big_buf;
4749 
4750     guest_data = arg + host_dm->data_start;
4751     if ((guest_data - arg) < 0) {
4752         ret = -TARGET_EINVAL;
4753         goto out;
4754     }
4755     guest_data_size = host_dm->data_size - host_dm->data_start;
4756     host_data = (char*)host_dm + host_dm->data_start;
4757 
4758     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4759     if (!argptr) {
4760         ret = -TARGET_EFAULT;
4761         goto out;
4762     }
4763 
4764     switch (ie->host_cmd) {
4765     case DM_REMOVE_ALL:
4766     case DM_LIST_DEVICES:
4767     case DM_DEV_CREATE:
4768     case DM_DEV_REMOVE:
4769     case DM_DEV_SUSPEND:
4770     case DM_DEV_STATUS:
4771     case DM_DEV_WAIT:
4772     case DM_TABLE_STATUS:
4773     case DM_TABLE_CLEAR:
4774     case DM_TABLE_DEPS:
4775     case DM_LIST_VERSIONS:
4776         /* no input data */
4777         break;
4778     case DM_DEV_RENAME:
4779     case DM_DEV_SET_GEOMETRY:
4780         /* data contains only strings */
4781         memcpy(host_data, argptr, guest_data_size);
4782         break;
4783     case DM_TARGET_MSG:
4784         memcpy(host_data, argptr, guest_data_size);
4785         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4786         break;
4787     case DM_TABLE_LOAD:
4788     {
4789         void *gspec = argptr;
4790         void *cur_data = host_data;
4791         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4792         int spec_size = thunk_type_size(arg_type, 0);
4793         int i;
4794 
4795         for (i = 0; i < host_dm->target_count; i++) {
4796             struct dm_target_spec *spec = cur_data;
4797             uint32_t next;
4798             int slen;
4799 
4800             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4801             slen = strlen((char*)gspec + spec_size) + 1;
4802             next = spec->next;
4803             spec->next = sizeof(*spec) + slen;
4804             strcpy((char*)&spec[1], gspec + spec_size);
4805             gspec += next;
4806             cur_data += spec->next;
4807         }
4808         break;
4809     }
4810     default:
4811         ret = -TARGET_EINVAL;
4812         unlock_user(argptr, guest_data, 0);
4813         goto out;
4814     }
4815     unlock_user(argptr, guest_data, 0);
4816 
4817     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4818     if (!is_error(ret)) {
4819         guest_data = arg + host_dm->data_start;
4820         guest_data_size = host_dm->data_size - host_dm->data_start;
4821         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4822         switch (ie->host_cmd) {
4823         case DM_REMOVE_ALL:
4824         case DM_DEV_CREATE:
4825         case DM_DEV_REMOVE:
4826         case DM_DEV_RENAME:
4827         case DM_DEV_SUSPEND:
4828         case DM_DEV_STATUS:
4829         case DM_TABLE_LOAD:
4830         case DM_TABLE_CLEAR:
4831         case DM_TARGET_MSG:
4832         case DM_DEV_SET_GEOMETRY:
4833             /* no return data */
4834             break;
4835         case DM_LIST_DEVICES:
4836         {
4837             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4838             uint32_t remaining_data = guest_data_size;
4839             void *cur_data = argptr;
4840             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4841             int nl_size = 12; /* can't use thunk_size due to alignment */
4842 
4843             while (1) {
4844                 uint32_t next = nl->next;
4845                 if (next) {
4846                     nl->next = nl_size + (strlen(nl->name) + 1);
4847                 }
4848                 if (remaining_data < nl->next) {
4849                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4850                     break;
4851                 }
4852                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4853                 strcpy(cur_data + nl_size, nl->name);
4854                 cur_data += nl->next;
4855                 remaining_data -= nl->next;
4856                 if (!next) {
4857                     break;
4858                 }
4859                 nl = (void*)nl + next;
4860             }
4861             break;
4862         }
4863         case DM_DEV_WAIT:
4864         case DM_TABLE_STATUS:
4865         {
4866             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4867             void *cur_data = argptr;
4868             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4869             int spec_size = thunk_type_size(arg_type, 0);
4870             int i;
4871 
4872             for (i = 0; i < host_dm->target_count; i++) {
4873                 uint32_t next = spec->next;
4874                 int slen = strlen((char*)&spec[1]) + 1;
4875                 spec->next = (cur_data - argptr) + spec_size + slen;
4876                 if (guest_data_size < spec->next) {
4877                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4878                     break;
4879                 }
4880                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4881                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4882                 cur_data = argptr + spec->next;
4883                 spec = (void*)host_dm + host_dm->data_start + next;
4884             }
4885             break;
4886         }
4887         case DM_TABLE_DEPS:
4888         {
4889             void *hdata = (void*)host_dm + host_dm->data_start;
4890             int count = *(uint32_t*)hdata;
4891             uint64_t *hdev = hdata + 8;
4892             uint64_t *gdev = argptr + 8;
4893             int i;
4894 
4895             *(uint32_t*)argptr = tswap32(count);
4896             for (i = 0; i < count; i++) {
4897                 *gdev = tswap64(*hdev);
4898                 gdev++;
4899                 hdev++;
4900             }
4901             break;
4902         }
4903         case DM_LIST_VERSIONS:
4904         {
4905             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4906             uint32_t remaining_data = guest_data_size;
4907             void *cur_data = argptr;
4908             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4909             int vers_size = thunk_type_size(arg_type, 0);
4910 
4911             while (1) {
4912                 uint32_t next = vers->next;
4913                 if (next) {
4914                     vers->next = vers_size + (strlen(vers->name) + 1);
4915                 }
4916                 if (remaining_data < vers->next) {
4917                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4918                     break;
4919                 }
4920                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4921                 strcpy(cur_data + vers_size, vers->name);
4922                 cur_data += vers->next;
4923                 remaining_data -= vers->next;
4924                 if (!next) {
4925                     break;
4926                 }
4927                 vers = (void*)vers + next;
4928             }
4929             break;
4930         }
4931         default:
4932             unlock_user(argptr, guest_data, 0);
4933             ret = -TARGET_EINVAL;
4934             goto out;
4935         }
4936         unlock_user(argptr, guest_data, guest_data_size);
4937 
4938         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4939         if (!argptr) {
4940             ret = -TARGET_EFAULT;
4941             goto out;
4942         }
4943         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4944         unlock_user(argptr, arg, target_size);
4945     }
4946 out:
4947     g_free(big_buf);
4948     return ret;
4949 }
4950 
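/*
 * BLKPG is a two-level ioctl: struct blkpg_ioctl_arg carries an embedded
 * pointer ("data") to a struct blkpg_partition.  The outer struct is
 * converted with the generic thunk, the partition is then fetched and
 * converted separately, and the data pointer is swizzled to the host-side
 * copy before issuing the host ioctl.
 */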
4951 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4952                                int cmd, abi_long arg)
4953 {
4954     void *argptr;
4955     int target_size;
4956     const argtype *arg_type = ie->arg_type;
4957     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4958     abi_long ret;
4959 
4960     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4961     struct blkpg_partition host_part;
4962 
4963     /* Read and convert blkpg */
4964     arg_type++;
4965     target_size = thunk_type_size(arg_type, 0);
4966     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4967     if (!argptr) {
4968         ret = -TARGET_EFAULT;
4969         goto out;
4970     }
4971     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4972     unlock_user(argptr, arg, 0);
4973 
4974     switch (host_blkpg->op) {
4975     case BLKPG_ADD_PARTITION:
4976     case BLKPG_DEL_PARTITION:
4977         /* payload is struct blkpg_partition */
4978         break;
4979     default:
4980         /* Unknown opcode */
4981         ret = -TARGET_EINVAL;
4982         goto out;
4983     }
4984 
4985     /* Read and convert blkpg->data */
4986     arg = (abi_long)(uintptr_t)host_blkpg->data;
4987     target_size = thunk_type_size(part_arg_type, 0);
4988     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4989     if (!argptr) {
4990         ret = -TARGET_EFAULT;
4991         goto out;
4992     }
4993     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4994     unlock_user(argptr, arg, 0);
4995 
4996     /* Swizzle the data pointer to our local copy and call! */
4997     host_blkpg->data = &host_part;
4998     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4999 
5000 out:
5001     return ret;
5002 }
5003 
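/*
 * Handler for routing-table ioctls whose argument is a struct rtentry: the
 * rt_dev member is a pointer to a device-name string in guest memory, which
 * the generic thunk cannot follow, so the struct is converted field by field
 * and rt_dev is locked as a guest string for the duration of the host ioctl.
 */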
5004 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5005                                 int fd, int cmd, abi_long arg)
5006 {
5007     const argtype *arg_type = ie->arg_type;
5008     const StructEntry *se;
5009     const argtype *field_types;
5010     const int *dst_offsets, *src_offsets;
5011     int target_size;
5012     void *argptr;
5013     abi_ulong *target_rt_dev_ptr = NULL;
5014     unsigned long *host_rt_dev_ptr = NULL;
5015     abi_long ret;
5016     int i;
5017 
5018     assert(ie->access == IOC_W);
5019     assert(*arg_type == TYPE_PTR);
5020     arg_type++;
5021     assert(*arg_type == TYPE_STRUCT);
5022     target_size = thunk_type_size(arg_type, 0);
5023     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5024     if (!argptr) {
5025         return -TARGET_EFAULT;
5026     }
5027     arg_type++;
5028     assert(*arg_type == (int)STRUCT_rtentry);
5029     se = struct_entries + *arg_type++;
5030     assert(se->convert[0] == NULL);
5031     /* convert struct here to be able to catch rt_dev string */
5032     field_types = se->field_types;
5033     dst_offsets = se->field_offsets[THUNK_HOST];
5034     src_offsets = se->field_offsets[THUNK_TARGET];
5035     for (i = 0; i < se->nb_fields; i++) {
5036         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5037             assert(*field_types == TYPE_PTRVOID);
5038             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5039             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5040             if (*target_rt_dev_ptr != 0) {
5041                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5042                                                   tswapal(*target_rt_dev_ptr));
5043                 if (!*host_rt_dev_ptr) {
5044                     unlock_user(argptr, arg, 0);
5045                     return -TARGET_EFAULT;
5046                 }
5047             } else {
5048                 *host_rt_dev_ptr = 0;
5049             }
5050             field_types++;
5051             continue;
5052         }
5053         field_types = thunk_convert(buf_temp + dst_offsets[i],
5054                                     argptr + src_offsets[i],
5055                                     field_types, THUNK_HOST);
5056     }
5057     unlock_user(argptr, arg, 0);
5058 
5059     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5060 
5061     assert(host_rt_dev_ptr != NULL);
5062     assert(target_rt_dev_ptr != NULL);
5063     if (*host_rt_dev_ptr != 0) {
5064         unlock_user((void *)*host_rt_dev_ptr,
5065                     *target_rt_dev_ptr, 0);
5066     }
5067     return ret;
5068 }
5069 
5070 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5071                                      int fd, int cmd, abi_long arg)
5072 {
5073     int sig = target_to_host_signal(arg);
5074     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5075 }
5076 
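/*
 * SIOCGSTAMP/SIOCGSTAMPNS exist in _OLD and time64 flavours on the target.
 * Both are serviced by the same host ioctl; only the copy-out format differs
 * (the target's native timeval/timespec for the _OLD commands, an explicit
 * 64-bit layout otherwise).
 */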
5077 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5078                                     int fd, int cmd, abi_long arg)
5079 {
5080     struct timeval tv;
5081     abi_long ret;
5082 
5083     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5084     if (is_error(ret)) {
5085         return ret;
5086     }
5087 
5088     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5089         if (copy_to_user_timeval(arg, &tv)) {
5090             return -TARGET_EFAULT;
5091         }
5092     } else {
5093         if (copy_to_user_timeval64(arg, &tv)) {
5094             return -TARGET_EFAULT;
5095         }
5096     }
5097 
5098     return ret;
5099 }
5100 
5101 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5102                                       int fd, int cmd, abi_long arg)
5103 {
5104     struct timespec ts;
5105     abi_long ret;
5106 
5107     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5108     if (is_error(ret)) {
5109         return ret;
5110     }
5111 
5112     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5113         if (host_to_target_timespec(arg, &ts)) {
5114             return -TARGET_EFAULT;
5115         }
5116     } else {
5117         if (host_to_target_timespec64(arg, &ts)) {
5118             return -TARGET_EFAULT;
5119         }
5120     }
5121 
5122     return ret;
5123 }
5124 
5125 #ifdef TIOCGPTPEER
5126 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5127                                      int fd, int cmd, abi_long arg)
5128 {
5129     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5130     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5131 }
5132 #endif
5133 
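/*
 * ioctls.h instantiates one IOCTLEntry per supported command through the
 * macros below.  As an illustration only (the real entries come from
 * ioctls.h), a line such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * would expand to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * giving do_ioctl() the target/host command values, the access direction and
 * the argument description for the thunk converter.
 */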
5134 static IOCTLEntry ioctl_entries[] = {
5135 #define IOCTL(cmd, access, ...) \
5136     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5137 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5138     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5139 #define IOCTL_IGNORE(cmd) \
5140     { TARGET_ ## cmd, 0, #cmd },
5141 #include "ioctls.h"
5142     { 0, 0, },
5143 };
5144 
5145 /* ??? Implement proper locking for ioctls.  */
5146 /* do_ioctl() must return target values and target errnos. */
5147 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5148 {
5149     const IOCTLEntry *ie;
5150     const argtype *arg_type;
5151     abi_long ret;
5152     uint8_t buf_temp[MAX_STRUCT_SIZE];
5153     int target_size;
5154     void *argptr;
5155 
5156     ie = ioctl_entries;
5157     for (;;) {
5158         if (ie->target_cmd == 0) {
5159             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5160             return -TARGET_ENOSYS;
5161         }
5162         if (ie->target_cmd == cmd)
5163             break;
5164         ie++;
5165     }
5166     arg_type = ie->arg_type;
5167     if (ie->do_ioctl) {
5168         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5169     } else if (!ie->host_cmd) {
5170         /* Some architectures define BSD ioctls in their headers
5171            that are not implemented in Linux.  */
5172         return -TARGET_ENOSYS;
5173     }
5174 
5175     switch (arg_type[0]) {
5176     case TYPE_NULL:
5177         /* no argument */
5178         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5179         break;
5180     case TYPE_PTRVOID:
5181     case TYPE_INT:
5182     case TYPE_LONG:
5183     case TYPE_ULONG:
5184         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5185         break;
5186     case TYPE_PTR:
5187         arg_type++;
5188         target_size = thunk_type_size(arg_type, 0);
5189         switch (ie->access) {
5190         case IOC_R:
5191             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5192             if (!is_error(ret)) {
5193                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5194                 if (!argptr)
5195                     return -TARGET_EFAULT;
5196                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5197                 unlock_user(argptr, arg, target_size);
5198             }
5199             break;
5200         case IOC_W:
5201             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5202             if (!argptr)
5203                 return -TARGET_EFAULT;
5204             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5205             unlock_user(argptr, arg, 0);
5206             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5207             break;
5208         default:
5209         case IOC_RW:
5210             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5211             if (!argptr)
5212                 return -TARGET_EFAULT;
5213             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5214             unlock_user(argptr, arg, 0);
5215             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5216             if (!is_error(ret)) {
5217                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5218                 if (!argptr)
5219                     return -TARGET_EFAULT;
5220                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5221                 unlock_user(argptr, arg, target_size);
5222             }
5223             break;
5224         }
5225         break;
5226     default:
5227         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5228                  (long)cmd, arg_type[0]);
5229         ret = -TARGET_ENOSYS;
5230         break;
5231     }
5232     return ret;
5233 }
5234 
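/*
 * Termios flag translation tables.  Each bitmask_transtbl entry reads as
 * { target_mask, target_bits, host_mask, host_bits }: when the value under
 * target_mask equals target_bits, the corresponding host_bits are set in the
 * converted flags (and the reverse mapping is used for host-to-target).
 * Multi-bit fields such as CBAUD or CSIZE therefore need one entry per value.
 */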
5235 static const bitmask_transtbl iflag_tbl[] = {
5236         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5237         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5238         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5239         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5240         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5241         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5242         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5243         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5244         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5245         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5246         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5247         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5248         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5249         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5250         { 0, 0, 0, 0 }
5251 };
5252 
5253 static const bitmask_transtbl oflag_tbl[] = {
5254 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5255 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5256 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5257 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5258 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5259 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5260 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5261 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5262 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5263 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5264 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5265 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5266 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5267 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5268 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5269 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5270 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5271 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5272 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5273 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5274 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5275 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5276 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5277 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5278 	{ 0, 0, 0, 0 }
5279 };
5280 
5281 static const bitmask_transtbl cflag_tbl[] = {
5282 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5283 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5284 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5285 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5286 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5287 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5288 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5289 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5290 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5291 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5292 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5293 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5294 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5295 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5296 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5297 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5298 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5299 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5300 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5301 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5302 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5303 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5304 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5305 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5306 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5307 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5308 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5309 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5310 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5311 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5312 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5313 	{ 0, 0, 0, 0 }
5314 };
5315 
5316 static const bitmask_transtbl lflag_tbl[] = {
5317 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5318 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5319 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5320 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5321 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5322 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5323 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5324 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5325 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5326 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5327 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5328 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5329 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5330 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5331 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5332 	{ 0, 0, 0, 0 }
5333 };
5334 
5335 static void target_to_host_termios (void *dst, const void *src)
5336 {
5337     struct host_termios *host = dst;
5338     const struct target_termios *target = src;
5339 
5340     host->c_iflag =
5341         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5342     host->c_oflag =
5343         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5344     host->c_cflag =
5345         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5346     host->c_lflag =
5347         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5348     host->c_line = target->c_line;
5349 
5350     memset(host->c_cc, 0, sizeof(host->c_cc));
5351     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5352     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5353     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5354     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5355     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5356     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5357     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5358     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5359     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5360     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5361     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5362     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5363     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5364     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5365     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5366     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5367     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5368 }
5369 
5370 static void host_to_target_termios (void *dst, const void *src)
5371 {
5372     struct target_termios *target = dst;
5373     const struct host_termios *host = src;
5374 
5375     target->c_iflag =
5376         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5377     target->c_oflag =
5378         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5379     target->c_cflag =
5380         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5381     target->c_lflag =
5382         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5383     target->c_line = host->c_line;
5384 
5385     memset(target->c_cc, 0, sizeof(target->c_cc));
5386     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5387     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5388     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5389     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5390     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5391     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5392     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5393     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5394     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5395     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5396     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5397     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5398     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5399     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5400     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5401     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5402     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5403 }
5404 
5405 static const StructEntry struct_termios_def = {
5406     .convert = { host_to_target_termios, target_to_host_termios },
5407     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5408     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5409 };
5410 
5411 static bitmask_transtbl mmap_flags_tbl[] = {
5412     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5413     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5414     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5415     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5416       MAP_ANONYMOUS, MAP_ANONYMOUS },
5417     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5418       MAP_GROWSDOWN, MAP_GROWSDOWN },
5419     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5420       MAP_DENYWRITE, MAP_DENYWRITE },
5421     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5422       MAP_EXECUTABLE, MAP_EXECUTABLE },
5423     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5424     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5425       MAP_NORESERVE, MAP_NORESERVE },
5426     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5427     /* MAP_STACK had been ignored by the kernel for quite some time.
5428        Recognize it for the target insofar as we do not want to pass
5429        it through to the host.  */
5430     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5431     { 0, 0, 0, 0 }
5432 };
5433 
5434 #if defined(TARGET_I386)
5435 
5436 /* NOTE: there is really one LDT for all the threads */
5437 static uint8_t *ldt_table;
5438 
5439 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5440 {
5441     int size;
5442     void *p;
5443 
5444     if (!ldt_table)
5445         return 0;
5446     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5447     if (size > bytecount)
5448         size = bytecount;
5449     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5450     if (!p)
5451         return -TARGET_EFAULT;
5452     /* ??? Should this be byteswapped?  */
5453     memcpy(p, ldt_table, size);
5454     unlock_user(p, ptr, size);
5455     return size;
5456 }
5457 
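/*
 * write_ldt() packs base, limit and flags into the two 32-bit words of an
 * x86 segment descriptor (entry_1/entry_2), mirroring the layout used by the
 * kernel's modify_ldt: base and limit are split across both words, while the
 * type, present, 32-bit, granularity and AVL bits live in entry_2.
 */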
5458 /* XXX: add locking support */
5459 static abi_long write_ldt(CPUX86State *env,
5460                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5461 {
5462     struct target_modify_ldt_ldt_s ldt_info;
5463     struct target_modify_ldt_ldt_s *target_ldt_info;
5464     int seg_32bit, contents, read_exec_only, limit_in_pages;
5465     int seg_not_present, useable, lm;
5466     uint32_t *lp, entry_1, entry_2;
5467 
5468     if (bytecount != sizeof(ldt_info))
5469         return -TARGET_EINVAL;
5470     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5471         return -TARGET_EFAULT;
5472     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5473     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5474     ldt_info.limit = tswap32(target_ldt_info->limit);
5475     ldt_info.flags = tswap32(target_ldt_info->flags);
5476     unlock_user_struct(target_ldt_info, ptr, 0);
5477 
5478     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5479         return -TARGET_EINVAL;
5480     seg_32bit = ldt_info.flags & 1;
5481     contents = (ldt_info.flags >> 1) & 3;
5482     read_exec_only = (ldt_info.flags >> 3) & 1;
5483     limit_in_pages = (ldt_info.flags >> 4) & 1;
5484     seg_not_present = (ldt_info.flags >> 5) & 1;
5485     useable = (ldt_info.flags >> 6) & 1;
5486 #ifdef TARGET_ABI32
5487     lm = 0;
5488 #else
5489     lm = (ldt_info.flags >> 7) & 1;
5490 #endif
5491     if (contents == 3) {
5492         if (oldmode)
5493             return -TARGET_EINVAL;
5494         if (seg_not_present == 0)
5495             return -TARGET_EINVAL;
5496     }
5497     /* allocate the LDT */
5498     if (!ldt_table) {
5499         env->ldt.base = target_mmap(0,
5500                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5501                                     PROT_READ|PROT_WRITE,
5502                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5503         if (env->ldt.base == -1)
5504             return -TARGET_ENOMEM;
5505         memset(g2h(env->ldt.base), 0,
5506                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5507         env->ldt.limit = 0xffff;
5508         ldt_table = g2h(env->ldt.base);
5509     }
5510 
5511     /* NOTE: same code as Linux kernel */
5512     /* Allow LDTs to be cleared by the user. */
5513     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5514         if (oldmode ||
5515             (contents == 0		&&
5516              read_exec_only == 1	&&
5517              seg_32bit == 0		&&
5518              limit_in_pages == 0	&&
5519              seg_not_present == 1	&&
5520              useable == 0 )) {
5521             entry_1 = 0;
5522             entry_2 = 0;
5523             goto install;
5524         }
5525     }
5526 
5527     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5528         (ldt_info.limit & 0x0ffff);
5529     entry_2 = (ldt_info.base_addr & 0xff000000) |
5530         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5531         (ldt_info.limit & 0xf0000) |
5532         ((read_exec_only ^ 1) << 9) |
5533         (contents << 10) |
5534         ((seg_not_present ^ 1) << 15) |
5535         (seg_32bit << 22) |
5536         (limit_in_pages << 23) |
5537         (lm << 21) |
5538         0x7000;
5539     if (!oldmode)
5540         entry_2 |= (useable << 20);
5541 
5542     /* Install the new entry ...  */
5543 install:
5544     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5545     lp[0] = tswap32(entry_1);
5546     lp[1] = tswap32(entry_2);
5547     return 0;
5548 }
5549 
5550 /* specific and weird i386 syscalls */
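/*
 * do_modify_ldt() follows the kernel's encoding of the func argument:
 * 0 reads the LDT, 1 writes an entry in the legacy format and 0x11 writes
 * one in the current format; the two write modes differ only in the legacy
 * quirks handled via the oldmode flag in write_ldt().
 */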
5551 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5552                               unsigned long bytecount)
5553 {
5554     abi_long ret;
5555 
5556     switch (func) {
5557     case 0:
5558         ret = read_ldt(ptr, bytecount);
5559         break;
5560     case 1:
5561         ret = write_ldt(env, ptr, bytecount, 1);
5562         break;
5563     case 0x11:
5564         ret = write_ldt(env, ptr, bytecount, 0);
5565         break;
5566     default:
5567         ret = -TARGET_ENOSYS;
5568         break;
5569     }
5570     return ret;
5571 }
5572 
5573 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5574 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5575 {
5576     uint64_t *gdt_table = g2h(env->gdt.base);
5577     struct target_modify_ldt_ldt_s ldt_info;
5578     struct target_modify_ldt_ldt_s *target_ldt_info;
5579     int seg_32bit, contents, read_exec_only, limit_in_pages;
5580     int seg_not_present, useable, lm;
5581     uint32_t *lp, entry_1, entry_2;
5582     int i;
5583 
5584     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5585     if (!target_ldt_info)
5586         return -TARGET_EFAULT;
5587     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5588     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5589     ldt_info.limit = tswap32(target_ldt_info->limit);
5590     ldt_info.flags = tswap32(target_ldt_info->flags);
5591     if (ldt_info.entry_number == -1) {
5592         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5593             if (gdt_table[i] == 0) {
5594                 ldt_info.entry_number = i;
5595                 target_ldt_info->entry_number = tswap32(i);
5596                 break;
5597             }
5598         }
5599     }
5600     unlock_user_struct(target_ldt_info, ptr, 1);
5601 
5602     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5603         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5604            return -TARGET_EINVAL;
5605     seg_32bit = ldt_info.flags & 1;
5606     contents = (ldt_info.flags >> 1) & 3;
5607     read_exec_only = (ldt_info.flags >> 3) & 1;
5608     limit_in_pages = (ldt_info.flags >> 4) & 1;
5609     seg_not_present = (ldt_info.flags >> 5) & 1;
5610     useable = (ldt_info.flags >> 6) & 1;
5611 #ifdef TARGET_ABI32
5612     lm = 0;
5613 #else
5614     lm = (ldt_info.flags >> 7) & 1;
5615 #endif
5616 
5617     if (contents == 3) {
5618         if (seg_not_present == 0)
5619             return -TARGET_EINVAL;
5620     }
5621 
5622     /* NOTE: same code as Linux kernel */
5623     /* Allow LDTs to be cleared by the user. */
5624     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5625         if ((contents == 0             &&
5626              read_exec_only == 1       &&
5627              seg_32bit == 0            &&
5628              limit_in_pages == 0       &&
5629              seg_not_present == 1      &&
5630              useable == 0 )) {
5631             entry_1 = 0;
5632             entry_2 = 0;
5633             goto install;
5634         }
5635     }
5636 
5637     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5638         (ldt_info.limit & 0x0ffff);
5639     entry_2 = (ldt_info.base_addr & 0xff000000) |
5640         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5641         (ldt_info.limit & 0xf0000) |
5642         ((read_exec_only ^ 1) << 9) |
5643         (contents << 10) |
5644         ((seg_not_present ^ 1) << 15) |
5645         (seg_32bit << 22) |
5646         (limit_in_pages << 23) |
5647         (useable << 20) |
5648         (lm << 21) |
5649         0x7000;
5650 
5651     /* Install the new entry ...  */
5652 install:
5653     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5654     lp[0] = tswap32(entry_1);
5655     lp[1] = tswap32(entry_2);
5656     return 0;
5657 }
5658 
5659 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5660 {
5661     struct target_modify_ldt_ldt_s *target_ldt_info;
5662     uint64_t *gdt_table = g2h(env->gdt.base);
5663     uint32_t base_addr, limit, flags;
5664     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5665     int seg_not_present, useable, lm;
5666     uint32_t *lp, entry_1, entry_2;
5667 
5668     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5669     if (!target_ldt_info)
5670         return -TARGET_EFAULT;
5671     idx = tswap32(target_ldt_info->entry_number);
5672     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5673         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5674         unlock_user_struct(target_ldt_info, ptr, 1);
5675         return -TARGET_EINVAL;
5676     }
5677     lp = (uint32_t *)(gdt_table + idx);
5678     entry_1 = tswap32(lp[0]);
5679     entry_2 = tswap32(lp[1]);
5680 
5681     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5682     contents = (entry_2 >> 10) & 3;
5683     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5684     seg_32bit = (entry_2 >> 22) & 1;
5685     limit_in_pages = (entry_2 >> 23) & 1;
5686     useable = (entry_2 >> 20) & 1;
5687 #ifdef TARGET_ABI32
5688     lm = 0;
5689 #else
5690     lm = (entry_2 >> 21) & 1;
5691 #endif
5692     flags = (seg_32bit << 0) | (contents << 1) |
5693         (read_exec_only << 3) | (limit_in_pages << 4) |
5694         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5695     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5696     base_addr = (entry_1 >> 16) |
5697         (entry_2 & 0xff000000) |
5698         ((entry_2 & 0xff) << 16);
5699     target_ldt_info->base_addr = tswapal(base_addr);
5700     target_ldt_info->limit = tswap32(limit);
5701     target_ldt_info->flags = tswap32(flags);
5702     unlock_user_struct(target_ldt_info, ptr, 1);
5703     return 0;
5704 }
5705 #endif /* TARGET_I386 && TARGET_ABI32 */
5706 
5707 #ifndef TARGET_ABI32
5708 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5709 {
5710     abi_long ret = 0;
5711     abi_ulong val;
5712     int idx;
5713 
5714     switch (code) {
5715     case TARGET_ARCH_SET_GS:
5716     case TARGET_ARCH_SET_FS:
5717         if (code == TARGET_ARCH_SET_GS)
5718             idx = R_GS;
5719         else
5720             idx = R_FS;
5721         cpu_x86_load_seg(env, idx, 0);
5722         env->segs[idx].base = addr;
5723         break;
5724     case TARGET_ARCH_GET_GS:
5725     case TARGET_ARCH_GET_FS:
5726         if (code == TARGET_ARCH_GET_GS)
5727             idx = R_GS;
5728         else
5729             idx = R_FS;
5730         val = env->segs[idx].base;
5731         if (put_user(val, addr, abi_ulong))
5732             ret = -TARGET_EFAULT;
5733         break;
5734     default:
5735         ret = -TARGET_EINVAL;
5736         break;
5737     }
5738     return ret;
5739 }
5740 #endif
5741 
5742 #endif /* defined(TARGET_I386) */
5743 
5744 #define NEW_STACK_SIZE 0x40000
5745 
5746 
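/*
 * Thread-creation handshake used by do_fork() for CLONE_VM clones: the
 * parent fills in a new_thread_info, blocks signals and holds both
 * clone_lock and info.mutex across pthread_create().  The child publishes
 * its tid, signals info.cond, and then waits on clone_lock until the parent
 * has finished its setup before entering cpu_loop().
 */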
5747 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5748 typedef struct {
5749     CPUArchState *env;
5750     pthread_mutex_t mutex;
5751     pthread_cond_t cond;
5752     pthread_t thread;
5753     uint32_t tid;
5754     abi_ulong child_tidptr;
5755     abi_ulong parent_tidptr;
5756     sigset_t sigmask;
5757 } new_thread_info;
5758 
5759 static void *clone_func(void *arg)
5760 {
5761     new_thread_info *info = arg;
5762     CPUArchState *env;
5763     CPUState *cpu;
5764     TaskState *ts;
5765 
5766     rcu_register_thread();
5767     tcg_register_thread();
5768     env = info->env;
5769     cpu = env_cpu(env);
5770     thread_cpu = cpu;
5771     ts = (TaskState *)cpu->opaque;
5772     info->tid = sys_gettid();
5773     task_settid(ts);
5774     if (info->child_tidptr)
5775         put_user_u32(info->tid, info->child_tidptr);
5776     if (info->parent_tidptr)
5777         put_user_u32(info->tid, info->parent_tidptr);
5778     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5779     /* Enable signals.  */
5780     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5781     /* Signal to the parent that we're ready.  */
5782     pthread_mutex_lock(&info->mutex);
5783     pthread_cond_broadcast(&info->cond);
5784     pthread_mutex_unlock(&info->mutex);
5785     /* Wait until the parent has finished initializing the tls state.  */
5786     pthread_mutex_lock(&clone_lock);
5787     pthread_mutex_unlock(&clone_lock);
5788     cpu_loop(env);
5789     /* never exits */
5790     return NULL;
5791 }
5792 
5793 /* do_fork() must return host values and target errnos (unlike most
5794    do_*() functions). */
5795 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5796                    abi_ulong parent_tidptr, target_ulong newtls,
5797                    abi_ulong child_tidptr)
5798 {
5799     CPUState *cpu = env_cpu(env);
5800     int ret;
5801     TaskState *ts;
5802     CPUState *new_cpu;
5803     CPUArchState *new_env;
5804     sigset_t sigmask;
5805 
5806     flags &= ~CLONE_IGNORED_FLAGS;
5807 
5808     /* Emulate vfork() with fork() */
5809     if (flags & CLONE_VFORK)
5810         flags &= ~(CLONE_VFORK | CLONE_VM);
5811 
5812     if (flags & CLONE_VM) {
5813         TaskState *parent_ts = (TaskState *)cpu->opaque;
5814         new_thread_info info;
5815         pthread_attr_t attr;
5816 
5817         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5818             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5819             return -TARGET_EINVAL;
5820         }
5821 
5822         ts = g_new0(TaskState, 1);
5823         init_task_state(ts);
5824 
5825         /* Grab a mutex so that thread setup appears atomic.  */
5826         pthread_mutex_lock(&clone_lock);
5827 
5828         /* we create a new CPU instance. */
5829         new_env = cpu_copy(env);
5830         /* Init regs that differ from the parent.  */
5831         cpu_clone_regs_child(new_env, newsp, flags);
5832         cpu_clone_regs_parent(env, flags);
5833         new_cpu = env_cpu(new_env);
5834         new_cpu->opaque = ts;
5835         ts->bprm = parent_ts->bprm;
5836         ts->info = parent_ts->info;
5837         ts->signal_mask = parent_ts->signal_mask;
5838 
5839         if (flags & CLONE_CHILD_CLEARTID) {
5840             ts->child_tidptr = child_tidptr;
5841         }
5842 
5843         if (flags & CLONE_SETTLS) {
5844             cpu_set_tls (new_env, newtls);
5845         }
5846 
5847         memset(&info, 0, sizeof(info));
5848         pthread_mutex_init(&info.mutex, NULL);
5849         pthread_mutex_lock(&info.mutex);
5850         pthread_cond_init(&info.cond, NULL);
5851         info.env = new_env;
5852         if (flags & CLONE_CHILD_SETTID) {
5853             info.child_tidptr = child_tidptr;
5854         }
5855         if (flags & CLONE_PARENT_SETTID) {
5856             info.parent_tidptr = parent_tidptr;
5857         }
5858 
5859         ret = pthread_attr_init(&attr);
5860         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5861         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5862         /* It is not safe to deliver signals until the child has finished
5863            initializing, so temporarily block all signals.  */
5864         sigfillset(&sigmask);
5865         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5866         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5867 
5868         /* If this is our first additional thread, we need to ensure we
5869          * generate code for parallel execution and flush old translations.
5870          */
5871         if (!parallel_cpus) {
5872             parallel_cpus = true;
5873             tb_flush(cpu);
5874         }
5875 
5876         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5877         /* TODO: Free new CPU state if thread creation failed.  */
5878 
5879         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5880         pthread_attr_destroy(&attr);
5881         if (ret == 0) {
5882             /* Wait for the child to initialize.  */
5883             pthread_cond_wait(&info.cond, &info.mutex);
5884             ret = info.tid;
5885         } else {
5886             ret = -1;
5887         }
5888         pthread_mutex_unlock(&info.mutex);
5889         pthread_cond_destroy(&info.cond);
5890         pthread_mutex_destroy(&info.mutex);
5891         pthread_mutex_unlock(&clone_lock);
5892     } else {
5893         /* if no CLONE_VM, we consider it a fork */
5894         if (flags & CLONE_INVALID_FORK_FLAGS) {
5895             return -TARGET_EINVAL;
5896         }
5897 
5898         /* We can't support custom termination signals */
5899         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5900             return -TARGET_EINVAL;
5901         }
5902 
5903         if (block_signals()) {
5904             return -TARGET_ERESTARTSYS;
5905         }
5906 
5907         fork_start();
5908         ret = fork();
5909         if (ret == 0) {
5910             /* Child Process.  */
5911             cpu_clone_regs_child(env, newsp, flags);
5912             fork_end(1);
5913             /* There is a race condition here.  The parent process could
5914                theoretically read the TID in the child process before the child
5915                tid is set.  This would require using either ptrace
5916                (not implemented) or having *_tidptr point at a shared memory
5917                mapping.  We can't repeat the spinlock hack used above because
5918                the child process gets its own copy of the lock.  */
5919             if (flags & CLONE_CHILD_SETTID)
5920                 put_user_u32(sys_gettid(), child_tidptr);
5921             if (flags & CLONE_PARENT_SETTID)
5922                 put_user_u32(sys_gettid(), parent_tidptr);
5923             ts = (TaskState *)cpu->opaque;
5924             if (flags & CLONE_SETTLS)
5925                 cpu_set_tls (env, newtls);
5926             if (flags & CLONE_CHILD_CLEARTID)
5927                 ts->child_tidptr = child_tidptr;
5928         } else {
5929             cpu_clone_regs_parent(env, flags);
5930             fork_end(0);
5931         }
5932     }
5933     return ret;
5934 }
5935 
5936 /* warning: doesn't handle Linux-specific flags... */
5937 static int target_to_host_fcntl_cmd(int cmd)
5938 {
5939     int ret;
5940 
5941     switch (cmd) {
5942     case TARGET_F_DUPFD:
5943     case TARGET_F_GETFD:
5944     case TARGET_F_SETFD:
5945     case TARGET_F_GETFL:
5946     case TARGET_F_SETFL:
5947         ret = cmd;
5948         break;
5949     case TARGET_F_GETLK:
5950         ret = F_GETLK64;
5951         break;
5952     case TARGET_F_SETLK:
5953         ret = F_SETLK64;
5954         break;
5955     case TARGET_F_SETLKW:
5956         ret = F_SETLKW64;
5957         break;
5958     case TARGET_F_GETOWN:
5959         ret = F_GETOWN;
5960         break;
5961     case TARGET_F_SETOWN:
5962         ret = F_SETOWN;
5963         break;
5964     case TARGET_F_GETSIG:
5965         ret = F_GETSIG;
5966         break;
5967     case TARGET_F_SETSIG:
5968         ret = F_SETSIG;
5969         break;
5970 #if TARGET_ABI_BITS == 32
5971     case TARGET_F_GETLK64:
5972         ret = F_GETLK64;
5973         break;
5974     case TARGET_F_SETLK64:
5975         ret = F_SETLK64;
5976         break;
5977     case TARGET_F_SETLKW64:
5978         ret = F_SETLKW64;
5979         break;
5980 #endif
5981     case TARGET_F_SETLEASE:
5982         ret = F_SETLEASE;
5983         break;
5984     case TARGET_F_GETLEASE:
5985         ret = F_GETLEASE;
5986         break;
5987 #ifdef F_DUPFD_CLOEXEC
5988     case TARGET_F_DUPFD_CLOEXEC:
5989         ret = F_DUPFD_CLOEXEC;
5990         break;
5991 #endif
5992     case TARGET_F_NOTIFY:
5993         ret = F_NOTIFY;
5994         break;
5995 #ifdef F_GETOWN_EX
5996     case TARGET_F_GETOWN_EX:
5997         ret = F_GETOWN_EX;
5998         break;
5999 #endif
6000 #ifdef F_SETOWN_EX
6001     case TARGET_F_SETOWN_EX:
6002         ret = F_SETOWN_EX;
6003         break;
6004 #endif
6005 #ifdef F_SETPIPE_SZ
6006     case TARGET_F_SETPIPE_SZ:
6007         ret = F_SETPIPE_SZ;
6008         break;
6009     case TARGET_F_GETPIPE_SZ:
6010         ret = F_GETPIPE_SZ;
6011         break;
6012 #endif
6013     default:
6014         ret = -TARGET_EINVAL;
6015         break;
6016     }
6017 
6018 #if defined(__powerpc64__)
6019     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6020      * are not supported by the kernel. The glibc fcntl call actually adjusts
6021      * them to 5, 6 and 7 before making the syscall(). Since we make the
6022      * syscall directly, adjust to what is supported by the kernel.
6023      */
6024     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6025         ret -= F_GETLK64 - 5;
6026     }
6027 #endif
6028 
6029     return ret;
6030 }
6031 
6032 #define FLOCK_TRANSTBL \
6033     switch (type) { \
6034     TRANSTBL_CONVERT(F_RDLCK); \
6035     TRANSTBL_CONVERT(F_WRLCK); \
6036     TRANSTBL_CONVERT(F_UNLCK); \
6037     TRANSTBL_CONVERT(F_EXLCK); \
6038     TRANSTBL_CONVERT(F_SHLCK); \
6039     }
6040 
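/*
 * FLOCK_TRANSTBL expands to a plain switch over the lock types; with the
 * target-to-host definition of TRANSTBL_CONVERT it becomes, roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     ...
 *     }
 *
 * and the host-to-target variant simply swaps the two sides.
 */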
6041 static int target_to_host_flock(int type)
6042 {
6043 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6044     FLOCK_TRANSTBL
6045 #undef  TRANSTBL_CONVERT
6046     return -TARGET_EINVAL;
6047 }
6048 
6049 static int host_to_target_flock(int type)
6050 {
6051 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6052     FLOCK_TRANSTBL
6053 #undef  TRANSTBL_CONVERT
6054     /* if we don't know how to convert the value coming
6055      * from the host, we copy it to the target field as-is
6056      */
6057     return type;
6058 }
6059 
6060 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6061                                             abi_ulong target_flock_addr)
6062 {
6063     struct target_flock *target_fl;
6064     int l_type;
6065 
6066     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6067         return -TARGET_EFAULT;
6068     }
6069 
6070     __get_user(l_type, &target_fl->l_type);
6071     l_type = target_to_host_flock(l_type);
6072     if (l_type < 0) {
6073         return l_type;
6074     }
6075     fl->l_type = l_type;
6076     __get_user(fl->l_whence, &target_fl->l_whence);
6077     __get_user(fl->l_start, &target_fl->l_start);
6078     __get_user(fl->l_len, &target_fl->l_len);
6079     __get_user(fl->l_pid, &target_fl->l_pid);
6080     unlock_user_struct(target_fl, target_flock_addr, 0);
6081     return 0;
6082 }
6083 
6084 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6085                                           const struct flock64 *fl)
6086 {
6087     struct target_flock *target_fl;
6088     short l_type;
6089 
6090     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6091         return -TARGET_EFAULT;
6092     }
6093 
6094     l_type = host_to_target_flock(fl->l_type);
6095     __put_user(l_type, &target_fl->l_type);
6096     __put_user(fl->l_whence, &target_fl->l_whence);
6097     __put_user(fl->l_start, &target_fl->l_start);
6098     __put_user(fl->l_len, &target_fl->l_len);
6099     __put_user(fl->l_pid, &target_fl->l_pid);
6100     unlock_user_struct(target_fl, target_flock_addr, 1);
6101     return 0;
6102 }
6103 
6104 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6105 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6106 
6107 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6108 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6109                                                    abi_ulong target_flock_addr)
6110 {
6111     struct target_oabi_flock64 *target_fl;
6112     int l_type;
6113 
6114     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6115         return -TARGET_EFAULT;
6116     }
6117 
6118     __get_user(l_type, &target_fl->l_type);
6119     l_type = target_to_host_flock(l_type);
6120     if (l_type < 0) {
6121         return l_type;
6122     }
6123     fl->l_type = l_type;
6124     __get_user(fl->l_whence, &target_fl->l_whence);
6125     __get_user(fl->l_start, &target_fl->l_start);
6126     __get_user(fl->l_len, &target_fl->l_len);
6127     __get_user(fl->l_pid, &target_fl->l_pid);
6128     unlock_user_struct(target_fl, target_flock_addr, 0);
6129     return 0;
6130 }
6131 
6132 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6133                                                  const struct flock64 *fl)
6134 {
6135     struct target_oabi_flock64 *target_fl;
6136     short l_type;
6137 
6138     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6139         return -TARGET_EFAULT;
6140     }
6141 
6142     l_type = host_to_target_flock(fl->l_type);
6143     __put_user(l_type, &target_fl->l_type);
6144     __put_user(fl->l_whence, &target_fl->l_whence);
6145     __put_user(fl->l_start, &target_fl->l_start);
6146     __put_user(fl->l_len, &target_fl->l_len);
6147     __put_user(fl->l_pid, &target_fl->l_pid);
6148     unlock_user_struct(target_fl, target_flock_addr, 1);
6149     return 0;
6150 }
6151 #endif
6152 
6153 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6154                                               abi_ulong target_flock_addr)
6155 {
6156     struct target_flock64 *target_fl;
6157     int l_type;
6158 
6159     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6160         return -TARGET_EFAULT;
6161     }
6162 
6163     __get_user(l_type, &target_fl->l_type);
6164     l_type = target_to_host_flock(l_type);
6165     if (l_type < 0) {
6166         return l_type;
6167     }
6168     fl->l_type = l_type;
6169     __get_user(fl->l_whence, &target_fl->l_whence);
6170     __get_user(fl->l_start, &target_fl->l_start);
6171     __get_user(fl->l_len, &target_fl->l_len);
6172     __get_user(fl->l_pid, &target_fl->l_pid);
6173     unlock_user_struct(target_fl, target_flock_addr, 0);
6174     return 0;
6175 }
6176 
6177 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6178                                             const struct flock64 *fl)
6179 {
6180     struct target_flock64 *target_fl;
6181     short l_type;
6182 
6183     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6184         return -TARGET_EFAULT;
6185     }
6186 
6187     l_type = host_to_target_flock(fl->l_type);
6188     __put_user(l_type, &target_fl->l_type);
6189     __put_user(fl->l_whence, &target_fl->l_whence);
6190     __put_user(fl->l_start, &target_fl->l_start);
6191     __put_user(fl->l_len, &target_fl->l_len);
6192     __put_user(fl->l_pid, &target_fl->l_pid);
6193     unlock_user_struct(target_fl, target_flock_addr, 1);
6194     return 0;
6195 }
6196 
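/*
 * Emulate fcntl(): translate the target command to the host command,
 * convert flock/flock64 and f_owner_ex structures between target and
 * host layouts, and convert the flag bitmask for F_GETFL/F_SETFL.
 */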
6197 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6198 {
6199     struct flock64 fl64;
6200 #ifdef F_GETOWN_EX
6201     struct f_owner_ex fox;
6202     struct target_f_owner_ex *target_fox;
6203 #endif
6204     abi_long ret;
6205     int host_cmd = target_to_host_fcntl_cmd(cmd);
6206 
6207     if (host_cmd == -TARGET_EINVAL)
6208         return host_cmd;
6209 
6210     switch(cmd) {
6211     case TARGET_F_GETLK:
6212         ret = copy_from_user_flock(&fl64, arg);
6213         if (ret) {
6214             return ret;
6215         }
6216         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6217         if (ret == 0) {
6218             ret = copy_to_user_flock(arg, &fl64);
6219         }
6220         break;
6221 
6222     case TARGET_F_SETLK:
6223     case TARGET_F_SETLKW:
6224         ret = copy_from_user_flock(&fl64, arg);
6225         if (ret) {
6226             return ret;
6227         }
6228         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6229         break;
6230 
6231     case TARGET_F_GETLK64:
6232         ret = copy_from_user_flock64(&fl64, arg);
6233         if (ret) {
6234             return ret;
6235         }
6236         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6237         if (ret == 0) {
6238             ret = copy_to_user_flock64(arg, &fl64);
6239         }
6240         break;
6241     case TARGET_F_SETLK64:
6242     case TARGET_F_SETLKW64:
6243         ret = copy_from_user_flock64(&fl64, arg);
6244         if (ret) {
6245             return ret;
6246         }
6247         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6248         break;
6249 
6250     case TARGET_F_GETFL:
6251         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6252         if (ret >= 0) {
6253             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6254         }
6255         break;
6256 
6257     case TARGET_F_SETFL:
6258         ret = get_errno(safe_fcntl(fd, host_cmd,
6259                                    target_to_host_bitmask(arg,
6260                                                           fcntl_flags_tbl)));
6261         break;
6262 
6263 #ifdef F_GETOWN_EX
6264     case TARGET_F_GETOWN_EX:
6265         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6266         if (ret >= 0) {
6267             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6268                 return -TARGET_EFAULT;
6269             target_fox->type = tswap32(fox.type);
6270             target_fox->pid = tswap32(fox.pid);
6271             unlock_user_struct(target_fox, arg, 1);
6272         }
6273         break;
6274 #endif
6275 
6276 #ifdef F_SETOWN_EX
6277     case TARGET_F_SETOWN_EX:
6278         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6279             return -TARGET_EFAULT;
6280         fox.type = tswap32(target_fox->type);
6281         fox.pid = tswap32(target_fox->pid);
6282         unlock_user_struct(target_fox, arg, 0);
6283         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6284         break;
6285 #endif
6286 
6287     case TARGET_F_SETOWN:
6288     case TARGET_F_GETOWN:
6289     case TARGET_F_SETSIG:
6290     case TARGET_F_GETSIG:
6291     case TARGET_F_SETLEASE:
6292     case TARGET_F_GETLEASE:
6293     case TARGET_F_SETPIPE_SZ:
6294     case TARGET_F_GETPIPE_SZ:
6295         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6296         break;
6297 
6298     default:
6299         ret = get_errno(safe_fcntl(fd, cmd, arg));
6300         break;
6301     }
6302     return ret;
6303 }
6304 
6305 #ifdef USE_UID16
6306 
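/*
 * Helpers for targets with 16-bit uid_t/gid_t: IDs above 65535 are
 * clamped to the overflow value 65534, and the 16-bit -1 is widened
 * to the full-width -1 so that "leave unchanged" arguments keep
 * their meaning.
 */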
6307 static inline int high2lowuid(int uid)
6308 {
6309     if (uid > 65535)
6310         return 65534;
6311     else
6312         return uid;
6313 }
6314 
6315 static inline int high2lowgid(int gid)
6316 {
6317     if (gid > 65535)
6318         return 65534;
6319     else
6320         return gid;
6321 }
6322 
6323 static inline int low2highuid(int uid)
6324 {
6325     if ((int16_t)uid == -1)
6326         return -1;
6327     else
6328         return uid;
6329 }
6330 
6331 static inline int low2highgid(int gid)
6332 {
6333     if ((int16_t)gid == -1)
6334         return -1;
6335     else
6336         return gid;
6337 }
6338 static inline int tswapid(int id)
6339 {
6340     return tswap16(id);
6341 }
6342 
6343 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6344 
6345 #else /* !USE_UID16 */
6346 static inline int high2lowuid(int uid)
6347 {
6348     return uid;
6349 }
6350 static inline int high2lowgid(int gid)
6351 {
6352     return gid;
6353 }
6354 static inline int low2highuid(int uid)
6355 {
6356     return uid;
6357 }
6358 static inline int low2highgid(int gid)
6359 {
6360     return gid;
6361 }
6362 static inline int tswapid(int id)
6363 {
6364     return tswap32(id);
6365 }
6366 
6367 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6368 
6369 #endif /* USE_UID16 */
6370 
6371 /* We must do direct syscalls for setting UID/GID, because we want to
6372  * implement the Linux system call semantics of "change only for this thread",
6373  * not the libc/POSIX semantics of "change for all threads in process".
6374  * (See http://ewontfix.com/17/ for more details.)
6375  * We use the 32-bit version of the syscalls if present; if it is not
6376  * then either the host architecture supports 32-bit UIDs natively with
6377  * the standard syscall, or the 16-bit UID is the best we can do.
6378  */
6379 #ifdef __NR_setuid32
6380 #define __NR_sys_setuid __NR_setuid32
6381 #else
6382 #define __NR_sys_setuid __NR_setuid
6383 #endif
6384 #ifdef __NR_setgid32
6385 #define __NR_sys_setgid __NR_setgid32
6386 #else
6387 #define __NR_sys_setgid __NR_setgid
6388 #endif
6389 #ifdef __NR_setresuid32
6390 #define __NR_sys_setresuid __NR_setresuid32
6391 #else
6392 #define __NR_sys_setresuid __NR_setresuid
6393 #endif
6394 #ifdef __NR_setresgid32
6395 #define __NR_sys_setresgid __NR_setresgid32
6396 #else
6397 #define __NR_sys_setresgid __NR_setresgid
6398 #endif
6399 
6400 _syscall1(int, sys_setuid, uid_t, uid)
6401 _syscall1(int, sys_setgid, gid_t, gid)
6402 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6403 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6404 
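/*
 * One-time initialisation: register the struct types used by the
 * ioctl thunking code, build the target-to-host errno table, and
 * patch the size field of ioctl numbers whose target definition uses
 * the wildcard (all-ones) size.
 */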
6405 void syscall_init(void)
6406 {
6407     IOCTLEntry *ie;
6408     const argtype *arg_type;
6409     int size;
6410     int i;
6411 
6412     thunk_init(STRUCT_MAX);
6413 
6414 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6415 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6416 #include "syscall_types.h"
6417 #undef STRUCT
6418 #undef STRUCT_SPECIAL
6419 
6420     /* Build target_to_host_errno_table[] from
6421      * host_to_target_errno_table[]. */
6422     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6423         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6424     }
6425 
6426     /* Patch the ioctl size if necessary.  We rely on the fact that
6427        no ioctl has all bits set to '1' in the size field. */
6428     ie = ioctl_entries;
6429     while (ie->target_cmd != 0) {
6430         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6431             TARGET_IOC_SIZEMASK) {
6432             arg_type = ie->arg_type;
6433             if (arg_type[0] != TYPE_PTR) {
6434                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6435                         ie->target_cmd);
6436                 exit(1);
6437             }
6438             arg_type++;
6439             size = thunk_type_size(arg_type, 0);
6440             ie->target_cmd = (ie->target_cmd &
6441                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6442                 (size << TARGET_IOC_SIZESHIFT);
6443         }
6444 
6445         /* automatic consistency check if same arch */
6446 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6447     (defined(__x86_64__) && defined(TARGET_X86_64))
6448         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6449             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6450                     ie->name, ie->target_cmd, ie->host_cmd);
6451         }
6452 #endif
6453         ie++;
6454     }
6455 }
6456 
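/*
 * Reassemble a 64-bit value (e.g. a file offset) from the two
 * register-sized halves in which a 32-bit ABI passes it; which half
 * holds the high word depends on the target endianness.  On 64-bit
 * ABIs the value arrives in a single register and the second
 * argument is ignored.
 */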
6457 #if TARGET_ABI_BITS == 32
6458 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6459 {
6460 #ifdef TARGET_WORDS_BIGENDIAN
6461     return ((uint64_t)word0 << 32) | word1;
6462 #else
6463     return ((uint64_t)word1 << 32) | word0;
6464 #endif
6465 }
6466 #else /* TARGET_ABI_BITS == 32 */
6467 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6468 {
6469     return word0;
6470 }
6471 #endif /* TARGET_ABI_BITS != 32 */
6472 
6473 #ifdef TARGET_NR_truncate64
6474 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6475                                          abi_long arg2,
6476                                          abi_long arg3,
6477                                          abi_long arg4)
6478 {
6479     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6480         arg2 = arg3;
6481         arg3 = arg4;
6482     }
6483     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6484 }
6485 #endif
6486 
6487 #ifdef TARGET_NR_ftruncate64
6488 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6489                                           abi_long arg2,
6490                                           abi_long arg3,
6491                                           abi_long arg4)
6492 {
6493     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6494         arg2 = arg3;
6495         arg3 = arg4;
6496     }
6497     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6498 }
6499 #endif
6500 
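/*
 * Conversion helpers for struct itimerspec, byte-swapping the
 * tv_sec/tv_nsec fields between target and host representations.
 */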
6501 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6502                                                  abi_ulong target_addr)
6503 {
6504     struct target_itimerspec *target_itspec;
6505 
6506     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6507         return -TARGET_EFAULT;
6508     }
6509 
6510     host_itspec->it_interval.tv_sec =
6511                             tswapal(target_itspec->it_interval.tv_sec);
6512     host_itspec->it_interval.tv_nsec =
6513                             tswapal(target_itspec->it_interval.tv_nsec);
6514     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6515     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6516 
6517     unlock_user_struct(target_itspec, target_addr, 1);
6518     return 0;
6519 }
6520 
6521 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6522                                                struct itimerspec *host_its)
6523 {
6524     struct target_itimerspec *target_itspec;
6525 
6526     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6527         return -TARGET_EFAULT;
6528     }
6529 
6530     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6531     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6532 
6533     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6534     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6535 
6536     unlock_user_struct(target_itspec, target_addr, 0);
6537     return 0;
6538 }
6539 
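/*
 * Conversion helpers for struct timex (adjtimex and friends),
 * copying each field individually between target and host layouts.
 */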
6540 static inline abi_long target_to_host_timex(struct timex *host_tx,
6541                                             abi_long target_addr)
6542 {
6543     struct target_timex *target_tx;
6544 
6545     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6546         return -TARGET_EFAULT;
6547     }
6548 
6549     __get_user(host_tx->modes, &target_tx->modes);
6550     __get_user(host_tx->offset, &target_tx->offset);
6551     __get_user(host_tx->freq, &target_tx->freq);
6552     __get_user(host_tx->maxerror, &target_tx->maxerror);
6553     __get_user(host_tx->esterror, &target_tx->esterror);
6554     __get_user(host_tx->status, &target_tx->status);
6555     __get_user(host_tx->constant, &target_tx->constant);
6556     __get_user(host_tx->precision, &target_tx->precision);
6557     __get_user(host_tx->tolerance, &target_tx->tolerance);
6558     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6559     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6560     __get_user(host_tx->tick, &target_tx->tick);
6561     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6562     __get_user(host_tx->jitter, &target_tx->jitter);
6563     __get_user(host_tx->shift, &target_tx->shift);
6564     __get_user(host_tx->stabil, &target_tx->stabil);
6565     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6566     __get_user(host_tx->calcnt, &target_tx->calcnt);
6567     __get_user(host_tx->errcnt, &target_tx->errcnt);
6568     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6569     __get_user(host_tx->tai, &target_tx->tai);
6570 
6571     unlock_user_struct(target_tx, target_addr, 0);
6572     return 0;
6573 }
6574 
6575 static inline abi_long host_to_target_timex(abi_long target_addr,
6576                                             struct timex *host_tx)
6577 {
6578     struct target_timex *target_tx;
6579 
6580     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6581         return -TARGET_EFAULT;
6582     }
6583 
6584     __put_user(host_tx->modes, &target_tx->modes);
6585     __put_user(host_tx->offset, &target_tx->offset);
6586     __put_user(host_tx->freq, &target_tx->freq);
6587     __put_user(host_tx->maxerror, &target_tx->maxerror);
6588     __put_user(host_tx->esterror, &target_tx->esterror);
6589     __put_user(host_tx->status, &target_tx->status);
6590     __put_user(host_tx->constant, &target_tx->constant);
6591     __put_user(host_tx->precision, &target_tx->precision);
6592     __put_user(host_tx->tolerance, &target_tx->tolerance);
6593     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6594     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6595     __put_user(host_tx->tick, &target_tx->tick);
6596     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6597     __put_user(host_tx->jitter, &target_tx->jitter);
6598     __put_user(host_tx->shift, &target_tx->shift);
6599     __put_user(host_tx->stabil, &target_tx->stabil);
6600     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6601     __put_user(host_tx->calcnt, &target_tx->calcnt);
6602     __put_user(host_tx->errcnt, &target_tx->errcnt);
6603     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6604     __put_user(host_tx->tai, &target_tx->tai);
6605 
6606     unlock_user_struct(target_tx, target_addr, 1);
6607     return 0;
6608 }
6609 
6610 
6611 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6612                                                abi_ulong target_addr)
6613 {
6614     struct target_sigevent *target_sevp;
6615 
6616     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6617         return -TARGET_EFAULT;
6618     }
6619 
6620     /* This union is awkward on 64 bit systems because it has a 32 bit
6621      * integer and a pointer in it; we follow the conversion approach
6622      * used for handling sigval types in signal.c so the guest should get
6623      * the correct value back even if we did a 64 bit byteswap and it's
6624      * using the 32 bit integer.
6625      */
6626     host_sevp->sigev_value.sival_ptr =
6627         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6628     host_sevp->sigev_signo =
6629         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6630     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6631     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6632 
6633     unlock_user_struct(target_sevp, target_addr, 1);
6634     return 0;
6635 }
6636 
6637 #if defined(TARGET_NR_mlockall)
6638 static inline int target_to_host_mlockall_arg(int arg)
6639 {
6640     int result = 0;
6641 
6642     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6643         result |= MCL_CURRENT;
6644     }
6645     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6646         result |= MCL_FUTURE;
6647     }
6648     return result;
6649 }
6650 #endif
6651 
6652 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6653      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6654      defined(TARGET_NR_newfstatat))
6655 static inline abi_long host_to_target_stat64(void *cpu_env,
6656                                              abi_ulong target_addr,
6657                                              struct stat *host_st)
6658 {
6659 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6660     if (((CPUARMState *)cpu_env)->eabi) {
6661         struct target_eabi_stat64 *target_st;
6662 
6663         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6664             return -TARGET_EFAULT;
6665         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6666         __put_user(host_st->st_dev, &target_st->st_dev);
6667         __put_user(host_st->st_ino, &target_st->st_ino);
6668 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6669         __put_user(host_st->st_ino, &target_st->__st_ino);
6670 #endif
6671         __put_user(host_st->st_mode, &target_st->st_mode);
6672         __put_user(host_st->st_nlink, &target_st->st_nlink);
6673         __put_user(host_st->st_uid, &target_st->st_uid);
6674         __put_user(host_st->st_gid, &target_st->st_gid);
6675         __put_user(host_st->st_rdev, &target_st->st_rdev);
6676         __put_user(host_st->st_size, &target_st->st_size);
6677         __put_user(host_st->st_blksize, &target_st->st_blksize);
6678         __put_user(host_st->st_blocks, &target_st->st_blocks);
6679         __put_user(host_st->st_atime, &target_st->target_st_atime);
6680         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6681         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6682 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6683         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6684         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6685         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6686 #endif
6687         unlock_user_struct(target_st, target_addr, 1);
6688     } else
6689 #endif
6690     {
6691 #if defined(TARGET_HAS_STRUCT_STAT64)
6692         struct target_stat64 *target_st;
6693 #else
6694         struct target_stat *target_st;
6695 #endif
6696 
6697         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6698             return -TARGET_EFAULT;
6699         memset(target_st, 0, sizeof(*target_st));
6700         __put_user(host_st->st_dev, &target_st->st_dev);
6701         __put_user(host_st->st_ino, &target_st->st_ino);
6702 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6703         __put_user(host_st->st_ino, &target_st->__st_ino);
6704 #endif
6705         __put_user(host_st->st_mode, &target_st->st_mode);
6706         __put_user(host_st->st_nlink, &target_st->st_nlink);
6707         __put_user(host_st->st_uid, &target_st->st_uid);
6708         __put_user(host_st->st_gid, &target_st->st_gid);
6709         __put_user(host_st->st_rdev, &target_st->st_rdev);
6710         /* XXX: better use of kernel struct */
6711         __put_user(host_st->st_size, &target_st->st_size);
6712         __put_user(host_st->st_blksize, &target_st->st_blksize);
6713         __put_user(host_st->st_blocks, &target_st->st_blocks);
6714         __put_user(host_st->st_atime, &target_st->target_st_atime);
6715         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6716         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6717 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6718         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6719         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6720         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6721 #endif
6722         unlock_user_struct(target_st, target_addr, 1);
6723     }
6724 
6725     return 0;
6726 }
6727 #endif
6728 
6729 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6730 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6731                                             abi_ulong target_addr)
6732 {
6733     struct target_statx *target_stx;
6734 
6735     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6736         return -TARGET_EFAULT;
6737     }
6738     memset(target_stx, 0, sizeof(*target_stx));
6739 
6740     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6741     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6742     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6743     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6744     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6745     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6746     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6747     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6748     __put_user(host_stx->stx_size, &target_stx->stx_size);
6749     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6750     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6751     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6752     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6753     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6754     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6755     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6756     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6757     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6758     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6759     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6760     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6761     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6762     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6763 
6764     unlock_user_struct(target_stx, target_addr, 1);
6765 
6766     return 0;
6767 }
6768 #endif
6769 
6770 
6771 /* ??? Using host futex calls even when target atomic operations
6772    are not really atomic probably breaks things.  However, implementing
6773    futexes locally would make futexes shared between multiple processes
6774    tricky.  In any case such shared futexes are probably useless because
6775    guest atomic operations won't work either.  */
6776 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6777                     target_ulong uaddr2, int val3)
6778 {
6779     struct timespec ts, *pts;
6780     int base_op;
6781 
6782     /* ??? We assume FUTEX_* constants are the same on both host
6783        and target.  */
6784 #ifdef FUTEX_CMD_MASK
6785     base_op = op & FUTEX_CMD_MASK;
6786 #else
6787     base_op = op;
6788 #endif
6789     switch (base_op) {
6790     case FUTEX_WAIT:
6791     case FUTEX_WAIT_BITSET:
6792         if (timeout) {
6793             pts = &ts;
6794             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
6795         } else {
6796             pts = NULL;
6797         }
6798         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6799                          pts, NULL, val3));
6800     case FUTEX_WAKE:
6801         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6802     case FUTEX_FD:
6803         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6804     case FUTEX_REQUEUE:
6805     case FUTEX_CMP_REQUEUE:
6806     case FUTEX_WAKE_OP:
6807         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6808            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6809            But the prototype takes a `struct timespec *'; insert casts
6810            to satisfy the compiler.  We do not need to tswap TIMEOUT
6811            since it's not compared to guest memory.  */
6812         pts = (struct timespec *)(uintptr_t) timeout;
6813         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6814                                     g2h(uaddr2),
6815                                     (base_op == FUTEX_CMP_REQUEUE
6816                                      ? tswap32(val3)
6817                                      : val3)));
6818     default:
6819         return -TARGET_ENOSYS;
6820     }
6821 }
6822 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6823 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6824                                      abi_long handle, abi_long mount_id,
6825                                      abi_long flags)
6826 {
6827     struct file_handle *target_fh;
6828     struct file_handle *fh;
6829     int mid = 0;
6830     abi_long ret;
6831     char *name;
6832     unsigned int size, total_size;
6833 
6834     if (get_user_s32(size, handle)) {
6835         return -TARGET_EFAULT;
6836     }
6837 
6838     name = lock_user_string(pathname);
6839     if (!name) {
6840         return -TARGET_EFAULT;
6841     }
6842 
6843     total_size = sizeof(struct file_handle) + size;
6844     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6845     if (!target_fh) {
6846         unlock_user(name, pathname, 0);
6847         return -TARGET_EFAULT;
6848     }
6849 
6850     fh = g_malloc0(total_size);
6851     fh->handle_bytes = size;
6852 
6853     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6854     unlock_user(name, pathname, 0);
6855 
6856     /* man name_to_handle_at(2):
6857      * Other than the use of the handle_bytes field, the caller should treat
6858      * the file_handle structure as an opaque data type
6859      */
6860 
6861     memcpy(target_fh, fh, total_size);
6862     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6863     target_fh->handle_type = tswap32(fh->handle_type);
6864     g_free(fh);
6865     unlock_user(target_fh, handle, total_size);
6866 
6867     if (put_user_s32(mid, mount_id)) {
6868         return -TARGET_EFAULT;
6869     }
6870 
6871     return ret;
6872 
6873 }
6874 #endif
6875 
6876 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6877 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6878                                      abi_long flags)
6879 {
6880     struct file_handle *target_fh;
6881     struct file_handle *fh;
6882     unsigned int size, total_size;
6883     abi_long ret;
6884 
6885     if (get_user_s32(size, handle)) {
6886         return -TARGET_EFAULT;
6887     }
6888 
6889     total_size = sizeof(struct file_handle) + size;
6890     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6891     if (!target_fh) {
6892         return -TARGET_EFAULT;
6893     }
6894 
6895     fh = g_memdup(target_fh, total_size);
6896     fh->handle_bytes = size;
6897     fh->handle_type = tswap32(target_fh->handle_type);
6898 
6899     ret = get_errno(open_by_handle_at(mount_fd, fh,
6900                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6901 
6902     g_free(fh);
6903 
6904     unlock_user(target_fh, handle, total_size);
6905 
6906     return ret;
6907 }
6908 #endif
6909 
6910 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6911 
6912 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6913 {
6914     int host_flags;
6915     target_sigset_t *target_mask;
6916     sigset_t host_mask;
6917     abi_long ret;
6918 
6919     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6920         return -TARGET_EINVAL;
6921     }
6922     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6923         return -TARGET_EFAULT;
6924     }
6925 
6926     target_to_host_sigset(&host_mask, target_mask);
6927 
6928     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6929 
6930     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6931     if (ret >= 0) {
6932         fd_trans_register(ret, &target_signalfd_trans);
6933     }
6934 
6935     unlock_user_struct(target_mask, mask, 0);
6936 
6937     return ret;
6938 }
6939 #endif
6940 
6941 /* Map host to target signal numbers for the wait family of syscalls.
6942    Assume all other status bits are the same.  */
6943 int host_to_target_waitstatus(int status)
6944 {
6945     if (WIFSIGNALED(status)) {
6946         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6947     }
6948     if (WIFSTOPPED(status)) {
6949         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6950                | (status & 0xff);
6951     }
6952     return status;
6953 }
6954 
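/*
 * The open_self_*() helpers below synthesize the contents of various
 * /proc/self/ files from QEMU's own record of the guest state, so the
 * guest sees values consistent with the emulated process rather than
 * with the QEMU host process.
 */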
6955 static int open_self_cmdline(void *cpu_env, int fd)
6956 {
6957     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6958     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6959     int i;
6960 
6961     for (i = 0; i < bprm->argc; i++) {
6962         size_t len = strlen(bprm->argv[i]) + 1;
6963 
6964         if (write(fd, bprm->argv[i], len) != len) {
6965             return -1;
6966         }
6967     }
6968 
6969     return 0;
6970 }
6971 
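/*
 * Produce a guest view of /proc/self/maps: parse the host maps file,
 * keep only ranges that correspond to guest memory, and rewrite the
 * addresses with h2g() so they match the guest address space.
 */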
6972 static int open_self_maps(void *cpu_env, int fd)
6973 {
6974     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6975     TaskState *ts = cpu->opaque;
6976     FILE *fp;
6977     char *line = NULL;
6978     size_t len = 0;
6979     ssize_t read;
6980 
6981     fp = fopen("/proc/self/maps", "r");
6982     if (fp == NULL) {
6983         return -1;
6984     }
6985 
6986     while ((read = getline(&line, &len, fp)) != -1) {
6987         int fields, dev_maj, dev_min, inode;
6988         uint64_t min, max, offset;
6989         char flag_r, flag_w, flag_x, flag_p;
6990         char path[512] = "";
6991         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6992                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6993                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6994 
6995         if ((fields < 10) || (fields > 11)) {
6996             continue;
6997         }
6998         if (h2g_valid(min)) {
6999             int flags = page_get_flags(h2g(min));
7000             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7001             if (page_check_range(h2g(min), max - min, flags) == -1) {
7002                 continue;
7003             }
7004             if (h2g(min) == ts->info->stack_limit) {
7005                 pstrcpy(path, sizeof(path), "      [stack]");
7006             }
7007             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7008                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7009                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7010                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7011                     path[0] ? "         " : "", path);
7012         }
7013     }
7014 
7015     free(line);
7016     fclose(fp);
7017 
7018     return 0;
7019 }
7020 
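/*
 * Produce a minimal /proc/self/stat: only the pid, the command name
 * and the start-of-stack fields are filled in; every other field is
 * reported as 0.
 */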
7021 static int open_self_stat(void *cpu_env, int fd)
7022 {
7023     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7024     TaskState *ts = cpu->opaque;
7025     abi_ulong start_stack = ts->info->start_stack;
7026     int i;
7027 
7028     for (i = 0; i < 44; i++) {
7029       char buf[128];
7030       int len;
7031       uint64_t val = 0;
7032 
7033       if (i == 0) {
7034         /* pid */
7035         val = getpid();
7036         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7037       } else if (i == 1) {
7038         /* app name */
7039         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7040       } else if (i == 27) {
7041         /* stack bottom */
7042         val = start_stack;
7043         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7044       } else {
7045         /* all other fields are not emulated and are reported as 0 */
7046         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7047       }
7048 
7049       len = strlen(buf);
7050       if (write(fd, buf, len) != len) {
7051           return -1;
7052       }
7053     }
7054 
7055     return 0;
7056 }
7057 
7058 static int open_self_auxv(void *cpu_env, int fd)
7059 {
7060     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7061     TaskState *ts = cpu->opaque;
7062     abi_ulong auxv = ts->info->saved_auxv;
7063     abi_ulong len = ts->info->auxv_len;
7064     char *ptr;
7065 
7066     /*
7067      * The auxiliary vector is stored on the target process stack.
7068      * Read in the whole auxv vector and copy it to the file.
7069      */
7070     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7071     if (ptr != NULL) {
7072         while (len > 0) {
7073             ssize_t r;
7074             r = write(fd, ptr, len);
7075             if (r <= 0) {
7076                 break;
7077             }
7078             len -= r;
7079             ptr += r;
7080         }
7081         lseek(fd, 0, SEEK_SET);
7082         unlock_user(ptr, auxv, len);
7083     }
7084 
7085     return 0;
7086 }
7087 
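/*
 * Return nonzero if filename names the given entry under /proc/self/
 * or /proc/<pid>/ for our own pid, i.e. a file describing the
 * current process.
 */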
7088 static int is_proc_myself(const char *filename, const char *entry)
7089 {
7090     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7091         filename += strlen("/proc/");
7092         if (!strncmp(filename, "self/", strlen("self/"))) {
7093             filename += strlen("self/");
7094         } else if (*filename >= '1' && *filename <= '9') {
7095             char myself[80];
7096             snprintf(myself, sizeof(myself), "%d/", getpid());
7097             if (!strncmp(filename, myself, strlen(myself))) {
7098                 filename += strlen(myself);
7099             } else {
7100                 return 0;
7101             }
7102         } else {
7103             return 0;
7104         }
7105         if (!strcmp(filename, entry)) {
7106             return 1;
7107         }
7108     }
7109     return 0;
7110 }
7111 
7112 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7113     defined(TARGET_SPARC) || defined(TARGET_M68K)
7114 static int is_proc(const char *filename, const char *entry)
7115 {
7116     return strcmp(filename, entry) == 0;
7117 }
7118 #endif
7119 
7120 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7121 static int open_net_route(void *cpu_env, int fd)
7122 {
7123     FILE *fp;
7124     char *line = NULL;
7125     size_t len = 0;
7126     ssize_t read;
7127 
7128     fp = fopen("/proc/net/route", "r");
7129     if (fp == NULL) {
7130         return -1;
7131     }
7132 
7133     /* read header */
7134 
7135     read = getline(&line, &len, fp);
7136     dprintf(fd, "%s", line);
7137 
7138     /* read routes */
7139 
7140     while ((read = getline(&line, &len, fp)) != -1) {
7141         char iface[16];
7142         uint32_t dest, gw, mask;
7143         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7144         int fields;
7145 
7146         fields = sscanf(line,
7147                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7148                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7149                         &mask, &mtu, &window, &irtt);
7150         if (fields != 11) {
7151             continue;
7152         }
7153         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7154                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7155                 metric, tswap32(mask), mtu, window, irtt);
7156     }
7157 
7158     free(line);
7159     fclose(fp);
7160 
7161     return 0;
7162 }
7163 #endif
7164 
7165 #if defined(TARGET_SPARC)
7166 static int open_cpuinfo(void *cpu_env, int fd)
7167 {
7168     dprintf(fd, "type\t\t: sun4u\n");
7169     return 0;
7170 }
7171 #endif
7172 
7173 #if defined(TARGET_M68K)
7174 static int open_hardware(void *cpu_env, int fd)
7175 {
7176     dprintf(fd, "Model:\t\tqemu-m68k\n");
7177     return 0;
7178 }
7179 #endif
7180 
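/*
 * openat() emulation.  Opens of certain /proc files that must reflect
 * the guest rather than the QEMU process are intercepted: their
 * contents are generated into an unlinked temporary file whose
 * descriptor is returned to the guest (and /proc/self/exe is remapped
 * to the guest executable).  Everything else is passed through to the
 * host openat().
 */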
7181 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7182 {
7183     struct fake_open {
7184         const char *filename;
7185         int (*fill)(void *cpu_env, int fd);
7186         int (*cmp)(const char *s1, const char *s2);
7187     };
7188     const struct fake_open *fake_open;
7189     static const struct fake_open fakes[] = {
7190         { "maps", open_self_maps, is_proc_myself },
7191         { "stat", open_self_stat, is_proc_myself },
7192         { "auxv", open_self_auxv, is_proc_myself },
7193         { "cmdline", open_self_cmdline, is_proc_myself },
7194 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7195         { "/proc/net/route", open_net_route, is_proc },
7196 #endif
7197 #if defined(TARGET_SPARC)
7198         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7199 #endif
7200 #if defined(TARGET_M68K)
7201         { "/proc/hardware", open_hardware, is_proc },
7202 #endif
7203         { NULL, NULL, NULL }
7204     };
7205 
7206     if (is_proc_myself(pathname, "exe")) {
7207         int execfd = qemu_getauxval(AT_EXECFD);
7208         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7209     }
7210 
7211     for (fake_open = fakes; fake_open->filename; fake_open++) {
7212         if (fake_open->cmp(pathname, fake_open->filename)) {
7213             break;
7214         }
7215     }
7216 
7217     if (fake_open->filename) {
7218         const char *tmpdir;
7219         char filename[PATH_MAX];
7220         int fd, r;
7221 
7222         /* create a temporary file to hold the synthesized contents */
7223         tmpdir = getenv("TMPDIR");
7224         if (!tmpdir)
7225             tmpdir = "/tmp";
7226         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7227         fd = mkstemp(filename);
7228         if (fd < 0) {
7229             return fd;
7230         }
7231         unlink(filename);
7232 
7233         if ((r = fake_open->fill(cpu_env, fd))) {
7234             int e = errno;
7235             close(fd);
7236             errno = e;
7237             return r;
7238         }
7239         lseek(fd, 0, SEEK_SET);
7240 
7241         return fd;
7242     }
7243 
7244     return safe_openat(dirfd, path(pathname), flags, mode);
7245 }
7246 
7247 #define TIMER_MAGIC 0x0caf0000
7248 #define TIMER_MAGIC_MASK 0xffff0000
7249 
7250 /* Convert the QEMU-provided timer ID back to the internal 16-bit index format */
7251 static target_timer_t get_timer_id(abi_long arg)
7252 {
7253     target_timer_t timerid = arg;
7254 
7255     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7256         return -TARGET_EINVAL;
7257     }
7258 
7259     timerid &= 0xffff;
7260 
7261     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7262         return -TARGET_EINVAL;
7263     }
7264 
7265     return timerid;
7266 }
7267 
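/*
 * Convert a CPU affinity mask between the target layout (an array of
 * abi_ulong words) and the host layout (an array of unsigned long
 * words), copying the mask bit by bit.
 */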
7268 static int target_to_host_cpu_mask(unsigned long *host_mask,
7269                                    size_t host_size,
7270                                    abi_ulong target_addr,
7271                                    size_t target_size)
7272 {
7273     unsigned target_bits = sizeof(abi_ulong) * 8;
7274     unsigned host_bits = sizeof(*host_mask) * 8;
7275     abi_ulong *target_mask;
7276     unsigned i, j;
7277 
7278     assert(host_size >= target_size);
7279 
7280     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7281     if (!target_mask) {
7282         return -TARGET_EFAULT;
7283     }
7284     memset(host_mask, 0, host_size);
7285 
7286     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7287         unsigned bit = i * target_bits;
7288         abi_ulong val;
7289 
7290         __get_user(val, &target_mask[i]);
7291         for (j = 0; j < target_bits; j++, bit++) {
7292             if (val & (1UL << j)) {
7293                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7294             }
7295         }
7296     }
7297 
7298     unlock_user(target_mask, target_addr, 0);
7299     return 0;
7300 }
7301 
7302 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7303                                    size_t host_size,
7304                                    abi_ulong target_addr,
7305                                    size_t target_size)
7306 {
7307     unsigned target_bits = sizeof(abi_ulong) * 8;
7308     unsigned host_bits = sizeof(*host_mask) * 8;
7309     abi_ulong *target_mask;
7310     unsigned i, j;
7311 
7312     assert(host_size >= target_size);
7313 
7314     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7315     if (!target_mask) {
7316         return -TARGET_EFAULT;
7317     }
7318 
7319     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7320         unsigned bit = i * target_bits;
7321         abi_ulong val = 0;
7322 
7323         for (j = 0; j < target_bits; j++, bit++) {
7324             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7325                 val |= 1UL << j;
7326             }
7327         }
7328         __put_user(val, &target_mask[i]);
7329     }
7330 
7331     unlock_user(target_mask, target_addr, target_size);
7332     return 0;
7333 }
7334 
7335 /* This is an internal helper for do_syscall so that it is easier
7336  * to have a single return point at which actions, such as logging
7337  * of syscall results, can be performed.
7338  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7339  */
7340 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7341                             abi_long arg2, abi_long arg3, abi_long arg4,
7342                             abi_long arg5, abi_long arg6, abi_long arg7,
7343                             abi_long arg8)
7344 {
7345     CPUState *cpu = env_cpu(cpu_env);
7346     abi_long ret;
7347 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7348     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7349     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7350     || defined(TARGET_NR_statx)
7351     struct stat st;
7352 #endif
7353 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7354     || defined(TARGET_NR_fstatfs)
7355     struct statfs stfs;
7356 #endif
7357     void *p;
7358 
7359     switch(num) {
7360     case TARGET_NR_exit:
7361         /* In old applications this may be used to implement _exit(2).
7362            However in threaded applictions it is used for thread termination,
7363            and _exit_group is used for application termination.
7364            Do thread termination if we have more then one thread.  */
7365 
7366         if (block_signals()) {
7367             return -TARGET_ERESTARTSYS;
7368         }
7369 
7370         cpu_list_lock();
7371 
7372         if (CPU_NEXT(first_cpu)) {
7373             TaskState *ts;
7374 
7375             /* Remove the CPU from the list.  */
7376             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7377 
7378             cpu_list_unlock();
7379 
7380             ts = cpu->opaque;
7381             if (ts->child_tidptr) {
7382                 put_user_u32(0, ts->child_tidptr);
7383                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7384                           NULL, NULL, 0);
7385             }
7386             thread_cpu = NULL;
7387             object_unref(OBJECT(cpu));
7388             g_free(ts);
7389             rcu_unregister_thread();
7390             pthread_exit(NULL);
7391         }
7392 
7393         cpu_list_unlock();
7394         preexit_cleanup(cpu_env, arg1);
7395         _exit(arg1);
7396         return 0; /* avoid warning */
7397     case TARGET_NR_read:
7398         if (arg2 == 0 && arg3 == 0) {
7399             return get_errno(safe_read(arg1, 0, 0));
7400         } else {
7401             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7402                 return -TARGET_EFAULT;
7403             ret = get_errno(safe_read(arg1, p, arg3));
7404             if (ret >= 0 &&
7405                 fd_trans_host_to_target_data(arg1)) {
7406                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7407             }
7408             unlock_user(p, arg2, ret);
7409         }
7410         return ret;
7411     case TARGET_NR_write:
7412         if (arg2 == 0 && arg3 == 0) {
7413             return get_errno(safe_write(arg1, 0, 0));
7414         }
7415         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7416             return -TARGET_EFAULT;
7417         if (fd_trans_target_to_host_data(arg1)) {
7418             void *copy = g_malloc(arg3);
7419             memcpy(copy, p, arg3);
7420             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7421             if (ret >= 0) {
7422                 ret = get_errno(safe_write(arg1, copy, ret));
7423             }
7424             g_free(copy);
7425         } else {
7426             ret = get_errno(safe_write(arg1, p, arg3));
7427         }
7428         unlock_user(p, arg2, 0);
7429         return ret;
7430 
7431 #ifdef TARGET_NR_open
7432     case TARGET_NR_open:
7433         if (!(p = lock_user_string(arg1)))
7434             return -TARGET_EFAULT;
7435         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7436                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7437                                   arg3));
7438         fd_trans_unregister(ret);
7439         unlock_user(p, arg1, 0);
7440         return ret;
7441 #endif
7442     case TARGET_NR_openat:
7443         if (!(p = lock_user_string(arg2)))
7444             return -TARGET_EFAULT;
7445         ret = get_errno(do_openat(cpu_env, arg1, p,
7446                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7447                                   arg4));
7448         fd_trans_unregister(ret);
7449         unlock_user(p, arg2, 0);
7450         return ret;
7451 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7452     case TARGET_NR_name_to_handle_at:
7453         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7454         return ret;
7455 #endif
7456 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7457     case TARGET_NR_open_by_handle_at:
7458         ret = do_open_by_handle_at(arg1, arg2, arg3);
7459         fd_trans_unregister(ret);
7460         return ret;
7461 #endif
7462     case TARGET_NR_close:
7463         fd_trans_unregister(arg1);
7464         return get_errno(close(arg1));
7465 
7466     case TARGET_NR_brk:
7467         return do_brk(arg1);
7468 #ifdef TARGET_NR_fork
7469     case TARGET_NR_fork:
7470         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7471 #endif
7472 #ifdef TARGET_NR_waitpid
7473     case TARGET_NR_waitpid:
7474         {
7475             int status;
7476             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7477             if (!is_error(ret) && arg2 && ret
7478                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7479                 return -TARGET_EFAULT;
7480         }
7481         return ret;
7482 #endif
7483 #ifdef TARGET_NR_waitid
7484     case TARGET_NR_waitid:
7485         {
7486             siginfo_t info;
7487             info.si_pid = 0;
7488             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7489             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7490                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7491                     return -TARGET_EFAULT;
7492                 host_to_target_siginfo(p, &info);
7493                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7494             }
7495         }
7496         return ret;
7497 #endif
7498 #ifdef TARGET_NR_creat /* not on alpha */
7499     case TARGET_NR_creat:
7500         if (!(p = lock_user_string(arg1)))
7501             return -TARGET_EFAULT;
7502         ret = get_errno(creat(p, arg2));
7503         fd_trans_unregister(ret);
7504         unlock_user(p, arg1, 0);
7505         return ret;
7506 #endif
7507 #ifdef TARGET_NR_link
7508     case TARGET_NR_link:
7509         {
7510             void * p2;
7511             p = lock_user_string(arg1);
7512             p2 = lock_user_string(arg2);
7513             if (!p || !p2)
7514                 ret = -TARGET_EFAULT;
7515             else
7516                 ret = get_errno(link(p, p2));
7517             unlock_user(p2, arg2, 0);
7518             unlock_user(p, arg1, 0);
7519         }
7520         return ret;
7521 #endif
7522 #if defined(TARGET_NR_linkat)
7523     case TARGET_NR_linkat:
7524         {
7525             void * p2 = NULL;
7526             if (!arg2 || !arg4)
7527                 return -TARGET_EFAULT;
7528             p  = lock_user_string(arg2);
7529             p2 = lock_user_string(arg4);
7530             if (!p || !p2)
7531                 ret = -TARGET_EFAULT;
7532             else
7533                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7534             unlock_user(p, arg2, 0);
7535             unlock_user(p2, arg4, 0);
7536         }
7537         return ret;
7538 #endif
7539 #ifdef TARGET_NR_unlink
7540     case TARGET_NR_unlink:
7541         if (!(p = lock_user_string(arg1)))
7542             return -TARGET_EFAULT;
7543         ret = get_errno(unlink(p));
7544         unlock_user(p, arg1, 0);
7545         return ret;
7546 #endif
7547 #if defined(TARGET_NR_unlinkat)
7548     case TARGET_NR_unlinkat:
7549         if (!(p = lock_user_string(arg2)))
7550             return -TARGET_EFAULT;
7551         ret = get_errno(unlinkat(arg1, p, arg3));
7552         unlock_user(p, arg2, 0);
7553         return ret;
7554 #endif
7555     case TARGET_NR_execve:
7556         {
7557             char **argp, **envp;
7558             int argc, envc;
7559             abi_ulong gp;
7560             abi_ulong guest_argp;
7561             abi_ulong guest_envp;
7562             abi_ulong addr;
7563             char **q;
7564             int total_size = 0;
7565 
7566             argc = 0;
7567             guest_argp = arg2;
7568             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7569                 if (get_user_ual(addr, gp))
7570                     return -TARGET_EFAULT;
7571                 if (!addr)
7572                     break;
7573                 argc++;
7574             }
7575             envc = 0;
7576             guest_envp = arg3;
7577             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7578                 if (get_user_ual(addr, gp))
7579                     return -TARGET_EFAULT;
7580                 if (!addr)
7581                     break;
7582                 envc++;
7583             }
7584 
7585             argp = g_new0(char *, argc + 1);
7586             envp = g_new0(char *, envc + 1);
7587 
7588             for (gp = guest_argp, q = argp; gp;
7589                   gp += sizeof(abi_ulong), q++) {
7590                 if (get_user_ual(addr, gp))
7591                     goto execve_efault;
7592                 if (!addr)
7593                     break;
7594                 if (!(*q = lock_user_string(addr)))
7595                     goto execve_efault;
7596                 total_size += strlen(*q) + 1;
7597             }
7598             *q = NULL;
7599 
7600             for (gp = guest_envp, q = envp; gp;
7601                   gp += sizeof(abi_ulong), q++) {
7602                 if (get_user_ual(addr, gp))
7603                     goto execve_efault;
7604                 if (!addr)
7605                     break;
7606                 if (!(*q = lock_user_string(addr)))
7607                     goto execve_efault;
7608                 total_size += strlen(*q) + 1;
7609             }
7610             *q = NULL;
7611 
7612             if (!(p = lock_user_string(arg1)))
7613                 goto execve_efault;
7614             /* Although execve() is not an interruptible syscall it is
7615              * a special case where we must use the safe_syscall wrapper:
7616              * if we allow a signal to happen before we make the host
7617              * syscall then we will 'lose' it, because at the point of
7618              * execve the process leaves QEMU's control. So we use the
7619              * safe syscall wrapper to ensure that we either take the
7620              * signal as a guest signal, or else it does not happen
7621              * before the execve completes and makes it the other
7622              * program's problem.
7623              */
7624             ret = get_errno(safe_execve(p, argp, envp));
7625             unlock_user(p, arg1, 0);
7626 
7627             goto execve_end;
7628 
7629         execve_efault:
7630             ret = -TARGET_EFAULT;
7631 
7632         execve_end:
7633             for (gp = guest_argp, q = argp; *q;
7634                   gp += sizeof(abi_ulong), q++) {
7635                 if (get_user_ual(addr, gp)
7636                     || !addr)
7637                     break;
7638                 unlock_user(*q, addr, 0);
7639             }
7640             for (gp = guest_envp, q = envp; *q;
7641                   gp += sizeof(abi_ulong), q++) {
7642                 if (get_user_ual(addr, gp)
7643                     || !addr)
7644                     break;
7645                 unlock_user(*q, addr, 0);
7646             }
7647 
7648             g_free(argp);
7649             g_free(envp);
7650         }
7651         return ret;
7652     case TARGET_NR_chdir:
7653         if (!(p = lock_user_string(arg1)))
7654             return -TARGET_EFAULT;
7655         ret = get_errno(chdir(p));
7656         unlock_user(p, arg1, 0);
7657         return ret;
7658 #ifdef TARGET_NR_time
7659     case TARGET_NR_time:
7660         {
7661             time_t host_time;
7662             ret = get_errno(time(&host_time));
7663             if (!is_error(ret)
7664                 && arg1
7665                 && put_user_sal(host_time, arg1))
7666                 return -TARGET_EFAULT;
7667         }
7668         return ret;
7669 #endif
7670 #ifdef TARGET_NR_mknod
7671     case TARGET_NR_mknod:
7672         if (!(p = lock_user_string(arg1)))
7673             return -TARGET_EFAULT;
7674         ret = get_errno(mknod(p, arg2, arg3));
7675         unlock_user(p, arg1, 0);
7676         return ret;
7677 #endif
7678 #if defined(TARGET_NR_mknodat)
7679     case TARGET_NR_mknodat:
7680         if (!(p = lock_user_string(arg2)))
7681             return -TARGET_EFAULT;
7682         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7683         unlock_user(p, arg2, 0);
7684         return ret;
7685 #endif
7686 #ifdef TARGET_NR_chmod
7687     case TARGET_NR_chmod:
7688         if (!(p = lock_user_string(arg1)))
7689             return -TARGET_EFAULT;
7690         ret = get_errno(chmod(p, arg2));
7691         unlock_user(p, arg1, 0);
7692         return ret;
7693 #endif
7694 #ifdef TARGET_NR_lseek
7695     case TARGET_NR_lseek:
7696         return get_errno(lseek(arg1, arg2, arg3));
7697 #endif
7698 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7699     /* Alpha specific */
7700     case TARGET_NR_getxpid:
7701         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7702         return get_errno(getpid());
7703 #endif
7704 #ifdef TARGET_NR_getpid
7705     case TARGET_NR_getpid:
7706         return get_errno(getpid());
7707 #endif
7708     case TARGET_NR_mount:
7709         {
7710             /* need to look at the data field */
7711             void *p2, *p3;
7712 
7713             if (arg1) {
7714                 p = lock_user_string(arg1);
7715                 if (!p) {
7716                     return -TARGET_EFAULT;
7717                 }
7718             } else {
7719                 p = NULL;
7720             }
7721 
7722             p2 = lock_user_string(arg2);
7723             if (!p2) {
7724                 if (arg1) {
7725                     unlock_user(p, arg1, 0);
7726                 }
7727                 return -TARGET_EFAULT;
7728             }
7729 
7730             if (arg3) {
7731                 p3 = lock_user_string(arg3);
7732                 if (!p3) {
7733                     if (arg1) {
7734                         unlock_user(p, arg1, 0);
7735                     }
7736                     unlock_user(p2, arg2, 0);
7737                     return -TARGET_EFAULT;
7738                 }
7739             } else {
7740                 p3 = NULL;
7741             }
7742 
7743             /* FIXME - arg5 should be locked, but it isn't clear how to
7744              * do that since it's not guaranteed to be a NULL-terminated
7745              * string.
7746              */
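            /* For now pass the data argument through untranslated via g2h(). */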
7747             if (!arg5) {
7748                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7749             } else {
7750                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7751             }
7752             ret = get_errno(ret);
7753 
7754             if (arg1) {
7755                 unlock_user(p, arg1, 0);
7756             }
7757             unlock_user(p2, arg2, 0);
7758             if (arg3) {
7759                 unlock_user(p3, arg3, 0);
7760             }
7761         }
7762         return ret;
7763 #ifdef TARGET_NR_umount
7764     case TARGET_NR_umount:
7765         if (!(p = lock_user_string(arg1)))
7766             return -TARGET_EFAULT;
7767         ret = get_errno(umount(p));
7768         unlock_user(p, arg1, 0);
7769         return ret;
7770 #endif
7771 #ifdef TARGET_NR_stime /* not on alpha */
7772     case TARGET_NR_stime:
7773         {
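            /* Emulated with clock_settime(CLOCK_REALTIME); the host stime()
             * interface is not universally available. */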
7774             struct timespec ts;
7775             ts.tv_nsec = 0;
7776             if (get_user_sal(ts.tv_sec, arg1)) {
7777                 return -TARGET_EFAULT;
7778             }
7779             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7780         }
7781 #endif
7782 #ifdef TARGET_NR_alarm /* not on alpha */
7783     case TARGET_NR_alarm:
7784         return alarm(arg1);
7785 #endif
7786 #ifdef TARGET_NR_pause /* not on alpha */
7787     case TARGET_NR_pause:
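        /* pause() can only return once a signal has been delivered, so it
         * always reports -EINTR after waking from sigsuspend(). */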
7788         if (!block_signals()) {
7789             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7790         }
7791         return -TARGET_EINTR;
7792 #endif
7793 #ifdef TARGET_NR_utime
7794     case TARGET_NR_utime:
7795         {
7796             struct utimbuf tbuf, *host_tbuf;
7797             struct target_utimbuf *target_tbuf;
7798             if (arg2) {
7799                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7800                     return -TARGET_EFAULT;
7801                 tbuf.actime = tswapal(target_tbuf->actime);
7802                 tbuf.modtime = tswapal(target_tbuf->modtime);
7803                 unlock_user_struct(target_tbuf, arg2, 0);
7804                 host_tbuf = &tbuf;
7805             } else {
7806                 host_tbuf = NULL;
7807             }
7808             if (!(p = lock_user_string(arg1)))
7809                 return -TARGET_EFAULT;
7810             ret = get_errno(utime(p, host_tbuf));
7811             unlock_user(p, arg1, 0);
7812         }
7813         return ret;
7814 #endif
7815 #ifdef TARGET_NR_utimes
7816     case TARGET_NR_utimes:
7817         {
7818             struct timeval *tvp, tv[2];
7819             if (arg2) {
7820                 if (copy_from_user_timeval(&tv[0], arg2)
7821                     || copy_from_user_timeval(&tv[1],
7822                                               arg2 + sizeof(struct target_timeval)))
7823                     return -TARGET_EFAULT;
7824                 tvp = tv;
7825             } else {
7826                 tvp = NULL;
7827             }
7828             if (!(p = lock_user_string(arg1)))
7829                 return -TARGET_EFAULT;
7830             ret = get_errno(utimes(p, tvp));
7831             unlock_user(p, arg1, 0);
7832         }
7833         return ret;
7834 #endif
7835 #if defined(TARGET_NR_futimesat)
7836     case TARGET_NR_futimesat:
7837         {
7838             struct timeval *tvp, tv[2];
7839             if (arg3) {
7840                 if (copy_from_user_timeval(&tv[0], arg3)
7841                     || copy_from_user_timeval(&tv[1],
7842                                               arg3 + sizeof(struct target_timeval)))
7843                     return -TARGET_EFAULT;
7844                 tvp = tv;
7845             } else {
7846                 tvp = NULL;
7847             }
7848             if (!(p = lock_user_string(arg2))) {
7849                 return -TARGET_EFAULT;
7850             }
7851             ret = get_errno(futimesat(arg1, path(p), tvp));
7852             unlock_user(p, arg2, 0);
7853         }
7854         return ret;
7855 #endif
7856 #ifdef TARGET_NR_access
7857     case TARGET_NR_access:
7858         if (!(p = lock_user_string(arg1))) {
7859             return -TARGET_EFAULT;
7860         }
7861         ret = get_errno(access(path(p), arg2));
7862         unlock_user(p, arg1, 0);
7863         return ret;
7864 #endif
7865 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7866     case TARGET_NR_faccessat:
7867         if (!(p = lock_user_string(arg2))) {
7868             return -TARGET_EFAULT;
7869         }
7870         ret = get_errno(faccessat(arg1, p, arg3, 0));
7871         unlock_user(p, arg2, 0);
7872         return ret;
7873 #endif
7874 #ifdef TARGET_NR_nice /* not on alpha */
7875     case TARGET_NR_nice:
7876         return get_errno(nice(arg1));
7877 #endif
7878     case TARGET_NR_sync:
7879         sync();
7880         return 0;
7881 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7882     case TARGET_NR_syncfs:
7883         return get_errno(syncfs(arg1));
7884 #endif
7885     case TARGET_NR_kill:
7886         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7887 #ifdef TARGET_NR_rename
7888     case TARGET_NR_rename:
7889         {
7890             void *p2;
7891             p = lock_user_string(arg1);
7892             p2 = lock_user_string(arg2);
7893             if (!p || !p2)
7894                 ret = -TARGET_EFAULT;
7895             else
7896                 ret = get_errno(rename(p, p2));
7897             unlock_user(p2, arg2, 0);
7898             unlock_user(p, arg1, 0);
7899         }
7900         return ret;
7901 #endif
7902 #if defined(TARGET_NR_renameat)
7903     case TARGET_NR_renameat:
7904         {
7905             void *p2;
7906             p  = lock_user_string(arg2);
7907             p2 = lock_user_string(arg4);
7908             if (!p || !p2)
7909                 ret = -TARGET_EFAULT;
7910             else
7911                 ret = get_errno(renameat(arg1, p, arg3, p2));
7912             unlock_user(p2, arg4, 0);
7913             unlock_user(p, arg2, 0);
7914         }
7915         return ret;
7916 #endif
7917 #if defined(TARGET_NR_renameat2)
7918     case TARGET_NR_renameat2:
7919         {
7920             void *p2;
7921             p  = lock_user_string(arg2);
7922             p2 = lock_user_string(arg4);
7923             if (!p || !p2) {
7924                 ret = -TARGET_EFAULT;
7925             } else {
7926                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7927             }
7928             unlock_user(p2, arg4, 0);
7929             unlock_user(p, arg2, 0);
7930         }
7931         return ret;
7932 #endif
7933 #ifdef TARGET_NR_mkdir
7934     case TARGET_NR_mkdir:
7935         if (!(p = lock_user_string(arg1)))
7936             return -TARGET_EFAULT;
7937         ret = get_errno(mkdir(p, arg2));
7938         unlock_user(p, arg1, 0);
7939         return ret;
7940 #endif
7941 #if defined(TARGET_NR_mkdirat)
7942     case TARGET_NR_mkdirat:
7943         if (!(p = lock_user_string(arg2)))
7944             return -TARGET_EFAULT;
7945         ret = get_errno(mkdirat(arg1, p, arg3));
7946         unlock_user(p, arg2, 0);
7947         return ret;
7948 #endif
7949 #ifdef TARGET_NR_rmdir
7950     case TARGET_NR_rmdir:
7951         if (!(p = lock_user_string(arg1)))
7952             return -TARGET_EFAULT;
7953         ret = get_errno(rmdir(p));
7954         unlock_user(p, arg1, 0);
7955         return ret;
7956 #endif
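    /* For dup/dup2/dup3, any fd translator (see fd-trans.h) registered for
     * the old descriptor must also cover the newly created one. */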
7957     case TARGET_NR_dup:
7958         ret = get_errno(dup(arg1));
7959         if (ret >= 0) {
7960             fd_trans_dup(arg1, ret);
7961         }
7962         return ret;
7963 #ifdef TARGET_NR_pipe
7964     case TARGET_NR_pipe:
7965         return do_pipe(cpu_env, arg1, 0, 0);
7966 #endif
7967 #ifdef TARGET_NR_pipe2
7968     case TARGET_NR_pipe2:
7969         return do_pipe(cpu_env, arg1,
7970                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7971 #endif
7972     case TARGET_NR_times:
7973         {
7974             struct target_tms *tmsp;
7975             struct tms tms;
7976             ret = get_errno(times(&tms));
7977             if (arg1) {
7978                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7979                 if (!tmsp)
7980                     return -TARGET_EFAULT;
7981                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7982                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7983                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7984                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7985             }
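            /* times() itself returns a tick count; convert that too. */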
7986             if (!is_error(ret))
7987                 ret = host_to_target_clock_t(ret);
7988         }
7989         return ret;
7990     case TARGET_NR_acct:
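        /* A NULL filename disables process accounting. */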
7991         if (arg1 == 0) {
7992             ret = get_errno(acct(NULL));
7993         } else {
7994             if (!(p = lock_user_string(arg1))) {
7995                 return -TARGET_EFAULT;
7996             }
7997             ret = get_errno(acct(path(p)));
7998             unlock_user(p, arg1, 0);
7999         }
8000         return ret;
8001 #ifdef TARGET_NR_umount2
8002     case TARGET_NR_umount2:
8003         if (!(p = lock_user_string(arg1)))
8004             return -TARGET_EFAULT;
8005         ret = get_errno(umount2(p, arg2));
8006         unlock_user(p, arg1, 0);
8007         return ret;
8008 #endif
8009     case TARGET_NR_ioctl:
8010         return do_ioctl(arg1, arg2, arg3);
8011 #ifdef TARGET_NR_fcntl
8012     case TARGET_NR_fcntl:
8013         return do_fcntl(arg1, arg2, arg3);
8014 #endif
8015     case TARGET_NR_setpgid:
8016         return get_errno(setpgid(arg1, arg2));
8017     case TARGET_NR_umask:
8018         return get_errno(umask(arg1));
8019     case TARGET_NR_chroot:
8020         if (!(p = lock_user_string(arg1)))
8021             return -TARGET_EFAULT;
8022         ret = get_errno(chroot(p));
8023         unlock_user(p, arg1, 0);
8024         return ret;
8025 #ifdef TARGET_NR_dup2
8026     case TARGET_NR_dup2:
8027         ret = get_errno(dup2(arg1, arg2));
8028         if (ret >= 0) {
8029             fd_trans_dup(arg1, arg2);
8030         }
8031         return ret;
8032 #endif
8033 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8034     case TARGET_NR_dup3:
8035     {
8036         int host_flags;
8037 
8038         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8039             return -TARGET_EINVAL;
8040         }
8041         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8042         ret = get_errno(dup3(arg1, arg2, host_flags));
8043         if (ret >= 0) {
8044             fd_trans_dup(arg1, arg2);
8045         }
8046         return ret;
8047     }
8048 #endif
8049 #ifdef TARGET_NR_getppid /* not on alpha */
8050     case TARGET_NR_getppid:
8051         return get_errno(getppid());
8052 #endif
8053 #ifdef TARGET_NR_getpgrp
8054     case TARGET_NR_getpgrp:
8055         return get_errno(getpgrp());
8056 #endif
8057     case TARGET_NR_setsid:
8058         return get_errno(setsid());
8059 #ifdef TARGET_NR_sigaction
8060     case TARGET_NR_sigaction:
8061         {
8062 #if defined(TARGET_ALPHA)
8063             struct target_sigaction act, oact, *pact = 0;
8064             struct target_old_sigaction *old_act;
8065             if (arg2) {
8066                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8067                     return -TARGET_EFAULT;
8068                 act._sa_handler = old_act->_sa_handler;
8069                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8070                 act.sa_flags = old_act->sa_flags;
8071                 act.sa_restorer = 0;
8072                 unlock_user_struct(old_act, arg2, 0);
8073                 pact = &act;
8074             }
8075             ret = get_errno(do_sigaction(arg1, pact, &oact));
8076             if (!is_error(ret) && arg3) {
8077                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8078                     return -TARGET_EFAULT;
8079                 old_act->_sa_handler = oact._sa_handler;
8080                 old_act->sa_mask = oact.sa_mask.sig[0];
8081                 old_act->sa_flags = oact.sa_flags;
8082                 unlock_user_struct(old_act, arg3, 1);
8083             }
8084 #elif defined(TARGET_MIPS)
8085             struct target_sigaction act, oact, *pact, *old_act;
8086 
8087             if (arg2) {
8088                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8089                     return -TARGET_EFAULT;
8090                 act._sa_handler = old_act->_sa_handler;
8091                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8092                 act.sa_flags = old_act->sa_flags;
8093                 unlock_user_struct(old_act, arg2, 0);
8094                 pact = &act;
8095             } else {
8096                 pact = NULL;
8097             }
8098 
8099             ret = get_errno(do_sigaction(arg1, pact, &oact));
8100 
8101             if (!is_error(ret) && arg3) {
8102                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8103                     return -TARGET_EFAULT;
8104                 old_act->_sa_handler = oact._sa_handler;
8105                 old_act->sa_flags = oact.sa_flags;
8106                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8107                 old_act->sa_mask.sig[1] = 0;
8108                 old_act->sa_mask.sig[2] = 0;
8109                 old_act->sa_mask.sig[3] = 0;
8110                 unlock_user_struct(old_act, arg3, 1);
8111             }
8112 #else
8113             struct target_old_sigaction *old_act;
8114             struct target_sigaction act, oact, *pact;
8115             if (arg2) {
8116                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8117                     return -TARGET_EFAULT;
8118                 act._sa_handler = old_act->_sa_handler;
8119                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8120                 act.sa_flags = old_act->sa_flags;
8121                 act.sa_restorer = old_act->sa_restorer;
8122 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8123                 act.ka_restorer = 0;
8124 #endif
8125                 unlock_user_struct(old_act, arg2, 0);
8126                 pact = &act;
8127             } else {
8128                 pact = NULL;
8129             }
8130             ret = get_errno(do_sigaction(arg1, pact, &oact));
8131             if (!is_error(ret) && arg3) {
8132                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8133                     return -TARGET_EFAULT;
8134                 old_act->_sa_handler = oact._sa_handler;
8135                 old_act->sa_mask = oact.sa_mask.sig[0];
8136                 old_act->sa_flags = oact.sa_flags;
8137                 old_act->sa_restorer = oact.sa_restorer;
8138                 unlock_user_struct(old_act, arg3, 1);
8139             }
8140 #endif
8141         }
8142         return ret;
8143 #endif
8144     case TARGET_NR_rt_sigaction:
8145         {
8146 #if defined(TARGET_ALPHA)
8147             /* For Alpha and SPARC this is a 5 argument syscall, with
8148              * a 'restorer' parameter which must be copied into the
8149              * sa_restorer field of the sigaction struct.
8150              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8151              * and arg5 is the sigsetsize.
8152              * Alpha also has a separate rt_sigaction struct that it uses
8153              * here; SPARC uses the usual sigaction struct.
8154              */
8155             struct target_rt_sigaction *rt_act;
8156             struct target_sigaction act, oact, *pact = 0;
8157 
8158             if (arg4 != sizeof(target_sigset_t)) {
8159                 return -TARGET_EINVAL;
8160             }
8161             if (arg2) {
8162                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8163                     return -TARGET_EFAULT;
8164                 act._sa_handler = rt_act->_sa_handler;
8165                 act.sa_mask = rt_act->sa_mask;
8166                 act.sa_flags = rt_act->sa_flags;
8167                 act.sa_restorer = arg5;
8168                 unlock_user_struct(rt_act, arg2, 0);
8169                 pact = &act;
8170             }
8171             ret = get_errno(do_sigaction(arg1, pact, &oact));
8172             if (!is_error(ret) && arg3) {
8173                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8174                     return -TARGET_EFAULT;
8175                 rt_act->_sa_handler = oact._sa_handler;
8176                 rt_act->sa_mask = oact.sa_mask;
8177                 rt_act->sa_flags = oact.sa_flags;
8178                 unlock_user_struct(rt_act, arg3, 1);
8179             }
8180 #else
8181 #ifdef TARGET_SPARC
8182             target_ulong restorer = arg4;
8183             target_ulong sigsetsize = arg5;
8184 #else
8185             target_ulong sigsetsize = arg4;
8186 #endif
8187             struct target_sigaction *act;
8188             struct target_sigaction *oact;
8189 
8190             if (sigsetsize != sizeof(target_sigset_t)) {
8191                 return -TARGET_EINVAL;
8192             }
8193             if (arg2) {
8194                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8195                     return -TARGET_EFAULT;
8196                 }
8197 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8198                 act->ka_restorer = restorer;
8199 #endif
8200             } else {
8201                 act = NULL;
8202             }
8203             if (arg3) {
8204                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8205                     ret = -TARGET_EFAULT;
8206                     goto rt_sigaction_fail;
8207                 }
8208             } else
8209                 oact = NULL;
8210             ret = get_errno(do_sigaction(arg1, act, oact));
8211         rt_sigaction_fail:
8212             if (act)
8213                 unlock_user_struct(act, arg2, 0);
8214             if (oact)
8215                 unlock_user_struct(oact, arg3, 1);
8216 #endif
8217         }
8218         return ret;
8219 #ifdef TARGET_NR_sgetmask /* not on alpha */
8220     case TARGET_NR_sgetmask:
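        /* sgetmask/ssetmask use the old single-word signal mask layout,
         * hence the *_old_sigset conversions. */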
8221         {
8222             sigset_t cur_set;
8223             abi_ulong target_set;
8224             ret = do_sigprocmask(0, NULL, &cur_set);
8225             if (!ret) {
8226                 host_to_target_old_sigset(&target_set, &cur_set);
8227                 ret = target_set;
8228             }
8229         }
8230         return ret;
8231 #endif
8232 #ifdef TARGET_NR_ssetmask /* not on alpha */
8233     case TARGET_NR_ssetmask:
8234         {
8235             sigset_t set, oset;
8236             abi_ulong target_set = arg1;
8237             target_to_host_old_sigset(&set, &target_set);
8238             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8239             if (!ret) {
8240                 host_to_target_old_sigset(&target_set, &oset);
8241                 ret = target_set;
8242             }
8243         }
8244         return ret;
8245 #endif
8246 #ifdef TARGET_NR_sigprocmask
8247     case TARGET_NR_sigprocmask:
8248         {
8249 #if defined(TARGET_ALPHA)
8250             sigset_t set, oldset;
8251             abi_ulong mask;
8252             int how;
8253 
8254             switch (arg1) {
8255             case TARGET_SIG_BLOCK:
8256                 how = SIG_BLOCK;
8257                 break;
8258             case TARGET_SIG_UNBLOCK:
8259                 how = SIG_UNBLOCK;
8260                 break;
8261             case TARGET_SIG_SETMASK:
8262                 how = SIG_SETMASK;
8263                 break;
8264             default:
8265                 return -TARGET_EINVAL;
8266             }
8267             mask = arg2;
8268             target_to_host_old_sigset(&set, &mask);
8269 
8270             ret = do_sigprocmask(how, &set, &oldset);
8271             if (!is_error(ret)) {
8272                 host_to_target_old_sigset(&mask, &oldset);
8273                 ret = mask;
8274                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8275             }
8276 #else
8277             sigset_t set, oldset, *set_ptr;
8278             int how;
8279 
8280             if (arg2) {
8281                 switch (arg1) {
8282                 case TARGET_SIG_BLOCK:
8283                     how = SIG_BLOCK;
8284                     break;
8285                 case TARGET_SIG_UNBLOCK:
8286                     how = SIG_UNBLOCK;
8287                     break;
8288                 case TARGET_SIG_SETMASK:
8289                     how = SIG_SETMASK;
8290                     break;
8291                 default:
8292                     return -TARGET_EINVAL;
8293                 }
8294                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8295                     return -TARGET_EFAULT;
8296                 target_to_host_old_sigset(&set, p);
8297                 unlock_user(p, arg2, 0);
8298                 set_ptr = &set;
8299             } else {
8300                 how = 0;
8301                 set_ptr = NULL;
8302             }
8303             ret = do_sigprocmask(how, set_ptr, &oldset);
8304             if (!is_error(ret) && arg3) {
8305                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8306                     return -TARGET_EFAULT;
8307                 host_to_target_old_sigset(p, &oldset);
8308                 unlock_user(p, arg3, sizeof(target_sigset_t));
8309             }
8310 #endif
8311         }
8312         return ret;
8313 #endif
8314     case TARGET_NR_rt_sigprocmask:
8315         {
8316             int how = arg1;
8317             sigset_t set, oldset, *set_ptr;
8318 
8319             if (arg4 != sizeof(target_sigset_t)) {
8320                 return -TARGET_EINVAL;
8321             }
8322 
8323             if (arg2) {
8324                 switch(how) {
8325                 case TARGET_SIG_BLOCK:
8326                     how = SIG_BLOCK;
8327                     break;
8328                 case TARGET_SIG_UNBLOCK:
8329                     how = SIG_UNBLOCK;
8330                     break;
8331                 case TARGET_SIG_SETMASK:
8332                     how = SIG_SETMASK;
8333                     break;
8334                 default:
8335                     return -TARGET_EINVAL;
8336                 }
8337                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8338                     return -TARGET_EFAULT;
8339                 target_to_host_sigset(&set, p);
8340                 unlock_user(p, arg2, 0);
8341                 set_ptr = &set;
8342             } else {
8343                 how = 0;
8344                 set_ptr = NULL;
8345             }
8346             ret = do_sigprocmask(how, set_ptr, &oldset);
8347             if (!is_error(ret) && arg3) {
8348                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8349                     return -TARGET_EFAULT;
8350                 host_to_target_sigset(p, &oldset);
8351                 unlock_user(p, arg3, sizeof(target_sigset_t));
8352             }
8353         }
8354         return ret;
8355 #ifdef TARGET_NR_sigpending
8356     case TARGET_NR_sigpending:
8357         {
8358             sigset_t set;
8359             ret = get_errno(sigpending(&set));
8360             if (!is_error(ret)) {
8361                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8362                     return -TARGET_EFAULT;
8363                 host_to_target_old_sigset(p, &set);
8364                 unlock_user(p, arg1, sizeof(target_sigset_t));
8365             }
8366         }
8367         return ret;
8368 #endif
8369     case TARGET_NR_rt_sigpending:
8370         {
8371             sigset_t set;
8372 
8373             /* Yes, this check is >, not != like most. We follow the kernel's
8374              * logic here: NR_sigpending is implemented through the same
8375              * code path, and in that case the old_sigset_t is smaller
8376              * in size.
8377              */
8378             if (arg2 > sizeof(target_sigset_t)) {
8379                 return -TARGET_EINVAL;
8380             }
8381 
8382             ret = get_errno(sigpending(&set));
8383             if (!is_error(ret)) {
8384                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8385                     return -TARGET_EFAULT;
8386                 host_to_target_sigset(p, &set);
8387                 unlock_user(p, arg1, sizeof(target_sigset_t));
8388             }
8389         }
8390         return ret;
8391 #ifdef TARGET_NR_sigsuspend
8392     case TARGET_NR_sigsuspend:
8393         {
8394             TaskState *ts = cpu->opaque;
8395 #if defined(TARGET_ALPHA)
8396             abi_ulong mask = arg1;
8397             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8398 #else
8399             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8400                 return -TARGET_EFAULT;
8401             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8402             unlock_user(p, arg1, 0);
8403 #endif
8404             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8405                                                SIGSET_T_SIZE));
8406             if (ret != -TARGET_ERESTARTSYS) {
8407                 ts->in_sigsuspend = 1;
8408             }
8409         }
8410         return ret;
8411 #endif
8412     case TARGET_NR_rt_sigsuspend:
8413         {
8414             TaskState *ts = cpu->opaque;
8415 
8416             if (arg2 != sizeof(target_sigset_t)) {
8417                 return -TARGET_EINVAL;
8418             }
8419             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8420                 return -TARGET_EFAULT;
8421             target_to_host_sigset(&ts->sigsuspend_mask, p);
8422             unlock_user(p, arg1, 0);
8423             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8424                                                SIGSET_T_SIZE));
8425             if (ret != -TARGET_ERESTARTSYS) {
8426                 ts->in_sigsuspend = 1;
8427             }
8428         }
8429         return ret;
8430     case TARGET_NR_rt_sigtimedwait:
8431         {
8432             sigset_t set;
8433             struct timespec uts, *puts;
8434             siginfo_t uinfo;
8435 
8436             if (arg4 != sizeof(target_sigset_t)) {
8437                 return -TARGET_EINVAL;
8438             }
8439 
8440             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8441                 return -TARGET_EFAULT;
8442             target_to_host_sigset(&set, p);
8443             unlock_user(p, arg1, 0);
8444             if (arg3) {
8445                 puts = &uts;
8446                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8447             } else {
8448                 puts = NULL;
8449             }
8450             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8451                                                  SIGSET_T_SIZE));
8452             if (!is_error(ret)) {
8453                 if (arg2) {
8454                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8455                                   0);
8456                     if (!p) {
8457                         return -TARGET_EFAULT;
8458                     }
8459                     host_to_target_siginfo(p, &uinfo);
8460                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8461                 }
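                /* On success the result is a host signal number; map it back
                 * to the target's numbering. */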
8462                 ret = host_to_target_signal(ret);
8463             }
8464         }
8465         return ret;
8466     case TARGET_NR_rt_sigqueueinfo:
8467         {
8468             siginfo_t uinfo;
8469 
8470             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8471             if (!p) {
8472                 return -TARGET_EFAULT;
8473             }
8474             target_to_host_siginfo(&uinfo, p);
8475             unlock_user(p, arg3, 0);
8476             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8477         }
8478         return ret;
8479     case TARGET_NR_rt_tgsigqueueinfo:
8480         {
8481             siginfo_t uinfo;
8482 
8483             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8484             if (!p) {
8485                 return -TARGET_EFAULT;
8486             }
8487             target_to_host_siginfo(&uinfo, p);
8488             unlock_user(p, arg4, 0);
8489             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8490         }
8491         return ret;
8492 #ifdef TARGET_NR_sigreturn
8493     case TARGET_NR_sigreturn:
8494         if (block_signals()) {
8495             return -TARGET_ERESTARTSYS;
8496         }
8497         return do_sigreturn(cpu_env);
8498 #endif
8499     case TARGET_NR_rt_sigreturn:
8500         if (block_signals()) {
8501             return -TARGET_ERESTARTSYS;
8502         }
8503         return do_rt_sigreturn(cpu_env);
8504     case TARGET_NR_sethostname:
8505         if (!(p = lock_user_string(arg1)))
8506             return -TARGET_EFAULT;
8507         ret = get_errno(sethostname(p, arg2));
8508         unlock_user(p, arg1, 0);
8509         return ret;
8510 #ifdef TARGET_NR_setrlimit
8511     case TARGET_NR_setrlimit:
8512         {
8513             int resource = target_to_host_resource(arg1);
8514             struct target_rlimit *target_rlim;
8515             struct rlimit rlim;
8516             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8517                 return -TARGET_EFAULT;
8518             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8519             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8520             unlock_user_struct(target_rlim, arg2, 0);
8521             /*
8522              * If we just passed through resource limit settings for memory then
8523              * they would also apply to QEMU's own allocations, and QEMU will
8524              * crash or hang or die if its allocations fail. Ideally we would
8525              * track the guest allocations in QEMU and apply the limits ourselves.
8526              * For now, just tell the guest the call succeeded but don't actually
8527              * limit anything.
8528              */
8529             if (resource != RLIMIT_AS &&
8530                 resource != RLIMIT_DATA &&
8531                 resource != RLIMIT_STACK) {
8532                 return get_errno(setrlimit(resource, &rlim));
8533             } else {
8534                 return 0;
8535             }
8536         }
8537 #endif
8538 #ifdef TARGET_NR_getrlimit
8539     case TARGET_NR_getrlimit:
8540         {
8541             int resource = target_to_host_resource(arg1);
8542             struct target_rlimit *target_rlim;
8543             struct rlimit rlim;
8544 
8545             ret = get_errno(getrlimit(resource, &rlim));
8546             if (!is_error(ret)) {
8547                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8548                     return -TARGET_EFAULT;
8549                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8550                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8551                 unlock_user_struct(target_rlim, arg2, 1);
8552             }
8553         }
8554         return ret;
8555 #endif
8556     case TARGET_NR_getrusage:
8557         {
8558             struct rusage rusage;
8559             ret = get_errno(getrusage(arg1, &rusage));
8560             if (!is_error(ret)) {
8561                 ret = host_to_target_rusage(arg2, &rusage);
8562             }
8563         }
8564         return ret;
8565     case TARGET_NR_gettimeofday:
8566         {
8567             struct timeval tv;
8568             ret = get_errno(gettimeofday(&tv, NULL));
8569             if (!is_error(ret)) {
8570                 if (copy_to_user_timeval(arg1, &tv))
8571                     return -TARGET_EFAULT;
8572             }
8573         }
8574         return ret;
8575     case TARGET_NR_settimeofday:
8576         {
8577             struct timeval tv, *ptv = NULL;
8578             struct timezone tz, *ptz = NULL;
8579 
8580             if (arg1) {
8581                 if (copy_from_user_timeval(&tv, arg1)) {
8582                     return -TARGET_EFAULT;
8583                 }
8584                 ptv = &tv;
8585             }
8586 
8587             if (arg2) {
8588                 if (copy_from_user_timezone(&tz, arg2)) {
8589                     return -TARGET_EFAULT;
8590                 }
8591                 ptz = &tz;
8592             }
8593 
8594             return get_errno(settimeofday(ptv, ptz));
8595         }
8596 #if defined(TARGET_NR_select)
8597     case TARGET_NR_select:
8598 #if defined(TARGET_WANT_NI_OLD_SELECT)
8599         /* some architectures used to have old_select here
8600          * but now return ENOSYS for it.
8601          */
8602         ret = -TARGET_ENOSYS;
8603 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8604         ret = do_old_select(arg1);
8605 #else
8606         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8607 #endif
8608         return ret;
8609 #endif
8610 #ifdef TARGET_NR_pselect6
8611     case TARGET_NR_pselect6:
8612         {
8613             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8614             fd_set rfds, wfds, efds;
8615             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8616             struct timespec ts, *ts_ptr;
8617 
8618             /*
8619              * The 6th arg is actually two args smashed together,
8620              * so we cannot use the C library.
8621              */
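            /*
             * Conceptually the guest passes a pointer to:
             *     struct { abi_ulong sigset_addr; abi_ulong sigsetsize; };
             * Both fields are fetched and byte-swapped below.
             */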
8622             sigset_t set;
8623             struct {
8624                 sigset_t *set;
8625                 size_t size;
8626             } sig, *sig_ptr;
8627 
8628             abi_ulong arg_sigset, arg_sigsize, *arg7;
8629             target_sigset_t *target_sigset;
8630 
8631             n = arg1;
8632             rfd_addr = arg2;
8633             wfd_addr = arg3;
8634             efd_addr = arg4;
8635             ts_addr = arg5;
8636 
8637             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8638             if (ret) {
8639                 return ret;
8640             }
8641             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8642             if (ret) {
8643                 return ret;
8644             }
8645             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8646             if (ret) {
8647                 return ret;
8648             }
8649 
8650             /*
8651              * This takes a timespec, and not a timeval, so we cannot
8652              * use the do_select() helper ...
8653              */
8654             if (ts_addr) {
8655                 if (target_to_host_timespec(&ts, ts_addr)) {
8656                     return -TARGET_EFAULT;
8657                 }
8658                 ts_ptr = &ts;
8659             } else {
8660                 ts_ptr = NULL;
8661             }
8662 
8663             /* Extract the two packed args for the sigset */
8664             if (arg6) {
8665                 sig_ptr = &sig;
8666                 sig.size = SIGSET_T_SIZE;
8667 
8668                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8669                 if (!arg7) {
8670                     return -TARGET_EFAULT;
8671                 }
8672                 arg_sigset = tswapal(arg7[0]);
8673                 arg_sigsize = tswapal(arg7[1]);
8674                 unlock_user(arg7, arg6, 0);
8675 
8676                 if (arg_sigset) {
8677                     sig.set = &set;
8678                     if (arg_sigsize != sizeof(*target_sigset)) {
8679                         /* Like the kernel, we enforce correct size sigsets */
8680                         return -TARGET_EINVAL;
8681                     }
8682                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8683                                               sizeof(*target_sigset), 1);
8684                     if (!target_sigset) {
8685                         return -TARGET_EFAULT;
8686                     }
8687                     target_to_host_sigset(&set, target_sigset);
8688                     unlock_user(target_sigset, arg_sigset, 0);
8689                 } else {
8690                     sig.set = NULL;
8691                 }
8692             } else {
8693                 sig_ptr = NULL;
8694             }
8695 
8696             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8697                                           ts_ptr, sig_ptr));
8698 
8699             if (!is_error(ret)) {
8700                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8701                     return -TARGET_EFAULT;
8702                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8703                     return -TARGET_EFAULT;
8704                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8705                     return -TARGET_EFAULT;
8706 
8707                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8708                     return -TARGET_EFAULT;
8709             }
8710         }
8711         return ret;
8712 #endif
8713 #ifdef TARGET_NR_symlink
8714     case TARGET_NR_symlink:
8715         {
8716             void *p2;
8717             p = lock_user_string(arg1);
8718             p2 = lock_user_string(arg2);
8719             if (!p || !p2)
8720                 ret = -TARGET_EFAULT;
8721             else
8722                 ret = get_errno(symlink(p, p2));
8723             unlock_user(p2, arg2, 0);
8724             unlock_user(p, arg1, 0);
8725         }
8726         return ret;
8727 #endif
8728 #if defined(TARGET_NR_symlinkat)
8729     case TARGET_NR_symlinkat:
8730         {
8731             void *p2;
8732             p  = lock_user_string(arg1);
8733             p2 = lock_user_string(arg3);
8734             if (!p || !p2)
8735                 ret = -TARGET_EFAULT;
8736             else
8737                 ret = get_errno(symlinkat(p, arg2, p2));
8738             unlock_user(p2, arg3, 0);
8739             unlock_user(p, arg1, 0);
8740         }
8741         return ret;
8742 #endif
8743 #ifdef TARGET_NR_readlink
8744     case TARGET_NR_readlink:
8745         {
8746             void *p2;
8747             p = lock_user_string(arg1);
8748             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8749             if (!p || !p2) {
8750                 ret = -TARGET_EFAULT;
8751             } else if (!arg3) {
8752                 /* Short circuit this for the magic exe check. */
8753                 ret = -TARGET_EINVAL;
8754             } else if (is_proc_myself((const char *)p, "exe")) {
8755                 char real[PATH_MAX], *temp;
8756                 temp = realpath(exec_path, real);
8757                 /* Return value is # of bytes that we wrote to the buffer. */
8758                 if (temp == NULL) {
8759                     ret = get_errno(-1);
8760                 } else {
8761                     /* Don't worry about sign mismatch as earlier mapping
8762                      * logic would have thrown a bad address error. */
8763                     ret = MIN(strlen(real), arg3);
8764                     /* We cannot NUL terminate the string. */
8765                     memcpy(p2, real, ret);
8766                 }
8767             } else {
8768                 ret = get_errno(readlink(path(p), p2, arg3));
8769             }
8770             unlock_user(p2, arg2, ret);
8771             unlock_user(p, arg1, 0);
8772         }
8773         return ret;
8774 #endif
8775 #if defined(TARGET_NR_readlinkat)
8776     case TARGET_NR_readlinkat:
8777         {
8778             void *p2;
8779             p  = lock_user_string(arg2);
8780             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8781             if (!p || !p2) {
8782                 ret = -TARGET_EFAULT;
8783             } else if (is_proc_myself((const char *)p, "exe")) {
8784                 char real[PATH_MAX], *temp;
8785                 temp = realpath(exec_path, real);
8786                 ret = temp == NULL ? get_errno(-1) : strlen(real);
8787                 snprintf((char *)p2, arg4, "%s", real);
8788             } else {
8789                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8790             }
8791             unlock_user(p2, arg3, ret);
8792             unlock_user(p, arg2, 0);
8793         }
8794         return ret;
8795 #endif
8796 #ifdef TARGET_NR_swapon
8797     case TARGET_NR_swapon:
8798         if (!(p = lock_user_string(arg1)))
8799             return -TARGET_EFAULT;
8800         ret = get_errno(swapon(p, arg2));
8801         unlock_user(p, arg1, 0);
8802         return ret;
8803 #endif
8804     case TARGET_NR_reboot:
8805         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8806             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
8807             p = lock_user_string(arg4);
8808             if (!p) {
8809                 return -TARGET_EFAULT;
8810             }
8811             ret = get_errno(reboot(arg1, arg2, arg3, p));
8812             unlock_user(p, arg4, 0);
8813         } else {
8814             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8815         }
8816         return ret;
8817 #ifdef TARGET_NR_mmap
8818     case TARGET_NR_mmap:
8819 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8820     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8821     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8822     || defined(TARGET_S390X)
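        /* On these targets the old-style mmap syscall takes a single guest
         * pointer to a block of six arguments. */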
8823         {
8824             abi_ulong *v;
8825             abi_ulong v1, v2, v3, v4, v5, v6;
8826             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8827                 return -TARGET_EFAULT;
8828             v1 = tswapal(v[0]);
8829             v2 = tswapal(v[1]);
8830             v3 = tswapal(v[2]);
8831             v4 = tswapal(v[3]);
8832             v5 = tswapal(v[4]);
8833             v6 = tswapal(v[5]);
8834             unlock_user(v, arg1, 0);
8835             ret = get_errno(target_mmap(v1, v2, v3,
8836                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8837                                         v5, v6));
8838         }
8839 #else
8840         ret = get_errno(target_mmap(arg1, arg2, arg3,
8841                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8842                                     arg5,
8843                                     arg6));
8844 #endif
8845         return ret;
8846 #endif
8847 #ifdef TARGET_NR_mmap2
8848     case TARGET_NR_mmap2:
8849 #ifndef MMAP_SHIFT
8850 #define MMAP_SHIFT 12
8851 #endif
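        /* mmap2 supplies the file offset in units of 1 << MMAP_SHIFT bytes
         * (normally 4096); convert it to a byte offset for target_mmap(). */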
8852         ret = target_mmap(arg1, arg2, arg3,
8853                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8854                           arg5, arg6 << MMAP_SHIFT);
8855         return get_errno(ret);
8856 #endif
8857     case TARGET_NR_munmap:
8858         return get_errno(target_munmap(arg1, arg2));
8859     case TARGET_NR_mprotect:
8860         {
8861             TaskState *ts = cpu->opaque;
8862             /* Special hack to detect libc making the stack executable.  */
8863             if ((arg3 & PROT_GROWSDOWN)
8864                 && arg1 >= ts->info->stack_limit
8865                 && arg1 <= ts->info->start_stack) {
8866                 arg3 &= ~PROT_GROWSDOWN;
8867                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8868                 arg1 = ts->info->stack_limit;
8869             }
8870         }
8871         return get_errno(target_mprotect(arg1, arg2, arg3));
8872 #ifdef TARGET_NR_mremap
8873     case TARGET_NR_mremap:
8874         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8875 #endif
8876         /* ??? msync/mlock/munlock are broken for softmmu.  */
8877 #ifdef TARGET_NR_msync
8878     case TARGET_NR_msync:
8879         return get_errno(msync(g2h(arg1), arg2, arg3));
8880 #endif
8881 #ifdef TARGET_NR_mlock
8882     case TARGET_NR_mlock:
8883         return get_errno(mlock(g2h(arg1), arg2));
8884 #endif
8885 #ifdef TARGET_NR_munlock
8886     case TARGET_NR_munlock:
8887         return get_errno(munlock(g2h(arg1), arg2));
8888 #endif
8889 #ifdef TARGET_NR_mlockall
8890     case TARGET_NR_mlockall:
8891         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8892 #endif
8893 #ifdef TARGET_NR_munlockall
8894     case TARGET_NR_munlockall:
8895         return get_errno(munlockall());
8896 #endif
8897 #ifdef TARGET_NR_truncate
8898     case TARGET_NR_truncate:
8899         if (!(p = lock_user_string(arg1)))
8900             return -TARGET_EFAULT;
8901         ret = get_errno(truncate(p, arg2));
8902         unlock_user(p, arg1, 0);
8903         return ret;
8904 #endif
8905 #ifdef TARGET_NR_ftruncate
8906     case TARGET_NR_ftruncate:
8907         return get_errno(ftruncate(arg1, arg2));
8908 #endif
8909     case TARGET_NR_fchmod:
8910         return get_errno(fchmod(arg1, arg2));
8911 #if defined(TARGET_NR_fchmodat)
8912     case TARGET_NR_fchmodat:
8913         if (!(p = lock_user_string(arg2)))
8914             return -TARGET_EFAULT;
8915         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8916         unlock_user(p, arg2, 0);
8917         return ret;
8918 #endif
8919     case TARGET_NR_getpriority:
8920         /* Note that negative values are valid for getpriority, so we must
8921            differentiate based on errno settings.  */
8922         errno = 0;
8923         ret = getpriority(arg1, arg2);
8924         if (ret == -1 && errno != 0) {
8925             return -host_to_target_errno(errno);
8926         }
8927 #ifdef TARGET_ALPHA
8928         /* Return value is the unbiased priority.  Signal no error.  */
8929         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8930 #else
8931         /* Return value is a biased priority to avoid negative numbers.  */
8932         ret = 20 - ret;
8933 #endif
8934         return ret;
8935     case TARGET_NR_setpriority:
8936         return get_errno(setpriority(arg1, arg2, arg3));
8937 #ifdef TARGET_NR_statfs
8938     case TARGET_NR_statfs:
8939         if (!(p = lock_user_string(arg1))) {
8940             return -TARGET_EFAULT;
8941         }
8942         ret = get_errno(statfs(path(p), &stfs));
8943         unlock_user(p, arg1, 0);
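    /* fstatfs (below) jumps here to share the target conversion code. */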
8944     convert_statfs:
8945         if (!is_error(ret)) {
8946             struct target_statfs *target_stfs;
8947 
8948             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8949                 return -TARGET_EFAULT;
8950             __put_user(stfs.f_type, &target_stfs->f_type);
8951             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8952             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8953             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8954             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8955             __put_user(stfs.f_files, &target_stfs->f_files);
8956             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8957             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8958             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8959             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8960             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8961 #ifdef _STATFS_F_FLAGS
8962             __put_user(stfs.f_flags, &target_stfs->f_flags);
8963 #else
8964             __put_user(0, &target_stfs->f_flags);
8965 #endif
8966             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8967             unlock_user_struct(target_stfs, arg2, 1);
8968         }
8969         return ret;
8970 #endif
8971 #ifdef TARGET_NR_fstatfs
8972     case TARGET_NR_fstatfs:
8973         ret = get_errno(fstatfs(arg1, &stfs));
8974         goto convert_statfs;
8975 #endif
8976 #ifdef TARGET_NR_statfs64
8977     case TARGET_NR_statfs64:
8978         if (!(p = lock_user_string(arg1))) {
8979             return -TARGET_EFAULT;
8980         }
8981         ret = get_errno(statfs(path(p), &stfs));
8982         unlock_user(p, arg1, 0);
8983     convert_statfs64:
8984         if (!is_error(ret)) {
8985             struct target_statfs64 *target_stfs;
8986 
8987             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8988                 return -TARGET_EFAULT;
8989             __put_user(stfs.f_type, &target_stfs->f_type);
8990             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8991             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8992             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8993             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8994             __put_user(stfs.f_files, &target_stfs->f_files);
8995             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8996             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8997             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8998             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8999             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9000             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9001             unlock_user_struct(target_stfs, arg3, 1);
9002         }
9003         return ret;
9004     case TARGET_NR_fstatfs64:
9005         ret = get_errno(fstatfs(arg1, &stfs));
9006         goto convert_statfs64;
9007 #endif
9008 #ifdef TARGET_NR_socketcall
9009     case TARGET_NR_socketcall:
9010         return do_socketcall(arg1, arg2);
9011 #endif
9012 #ifdef TARGET_NR_accept
9013     case TARGET_NR_accept:
9014         return do_accept4(arg1, arg2, arg3, 0);
9015 #endif
9016 #ifdef TARGET_NR_accept4
9017     case TARGET_NR_accept4:
9018         return do_accept4(arg1, arg2, arg3, arg4);
9019 #endif
9020 #ifdef TARGET_NR_bind
9021     case TARGET_NR_bind:
9022         return do_bind(arg1, arg2, arg3);
9023 #endif
9024 #ifdef TARGET_NR_connect
9025     case TARGET_NR_connect:
9026         return do_connect(arg1, arg2, arg3);
9027 #endif
9028 #ifdef TARGET_NR_getpeername
9029     case TARGET_NR_getpeername:
9030         return do_getpeername(arg1, arg2, arg3);
9031 #endif
9032 #ifdef TARGET_NR_getsockname
9033     case TARGET_NR_getsockname:
9034         return do_getsockname(arg1, arg2, arg3);
9035 #endif
9036 #ifdef TARGET_NR_getsockopt
9037     case TARGET_NR_getsockopt:
9038         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9039 #endif
9040 #ifdef TARGET_NR_listen
9041     case TARGET_NR_listen:
9042         return get_errno(listen(arg1, arg2));
9043 #endif
9044 #ifdef TARGET_NR_recv
9045     case TARGET_NR_recv:
9046         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9047 #endif
9048 #ifdef TARGET_NR_recvfrom
9049     case TARGET_NR_recvfrom:
9050         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9051 #endif
9052 #ifdef TARGET_NR_recvmsg
9053     case TARGET_NR_recvmsg:
9054         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9055 #endif
9056 #ifdef TARGET_NR_send
9057     case TARGET_NR_send:
9058         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9059 #endif
9060 #ifdef TARGET_NR_sendmsg
9061     case TARGET_NR_sendmsg:
9062         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9063 #endif
9064 #ifdef TARGET_NR_sendmmsg
9065     case TARGET_NR_sendmmsg:
9066         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9067     case TARGET_NR_recvmmsg:
9068         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9069 #endif
9070 #ifdef TARGET_NR_sendto
9071     case TARGET_NR_sendto:
9072         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9073 #endif
9074 #ifdef TARGET_NR_shutdown
9075     case TARGET_NR_shutdown:
9076         return get_errno(shutdown(arg1, arg2));
9077 #endif
9078 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9079     case TARGET_NR_getrandom:
9080         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9081         if (!p) {
9082             return -TARGET_EFAULT;
9083         }
9084         ret = get_errno(getrandom(p, arg2, arg3));
9085         unlock_user(p, arg1, ret);
9086         return ret;
9087 #endif
9088 #ifdef TARGET_NR_socket
9089     case TARGET_NR_socket:
9090         return do_socket(arg1, arg2, arg3);
9091 #endif
9092 #ifdef TARGET_NR_socketpair
9093     case TARGET_NR_socketpair:
9094         return do_socketpair(arg1, arg2, arg3, arg4);
9095 #endif
9096 #ifdef TARGET_NR_setsockopt
9097     case TARGET_NR_setsockopt:
9098         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9099 #endif
9100 #if defined(TARGET_NR_syslog)
9101     case TARGET_NR_syslog:
9102         {
9103             int len = arg3;
9104 
9105             switch (arg1) {
9106             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9107             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9108             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9109             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9110             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9111             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9112             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9113             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9114                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9115             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9116             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9117             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9118                 {
9119                     if (len < 0) {
9120                         return -TARGET_EINVAL;
9121                     }
9122                     if (len == 0) {
9123                         return 0;
9124                     }
9125                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9126                     if (!p) {
9127                         return -TARGET_EFAULT;
9128                     }
9129                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9130                     unlock_user(p, arg2, arg3);
9131                 }
9132                 return ret;
9133             default:
9134                 return -TARGET_EINVAL;
9135             }
9136         }
9137         break;
9138 #endif
9139     case TARGET_NR_setitimer:
9140         {
9141             struct itimerval value, ovalue, *pvalue;
9142 
9143             if (arg2) {
9144                 pvalue = &value;
9145                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9146                     || copy_from_user_timeval(&pvalue->it_value,
9147                                               arg2 + sizeof(struct target_timeval)))
9148                     return -TARGET_EFAULT;
9149             } else {
9150                 pvalue = NULL;
9151             }
9152             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9153             if (!is_error(ret) && arg3) {
9154                 if (copy_to_user_timeval(arg3,
9155                                          &ovalue.it_interval)
9156                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9157                                             &ovalue.it_value))
9158                     return -TARGET_EFAULT;
9159             }
9160         }
9161         return ret;
9162     case TARGET_NR_getitimer:
9163         {
9164             struct itimerval value;
9165 
9166             ret = get_errno(getitimer(arg1, &value));
9167             if (!is_error(ret) && arg2) {
9168                 if (copy_to_user_timeval(arg2,
9169                                          &value.it_interval)
9170                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9171                                             &value.it_value))
9172                     return -TARGET_EFAULT;
9173             }
9174         }
9175         return ret;
9176 #ifdef TARGET_NR_stat
9177     case TARGET_NR_stat:
9178         if (!(p = lock_user_string(arg1))) {
9179             return -TARGET_EFAULT;
9180         }
9181         ret = get_errno(stat(path(p), &st));
9182         unlock_user(p, arg1, 0);
9183         goto do_stat;
9184 #endif
9185 #ifdef TARGET_NR_lstat
9186     case TARGET_NR_lstat:
9187         if (!(p = lock_user_string(arg1))) {
9188             return -TARGET_EFAULT;
9189         }
9190         ret = get_errno(lstat(path(p), &st));
9191         unlock_user(p, arg1, 0);
9192         goto do_stat;
9193 #endif
9194 #ifdef TARGET_NR_fstat
9195     case TARGET_NR_fstat:
9196         {
9197             ret = get_errno(fstat(arg1, &st));
9198 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9199         do_stat:
9200 #endif
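            /*
             * Common tail for stat/lstat/fstat: copy the host struct stat
             * into the guest's target_stat layout, letting __put_user handle
             * field widths and byte order.
             */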
9201             if (!is_error(ret)) {
9202                 struct target_stat *target_st;
9203 
9204                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9205                     return -TARGET_EFAULT;
9206                 memset(target_st, 0, sizeof(*target_st));
9207                 __put_user(st.st_dev, &target_st->st_dev);
9208                 __put_user(st.st_ino, &target_st->st_ino);
9209                 __put_user(st.st_mode, &target_st->st_mode);
9210                 __put_user(st.st_uid, &target_st->st_uid);
9211                 __put_user(st.st_gid, &target_st->st_gid);
9212                 __put_user(st.st_nlink, &target_st->st_nlink);
9213                 __put_user(st.st_rdev, &target_st->st_rdev);
9214                 __put_user(st.st_size, &target_st->st_size);
9215                 __put_user(st.st_blksize, &target_st->st_blksize);
9216                 __put_user(st.st_blocks, &target_st->st_blocks);
9217                 __put_user(st.st_atime, &target_st->target_st_atime);
9218                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9219                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9220 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9221     defined(TARGET_STAT_HAVE_NSEC)
9222                 __put_user(st.st_atim.tv_nsec,
9223                            &target_st->target_st_atime_nsec);
9224                 __put_user(st.st_mtim.tv_nsec,
9225                            &target_st->target_st_mtime_nsec);
9226                 __put_user(st.st_ctim.tv_nsec,
9227                            &target_st->target_st_ctime_nsec);
9228 #endif
9229                 unlock_user_struct(target_st, arg2, 1);
9230             }
9231         }
9232         return ret;
9233 #endif
9234     case TARGET_NR_vhangup:
9235         return get_errno(vhangup());
9236 #ifdef TARGET_NR_syscall
9237     case TARGET_NR_syscall:
9238         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9239                           arg6, arg7, arg8, 0);
9240 #endif
9241     case TARGET_NR_wait4:
9242         {
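            /*
             * Wait on the host, then convert the exit status and, if the
             * guest supplied a pointer, the rusage data back into target
             * format.
             */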
9243             int status;
9244             abi_long status_ptr = arg2;
9245             struct rusage rusage, *rusage_ptr;
9246             abi_ulong target_rusage = arg4;
9247             abi_long rusage_err;
9248             if (target_rusage)
9249                 rusage_ptr = &rusage;
9250             else
9251                 rusage_ptr = NULL;
9252             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9253             if (!is_error(ret)) {
9254                 if (status_ptr && ret) {
9255                     status = host_to_target_waitstatus(status);
9256                     if (put_user_s32(status, status_ptr))
9257                         return -TARGET_EFAULT;
9258                 }
9259                 if (target_rusage) {
9260                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9261                     if (rusage_err) {
9262                         ret = rusage_err;
9263                     }
9264                 }
9265             }
9266         }
9267         return ret;
9268 #ifdef TARGET_NR_swapoff
9269     case TARGET_NR_swapoff:
9270         if (!(p = lock_user_string(arg1)))
9271             return -TARGET_EFAULT;
9272         ret = get_errno(swapoff(p));
9273         unlock_user(p, arg1, 0);
9274         return ret;
9275 #endif
9276     case TARGET_NR_sysinfo:
9277         {
9278             struct target_sysinfo *target_value;
9279             struct sysinfo value;
9280             ret = get_errno(sysinfo(&value));
9281             if (!is_error(ret) && arg1)
9282             {
9283                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9284                     return -TARGET_EFAULT;
9285                 __put_user(value.uptime, &target_value->uptime);
9286                 __put_user(value.loads[0], &target_value->loads[0]);
9287                 __put_user(value.loads[1], &target_value->loads[1]);
9288                 __put_user(value.loads[2], &target_value->loads[2]);
9289                 __put_user(value.totalram, &target_value->totalram);
9290                 __put_user(value.freeram, &target_value->freeram);
9291                 __put_user(value.sharedram, &target_value->sharedram);
9292                 __put_user(value.bufferram, &target_value->bufferram);
9293                 __put_user(value.totalswap, &target_value->totalswap);
9294                 __put_user(value.freeswap, &target_value->freeswap);
9295                 __put_user(value.procs, &target_value->procs);
9296                 __put_user(value.totalhigh, &target_value->totalhigh);
9297                 __put_user(value.freehigh, &target_value->freehigh);
9298                 __put_user(value.mem_unit, &target_value->mem_unit);
9299                 unlock_user_struct(target_value, arg1, 1);
9300             }
9301         }
9302         return ret;
9303 #ifdef TARGET_NR_ipc
9304     case TARGET_NR_ipc:
9305         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9306 #endif
9307 #ifdef TARGET_NR_semget
9308     case TARGET_NR_semget:
9309         return get_errno(semget(arg1, arg2, arg3));
9310 #endif
9311 #ifdef TARGET_NR_semop
9312     case TARGET_NR_semop:
9313         return do_semop(arg1, arg2, arg3);
9314 #endif
9315 #ifdef TARGET_NR_semctl
9316     case TARGET_NR_semctl:
9317         return do_semctl(arg1, arg2, arg3, arg4);
9318 #endif
9319 #ifdef TARGET_NR_msgctl
9320     case TARGET_NR_msgctl:
9321         return do_msgctl(arg1, arg2, arg3);
9322 #endif
9323 #ifdef TARGET_NR_msgget
9324     case TARGET_NR_msgget:
9325         return get_errno(msgget(arg1, arg2));
9326 #endif
9327 #ifdef TARGET_NR_msgrcv
9328     case TARGET_NR_msgrcv:
9329         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9330 #endif
9331 #ifdef TARGET_NR_msgsnd
9332     case TARGET_NR_msgsnd:
9333         return do_msgsnd(arg1, arg2, arg3, arg4);
9334 #endif
9335 #ifdef TARGET_NR_shmget
9336     case TARGET_NR_shmget:
9337         return get_errno(shmget(arg1, arg2, arg3));
9338 #endif
9339 #ifdef TARGET_NR_shmctl
9340     case TARGET_NR_shmctl:
9341         return do_shmctl(arg1, arg2, arg3);
9342 #endif
9343 #ifdef TARGET_NR_shmat
9344     case TARGET_NR_shmat:
9345         return do_shmat(cpu_env, arg1, arg2, arg3);
9346 #endif
9347 #ifdef TARGET_NR_shmdt
9348     case TARGET_NR_shmdt:
9349         return do_shmdt(arg1);
9350 #endif
9351     case TARGET_NR_fsync:
9352         return get_errno(fsync(arg1));
9353     case TARGET_NR_clone:
9354         /* Linux manages to have three different orderings for its
9355          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9356          * match the kernel's CONFIG_CLONE_* settings.
9357          * Microblaze is further special in that it uses a sixth
9358          * implicit argument to clone for the TLS pointer.
9359          */
9360 #if defined(TARGET_MICROBLAZE)
9361         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9362 #elif defined(TARGET_CLONE_BACKWARDS)
9363         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9364 #elif defined(TARGET_CLONE_BACKWARDS2)
9365         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9366 #else
9367         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9368 #endif
9369         return ret;
9370 #ifdef __NR_exit_group
9371         /* new thread calls */
9372     case TARGET_NR_exit_group:
9373         preexit_cleanup(cpu_env, arg1);
9374         return get_errno(exit_group(arg1));
9375 #endif
9376     case TARGET_NR_setdomainname:
9377         if (!(p = lock_user_string(arg1)))
9378             return -TARGET_EFAULT;
9379         ret = get_errno(setdomainname(p, arg2));
9380         unlock_user(p, arg1, 0);
9381         return ret;
9382     case TARGET_NR_uname:
9383         /* no need to transcode because we use the linux syscall */
9384         {
9385             struct new_utsname * buf;
9386 
9387             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9388                 return -TARGET_EFAULT;
9389             ret = get_errno(sys_uname(buf));
9390             if (!is_error(ret)) {
9391                 /* Overwrite the native machine name with whatever is being
9392                    emulated. */
9393                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9394                           sizeof(buf->machine));
9395                 /* Allow the user to override the reported release.  */
9396                 if (qemu_uname_release && *qemu_uname_release) {
9397                     g_strlcpy(buf->release, qemu_uname_release,
9398                               sizeof(buf->release));
9399                 }
9400             }
9401             unlock_user_struct(buf, arg1, 1);
9402         }
9403         return ret;
9404 #ifdef TARGET_I386
9405     case TARGET_NR_modify_ldt:
9406         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9407 #if !defined(TARGET_X86_64)
9408     case TARGET_NR_vm86:
9409         return do_vm86(cpu_env, arg1, arg2);
9410 #endif
9411 #endif
9412     case TARGET_NR_adjtimex:
9413         {
9414             struct timex host_buf;
9415 
9416             if (target_to_host_timex(&host_buf, arg1) != 0) {
9417                 return -TARGET_EFAULT;
9418             }
9419             ret = get_errno(adjtimex(&host_buf));
9420             if (!is_error(ret)) {
9421                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9422                     return -TARGET_EFAULT;
9423                 }
9424             }
9425         }
9426         return ret;
9427 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9428     case TARGET_NR_clock_adjtime:
9429         {
9430             struct timex htx, *phtx = &htx;
9431 
9432             if (target_to_host_timex(phtx, arg2) != 0) {
9433                 return -TARGET_EFAULT;
9434             }
9435             ret = get_errno(clock_adjtime(arg1, phtx));
9436             if (!is_error(ret) && phtx) {
9437                 if (host_to_target_timex(arg2, phtx) != 0) {
9438                     return -TARGET_EFAULT;
9439                 }
9440             }
9441         }
9442         return ret;
9443 #endif
9444     case TARGET_NR_getpgid:
9445         return get_errno(getpgid(arg1));
9446     case TARGET_NR_fchdir:
9447         return get_errno(fchdir(arg1));
9448     case TARGET_NR_personality:
9449         return get_errno(personality(arg1));
9450 #ifdef TARGET_NR__llseek /* Not on alpha */
9451     case TARGET_NR__llseek:
9452         {
9453             int64_t res;
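            /* Without a host llseek, build the 64-bit offset from the two
             * 32-bit halves and use lseek() directly. */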
9454 #if !defined(__NR_llseek)
9455             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9456             if (res == -1) {
9457                 ret = get_errno(res);
9458             } else {
9459                 ret = 0;
9460             }
9461 #else
9462             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9463 #endif
9464             if ((ret == 0) && put_user_s64(res, arg4)) {
9465                 return -TARGET_EFAULT;
9466             }
9467         }
9468         return ret;
9469 #endif
9470 #ifdef TARGET_NR_getdents
9471     case TARGET_NR_getdents:
9472 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9473 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9474         {
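            /*
             * The host linux_dirent records are larger than the guest's
             * (64-bit vs 32-bit d_ino/d_off), so read into a scratch buffer
             * and repack each record into the target_dirent layout.
             */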
9475             struct target_dirent *target_dirp;
9476             struct linux_dirent *dirp;
9477             abi_long count = arg3;
9478 
9479             dirp = g_try_malloc(count);
9480             if (!dirp) {
9481                 return -TARGET_ENOMEM;
9482             }
9483 
9484             ret = get_errno(sys_getdents(arg1, dirp, count));
9485             if (!is_error(ret)) {
9486                 struct linux_dirent *de;
9487                 struct target_dirent *tde;
9488                 int len = ret;
9489                 int reclen, treclen;
9490                 int count1, tnamelen;
9491 
9492                 count1 = 0;
9493                 de = dirp;
9494                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9495                     return -TARGET_EFAULT;
9496                 tde = target_dirp;
9497                 while (len > 0) {
9498                     reclen = de->d_reclen;
9499                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9500                     assert(tnamelen >= 0);
9501                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9502                     assert(count1 + treclen <= count);
9503                     tde->d_reclen = tswap16(treclen);
9504                     tde->d_ino = tswapal(de->d_ino);
9505                     tde->d_off = tswapal(de->d_off);
9506                     memcpy(tde->d_name, de->d_name, tnamelen);
9507                     de = (struct linux_dirent *)((char *)de + reclen);
9508                     len -= reclen;
9509                     tde = (struct target_dirent *)((char *)tde + treclen);
9510                     count1 += treclen;
9511                 }
9512                 ret = count1;
9513                 unlock_user(target_dirp, arg2, ret);
9514             }
9515             g_free(dirp);
9516         }
9517 #else
9518         {
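            /*
             * Host and guest dirent layouts match in size here, so the
             * syscall can fill the guest buffer directly and the fields are
             * only byte-swapped in place.
             */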
9519             struct linux_dirent *dirp;
9520             abi_long count = arg3;
9521 
9522             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9523                 return -TARGET_EFAULT;
9524             ret = get_errno(sys_getdents(arg1, dirp, count));
9525             if (!is_error(ret)) {
9526                 struct linux_dirent *de;
9527                 int len = ret;
9528                 int reclen;
9529                 de = dirp;
9530                 while (len > 0) {
9531                     reclen = de->d_reclen;
9532                     if (reclen > len)
9533                         break;
9534                     de->d_reclen = tswap16(reclen);
9535                     tswapls(&de->d_ino);
9536                     tswapls(&de->d_off);
9537                     de = (struct linux_dirent *)((char *)de + reclen);
9538                     len -= reclen;
9539                 }
9540             }
9541             unlock_user(dirp, arg2, ret);
9542         }
9543 #endif
9544 #else
9545         /* Implement getdents in terms of getdents64 */
9546         {
9547             struct linux_dirent64 *dirp;
9548             abi_long count = arg3;
9549 
9550             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9551             if (!dirp) {
9552                 return -TARGET_EFAULT;
9553             }
9554             ret = get_errno(sys_getdents64(arg1, dirp, count));
9555             if (!is_error(ret)) {
9556                 /* Convert the dirent64 structs to target dirent.  We do this
9557                  * in-place, since we can guarantee that a target_dirent is no
9558                  * larger than a dirent64; however this means we have to be
9559                  * careful to read everything before writing in the new format.
9560                  */
9561                 struct linux_dirent64 *de;
9562                 struct target_dirent *tde;
9563                 int len = ret;
9564                 int tlen = 0;
9565 
9566                 de = dirp;
9567                 tde = (struct target_dirent *)dirp;
9568                 while (len > 0) {
9569                     int namelen, treclen;
9570                     int reclen = de->d_reclen;
9571                     uint64_t ino = de->d_ino;
9572                     int64_t off = de->d_off;
9573                     uint8_t type = de->d_type;
9574 
9575                     namelen = strlen(de->d_name);
9576                     treclen = offsetof(struct target_dirent, d_name)
9577                         + namelen + 2;
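                    /* The "+ 2" allows for the trailing NUL and for the
                     * d_type byte stored at the end of the target record. */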
9578                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9579 
9580                     memmove(tde->d_name, de->d_name, namelen + 1);
9581                     tde->d_ino = tswapal(ino);
9582                     tde->d_off = tswapal(off);
9583                     tde->d_reclen = tswap16(treclen);
9584                     /* The target_dirent type is in what was formerly a padding
9585                      * byte at the end of the structure:
9586                      */
9587                     *(((char *)tde) + treclen - 1) = type;
9588 
9589                     de = (struct linux_dirent64 *)((char *)de + reclen);
9590                     tde = (struct target_dirent *)((char *)tde + treclen);
9591                     len -= reclen;
9592                     tlen += treclen;
9593                 }
9594                 ret = tlen;
9595             }
9596             unlock_user(dirp, arg2, ret);
9597         }
9598 #endif
9599         return ret;
9600 #endif /* TARGET_NR_getdents */
9601 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9602     case TARGET_NR_getdents64:
9603         {
9604             struct linux_dirent64 *dirp;
9605             abi_long count = arg3;
9606             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9607                 return -TARGET_EFAULT;
9608             ret = get_errno(sys_getdents64(arg1, dirp, count));
9609             if (!is_error(ret)) {
9610                 struct linux_dirent64 *de;
9611                 int len = ret;
9612                 int reclen;
9613                 de = dirp;
9614                 while (len > 0) {
9615                     reclen = de->d_reclen;
9616                     if (reclen > len)
9617                         break;
9618                     de->d_reclen = tswap16(reclen);
9619                     tswap64s((uint64_t *)&de->d_ino);
9620                     tswap64s((uint64_t *)&de->d_off);
9621                     de = (struct linux_dirent64 *)((char *)de + reclen);
9622                     len -= reclen;
9623                 }
9624             }
9625             unlock_user(dirp, arg2, ret);
9626         }
9627         return ret;
9628 #endif /* TARGET_NR_getdents64 */
9629 #if defined(TARGET_NR__newselect)
9630     case TARGET_NR__newselect:
9631         return do_select(arg1, arg2, arg3, arg4, arg5);
9632 #endif
9633 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9634 # ifdef TARGET_NR_poll
9635     case TARGET_NR_poll:
9636 # endif
9637 # ifdef TARGET_NR_ppoll
9638     case TARGET_NR_ppoll:
9639 # endif
9640         {
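            /*
             * poll and ppoll share the conversion of the guest pollfd array;
             * the switch below only differs in how the timeout and signal
             * mask arguments are handled.
             */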
9641             struct target_pollfd *target_pfd;
9642             unsigned int nfds = arg2;
9643             struct pollfd *pfd;
9644             unsigned int i;
9645 
9646             pfd = NULL;
9647             target_pfd = NULL;
9648             if (nfds) {
9649                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9650                     return -TARGET_EINVAL;
9651                 }
9652 
9653                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9654                                        sizeof(struct target_pollfd) * nfds, 1);
9655                 if (!target_pfd) {
9656                     return -TARGET_EFAULT;
9657                 }
9658 
9659                 pfd = alloca(sizeof(struct pollfd) * nfds);
9660                 for (i = 0; i < nfds; i++) {
9661                     pfd[i].fd = tswap32(target_pfd[i].fd);
9662                     pfd[i].events = tswap16(target_pfd[i].events);
9663                 }
9664             }
9665 
9666             switch (num) {
9667 # ifdef TARGET_NR_ppoll
9668             case TARGET_NR_ppoll:
9669             {
9670                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9671                 target_sigset_t *target_set;
9672                 sigset_t _set, *set = &_set;
9673 
9674                 if (arg3) {
9675                     if (target_to_host_timespec(timeout_ts, arg3)) {
9676                         unlock_user(target_pfd, arg1, 0);
9677                         return -TARGET_EFAULT;
9678                     }
9679                 } else {
9680                     timeout_ts = NULL;
9681                 }
9682 
9683                 if (arg4) {
9684                     if (arg5 != sizeof(target_sigset_t)) {
9685                         unlock_user(target_pfd, arg1, 0);
9686                         return -TARGET_EINVAL;
9687                     }
9688 
9689                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9690                     if (!target_set) {
9691                         unlock_user(target_pfd, arg1, 0);
9692                         return -TARGET_EFAULT;
9693                     }
9694                     target_to_host_sigset(set, target_set);
9695                 } else {
9696                     set = NULL;
9697                 }
9698 
9699                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9700                                            set, SIGSET_T_SIZE));
9701 
9702                 if (!is_error(ret) && arg3) {
9703                     host_to_target_timespec(arg3, timeout_ts);
9704                 }
9705                 if (arg4) {
9706                     unlock_user(target_set, arg4, 0);
9707                 }
9708                 break;
9709             }
9710 # endif
9711 # ifdef TARGET_NR_poll
9712             case TARGET_NR_poll:
9713             {
9714                 struct timespec ts, *pts;
9715 
9716                 if (arg3 >= 0) {
9717                     /* Convert ms to secs, ns */
9718                     ts.tv_sec = arg3 / 1000;
9719                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9720                     pts = &ts;
9721                 } else {
9722                     /* -ve poll() timeout means "infinite" */
9723                     pts = NULL;
9724                 }
9725                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9726                 break;
9727             }
9728 # endif
9729             default:
9730                 g_assert_not_reached();
9731             }
9732 
9733             if (!is_error(ret)) {
9734                 for(i = 0; i < nfds; i++) {
9735                     target_pfd[i].revents = tswap16(pfd[i].revents);
9736                 }
9737             }
9738             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9739         }
9740         return ret;
9741 #endif
9742     case TARGET_NR_flock:
9743         /* NOTE: the flock constant seems to be the same for every
9744            Linux platform */
9745         return get_errno(safe_flock(arg1, arg2));
9746     case TARGET_NR_readv:
9747         {
9748             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9749             if (vec != NULL) {
9750                 ret = get_errno(safe_readv(arg1, vec, arg3));
9751                 unlock_iovec(vec, arg2, arg3, 1);
9752             } else {
9753                 ret = -host_to_target_errno(errno);
9754             }
9755         }
9756         return ret;
9757     case TARGET_NR_writev:
9758         {
9759             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9760             if (vec != NULL) {
9761                 ret = get_errno(safe_writev(arg1, vec, arg3));
9762                 unlock_iovec(vec, arg2, arg3, 0);
9763             } else {
9764                 ret = -host_to_target_errno(errno);
9765             }
9766         }
9767         return ret;
9768 #if defined(TARGET_NR_preadv)
9769     case TARGET_NR_preadv:
9770         {
9771             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9772             if (vec != NULL) {
9773                 unsigned long low, high;
9774 
9775                 target_to_host_low_high(arg4, arg5, &low, &high);
9776                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9777                 unlock_iovec(vec, arg2, arg3, 1);
9778             } else {
9779                 ret = -host_to_target_errno(errno);
9780            }
9781         }
9782         return ret;
9783 #endif
9784 #if defined(TARGET_NR_pwritev)
9785     case TARGET_NR_pwritev:
9786         {
9787             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9788             if (vec != NULL) {
9789                 unsigned long low, high;
9790 
9791                 target_to_host_low_high(arg4, arg5, &low, &high);
9792                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9793                 unlock_iovec(vec, arg2, arg3, 0);
9794             } else {
9795                 ret = -host_to_target_errno(errno);
9796            }
9797         }
9798         return ret;
9799 #endif
9800     case TARGET_NR_getsid:
9801         return get_errno(getsid(arg1));
9802 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9803     case TARGET_NR_fdatasync:
9804         return get_errno(fdatasync(arg1));
9805 #endif
9806 #ifdef TARGET_NR__sysctl
9807     case TARGET_NR__sysctl:
9808         /* We don't implement this, but ENOTDIR is always a safe
9809            return value. */
9810         return -TARGET_ENOTDIR;
9811 #endif
9812     case TARGET_NR_sched_getaffinity:
9813         {
9814             unsigned int mask_size;
9815             unsigned long *mask;
9816 
9817             /*
9818              * sched_getaffinity needs the mask size to be a multiple of
9819              * ulong, so we must handle target/host ulong size mismatches.
9820              */
9821             if (arg2 & (sizeof(abi_ulong) - 1)) {
9822                 return -TARGET_EINVAL;
9823             }
9824             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9825 
9826             mask = alloca(mask_size);
9827             memset(mask, 0, mask_size);
9828             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9829 
9830             if (!is_error(ret)) {
9831                 if (ret > arg2) {
9832                     /* More data was returned than fits in the caller's buffer.
9833                      * This only happens if sizeof(abi_long) < sizeof(long)
9834                      * and the caller passed us a buffer holding an odd number
9835                      * of abi_longs. If the host kernel is actually using the
9836                      * extra 4 bytes then fail EINVAL; otherwise we can just
9837                      * ignore them and only copy the interesting part.
9838                      */
9839                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9840                     if (numcpus > arg2 * 8) {
9841                         return -TARGET_EINVAL;
9842                     }
9843                     ret = arg2;
9844                 }
9845 
9846                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9847                     return -TARGET_EFAULT;
9848                 }
9849             }
9850         }
9851         return ret;
9852     case TARGET_NR_sched_setaffinity:
9853         {
9854             unsigned int mask_size;
9855             unsigned long *mask;
9856 
9857             /*
9858              * sched_setaffinity needs the mask size to be a multiple of
9859              * ulong, so we must handle target/host ulong size mismatches.
9860              */
9861             if (arg2 & (sizeof(abi_ulong) - 1)) {
9862                 return -TARGET_EINVAL;
9863             }
9864             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9865             mask = alloca(mask_size);
9866 
9867             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9868             if (ret) {
9869                 return ret;
9870             }
9871 
9872             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9873         }
9874     case TARGET_NR_getcpu:
9875         {
9876             unsigned cpu, node;
9877             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9878                                        arg2 ? &node : NULL,
9879                                        NULL));
9880             if (is_error(ret)) {
9881                 return ret;
9882             }
9883             if (arg1 && put_user_u32(cpu, arg1)) {
9884                 return -TARGET_EFAULT;
9885             }
9886             if (arg2 && put_user_u32(node, arg2)) {
9887                 return -TARGET_EFAULT;
9888             }
9889         }
9890         return ret;
9891     case TARGET_NR_sched_setparam:
9892         {
9893             struct sched_param *target_schp;
9894             struct sched_param schp;
9895 
9896             if (arg2 == 0) {
9897                 return -TARGET_EINVAL;
9898             }
9899             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9900                 return -TARGET_EFAULT;
9901             schp.sched_priority = tswap32(target_schp->sched_priority);
9902             unlock_user_struct(target_schp, arg2, 0);
9903             return get_errno(sched_setparam(arg1, &schp));
9904         }
9905     case TARGET_NR_sched_getparam:
9906         {
9907             struct sched_param *target_schp;
9908             struct sched_param schp;
9909 
9910             if (arg2 == 0) {
9911                 return -TARGET_EINVAL;
9912             }
9913             ret = get_errno(sched_getparam(arg1, &schp));
9914             if (!is_error(ret)) {
9915                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9916                     return -TARGET_EFAULT;
9917                 target_schp->sched_priority = tswap32(schp.sched_priority);
9918                 unlock_user_struct(target_schp, arg2, 1);
9919             }
9920         }
9921         return ret;
9922     case TARGET_NR_sched_setscheduler:
9923         {
9924             struct sched_param *target_schp;
9925             struct sched_param schp;
9926             if (arg3 == 0) {
9927                 return -TARGET_EINVAL;
9928             }
9929             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9930                 return -TARGET_EFAULT;
9931             schp.sched_priority = tswap32(target_schp->sched_priority);
9932             unlock_user_struct(target_schp, arg3, 0);
9933             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9934         }
9935     case TARGET_NR_sched_getscheduler:
9936         return get_errno(sched_getscheduler(arg1));
9937     case TARGET_NR_sched_yield:
9938         return get_errno(sched_yield());
9939     case TARGET_NR_sched_get_priority_max:
9940         return get_errno(sched_get_priority_max(arg1));
9941     case TARGET_NR_sched_get_priority_min:
9942         return get_errno(sched_get_priority_min(arg1));
9943     case TARGET_NR_sched_rr_get_interval:
9944         {
9945             struct timespec ts;
9946             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9947             if (!is_error(ret)) {
9948                 ret = host_to_target_timespec(arg2, &ts);
9949             }
9950         }
9951         return ret;
9952     case TARGET_NR_nanosleep:
9953         {
9954             struct timespec req, rem;
9955             target_to_host_timespec(&req, arg1);
9956             ret = get_errno(safe_nanosleep(&req, &rem));
9957             if (is_error(ret) && arg2) {
9958                 host_to_target_timespec(arg2, &rem);
9959             }
9960         }
9961         return ret;
9962     case TARGET_NR_prctl:
9963         switch (arg1) {
9964         case PR_GET_PDEATHSIG:
9965         {
9966             int deathsig;
9967             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9968             if (!is_error(ret) && arg2
9969                 && put_user_ual(deathsig, arg2)) {
9970                 return -TARGET_EFAULT;
9971             }
9972             return ret;
9973         }
9974 #ifdef PR_GET_NAME
9975         case PR_GET_NAME:
9976         {
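            /* The task name (comm) is at most 16 bytes including the
             * terminating NUL, hence the fixed-size mapping. */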
9977             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9978             if (!name) {
9979                 return -TARGET_EFAULT;
9980             }
9981             ret = get_errno(prctl(arg1, (unsigned long)name,
9982                                   arg3, arg4, arg5));
9983             unlock_user(name, arg2, 16);
9984             return ret;
9985         }
9986         case PR_SET_NAME:
9987         {
9988             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9989             if (!name) {
9990                 return -TARGET_EFAULT;
9991             }
9992             ret = get_errno(prctl(arg1, (unsigned long)name,
9993                                   arg3, arg4, arg5));
9994             unlock_user(name, arg2, 0);
9995             return ret;
9996         }
9997 #endif
9998 #ifdef TARGET_MIPS
9999         case TARGET_PR_GET_FP_MODE:
10000         {
10001             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10002             ret = 0;
10003             if (env->CP0_Status & (1 << CP0St_FR)) {
10004                 ret |= TARGET_PR_FP_MODE_FR;
10005             }
10006             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10007                 ret |= TARGET_PR_FP_MODE_FRE;
10008             }
10009             return ret;
10010         }
10011         case TARGET_PR_SET_FP_MODE:
10012         {
10013             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10014             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10015             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10016             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10017             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10018 
10019             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10020                                             TARGET_PR_FP_MODE_FRE;
10021 
10022             /* If there is nothing to change, return success right away.  */
10023             if (old_fr == new_fr && old_fre == new_fre) {
10024                 return 0;
10025             }
10026             /* Check the value is valid */
10027             if (arg2 & ~known_bits) {
10028                 return -TARGET_EOPNOTSUPP;
10029             }
10030             /* Setting FRE without FR is not supported.  */
10031             if (new_fre && !new_fr) {
10032                 return -TARGET_EOPNOTSUPP;
10033             }
10034             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10035                 /* FR1 is not supported */
10036                 return -TARGET_EOPNOTSUPP;
10037             }
10038             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10039                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10040                 /* cannot set FR=0 */
10041                 return -TARGET_EOPNOTSUPP;
10042             }
10043             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10044                 /* Cannot set FRE=1 */
10045                 return -TARGET_EOPNOTSUPP;
10046             }
10047 
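            /*
             * When the FR mode changes, re-pair the FPU registers so that
             * single-precision values end up where the new register model
             * expects them.
             */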
10048             int i;
10049             fpr_t *fpr = env->active_fpu.fpr;
10050             for (i = 0; i < 32 ; i += 2) {
10051                 if (!old_fr && new_fr) {
10052                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10053                 } else if (old_fr && !new_fr) {
10054                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10055                 }
10056             }
10057 
10058             if (new_fr) {
10059                 env->CP0_Status |= (1 << CP0St_FR);
10060                 env->hflags |= MIPS_HFLAG_F64;
10061             } else {
10062                 env->CP0_Status &= ~(1 << CP0St_FR);
10063                 env->hflags &= ~MIPS_HFLAG_F64;
10064             }
10065             if (new_fre) {
10066                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10067                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10068                     env->hflags |= MIPS_HFLAG_FRE;
10069                 }
10070             } else {
10071                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10072                 env->hflags &= ~MIPS_HFLAG_FRE;
10073             }
10074 
10075             return 0;
10076         }
10077 #endif /* MIPS */
10078 #ifdef TARGET_AARCH64
10079         case TARGET_PR_SVE_SET_VL:
10080             /*
10081              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10082              * PR_SVE_VL_INHERIT.  Note the kernel definition
10083              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10084              * even though the current architectural maximum is VQ=16.
10085              */
10086             ret = -TARGET_EINVAL;
10087             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10088                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10089                 CPUARMState *env = cpu_env;
10090                 ARMCPU *cpu = env_archcpu(env);
10091                 uint32_t vq, old_vq;
10092 
10093                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
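                /* ZCR_EL1.LEN holds the vector length in 128-bit quanta,
                 * minus one; clamp the requested length to what the CPU
                 * supports. */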
10094                 vq = MAX(arg2 / 16, 1);
10095                 vq = MIN(vq, cpu->sve_max_vq);
10096 
10097                 if (vq < old_vq) {
10098                     aarch64_sve_narrow_vq(env, vq);
10099                 }
10100                 env->vfp.zcr_el[1] = vq - 1;
10101                 arm_rebuild_hflags(env);
10102                 ret = vq * 16;
10103             }
10104             return ret;
10105         case TARGET_PR_SVE_GET_VL:
10106             ret = -TARGET_EINVAL;
10107             {
10108                 ARMCPU *cpu = env_archcpu(cpu_env);
10109                 if (cpu_isar_feature(aa64_sve, cpu)) {
10110                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10111                 }
10112             }
10113             return ret;
10114         case TARGET_PR_PAC_RESET_KEYS:
10115             {
10116                 CPUARMState *env = cpu_env;
10117                 ARMCPU *cpu = env_archcpu(env);
10118 
10119                 if (arg3 || arg4 || arg5) {
10120                     return -TARGET_EINVAL;
10121                 }
10122                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10123                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10124                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10125                                TARGET_PR_PAC_APGAKEY);
10126                     int ret = 0;
10127                     Error *err = NULL;
10128 
10129                     if (arg2 == 0) {
10130                         arg2 = all;
10131                     } else if (arg2 & ~all) {
10132                         return -TARGET_EINVAL;
10133                     }
10134                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10135                         ret |= qemu_guest_getrandom(&env->keys.apia,
10136                                                     sizeof(ARMPACKey), &err);
10137                     }
10138                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10139                         ret |= qemu_guest_getrandom(&env->keys.apib,
10140                                                     sizeof(ARMPACKey), &err);
10141                     }
10142                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10143                         ret |= qemu_guest_getrandom(&env->keys.apda,
10144                                                     sizeof(ARMPACKey), &err);
10145                     }
10146                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10147                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10148                                                     sizeof(ARMPACKey), &err);
10149                     }
10150                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10151                         ret |= qemu_guest_getrandom(&env->keys.apga,
10152                                                     sizeof(ARMPACKey), &err);
10153                     }
10154                     if (ret != 0) {
10155                         /*
10156                          * Some unknown failure in the crypto.  The best
10157                          * we can do is log it and fail the syscall.
10158                          * The real syscall cannot fail this way.
10159                          */
10160                         qemu_log_mask(LOG_UNIMP,
10161                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10162                                       error_get_pretty(err));
10163                         error_free(err);
10164                         return -TARGET_EIO;
10165                     }
10166                     return 0;
10167                 }
10168             }
10169             return -TARGET_EINVAL;
10170 #endif /* AARCH64 */
10171         case PR_GET_SECCOMP:
10172         case PR_SET_SECCOMP:
10173             /* Disable seccomp to prevent the target from disabling
10174              * syscalls that we need. */
10175             return -TARGET_EINVAL;
10176         default:
10177             /* Most prctl options have no pointer arguments */
10178             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10179         }
10180         break;
10181 #ifdef TARGET_NR_arch_prctl
10182     case TARGET_NR_arch_prctl:
10183 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10184         return do_arch_prctl(cpu_env, arg1, arg2);
10185 #else
10186 #error unreachable
10187 #endif
10188 #endif
10189 #ifdef TARGET_NR_pread64
10190     case TARGET_NR_pread64:
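        /*
         * On ABIs that pass 64-bit values in aligned register pairs the
         * offset starts one argument slot later, so shift the arguments.
         */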
10191         if (regpairs_aligned(cpu_env, num)) {
10192             arg4 = arg5;
10193             arg5 = arg6;
10194         }
10195         if (arg2 == 0 && arg3 == 0) {
10196             /* Special-case NULL buffer and zero length, which should succeed */
10197             p = 0;
10198         } else {
10199             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10200             if (!p) {
10201                 return -TARGET_EFAULT;
10202             }
10203         }
10204         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10205         unlock_user(p, arg2, ret);
10206         return ret;
10207     case TARGET_NR_pwrite64:
10208         if (regpairs_aligned(cpu_env, num)) {
10209             arg4 = arg5;
10210             arg5 = arg6;
10211         }
10212         if (arg2 == 0 && arg3 == 0) {
10213             /* Special-case NULL buffer and zero length, which should succeed */
10214             p = 0;
10215         } else {
10216             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10217             if (!p) {
10218                 return -TARGET_EFAULT;
10219             }
10220         }
10221         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10222         unlock_user(p, arg2, 0);
10223         return ret;
10224 #endif
10225     case TARGET_NR_getcwd:
10226         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10227             return -TARGET_EFAULT;
10228         ret = get_errno(sys_getcwd1(p, arg2));
10229         unlock_user(p, arg1, ret);
10230         return ret;
10231     case TARGET_NR_capget:
10232     case TARGET_NR_capset:
10233     {
10234         struct target_user_cap_header *target_header;
10235         struct target_user_cap_data *target_data = NULL;
10236         struct __user_cap_header_struct header;
10237         struct __user_cap_data_struct data[2];
10238         struct __user_cap_data_struct *dataptr = NULL;
10239         int i, target_datalen;
10240         int data_items = 1;
10241 
10242         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10243             return -TARGET_EFAULT;
10244         }
10245         header.version = tswap32(target_header->version);
10246         header.pid = tswap32(target_header->pid);
10247 
10248         if (header.version != _LINUX_CAPABILITY_VERSION) {
10249             /* Versions 2 and up take a pointer to two user_data structs */
10250             data_items = 2;
10251         }
10252 
10253         target_datalen = sizeof(*target_data) * data_items;
10254 
10255         if (arg2) {
10256             if (num == TARGET_NR_capget) {
10257                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10258             } else {
10259                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10260             }
10261             if (!target_data) {
10262                 unlock_user_struct(target_header, arg1, 0);
10263                 return -TARGET_EFAULT;
10264             }
10265 
10266             if (num == TARGET_NR_capset) {
10267                 for (i = 0; i < data_items; i++) {
10268                     data[i].effective = tswap32(target_data[i].effective);
10269                     data[i].permitted = tswap32(target_data[i].permitted);
10270                     data[i].inheritable = tswap32(target_data[i].inheritable);
10271                 }
10272             }
10273 
10274             dataptr = data;
10275         }
10276 
10277         if (num == TARGET_NR_capget) {
10278             ret = get_errno(capget(&header, dataptr));
10279         } else {
10280             ret = get_errno(capset(&header, dataptr));
10281         }
10282 
10283         /* The kernel always updates version for both capget and capset */
10284         target_header->version = tswap32(header.version);
10285         unlock_user_struct(target_header, arg1, 1);
10286 
10287         if (arg2) {
10288             if (num == TARGET_NR_capget) {
10289                 for (i = 0; i < data_items; i++) {
10290                     target_data[i].effective = tswap32(data[i].effective);
10291                     target_data[i].permitted = tswap32(data[i].permitted);
10292                     target_data[i].inheritable = tswap32(data[i].inheritable);
10293                 }
10294                 unlock_user(target_data, arg2, target_datalen);
10295             } else {
10296                 unlock_user(target_data, arg2, 0);
10297             }
10298         }
10299         return ret;
10300     }
10301     case TARGET_NR_sigaltstack:
10302         return do_sigaltstack(arg1, arg2,
10303                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10304 
10305 #ifdef CONFIG_SENDFILE
10306 #ifdef TARGET_NR_sendfile
10307     case TARGET_NR_sendfile:
10308     {
10309         off_t *offp = NULL;
10310         off_t off;
10311         if (arg3) {
10312             ret = get_user_sal(off, arg3);
10313             if (is_error(ret)) {
10314                 return ret;
10315             }
10316             offp = &off;
10317         }
10318         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10319         if (!is_error(ret) && arg3) {
10320             abi_long ret2 = put_user_sal(off, arg3);
10321             if (is_error(ret2)) {
10322                 ret = ret2;
10323             }
10324         }
10325         return ret;
10326     }
10327 #endif
10328 #ifdef TARGET_NR_sendfile64
10329     case TARGET_NR_sendfile64:
10330     {
10331         off_t *offp = NULL;
10332         off_t off;
10333         if (arg3) {
10334             ret = get_user_s64(off, arg3);
10335             if (is_error(ret)) {
10336                 return ret;
10337             }
10338             offp = &off;
10339         }
10340         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10341         if (!is_error(ret) && arg3) {
10342             abi_long ret2 = put_user_s64(off, arg3);
10343             if (is_error(ret2)) {
10344                 ret = ret2;
10345             }
10346         }
10347         return ret;
10348     }
10349 #endif
10350 #endif
10351 #ifdef TARGET_NR_vfork
10352     case TARGET_NR_vfork:
10353         return get_errno(do_fork(cpu_env,
10354                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10355                          0, 0, 0, 0));
10356 #endif
10357 #ifdef TARGET_NR_ugetrlimit
10358     case TARGET_NR_ugetrlimit:
10359     {
10360         struct rlimit rlim;
10361         int resource = target_to_host_resource(arg1);
10362         ret = get_errno(getrlimit(resource, &rlim));
10363         if (!is_error(ret)) {
10364             struct target_rlimit *target_rlim;
10365             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10366                 return -TARGET_EFAULT;
10367             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10368             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10369             unlock_user_struct(target_rlim, arg2, 1);
10370         }
10371         return ret;
10372     }
10373 #endif
10374 #ifdef TARGET_NR_truncate64
10375     case TARGET_NR_truncate64:
10376         if (!(p = lock_user_string(arg1)))
10377             return -TARGET_EFAULT;
10378         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10379         unlock_user(p, arg1, 0);
10380         return ret;
10381 #endif
10382 #ifdef TARGET_NR_ftruncate64
10383     case TARGET_NR_ftruncate64:
10384         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10385 #endif
10386 #ifdef TARGET_NR_stat64
10387     case TARGET_NR_stat64:
10388         if (!(p = lock_user_string(arg1))) {
10389             return -TARGET_EFAULT;
10390         }
10391         ret = get_errno(stat(path(p), &st));
10392         unlock_user(p, arg1, 0);
10393         if (!is_error(ret))
10394             ret = host_to_target_stat64(cpu_env, arg2, &st);
10395         return ret;
10396 #endif
10397 #ifdef TARGET_NR_lstat64
10398     case TARGET_NR_lstat64:
10399         if (!(p = lock_user_string(arg1))) {
10400             return -TARGET_EFAULT;
10401         }
10402         ret = get_errno(lstat(path(p), &st));
10403         unlock_user(p, arg1, 0);
10404         if (!is_error(ret))
10405             ret = host_to_target_stat64(cpu_env, arg2, &st);
10406         return ret;
10407 #endif
10408 #ifdef TARGET_NR_fstat64
10409     case TARGET_NR_fstat64:
10410         ret = get_errno(fstat(arg1, &st));
10411         if (!is_error(ret))
10412             ret = host_to_target_stat64(cpu_env, arg2, &st);
10413         return ret;
10414 #endif
10415 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10416 #ifdef TARGET_NR_fstatat64
10417     case TARGET_NR_fstatat64:
10418 #endif
10419 #ifdef TARGET_NR_newfstatat
10420     case TARGET_NR_newfstatat:
10421 #endif
10422         if (!(p = lock_user_string(arg2))) {
10423             return -TARGET_EFAULT;
10424         }
10425         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10426         unlock_user(p, arg2, 0);
10427         if (!is_error(ret))
10428             ret = host_to_target_stat64(cpu_env, arg3, &st);
10429         return ret;
10430 #endif
10431 #if defined(TARGET_NR_statx)
10432     case TARGET_NR_statx:
10433         {
10434             struct target_statx *target_stx;
10435             int dirfd = arg1;
10436             int flags = arg3;
10437 
10438             p = lock_user_string(arg2);
10439             if (p == NULL) {
10440                 return -TARGET_EFAULT;
10441             }
10442 #if defined(__NR_statx)
10443             {
10444                 /*
10445                  * It is assumed that struct statx is architecture independent.
10446                  */
10447                 struct target_statx host_stx;
10448                 int mask = arg4;
10449 
10450                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10451                 if (!is_error(ret)) {
10452                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10453                         unlock_user(p, arg2, 0);
10454                         return -TARGET_EFAULT;
10455                     }
10456                 }
10457 
10458                 if (ret != -TARGET_ENOSYS) {
10459                     unlock_user(p, arg2, 0);
10460                     return ret;
10461                 }
10462             }
10463 #endif
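            /* No usable host statx() (or it returned ENOSYS): fall back to
             * fstatat() and fill in the statx fields that struct stat can
             * provide. */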
10464             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10465             unlock_user(p, arg2, 0);
10466 
10467             if (!is_error(ret)) {
10468                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10469                     return -TARGET_EFAULT;
10470                 }
10471                 memset(target_stx, 0, sizeof(*target_stx));
10472                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10473                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10474                 __put_user(st.st_ino, &target_stx->stx_ino);
10475                 __put_user(st.st_mode, &target_stx->stx_mode);
10476                 __put_user(st.st_uid, &target_stx->stx_uid);
10477                 __put_user(st.st_gid, &target_stx->stx_gid);
10478                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10479                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10480                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10481                 __put_user(st.st_size, &target_stx->stx_size);
10482                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10483                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10484                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10485                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10486                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10487                 unlock_user_struct(target_stx, arg5, 1);
10488             }
10489         }
10490         return ret;
10491 #endif
10492 #ifdef TARGET_NR_lchown
10493     case TARGET_NR_lchown:
10494         if (!(p = lock_user_string(arg1)))
10495             return -TARGET_EFAULT;
10496         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10497         unlock_user(p, arg1, 0);
10498         return ret;
10499 #endif
10500 #ifdef TARGET_NR_getuid
10501     case TARGET_NR_getuid:
10502         return get_errno(high2lowuid(getuid()));
10503 #endif
10504 #ifdef TARGET_NR_getgid
10505     case TARGET_NR_getgid:
10506         return get_errno(high2lowgid(getgid()));
10507 #endif
10508 #ifdef TARGET_NR_geteuid
10509     case TARGET_NR_geteuid:
10510         return get_errno(high2lowuid(geteuid()));
10511 #endif
10512 #ifdef TARGET_NR_getegid
10513     case TARGET_NR_getegid:
10514         return get_errno(high2lowgid(getegid()));
10515 #endif
10516     case TARGET_NR_setreuid:
10517         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10518     case TARGET_NR_setregid:
10519         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10520     case TARGET_NR_getgroups:
10521         {
10522             int gidsetsize = arg1;
10523             target_id *target_grouplist;
10524             gid_t *grouplist;
10525             int i;
10526 
10527             grouplist = alloca(gidsetsize * sizeof(gid_t));
10528             ret = get_errno(getgroups(gidsetsize, grouplist));
10529             if (gidsetsize == 0)
10530                 return ret;
10531             if (!is_error(ret)) {
10532                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10533                 if (!target_grouplist)
10534                     return -TARGET_EFAULT;
10535                 for(i = 0;i < ret; i++)
10536                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10537                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10538             }
10539         }
10540         return ret;
10541     case TARGET_NR_setgroups:
10542         {
10543             int gidsetsize = arg1;
10544             target_id *target_grouplist;
10545             gid_t *grouplist = NULL;
10546             int i;
10547             if (gidsetsize) {
10548                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10549                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10550                 if (!target_grouplist) {
10551                     return -TARGET_EFAULT;
10552                 }
10553                 for (i = 0; i < gidsetsize; i++) {
10554                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10555                 }
10556                 unlock_user(target_grouplist, arg2, 0);
10557             }
10558             return get_errno(setgroups(gidsetsize, grouplist));
10559         }
10560     case TARGET_NR_fchown:
10561         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10562 #if defined(TARGET_NR_fchownat)
10563     case TARGET_NR_fchownat:
10564         if (!(p = lock_user_string(arg2)))
10565             return -TARGET_EFAULT;
10566         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10567                                  low2highgid(arg4), arg5));
10568         unlock_user(p, arg2, 0);
10569         return ret;
10570 #endif
10571 #ifdef TARGET_NR_setresuid
10572     case TARGET_NR_setresuid:
10573         return get_errno(sys_setresuid(low2highuid(arg1),
10574                                        low2highuid(arg2),
10575                                        low2highuid(arg3)));
10576 #endif
10577 #ifdef TARGET_NR_getresuid
10578     case TARGET_NR_getresuid:
10579         {
10580             uid_t ruid, euid, suid;
10581             ret = get_errno(getresuid(&ruid, &euid, &suid));
10582             if (!is_error(ret)) {
10583                 if (put_user_id(high2lowuid(ruid), arg1)
10584                     || put_user_id(high2lowuid(euid), arg2)
10585                     || put_user_id(high2lowuid(suid), arg3))
10586                     return -TARGET_EFAULT;
10587             }
10588         }
10589         return ret;
10590 #endif
10591 #ifdef TARGET_NR_setresgid
10592     case TARGET_NR_setresgid:
10593         return get_errno(sys_setresgid(low2highgid(arg1),
10594                                        low2highgid(arg2),
10595                                        low2highgid(arg3)));
10596 #endif
10597 #ifdef TARGET_NR_getresgid
10598     case TARGET_NR_getresgid:
10599         {
10600             gid_t rgid, egid, sgid;
10601             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10602             if (!is_error(ret)) {
10603                 if (put_user_id(high2lowgid(rgid), arg1)
10604                     || put_user_id(high2lowgid(egid), arg2)
10605                     || put_user_id(high2lowgid(sgid), arg3))
10606                     return -TARGET_EFAULT;
10607             }
10608         }
10609         return ret;
10610 #endif
10611 #ifdef TARGET_NR_chown
10612     case TARGET_NR_chown:
10613         if (!(p = lock_user_string(arg1)))
10614             return -TARGET_EFAULT;
10615         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10616         unlock_user(p, arg1, 0);
10617         return ret;
10618 #endif
10619     case TARGET_NR_setuid:
10620         return get_errno(sys_setuid(low2highuid(arg1)));
10621     case TARGET_NR_setgid:
10622         return get_errno(sys_setgid(low2highgid(arg1)));
10623     case TARGET_NR_setfsuid:
10624         return get_errno(setfsuid(arg1));
10625     case TARGET_NR_setfsgid:
10626         return get_errno(setfsgid(arg1));
10627 
10628 #ifdef TARGET_NR_lchown32
10629     case TARGET_NR_lchown32:
10630         if (!(p = lock_user_string(arg1)))
10631             return -TARGET_EFAULT;
10632         ret = get_errno(lchown(p, arg2, arg3));
10633         unlock_user(p, arg1, 0);
10634         return ret;
10635 #endif
10636 #ifdef TARGET_NR_getuid32
10637     case TARGET_NR_getuid32:
10638         return get_errno(getuid());
10639 #endif
10640 
10641 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10642    /* Alpha specific */
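    /*
     * getxuid and getxgid return the real id as the syscall result and
     * additionally place the effective id in register a4 (ir[IR_A4]),
     * so both values reach the guest from a single syscall.
     */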
10643     case TARGET_NR_getxuid:
10644         {
10645             uid_t euid;
10646             euid = geteuid();
10647             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10648         }
10649         return get_errno(getuid());
10650 #endif
10651 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10652    /* Alpha specific */
10653     case TARGET_NR_getxgid:
10654         {
10655             gid_t egid;
10656             egid = getegid();
10657             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10658         }
10659         return get_errno(getgid());
10660 #endif
10661 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10662     /* Alpha specific */
10663     case TARGET_NR_osf_getsysinfo:
10664         ret = -TARGET_EOPNOTSUPP;
10665         switch (arg1) {
10666           case TARGET_GSI_IEEE_FP_CONTROL:
10667             {
10668                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10669                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10670 
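                /*
                 * Refresh the status bits of the software completion
                 * control word from the hardware FPCR before reporting
                 * it: shifting the FPCR right by 35 lines its accrued
                 * exception bits up with the SWCR status field.
                 */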
10671                 swcr &= ~SWCR_STATUS_MASK;
10672                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10673 
10674                 if (put_user_u64(swcr, arg2))
10675                     return -TARGET_EFAULT;
10676                 ret = 0;
10677             }
10678             break;
10679 
10680           /* case GSI_IEEE_STATE_AT_SIGNAL:
10681              -- Not implemented in linux kernel.
10682              case GSI_UACPROC:
10683              -- Retrieves current unaligned access state; not much used.
10684              case GSI_PROC_TYPE:
10685              -- Retrieves implver information; surely not used.
10686              case GSI_GET_HWRPB:
10687              -- Grabs a copy of the HWRPB; surely not used.
10688           */
10689         }
10690         return ret;
10691 #endif
10692 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10693     /* Alpha specific */
10694     case TARGET_NR_osf_setsysinfo:
10695         ret = -TARGET_EOPNOTSUPP;
10696         switch (arg1) {
10697           case TARGET_SSI_IEEE_FP_CONTROL:
10698             {
10699                 uint64_t swcr, fpcr;
10700 
10701                 if (get_user_u64(swcr, arg2)) {
10702                     return -TARGET_EFAULT;
10703                 }
10704 
10705                 /*
10706                  * The kernel calls swcr_update_status to update the
10707                  * status bits from the fpcr at every point that it
10708                  * could be queried.  Therefore, we store the status
10709                  * bits only in FPCR.
10710                  */
10711                 ((CPUAlphaState *)cpu_env)->swcr
10712                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10713 
10714                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10715                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10716                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10717                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10718                 ret = 0;
10719             }
10720             break;
10721 
10722           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10723             {
10724                 uint64_t exc, fpcr, fex;
10725 
10726                 if (get_user_u64(exc, arg2)) {
10727                     return -TARGET_EFAULT;
10728                 }
10729                 exc &= SWCR_STATUS_MASK;
10730                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10731 
10732                 /* Old exceptions are not signaled.  */
10733                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10734                 fex = exc & ~fex;
10735                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10736                 fex &= ((CPUArchState *)cpu_env)->swcr;
10737 
10738                 /* Update the hardware fpcr.  */
10739                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10740                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10741 
10742                 if (fex) {
10743                     int si_code = TARGET_FPE_FLTUNK;
10744                     target_siginfo_t info;
10745 
10746                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10747                         si_code = TARGET_FPE_FLTUND;
10748                     }
10749                     if (fex & SWCR_TRAP_ENABLE_INE) {
10750                         si_code = TARGET_FPE_FLTRES;
10751                     }
10752                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10753                         si_code = TARGET_FPE_FLTUND;
10754                     }
10755                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10756                         si_code = TARGET_FPE_FLTOVF;
10757                     }
10758                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10759                         si_code = TARGET_FPE_FLTDIV;
10760                     }
10761                     if (fex & SWCR_TRAP_ENABLE_INV) {
10762                         si_code = TARGET_FPE_FLTINV;
10763                     }
10764 
10765                     info.si_signo = SIGFPE;
10766                     info.si_errno = 0;
10767                     info.si_code = si_code;
10768                     info._sifields._sigfault._addr
10769                         = ((CPUArchState *)cpu_env)->pc;
10770                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10771                                  QEMU_SI_FAULT, &info);
10772                 }
10773                 ret = 0;
10774             }
10775             break;
10776 
10777           /* case SSI_NVPAIRS:
10778              -- Used with SSIN_UACPROC to enable unaligned accesses.
10779              case SSI_IEEE_STATE_AT_SIGNAL:
10780              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10781              -- Not implemented in linux kernel
10782           */
10783         }
10784         return ret;
10785 #endif
10786 #ifdef TARGET_NR_osf_sigprocmask
10787     /* Alpha specific.  */
10788     case TARGET_NR_osf_sigprocmask:
10789         {
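            /*
             * Unlike the rt_sigprocmask interface, this call hands the
             * previous signal mask back as the syscall return value
             * rather than writing it to guest memory.
             */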
10790             abi_ulong mask;
10791             int how;
10792             sigset_t set, oldset;
10793 
10794             switch (arg1) {
10795             case TARGET_SIG_BLOCK:
10796                 how = SIG_BLOCK;
10797                 break;
10798             case TARGET_SIG_UNBLOCK:
10799                 how = SIG_UNBLOCK;
10800                 break;
10801             case TARGET_SIG_SETMASK:
10802                 how = SIG_SETMASK;
10803                 break;
10804             default:
10805                 return -TARGET_EINVAL;
10806             }
10807             mask = arg2;
10808             target_to_host_old_sigset(&set, &mask);
10809             ret = do_sigprocmask(how, &set, &oldset);
10810             if (!ret) {
10811                 host_to_target_old_sigset(&mask, &oldset);
10812                 ret = mask;
10813             }
10814         }
10815         return ret;
10816 #endif
10817 
10818 #ifdef TARGET_NR_getgid32
10819     case TARGET_NR_getgid32:
10820         return get_errno(getgid());
10821 #endif
10822 #ifdef TARGET_NR_geteuid32
10823     case TARGET_NR_geteuid32:
10824         return get_errno(geteuid());
10825 #endif
10826 #ifdef TARGET_NR_getegid32
10827     case TARGET_NR_getegid32:
10828         return get_errno(getegid());
10829 #endif
10830 #ifdef TARGET_NR_setreuid32
10831     case TARGET_NR_setreuid32:
10832         return get_errno(setreuid(arg1, arg2));
10833 #endif
10834 #ifdef TARGET_NR_setregid32
10835     case TARGET_NR_setregid32:
10836         return get_errno(setregid(arg1, arg2));
10837 #endif
10838 #ifdef TARGET_NR_getgroups32
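    /*
     * The *32 group-list calls use full 32-bit ids on both sides, so
     * entries are simply byte-swapped with tswap32() instead of going
     * through the high2lowgid()/low2highgid() narrowing used by the
     * legacy variants above.
     */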
10839     case TARGET_NR_getgroups32:
10840         {
10841             int gidsetsize = arg1;
10842             uint32_t *target_grouplist;
10843             gid_t *grouplist;
10844             int i;
10845 
10846             grouplist = alloca(gidsetsize * sizeof(gid_t));
10847             ret = get_errno(getgroups(gidsetsize, grouplist));
10848             if (gidsetsize == 0)
10849                 return ret;
10850             if (!is_error(ret)) {
10851                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10852                 if (!target_grouplist) {
10853                     return -TARGET_EFAULT;
10854                 }
10855                 for (i = 0; i < ret; i++)
10856                     target_grouplist[i] = tswap32(grouplist[i]);
10857                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10858             }
10859         }
10860         return ret;
10861 #endif
10862 #ifdef TARGET_NR_setgroups32
10863     case TARGET_NR_setgroups32:
10864         {
10865             int gidsetsize = arg1;
10866             uint32_t *target_grouplist;
10867             gid_t *grouplist;
10868             int i;
10869 
10870             grouplist = alloca(gidsetsize * sizeof(gid_t));
10871             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10872             if (!target_grouplist) {
10873                 return -TARGET_EFAULT;
10874             }
10875             for (i = 0; i < gidsetsize; i++)
10876                 grouplist[i] = tswap32(target_grouplist[i]);
10877             unlock_user(target_grouplist, arg2, 0);
10878             return get_errno(setgroups(gidsetsize, grouplist));
10879         }
10880 #endif
10881 #ifdef TARGET_NR_fchown32
10882     case TARGET_NR_fchown32:
10883         return get_errno(fchown(arg1, arg2, arg3));
10884 #endif
10885 #ifdef TARGET_NR_setresuid32
10886     case TARGET_NR_setresuid32:
10887         return get_errno(sys_setresuid(arg1, arg2, arg3));
10888 #endif
10889 #ifdef TARGET_NR_getresuid32
10890     case TARGET_NR_getresuid32:
10891         {
10892             uid_t ruid, euid, suid;
10893             ret = get_errno(getresuid(&ruid, &euid, &suid));
10894             if (!is_error(ret)) {
10895                 if (put_user_u32(ruid, arg1)
10896                     || put_user_u32(euid, arg2)
10897                     || put_user_u32(suid, arg3))
10898                     return -TARGET_EFAULT;
10899             }
10900         }
10901         return ret;
10902 #endif
10903 #ifdef TARGET_NR_setresgid32
10904     case TARGET_NR_setresgid32:
10905         return get_errno(sys_setresgid(arg1, arg2, arg3));
10906 #endif
10907 #ifdef TARGET_NR_getresgid32
10908     case TARGET_NR_getresgid32:
10909         {
10910             gid_t rgid, egid, sgid;
10911             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10912             if (!is_error(ret)) {
10913                 if (put_user_u32(rgid, arg1)
10914                     || put_user_u32(egid, arg2)
10915                     || put_user_u32(sgid, arg3))
10916                     return -TARGET_EFAULT;
10917             }
10918         }
10919         return ret;
10920 #endif
10921 #ifdef TARGET_NR_chown32
10922     case TARGET_NR_chown32:
10923         if (!(p = lock_user_string(arg1)))
10924             return -TARGET_EFAULT;
10925         ret = get_errno(chown(p, arg2, arg3));
10926         unlock_user(p, arg1, 0);
10927         return ret;
10928 #endif
10929 #ifdef TARGET_NR_setuid32
10930     case TARGET_NR_setuid32:
10931         return get_errno(sys_setuid(arg1));
10932 #endif
10933 #ifdef TARGET_NR_setgid32
10934     case TARGET_NR_setgid32:
10935         return get_errno(sys_setgid(arg1));
10936 #endif
10937 #ifdef TARGET_NR_setfsuid32
10938     case TARGET_NR_setfsuid32:
10939         return get_errno(setfsuid(arg1));
10940 #endif
10941 #ifdef TARGET_NR_setfsgid32
10942     case TARGET_NR_setfsgid32:
10943         return get_errno(setfsgid(arg1));
10944 #endif
10945 #ifdef TARGET_NR_mincore
10946     case TARGET_NR_mincore:
10947         {
10948             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10949             if (!a) {
10950                 return -TARGET_ENOMEM;
10951             }
10952             p = lock_user_string(arg3);
10953             if (!p) {
10954                 ret = -TARGET_EFAULT;
10955             } else {
10956                 ret = get_errno(mincore(a, arg2, p));
10957                 unlock_user(p, arg3, ret);
10958             }
10959             unlock_user(a, arg1, 0);
10960         }
10961         return ret;
10962 #endif
10963 #ifdef TARGET_NR_arm_fadvise64_64
10964     case TARGET_NR_arm_fadvise64_64:
10965         /* arm_fadvise64_64 looks like fadvise64_64 but
10966          * with different argument order: fd, advice, offset, len
10967          * rather than the usual fd, offset, len, advice.
10968          * Note that offset and len are both 64-bit so appear as
10969          * pairs of 32-bit registers.
10970          */
10971         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10972                             target_offset64(arg5, arg6), arg2);
10973         return -host_to_target_errno(ret);
10974 #endif
10975 
10976 #if TARGET_ABI_BITS == 32
10977 
10978 #ifdef TARGET_NR_fadvise64_64
10979     case TARGET_NR_fadvise64_64:
10980 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10981         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10982         ret = arg2;
10983         arg2 = arg3;
10984         arg3 = arg4;
10985         arg4 = arg5;
10986         arg5 = arg6;
10987         arg6 = ret;
10988 #else
10989         /* 6 args: fd, offset (high, low), len (high, low), advice */
10990         if (regpairs_aligned(cpu_env, num)) {
10991             /* offset is in (3,4), len in (5,6) and advice in 7 */
10992             arg2 = arg3;
10993             arg3 = arg4;
10994             arg4 = arg5;
10995             arg5 = arg6;
10996             arg6 = arg7;
10997         }
10998 #endif
10999         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11000                             target_offset64(arg4, arg5), arg6);
11001         return -host_to_target_errno(ret);
11002 #endif
11003 
11004 #ifdef TARGET_NR_fadvise64
11005     case TARGET_NR_fadvise64:
11006         /* 5 args: fd, offset (high, low), len, advice */
11007         if (regpairs_aligned(cpu_env, num)) {
11008             /* offset is in (3,4), len in 5 and advice in 6 */
11009             arg2 = arg3;
11010             arg3 = arg4;
11011             arg4 = arg5;
11012             arg5 = arg6;
11013         }
11014         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11015         return -host_to_target_errno(ret);
11016 #endif
11017 
11018 #else /* not a 32-bit ABI */
11019 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11020 #ifdef TARGET_NR_fadvise64_64
11021     case TARGET_NR_fadvise64_64:
11022 #endif
11023 #ifdef TARGET_NR_fadvise64
11024     case TARGET_NR_fadvise64:
11025 #endif
11026 #ifdef TARGET_S390X
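        /*
         * s390 kernels number POSIX_FADV_DONTNEED/NOREUSE differently
         * (6 and 7), so map those onto the host constants and turn the
         * guest's 4 and 5 into values the host will reject as invalid.
         */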
11027         switch (arg4) {
11028         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11029         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11030         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11031         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11032         default: break;
11033         }
11034 #endif
11035         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11036 #endif
11037 #endif /* end of 64-bit ABI fadvise handling */
11038 
11039 #ifdef TARGET_NR_madvise
11040     case TARGET_NR_madvise:
11041         /* A straight passthrough may not be safe because qemu sometimes
11042            turns private file-backed mappings into anonymous mappings.
11043            This will break MADV_DONTNEED.
11044            This is a hint, so ignoring and returning success is ok.  */
11045         return 0;
11046 #endif
11047 #if TARGET_ABI_BITS == 32
11048     case TARGET_NR_fcntl64:
11049     {
11050         int cmd;
11051         struct flock64 fl;
11052         from_flock64_fn *copyfrom = copy_from_user_flock64;
11053         to_flock64_fn *copyto = copy_to_user_flock64;
11054 
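        /*
         * Old-ABI Arm guests lay out struct flock64 differently from
         * EABI ones (the 64-bit fields are not padded to 8-byte
         * alignment), so they need their own copy helpers.
         */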
11055 #ifdef TARGET_ARM
11056         if (!((CPUARMState *)cpu_env)->eabi) {
11057             copyfrom = copy_from_user_oabi_flock64;
11058             copyto = copy_to_user_oabi_flock64;
11059         }
11060 #endif
11061 
11062         cmd = target_to_host_fcntl_cmd(arg2);
11063         if (cmd == -TARGET_EINVAL) {
11064             return cmd;
11065         }
11066 
11067         switch (arg2) {
11068         case TARGET_F_GETLK64:
11069             ret = copyfrom(&fl, arg3);
11070             if (ret) {
11071                 break;
11072             }
11073             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11074             if (ret == 0) {
11075                 ret = copyto(arg3, &fl);
11076             }
11077             break;
11078 
11079         case TARGET_F_SETLK64:
11080         case TARGET_F_SETLKW64:
11081             ret = copyfrom(&fl, arg3);
11082             if (ret) {
11083                 break;
11084             }
11085             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11086             break;
11087         default:
11088             ret = do_fcntl(arg1, arg2, arg3);
11089             break;
11090         }
11091         return ret;
11092     }
11093 #endif
11094 #ifdef TARGET_NR_cacheflush
11095     case TARGET_NR_cacheflush:
11096         /* self-modifying code is handled automatically, so nothing needed */
11097         return 0;
11098 #endif
11099 #ifdef TARGET_NR_getpagesize
11100     case TARGET_NR_getpagesize:
11101         return TARGET_PAGE_SIZE;
11102 #endif
11103     case TARGET_NR_gettid:
11104         return get_errno(sys_gettid());
11105 #ifdef TARGET_NR_readahead
11106     case TARGET_NR_readahead:
11107 #if TARGET_ABI_BITS == 32
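        /*
         * On 32-bit ABIs the 64-bit offset arrives as a register pair;
         * targets that align such pairs insert an unused padding
         * register first, which regpairs_aligned() detects so the
         * arguments can be shifted down.
         */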
11108         if (regpairs_aligned(cpu_env, num)) {
11109             arg2 = arg3;
11110             arg3 = arg4;
11111             arg4 = arg5;
11112         }
11113         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11114 #else
11115         ret = get_errno(readahead(arg1, arg2, arg3));
11116 #endif
11117         return ret;
11118 #endif
11119 #ifdef CONFIG_ATTR
11120 #ifdef TARGET_NR_setxattr
11121     case TARGET_NR_listxattr:
11122     case TARGET_NR_llistxattr:
11123     {
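        /*
         * A zero arg2 is passed through as a NULL buffer, which lets the
         * guest query the required list size in the usual xattr way.
         */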
11124         void *p, *b = 0;
11125         if (arg2) {
11126             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11127             if (!b) {
11128                 return -TARGET_EFAULT;
11129             }
11130         }
11131         p = lock_user_string(arg1);
11132         if (p) {
11133             if (num == TARGET_NR_listxattr) {
11134                 ret = get_errno(listxattr(p, b, arg3));
11135             } else {
11136                 ret = get_errno(llistxattr(p, b, arg3));
11137             }
11138         } else {
11139             ret = -TARGET_EFAULT;
11140         }
11141         unlock_user(p, arg1, 0);
11142         unlock_user(b, arg2, arg3);
11143         return ret;
11144     }
11145     case TARGET_NR_flistxattr:
11146     {
11147         void *b = 0;
11148         if (arg2) {
11149             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11150             if (!b) {
11151                 return -TARGET_EFAULT;
11152             }
11153         }
11154         ret = get_errno(flistxattr(arg1, b, arg3));
11155         unlock_user(b, arg2, arg3);
11156         return ret;
11157     }
11158     case TARGET_NR_setxattr:
11159     case TARGET_NR_lsetxattr:
11160         {
11161             void *p, *n, *v = 0;
11162             if (arg3) {
11163                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11164                 if (!v) {
11165                     return -TARGET_EFAULT;
11166                 }
11167             }
11168             p = lock_user_string(arg1);
11169             n = lock_user_string(arg2);
11170             if (p && n) {
11171                 if (num == TARGET_NR_setxattr) {
11172                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11173                 } else {
11174                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11175                 }
11176             } else {
11177                 ret = -TARGET_EFAULT;
11178             }
11179             unlock_user(p, arg1, 0);
11180             unlock_user(n, arg2, 0);
11181             unlock_user(v, arg3, 0);
11182         }
11183         return ret;
11184     case TARGET_NR_fsetxattr:
11185         {
11186             void *n, *v = 0;
11187             if (arg3) {
11188                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11189                 if (!v) {
11190                     return -TARGET_EFAULT;
11191                 }
11192             }
11193             n = lock_user_string(arg2);
11194             if (n) {
11195                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11196             } else {
11197                 ret = -TARGET_EFAULT;
11198             }
11199             unlock_user(n, arg2, 0);
11200             unlock_user(v, arg3, 0);
11201         }
11202         return ret;
11203     case TARGET_NR_getxattr:
11204     case TARGET_NR_lgetxattr:
11205         {
11206             void *p, *n, *v = 0;
11207             if (arg3) {
11208                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11209                 if (!v) {
11210                     return -TARGET_EFAULT;
11211                 }
11212             }
11213             p = lock_user_string(arg1);
11214             n = lock_user_string(arg2);
11215             if (p && n) {
11216                 if (num == TARGET_NR_getxattr) {
11217                     ret = get_errno(getxattr(p, n, v, arg4));
11218                 } else {
11219                     ret = get_errno(lgetxattr(p, n, v, arg4));
11220                 }
11221             } else {
11222                 ret = -TARGET_EFAULT;
11223             }
11224             unlock_user(p, arg1, 0);
11225             unlock_user(n, arg2, 0);
11226             unlock_user(v, arg3, arg4);
11227         }
11228         return ret;
11229     case TARGET_NR_fgetxattr:
11230         {
11231             void *n, *v = 0;
11232             if (arg3) {
11233                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11234                 if (!v) {
11235                     return -TARGET_EFAULT;
11236                 }
11237             }
11238             n = lock_user_string(arg2);
11239             if (n) {
11240                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11241             } else {
11242                 ret = -TARGET_EFAULT;
11243             }
11244             unlock_user(n, arg2, 0);
11245             unlock_user(v, arg3, arg4);
11246         }
11247         return ret;
11248     case TARGET_NR_removexattr:
11249     case TARGET_NR_lremovexattr:
11250         {
11251             void *p, *n;
11252             p = lock_user_string(arg1);
11253             n = lock_user_string(arg2);
11254             if (p && n) {
11255                 if (num == TARGET_NR_removexattr) {
11256                     ret = get_errno(removexattr(p, n));
11257                 } else {
11258                     ret = get_errno(lremovexattr(p, n));
11259                 }
11260             } else {
11261                 ret = -TARGET_EFAULT;
11262             }
11263             unlock_user(p, arg1, 0);
11264             unlock_user(n, arg2, 0);
11265         }
11266         return ret;
11267     case TARGET_NR_fremovexattr:
11268         {
11269             void *n;
11270             n = lock_user_string(arg2);
11271             if (n) {
11272                 ret = get_errno(fremovexattr(arg1, n));
11273             } else {
11274                 ret = -TARGET_EFAULT;
11275             }
11276             unlock_user(n, arg2, 0);
11277         }
11278         return ret;
11279 #endif
11280 #endif /* CONFIG_ATTR */
11281 #ifdef TARGET_NR_set_thread_area
11282     case TARGET_NR_set_thread_area:
11283 #if defined(TARGET_MIPS)
11284       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11285       return 0;
11286 #elif defined(TARGET_CRIS)
11287       if (arg1 & 0xff)
11288           ret = -TARGET_EINVAL;
11289       else {
11290           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11291           ret = 0;
11292       }
11293       return ret;
11294 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11295       return do_set_thread_area(cpu_env, arg1);
11296 #elif defined(TARGET_M68K)
11297       {
11298           TaskState *ts = cpu->opaque;
11299           ts->tp_value = arg1;
11300           return 0;
11301       }
11302 #else
11303       return -TARGET_ENOSYS;
11304 #endif
11305 #endif
11306 #ifdef TARGET_NR_get_thread_area
11307     case TARGET_NR_get_thread_area:
11308 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11309         return do_get_thread_area(cpu_env, arg1);
11310 #elif defined(TARGET_M68K)
11311         {
11312             TaskState *ts = cpu->opaque;
11313             return ts->tp_value;
11314         }
11315 #else
11316         return -TARGET_ENOSYS;
11317 #endif
11318 #endif
11319 #ifdef TARGET_NR_getdomainname
11320     case TARGET_NR_getdomainname:
11321         return -TARGET_ENOSYS;
11322 #endif
11323 
11324 #ifdef TARGET_NR_clock_settime
11325     case TARGET_NR_clock_settime:
11326     {
11327         struct timespec ts;
11328 
11329         ret = target_to_host_timespec(&ts, arg2);
11330         if (!is_error(ret)) {
11331             ret = get_errno(clock_settime(arg1, &ts));
11332         }
11333         return ret;
11334     }
11335 #endif
11336 #ifdef TARGET_NR_clock_gettime
11337     case TARGET_NR_clock_gettime:
11338     {
11339         struct timespec ts;
11340         ret = get_errno(clock_gettime(arg1, &ts));
11341         if (!is_error(ret)) {
11342             ret = host_to_target_timespec(arg2, &ts);
11343         }
11344         return ret;
11345     }
11346 #endif
11347 #ifdef TARGET_NR_clock_getres
11348     case TARGET_NR_clock_getres:
11349     {
11350         struct timespec ts;
11351         ret = get_errno(clock_getres(arg1, &ts));
11352         if (!is_error(ret)) {
11353             host_to_target_timespec(arg2, &ts);
11354         }
11355         return ret;
11356     }
11357 #endif
11358 #ifdef TARGET_NR_clock_nanosleep
11359     case TARGET_NR_clock_nanosleep:
11360     {
11361         struct timespec ts;
11362         target_to_host_timespec(&ts, arg3);
11363         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11364                                              &ts, arg4 ? &ts : NULL));
11365         if (arg4)
11366             host_to_target_timespec(arg4, &ts);
11367 
11368 #if defined(TARGET_PPC)
11369         /* clock_nanosleep is odd in that it returns positive errno values.
11370          * On PPC, CR0 bit 3 should be set in such a situation. */
11371         if (ret && ret != -TARGET_ERESTARTSYS) {
11372             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11373         }
11374 #endif
11375         return ret;
11376     }
11377 #endif
11378 
11379 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
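    /*
     * The kernel remembers this address and clears/wakes the futex at
     * it when the thread exits; because guest memory is directly
     * addressable here, the translated g2h() pointer can be handed
     * straight to the host kernel.
     */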
11380     case TARGET_NR_set_tid_address:
11381         return get_errno(set_tid_address((int *)g2h(arg1)));
11382 #endif
11383 
11384     case TARGET_NR_tkill:
11385         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11386 
11387     case TARGET_NR_tgkill:
11388         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11389                          target_to_host_signal(arg3)));
11390 
11391 #ifdef TARGET_NR_set_robust_list
11392     case TARGET_NR_set_robust_list:
11393     case TARGET_NR_get_robust_list:
11394         /* The ABI for supporting robust futexes has userspace pass
11395          * the kernel a pointer to a linked list which is updated by
11396          * userspace after the syscall; the list is walked by the kernel
11397          * when the thread exits. Since the linked list in QEMU guest
11398          * memory isn't a valid linked list for the host and we have
11399          * no way to reliably intercept the thread-death event, we can't
11400          * support these. Silently return ENOSYS so that guest userspace
11401          * falls back to a non-robust futex implementation (which should
11402          * be OK except in the corner case of the guest crashing while
11403          * holding a mutex that is shared with another process via
11404          * shared memory).
11405          */
11406         return -TARGET_ENOSYS;
11407 #endif
11408 
11409 #if defined(TARGET_NR_utimensat)
11410     case TARGET_NR_utimensat:
11411         {
11412             struct timespec *tsp, ts[2];
11413             if (!arg3) {
11414                 tsp = NULL;
11415             } else {
11416                 target_to_host_timespec(ts, arg3);
11417                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11418                 tsp = ts;
11419             }
11420             if (!arg2)
11421                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11422             else {
11423                 if (!(p = lock_user_string(arg2))) {
11424                     return -TARGET_EFAULT;
11425                 }
11426                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11427                 unlock_user(p, arg2, 0);
11428             }
11429         }
11430         return ret;
11431 #endif
11432     case TARGET_NR_futex:
11433         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11434 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11435     case TARGET_NR_inotify_init:
11436         ret = get_errno(sys_inotify_init());
11437         if (ret >= 0) {
11438             fd_trans_register(ret, &target_inotify_trans);
11439         }
11440         return ret;
11441 #endif
11442 #ifdef CONFIG_INOTIFY1
11443 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11444     case TARGET_NR_inotify_init1:
11445         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11446                                           fcntl_flags_tbl)));
11447         if (ret >= 0) {
11448             fd_trans_register(ret, &target_inotify_trans);
11449         }
11450         return ret;
11451 #endif
11452 #endif
11453 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11454     case TARGET_NR_inotify_add_watch:
11455         p = lock_user_string(arg2);
11456         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11457         unlock_user(p, arg2, 0);
11458         return ret;
11459 #endif
11460 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11461     case TARGET_NR_inotify_rm_watch:
11462         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11463 #endif
11464 
11465 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11466     case TARGET_NR_mq_open:
11467         {
11468             struct mq_attr posix_mq_attr;
11469             struct mq_attr *pposix_mq_attr;
11470             int host_flags;
11471 
11472             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11473             pposix_mq_attr = NULL;
11474             if (arg4) {
11475                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11476                     return -TARGET_EFAULT;
11477                 }
11478                 pposix_mq_attr = &posix_mq_attr;
11479             }
11480             p = lock_user_string(arg1 - 1);
11481             if (!p) {
11482                 return -TARGET_EFAULT;
11483             }
11484             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11485             unlock_user(p, arg1, 0);
11486         }
11487         return ret;
11488 
11489     case TARGET_NR_mq_unlink:
11490         p = lock_user_string(arg1 - 1);
11491         if (!p) {
11492             return -TARGET_EFAULT;
11493         }
11494         ret = get_errno(mq_unlink(p));
11495         unlock_user(p, arg1, 0);
11496         return ret;
11497 
11498     case TARGET_NR_mq_timedsend:
11499         {
11500             struct timespec ts;
11501 
11502             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11503             if (arg5 != 0) {
11504                 target_to_host_timespec(&ts, arg5);
11505                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11506                 host_to_target_timespec(arg5, &ts);
11507             } else {
11508                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11509             }
11510             unlock_user(p, arg2, arg3);
11511         }
11512         return ret;
11513 
11514     case TARGET_NR_mq_timedreceive:
11515         {
11516             struct timespec ts;
11517             unsigned int prio;
11518 
11519             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
11520             if (arg5 != 0) {
11521                 target_to_host_timespec(&ts, arg5);
11522                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11523                                                      &prio, &ts));
11524                 host_to_target_timespec(arg5, &ts);
11525             } else {
11526                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11527                                                      &prio, NULL));
11528             }
11529             unlock_user(p, arg2, arg3);
11530             if (arg4 != 0)
11531                 put_user_u32(prio, arg4);
11532         }
11533         return ret;
11534 
11535     /* Not implemented for now... */
11536 /*     case TARGET_NR_mq_notify: */
11537 /*         break; */
11538 
11539     case TARGET_NR_mq_getsetattr:
11540         {
11541             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11542             ret = 0;
11543             if (arg2 != 0) {
11544                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11545                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11546                                            &posix_mq_attr_out));
11547             } else if (arg3 != 0) {
11548                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11549             }
11550             if (ret == 0 && arg3 != 0) {
11551                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11552             }
11553         }
11554         return ret;
11555 #endif
11556 
11557 #ifdef CONFIG_SPLICE
11558 #ifdef TARGET_NR_tee
11559     case TARGET_NR_tee:
11560         {
11561             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11562         }
11563         return ret;
11564 #endif
11565 #ifdef TARGET_NR_splice
11566     case TARGET_NR_splice:
11567         {
11568             loff_t loff_in, loff_out;
11569             loff_t *ploff_in = NULL, *ploff_out = NULL;
11570             if (arg2) {
11571                 if (get_user_u64(loff_in, arg2)) {
11572                     return -TARGET_EFAULT;
11573                 }
11574                 ploff_in = &loff_in;
11575             }
11576             if (arg4) {
11577                 if (get_user_u64(loff_out, arg4)) {
11578                     return -TARGET_EFAULT;
11579                 }
11580                 ploff_out = &loff_out;
11581             }
11582             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11583             if (arg2) {
11584                 if (put_user_u64(loff_in, arg2)) {
11585                     return -TARGET_EFAULT;
11586                 }
11587             }
11588             if (arg4) {
11589                 if (put_user_u64(loff_out, arg4)) {
11590                     return -TARGET_EFAULT;
11591                 }
11592             }
11593         }
11594         return ret;
11595 #endif
11596 #ifdef TARGET_NR_vmsplice
11597     case TARGET_NR_vmsplice:
11598         {
11599             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11600             if (vec != NULL) {
11601                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11602                 unlock_iovec(vec, arg2, arg3, 0);
11603             } else {
11604                 ret = -host_to_target_errno(errno);
11605             }
11606         }
11607         return ret;
11608 #endif
11609 #endif /* CONFIG_SPLICE */
11610 #ifdef CONFIG_EVENTFD
11611 #if defined(TARGET_NR_eventfd)
11612     case TARGET_NR_eventfd:
11613         ret = get_errno(eventfd(arg1, 0));
11614         if (ret >= 0) {
11615             fd_trans_register(ret, &target_eventfd_trans);
11616         }
11617         return ret;
11618 #endif
11619 #if defined(TARGET_NR_eventfd2)
11620     case TARGET_NR_eventfd2:
11621     {
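        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share their values with
         * O_NONBLOCK/O_CLOEXEC, which can differ between guest and
         * host, so translate those two bits explicitly and pass any
         * remaining flags through unchanged.
         */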
11622         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11623         if (arg2 & TARGET_O_NONBLOCK) {
11624             host_flags |= O_NONBLOCK;
11625         }
11626         if (arg2 & TARGET_O_CLOEXEC) {
11627             host_flags |= O_CLOEXEC;
11628         }
11629         ret = get_errno(eventfd(arg1, host_flags));
11630         if (ret >= 0) {
11631             fd_trans_register(ret, &target_eventfd_trans);
11632         }
11633         return ret;
11634     }
11635 #endif
11636 #endif /* CONFIG_EVENTFD  */
11637 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11638     case TARGET_NR_fallocate:
11639 #if TARGET_ABI_BITS == 32
11640         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11641                                   target_offset64(arg5, arg6)));
11642 #else
11643         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11644 #endif
11645         return ret;
11646 #endif
11647 #if defined(CONFIG_SYNC_FILE_RANGE)
11648 #if defined(TARGET_NR_sync_file_range)
11649     case TARGET_NR_sync_file_range:
11650 #if TARGET_ABI_BITS == 32
11651 #if defined(TARGET_MIPS)
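        /*
         * On 32-bit MIPS the 64-bit offset and nbytes values are passed
         * in aligned register pairs, so the useful arguments start at
         * arg3 and the flags value ends up in arg7.
         */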
11652         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11653                                         target_offset64(arg5, arg6), arg7));
11654 #else
11655         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11656                                         target_offset64(arg4, arg5), arg6));
11657 #endif /* !TARGET_MIPS */
11658 #else
11659         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11660 #endif
11661         return ret;
11662 #endif
11663 #if defined(TARGET_NR_sync_file_range2)
11664     case TARGET_NR_sync_file_range2:
11665         /* This is like sync_file_range but the arguments are reordered */
11666 #if TARGET_ABI_BITS == 32
11667         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11668                                         target_offset64(arg5, arg6), arg2));
11669 #else
11670         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11671 #endif
11672         return ret;
11673 #endif
11674 #endif
11675 #if defined(TARGET_NR_signalfd4)
11676     case TARGET_NR_signalfd4:
11677         return do_signalfd4(arg1, arg2, arg4);
11678 #endif
11679 #if defined(TARGET_NR_signalfd)
11680     case TARGET_NR_signalfd:
11681         return do_signalfd4(arg1, arg2, 0);
11682 #endif
11683 #if defined(CONFIG_EPOLL)
11684 #if defined(TARGET_NR_epoll_create)
11685     case TARGET_NR_epoll_create:
11686         return get_errno(epoll_create(arg1));
11687 #endif
11688 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11689     case TARGET_NR_epoll_create1:
11690         return get_errno(epoll_create1(arg1));
11691 #endif
11692 #if defined(TARGET_NR_epoll_ctl)
11693     case TARGET_NR_epoll_ctl:
11694     {
11695         struct epoll_event ep;
11696         struct epoll_event *epp = 0;
11697         if (arg4) {
11698             struct target_epoll_event *target_ep;
11699             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11700                 return -TARGET_EFAULT;
11701             }
11702             ep.events = tswap32(target_ep->events);
11703             /* The epoll_data_t union is just opaque data to the kernel,
11704              * so we transfer all 64 bits across and need not worry what
11705              * actual data type it is.
11706              */
11707             ep.data.u64 = tswap64(target_ep->data.u64);
11708             unlock_user_struct(target_ep, arg4, 0);
11709             epp = &ep;
11710         }
11711         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11712     }
11713 #endif
11714 
11715 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11716 #if defined(TARGET_NR_epoll_wait)
11717     case TARGET_NR_epoll_wait:
11718 #endif
11719 #if defined(TARGET_NR_epoll_pwait)
11720     case TARGET_NR_epoll_pwait:
11721 #endif
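    /*
     * epoll_wait and epoll_pwait share one implementation: both are
     * funnelled through safe_epoll_pwait(), with epoll_wait passing a
     * NULL signal mask.
     */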
11722     {
11723         struct target_epoll_event *target_ep;
11724         struct epoll_event *ep;
11725         int epfd = arg1;
11726         int maxevents = arg3;
11727         int timeout = arg4;
11728 
11729         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11730             return -TARGET_EINVAL;
11731         }
11732 
11733         target_ep = lock_user(VERIFY_WRITE, arg2,
11734                               maxevents * sizeof(struct target_epoll_event), 1);
11735         if (!target_ep) {
11736             return -TARGET_EFAULT;
11737         }
11738 
11739         ep = g_try_new(struct epoll_event, maxevents);
11740         if (!ep) {
11741             unlock_user(target_ep, arg2, 0);
11742             return -TARGET_ENOMEM;
11743         }
11744 
11745         switch (num) {
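                    /*
                     * Each matching bit below overwrites si_code, so
                     * when several exceptions are raised at once the
                     * last test that matches determines the code
                     * reported to the guest.
                     */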
11746 #if defined(TARGET_NR_epoll_pwait)
11747         case TARGET_NR_epoll_pwait:
11748         {
11749             target_sigset_t *target_set;
11750             sigset_t _set, *set = &_set;
11751 
11752             if (arg5) {
11753                 if (arg6 != sizeof(target_sigset_t)) {
11754                     ret = -TARGET_EINVAL;
11755                     break;
11756                 }
11757 
11758                 target_set = lock_user(VERIFY_READ, arg5,
11759                                        sizeof(target_sigset_t), 1);
11760                 if (!target_set) {
11761                     ret = -TARGET_EFAULT;
11762                     break;
11763                 }
11764                 target_to_host_sigset(set, target_set);
11765                 unlock_user(target_set, arg5, 0);
11766             } else {
11767                 set = NULL;
11768             }
11769 
11770             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11771                                              set, SIGSET_T_SIZE));
11772             break;
11773         }
11774 #endif
11775 #if defined(TARGET_NR_epoll_wait)
11776         case TARGET_NR_epoll_wait:
11777             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11778                                              NULL, 0));
11779             break;
11780 #endif
11781         default:
11782             ret = -TARGET_ENOSYS;
11783         }
11784         if (!is_error(ret)) {
11785             int i;
11786             for (i = 0; i < ret; i++) {
11787                 target_ep[i].events = tswap32(ep[i].events);
11788                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11789             }
11790             unlock_user(target_ep, arg2,
11791                         ret * sizeof(struct target_epoll_event));
11792         } else {
11793             unlock_user(target_ep, arg2, 0);
11794         }
11795         g_free(ep);
11796         return ret;
11797     }
11798 #endif
11799 #endif
11800 #ifdef TARGET_NR_prlimit64
11801     case TARGET_NR_prlimit64:
11802     {
11803         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11804         struct target_rlimit64 *target_rnew, *target_rold;
11805         struct host_rlimit64 rnew, rold, *rnewp = 0;
11806         int resource = target_to_host_resource(arg2);
11807         if (arg3) {
11808             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11809                 return -TARGET_EFAULT;
11810             }
11811             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11812             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11813             unlock_user_struct(target_rnew, arg3, 0);
11814             rnewp = &rnew;
11815         }
11816 
11817         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11818         if (!is_error(ret) && arg4) {
11819             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11820                 return -TARGET_EFAULT;
11821             }
11822             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11823             target_rold->rlim_max = tswap64(rold.rlim_max);
11824             unlock_user_struct(target_rold, arg4, 1);
11825         }
11826         return ret;
11827     }
11828 #endif
11829 #ifdef TARGET_NR_gethostname
11830     case TARGET_NR_gethostname:
11831     {
11832         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11833         if (name) {
11834             ret = get_errno(gethostname(name, arg2));
11835             unlock_user(name, arg1, arg2);
11836         } else {
11837             ret = -TARGET_EFAULT;
11838         }
11839         return ret;
11840     }
11841 #endif
11842 #ifdef TARGET_NR_atomic_cmpxchg_32
11843     case TARGET_NR_atomic_cmpxchg_32:
11844     {
11845         /* should use start_exclusive from main.c */
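        /*
         * Compare the 32-bit word at guest address arg6 with arg2 and,
         * if they match, store arg1 there; the previous memory value is
         * returned so the guest can tell whether the exchange happened.
         */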
11846         abi_ulong mem_value;
11847         if (get_user_u32(mem_value, arg6)) {
11848             target_siginfo_t info;
11849             info.si_signo = SIGSEGV;
11850             info.si_errno = 0;
11851             info.si_code = TARGET_SEGV_MAPERR;
11852             info._sifields._sigfault._addr = arg6;
11853             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11854                          QEMU_SI_FAULT, &info);
11855             ret = 0xdeadbeef;
11856 
11857         }
11858         if (mem_value == arg2)
11859             put_user_u32(arg1, arg6);
11860         return mem_value;
11861     }
11862 #endif
11863 #ifdef TARGET_NR_atomic_barrier
11864     case TARGET_NR_atomic_barrier:
11865         /* Like the kernel implementation and the
11866            qemu arm barrier, treat this as a no-op. */
11867         return 0;
11868 #endif
11869 
11870 #ifdef TARGET_NR_timer_create
11871     case TARGET_NR_timer_create:
11872     {
11873         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11874 
11875         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11876 
11877         int clkid = arg1;
11878         int timer_index = next_free_host_timer();
11879 
11880         if (timer_index < 0) {
11881             ret = -TARGET_EAGAIN;
11882         } else {
11883             timer_t *phtimer = g_posix_timers + timer_index;
11884 
11885             if (arg2) {
11886                 phost_sevp = &host_sevp;
11887                 ret = target_to_host_sigevent(phost_sevp, arg2);
11888                 if (ret != 0) {
11889                     return ret;
11890                 }
11891             }
11892 
11893             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11894             if (ret) {
11895                 phtimer = NULL;
11896             } else {
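                /*
                 * The timer id handed back to the guest encodes the
                 * index into g_posix_timers together with TIMER_MAGIC
                 * so that the other timer_* syscalls can recover the
                 * host timer via get_timer_id().
                 */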
11897                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11898                     return -TARGET_EFAULT;
11899                 }
11900             }
11901         }
11902         return ret;
11903     }
11904 #endif
11905 
11906 #ifdef TARGET_NR_timer_settime
11907     case TARGET_NR_timer_settime:
11908     {
11909         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11910          * struct itimerspec * old_value */
11911         target_timer_t timerid = get_timer_id(arg1);
11912 
11913         if (timerid < 0) {
11914             ret = timerid;
11915         } else if (arg3 == 0) {
11916             ret = -TARGET_EINVAL;
11917         } else {
11918             timer_t htimer = g_posix_timers[timerid];
11919             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11920 
11921             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11922                 return -TARGET_EFAULT;
11923             }
11924             ret = get_errno(
11925                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11926             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11927                 return -TARGET_EFAULT;
11928             }
11929         }
11930         return ret;
11931     }
11932 #endif
11933 
11934 #ifdef TARGET_NR_timer_gettime
11935     case TARGET_NR_timer_gettime:
11936     {
11937         /* args: timer_t timerid, struct itimerspec *curr_value */
11938         target_timer_t timerid = get_timer_id(arg1);
11939 
11940         if (timerid < 0) {
11941             ret = timerid;
11942         } else if (!arg2) {
11943             ret = -TARGET_EFAULT;
11944         } else {
11945             timer_t htimer = g_posix_timers[timerid];
11946             struct itimerspec hspec;
11947             ret = get_errno(timer_gettime(htimer, &hspec));
11948 
11949             if (host_to_target_itimerspec(arg2, &hspec)) {
11950                 ret = -TARGET_EFAULT;
11951             }
11952         }
11953         return ret;
11954     }
11955 #endif
11956 
11957 #ifdef TARGET_NR_timer_getoverrun
11958     case TARGET_NR_timer_getoverrun:
11959     {
11960         /* args: timer_t timerid */
11961         target_timer_t timerid = get_timer_id(arg1);
11962 
11963         if (timerid < 0) {
11964             ret = timerid;
11965         } else {
11966             timer_t htimer = g_posix_timers[timerid];
11967             ret = get_errno(timer_getoverrun(htimer));
11968         }
11969         return ret;
11970     }
11971 #endif
11972 
11973 #ifdef TARGET_NR_timer_delete
11974     case TARGET_NR_timer_delete:
11975     {
11976         /* args: timer_t timerid */
11977         target_timer_t timerid = get_timer_id(arg1);
11978 
11979         if (timerid < 0) {
11980             ret = timerid;
11981         } else {
11982             timer_t htimer = g_posix_timers[timerid];
11983             ret = get_errno(timer_delete(htimer));
11984             g_posix_timers[timerid] = 0;
11985         }
11986         return ret;
11987     }
11988 #endif
11989 
11990 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11991     case TARGET_NR_timerfd_create:
11992         return get_errno(timerfd_create(arg1,
11993                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11994 #endif
11995 
11996 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11997     case TARGET_NR_timerfd_gettime:
11998         {
11999             struct itimerspec its_curr;
12000 
12001             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12002 
12003             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12004                 return -TARGET_EFAULT;
12005             }
12006         }
12007         return ret;
12008 #endif
12009 
12010 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12011     case TARGET_NR_timerfd_settime:
12012         {
12013             struct itimerspec its_new, its_old, *p_new;
12014 
12015             if (arg3) {
12016                 if (target_to_host_itimerspec(&its_new, arg3)) {
12017                     return -TARGET_EFAULT;
12018                 }
12019                 p_new = &its_new;
12020             } else {
12021                 p_new = NULL;
12022             }
12023 
12024             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12025 
12026             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12027                 return -TARGET_EFAULT;
12028             }
12029         }
12030         return ret;
12031 #endif
12032 
12033 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12034     case TARGET_NR_ioprio_get:
12035         return get_errno(ioprio_get(arg1, arg2));
12036 #endif
12037 
12038 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12039     case TARGET_NR_ioprio_set:
12040         return get_errno(ioprio_set(arg1, arg2, arg3));
12041 #endif
12042 
12043 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12044     case TARGET_NR_setns:
12045         return get_errno(setns(arg1, arg2));
12046 #endif
12047 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12048     case TARGET_NR_unshare:
12049         return get_errno(unshare(arg1));
12050 #endif
12051 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12052     case TARGET_NR_kcmp:
12053         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12054 #endif
12055 #ifdef TARGET_NR_swapcontext
12056     case TARGET_NR_swapcontext:
12057         /* PowerPC specific.  */
12058         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12059 #endif
12060 #ifdef TARGET_NR_memfd_create
12061     case TARGET_NR_memfd_create:
12062         p = lock_user_string(arg1);
12063         if (!p) {
12064             return -TARGET_EFAULT;
12065         }
12066         ret = get_errno(memfd_create(p, arg2));
12067         fd_trans_unregister(ret);
12068         unlock_user(p, arg1, 0);
12069         return ret;
12070 #endif
12071 
12072     default:
12073         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12074         return -TARGET_ENOSYS;
12075     }
12076     return ret;
12077 }
12078 
12079 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12080                     abi_long arg2, abi_long arg3, abi_long arg4,
12081                     abi_long arg5, abi_long arg6, abi_long arg7,
12082                     abi_long arg8)
12083 {
12084     CPUState *cpu = env_cpu(cpu_env);
12085     abi_long ret;
12086 
12087 #ifdef DEBUG_ERESTARTSYS
12088     /* Debug-only code for exercising the syscall-restart code paths
12089      * in the per-architecture cpu main loops: restart every syscall
12090      * the guest makes once before letting it through.
12091      */
12092     {
12093         static bool flag;
12094         flag = !flag;
12095         if (flag) {
12096             return -TARGET_ERESTARTSYS;
12097         }
12098     }
12099 #endif
12100 
12101     record_syscall_start(cpu, num, arg1,
12102                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12103 
12104     if (unlikely(do_strace)) {
12105         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12106         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12107                           arg5, arg6, arg7, arg8);
12108         print_syscall_ret(num, ret);
12109     } else {
12110         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12111                           arg5, arg6, arg7, arg8);
12112     }
12113 
12114     record_syscall_return(cpu, num, ret);
12115     return ret;
12116 }
12117