xref: /openbmc/qemu/linux-user/syscall.c (revision 2113aed6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "strace.h"
131 #include "signal-common.h"
132 #include "qemu/guest-random.h"
133 #include "qemu/selfmap.h"
134 #include "user/syscall-trace.h"
135 #include "qapi/error.h"
136 #include "fd-trans.h"
137 #include "tcg/tcg.h"
138 
139 #ifndef CLONE_IO
140 #define CLONE_IO                0x80000000      /* Clone io context */
141 #endif
142 
143 /* We can't directly call the host clone syscall, because this will
144  * badly confuse libc (breaking mutexes, for example). So we must
145  * divide clone flags into:
146  *  * flag combinations that look like pthread_create()
147  *  * flag combinations that look like fork()
148  *  * flags we can implement within QEMU itself
149  *  * flags we can't support and will return an error for
150  */
151 /* For thread creation, all these flags must be present; for
152  * fork, none must be present.
153  */
154 #define CLONE_THREAD_FLAGS                              \
155     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
156      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
157 
158 /* These flags are ignored:
159  * CLONE_DETACHED is now ignored by the kernel;
160  * CLONE_IO is just an optimisation hint to the I/O scheduler
161  */
162 #define CLONE_IGNORED_FLAGS                     \
163     (CLONE_DETACHED | CLONE_IO)
164 
165 /* Flags for fork which we can implement within QEMU itself */
166 #define CLONE_OPTIONAL_FORK_FLAGS               \
167     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
168      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
169 
170 /* Flags for thread creation which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
174 
175 #define CLONE_INVALID_FORK_FLAGS                                        \
176     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
177 
178 #define CLONE_INVALID_THREAD_FLAGS                                      \
179     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
180        CLONE_IGNORED_FLAGS))
181 
182 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
183  * have almost all been allocated. We cannot support any of
184  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
185  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
186  * The checks against the invalid thread masks above will catch these.
187  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
188  */
189 
190 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
191  * once. This exercises the codepaths for restart.
192  */
193 //#define DEBUG_ERESTARTSYS
194 
195 //#include <linux/msdos_fs.h>
196 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
197 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
198 
/*
 * Undefine any libc-provided _syscallN helpers so that the
 * definitions below are the only ones in effect.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/*
 * _syscallN(type, name, ...) defines a static function "name" taking
 * N typed arguments that invokes the raw host syscall __NR_<name> via
 * syscall(2).  The return value is the host result, with errno set by
 * the host libc on failure (no target errno translation happens here).
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
252 
253 
254 #define __NR_sys_uname __NR_uname
255 #define __NR_sys_getcwd1 __NR_getcwd
256 #define __NR_sys_getdents __NR_getdents
257 #define __NR_sys_getdents64 __NR_getdents64
258 #define __NR_sys_getpriority __NR_getpriority
259 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
260 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
261 #define __NR_sys_syslog __NR_syslog
262 #if defined(__NR_futex)
263 # define __NR_sys_futex __NR_futex
264 #endif
265 #if defined(__NR_futex_time64)
266 # define __NR_sys_futex_time64 __NR_futex_time64
267 #endif
268 #define __NR_sys_inotify_init __NR_inotify_init
269 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
270 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
271 #define __NR_sys_statx __NR_statx
272 
273 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
274 #define __NR__llseek __NR_lseek
275 #endif
276 
277 /* Newer kernel ports have llseek() instead of _llseek() */
278 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
279 #define TARGET_NR__llseek TARGET_NR_llseek
280 #endif
281 
282 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
283 #ifndef TARGET_O_NONBLOCK_MASK
284 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
285 #endif
286 
287 #define __NR_sys_gettid __NR_gettid
288 _syscall0(int, sys_gettid)
289 
290 /* For the 64-bit guest on 32-bit host case we must emulate
291  * getdents using getdents64, because otherwise the host
292  * might hand us back more dirent records than we can fit
293  * into the guest buffer after structure format conversion.
294  * Otherwise we emulate getdents with getdents if the host has it.
295  */
296 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
297 #define EMULATE_GETDENTS_WITH_GETDENTS
298 #endif
299 
300 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
301 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
302 #endif
303 #if (defined(TARGET_NR_getdents) && \
304       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
305     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
306 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
307 #endif
308 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
309 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
310           loff_t *, res, uint, wh);
311 #endif
312 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
313 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
314           siginfo_t *, uinfo)
315 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
316 #ifdef __NR_exit_group
317 _syscall1(int,exit_group,int,error_code)
318 #endif
319 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
320 _syscall1(int,set_tid_address,int *,tidptr)
321 #endif
322 #if defined(__NR_futex)
323 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
324           const struct timespec *,timeout,int *,uaddr2,int,val3)
325 #endif
326 #if defined(__NR_futex_time64)
327 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
331 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
332           unsigned long *, user_mask_ptr);
333 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
334 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
335           unsigned long *, user_mask_ptr);
336 #define __NR_sys_getcpu __NR_getcpu
337 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
338 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
339           void *, arg);
340 _syscall2(int, capget, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 _syscall2(int, capset, struct __user_cap_header_struct *, header,
343           struct __user_cap_data_struct *, data);
344 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
345 _syscall2(int, ioprio_get, int, which, int, who)
346 #endif
347 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
348 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
349 #endif
350 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
351 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
352 #endif
353 
354 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
355 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
356           unsigned long, idx1, unsigned long, idx2)
357 #endif
358 
359 /*
360  * It is assumed that struct statx is architecture independent.
361  */
362 #if defined(TARGET_NR_statx) && defined(__NR_statx)
363 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
364           unsigned int, mask, struct target_statx *, statxbuf)
365 #endif
366 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
367 _syscall2(int, membarrier, int, cmd, int, flags)
368 #endif
369 
/*
 * Translation table between target (guest) and host open(2)/fcntl(2)
 * file flags.  Each row reads:
 *   { target_mask, target_bits, host_mask, host_bits }
 * i.e. when (flags & target_mask) == target_bits on the guest side,
 * the corresponding host_bits are set within host_mask (and the
 * reverse for host-to-target conversion).  O_RDONLY has no row:
 * presumably it is zero on every target, as it is on the host, so
 * only the WRONLY/RDWR access modes need explicit entries.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
405 
406 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
407 
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* The host kernel headers lack utimensat: provide a stub that fails
 * with ENOSYS so the guest sees the syscall as unimplemented. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
422 
423 #ifdef TARGET_NR_renameat2
424 #if defined(__NR_renameat2)
425 #define __NR_sys_renameat2 __NR_renameat2
426 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
427           const char *, new, unsigned int, flags)
428 #else
/* Fallback for hosts without the renameat2 syscall: only the
 * flag-less case can be emulated with plain renameat(); any request
 * with flags set fails with ENOSYS. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
438 #endif
439 #endif /* TARGET_NR_renameat2 */
440 
441 #ifdef CONFIG_INOTIFY
442 #include <sys/inotify.h>
443 
444 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper: forward directly to the host libc inotify_init(). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
449 #endif
450 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper: forward directly to the host libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
455 #endif
456 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper: forward directly to the host libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
461 #endif
462 #ifdef CONFIG_INOTIFY1
463 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper: forward directly to the host libc inotify_init1(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
468 #endif
469 #endif
470 #else
471 /* Userspace can usually survive runtime without inotify */
472 #undef TARGET_NR_inotify_init
473 #undef TARGET_NR_inotify_init1
474 #undef TARGET_NR_inotify_add_watch
475 #undef TARGET_NR_inotify_rm_watch
476 #endif /* CONFIG_INOTIFY  */
477 
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
/* Hosts without prlimit64 get syscall number -1; syscall(2) then
 * fails at runtime with ENOSYS, which is the behaviour we want. */
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;   /* soft limit */
    uint64_t rlim_max;   /* hard limit */
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
492 
493 
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A zero entry is a free slot; (timer_t)1 is a placeholder stored
 * while a timer is being created.  NOTE(review): slots appear to be
 * released elsewhere by storing 0 back -- keep that convention. */
static timer_t g_posix_timers[32] = { 0, };

/*
 * Claim the first free slot in g_posix_timers and return its index,
 * or -1 if every slot is in use.  The slot is reserved with an atomic
 * compare-and-swap so that two guest threads creating timers
 * concurrently cannot both be handed the same slot (this replaces the
 * previously racy unlocked check-then-set).
 */
static inline int next_free_host_timer(void)
{
    int k;

    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        /* Atomically transition slot k from free (0) to claimed (1). */
        if (qatomic_cmpxchg(&g_posix_timers[k], (timer_t)0, (timer_t)1)
            == (timer_t)0) {
            return k;
        }
    }
    return -1;
}
#endif
511 
/* Convert a host errno value into the target's numbering.
 * errnos.c.inc expands E(X) to "case X: return TARGET_X;" for each
 * listed errno (presumably those whose values differ between host and
 * target -- see that file); anything unlisted is passed through on
 * the assumption that host and target agree on its value. */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
522 
/* Convert a target errno value into the host's numbering; exact
 * inverse of host_to_target_errno(), driven by the same E() list in
 * errnos.c.inc, with unlisted values passed through unchanged. */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
533 
534 static inline abi_long get_errno(abi_long ret)
535 {
536     if (ret == -1)
537         return -host_to_target_errno(errno);
538     else
539         return ret;
540 }
541 
542 const char *target_strerror(int err)
543 {
544     if (err == TARGET_ERESTARTSYS) {
545         return "To be restarted";
546     }
547     if (err == TARGET_QEMU_ESIGRETURN) {
548         return "Successful exit from sigreturn";
549     }
550 
551     return strerror(target_to_host_errno(err));
552 }
553 
/*
 * safe_syscallN(type, name, ...) defines a static function
 * safe_<name> with N typed arguments that enters the host syscall
 * __NR_<name> through safe_syscall() instead of libc or plain
 * syscall().  NOTE(review): safe_syscall() is defined elsewhere;
 * judging by the DEBUG_ERESTARTSYS machinery above it handles signal
 * interruption / syscall restart -- confirm against its definition.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
600 
601 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
602 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
603 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
604               int, flags, mode_t, mode)
605 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
606 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
607               struct rusage *, rusage)
608 #endif
609 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
610               int, options, struct rusage *, rusage)
611 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
612 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
613     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
614 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
615               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
616 #endif
617 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
618 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
619               struct timespec *, tsp, const sigset_t *, sigmask,
620               size_t, sigsetsize)
621 #endif
622 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
623               int, maxevents, int, timeout, const sigset_t *, sigmask,
624               size_t, sigsetsize)
625 #if defined(__NR_futex)
626 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
627               const struct timespec *,timeout,int *,uaddr2,int,val3)
628 #endif
629 #if defined(__NR_futex_time64)
630 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
631               const struct timespec *,timeout,int *,uaddr2,int,val3)
632 #endif
633 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
634 safe_syscall2(int, kill, pid_t, pid, int, sig)
635 safe_syscall2(int, tkill, int, tid, int, sig)
636 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
637 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
638 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
639 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
640               unsigned long, pos_l, unsigned long, pos_h)
641 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
642               unsigned long, pos_l, unsigned long, pos_h)
643 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
644               socklen_t, addrlen)
645 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
646               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
647 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
648               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
649 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
650 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
651 safe_syscall2(int, flock, int, fd, int, operation)
652 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
653 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
654               const struct timespec *, uts, size_t, sigsetsize)
655 #endif
656 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
657               int, flags)
658 #if defined(TARGET_NR_nanosleep)
659 safe_syscall2(int, nanosleep, const struct timespec *, req,
660               struct timespec *, rem)
661 #endif
662 #if defined(TARGET_NR_clock_nanosleep) || \
663     defined(TARGET_NR_clock_nanosleep_time64)
664 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
665               const struct timespec *, req, struct timespec *, rem)
666 #endif
667 #ifdef __NR_ipc
668 #ifdef __s390x__
669 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
670               void *, ptr)
671 #else
672 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
673               void *, ptr, long, fifth)
674 #endif
675 #endif
676 #ifdef __NR_msgsnd
677 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
678               int, flags)
679 #endif
680 #ifdef __NR_msgrcv
681 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
682               long, msgtype, int, flags)
683 #endif
684 #ifdef __NR_semtimedop
685 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
686               unsigned, nsops, const struct timespec *, timeout)
687 #endif
688 #if defined(TARGET_NR_mq_timedsend) || \
689     defined(TARGET_NR_mq_timedsend_time64)
690 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
691               size_t, len, unsigned, prio, const struct timespec *, timeout)
692 #endif
693 #if defined(TARGET_NR_mq_timedreceive) || \
694     defined(TARGET_NR_mq_timedreceive_time64)
695 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
696               size_t, len, unsigned *, prio, const struct timespec *, timeout)
697 #endif
698 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
699 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
700               int, outfd, loff_t *, poutoff, size_t, length,
701               unsigned int, flags)
702 #endif
703 
704 /* We do ioctl like this rather than via safe_syscall3 to preserve the
705  * "third argument might be integer or pointer or not present" behaviour of
706  * the libc function.
707  */
708 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
709 /* Similarly for fcntl. Note that callers must always:
710  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
711  *  use the flock64 struct rather than unsuffixed flock
712  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
713  */
714 #ifdef __NR_fcntl64
715 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
716 #else
717 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
718 #endif
719 
720 static inline int host_to_target_sock_type(int host_type)
721 {
722     int target_type;
723 
724     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
725     case SOCK_DGRAM:
726         target_type = TARGET_SOCK_DGRAM;
727         break;
728     case SOCK_STREAM:
729         target_type = TARGET_SOCK_STREAM;
730         break;
731     default:
732         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
733         break;
734     }
735 
736 #if defined(SOCK_CLOEXEC)
737     if (host_type & SOCK_CLOEXEC) {
738         target_type |= TARGET_SOCK_CLOEXEC;
739     }
740 #endif
741 
742 #if defined(SOCK_NONBLOCK)
743     if (host_type & SOCK_NONBLOCK) {
744         target_type |= TARGET_SOCK_NONBLOCK;
745     }
746 #endif
747 
748     return target_type;
749 }
750 
/* Guest program-break state, kept host-page aligned: */
static abi_ulong target_brk;          /* current guest brk */
static abi_ulong target_original_brk; /* initial brk; do_brk() never goes below this */
static abi_ulong brk_page;            /* first page beyond the mapped heap */

/* Record the guest's initial program break (page-aligned). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
760 
761 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
762 #define DEBUGF_BRK(message, args...)
763 
764 /* do_brk() must return target values and target errnos. */
765 abi_long do_brk(abi_ulong new_brk)
766 {
767     abi_long mapped_addr;
768     abi_ulong new_alloc_size;
769 
770     /* brk pointers are always untagged */
771 
772     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
773 
774     if (!new_brk) {
775         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
776         return target_brk;
777     }
778     if (new_brk < target_original_brk) {
779         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
780                    target_brk);
781         return target_brk;
782     }
783 
784     /* If the new brk is less than the highest page reserved to the
785      * target heap allocation, set it and we're almost done...  */
786     if (new_brk <= brk_page) {
787         /* Heap contents are initialized to zero, as for anonymous
788          * mapped pages.  */
789         if (new_brk > target_brk) {
790             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
791         }
792 	target_brk = new_brk;
793         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
794 	return target_brk;
795     }
796 
797     /* We need to allocate more memory after the brk... Note that
798      * we don't use MAP_FIXED because that will map over the top of
799      * any existing mapping (like the one with the host libc or qemu
800      * itself); instead we treat "mapped but at wrong address" as
801      * a failure and unmap again.
802      */
803     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
804     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
805                                         PROT_READ|PROT_WRITE,
806                                         MAP_ANON|MAP_PRIVATE, 0, 0));
807 
808     if (mapped_addr == brk_page) {
809         /* Heap contents are initialized to zero, as for anonymous
810          * mapped pages.  Technically the new pages are already
811          * initialized to zero since they *are* anonymous mapped
812          * pages, however we have to take care with the contents that
813          * come from the remaining part of the previous page: it may
814          * contains garbage data due to a previous heap usage (grown
815          * then shrunken).  */
816         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
817 
818         target_brk = new_brk;
819         brk_page = HOST_PAGE_ALIGN(target_brk);
820         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
821             target_brk);
822         return target_brk;
823     } else if (mapped_addr != -1) {
824         /* Mapped but at wrong address, meaning there wasn't actually
825          * enough space for this brk.
826          */
827         target_munmap(mapped_addr, new_alloc_size);
828         mapped_addr = -1;
829         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
830     }
831     else {
832         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
833     }
834 
835 #if defined(TARGET_ALPHA)
836     /* We (partially) emulate OSF/1 on Alpha, which requires we
837        return a proper errno, not an unchanged brk value.  */
838     return -TARGET_ENOMEM;
839 #endif
840     /* For everything else, return the previous break. */
841     return target_brk;
842 }
843 
844 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
845     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
846 static inline abi_long copy_from_user_fdset(fd_set *fds,
847                                             abi_ulong target_fds_addr,
848                                             int n)
849 {
850     int i, nw, j, k;
851     abi_ulong b, *target_fds;
852 
853     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
854     if (!(target_fds = lock_user(VERIFY_READ,
855                                  target_fds_addr,
856                                  sizeof(abi_ulong) * nw,
857                                  1)))
858         return -TARGET_EFAULT;
859 
860     FD_ZERO(fds);
861     k = 0;
862     for (i = 0; i < nw; i++) {
863         /* grab the abi_ulong */
864         __get_user(b, &target_fds[i]);
865         for (j = 0; j < TARGET_ABI_BITS; j++) {
866             /* check the bit inside the abi_ulong */
867             if ((b >> j) & 1)
868                 FD_SET(k, fds);
869             k++;
870         }
871     }
872 
873     unlock_user(target_fds, target_fds_addr, 0);
874 
875     return 0;
876 }
877 
878 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
879                                                  abi_ulong target_fds_addr,
880                                                  int n)
881 {
882     if (target_fds_addr) {
883         if (copy_from_user_fdset(fds, target_fds_addr, n))
884             return -TARGET_EFAULT;
885         *fds_ptr = fds;
886     } else {
887         *fds_ptr = NULL;
888     }
889     return 0;
890 }
891 
892 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
893                                           const fd_set *fds,
894                                           int n)
895 {
896     int i, nw, j, k;
897     abi_long v;
898     abi_ulong *target_fds;
899 
900     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
901     if (!(target_fds = lock_user(VERIFY_WRITE,
902                                  target_fds_addr,
903                                  sizeof(abi_ulong) * nw,
904                                  0)))
905         return -TARGET_EFAULT;
906 
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         v = 0;
910         for (j = 0; j < TARGET_ABI_BITS; j++) {
911             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
912             k++;
913         }
914         __put_user(v, &target_fds[i]);
915     }
916 
917     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
918 
919     return 0;
920 }
921 #endif
922 
/* HOST_HZ approximates the host kernel's clock tick rate: assumed
   1024 on Alpha hosts and 100 everywhere else. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Convert a tick count in host clock_t units into the guest's tick
 * units (TARGET_HZ).
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits before multiplying so the rescaling cannot
       overflow a 32-bit long. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
937 
/*
 * Copy a host struct rusage out to the guest struct target_rusage at
 * @target_addr, converting every field to guest byte order.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
967 
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value (guest byte order) to a host rlim_t.
 * The guest's infinity marker, and any value that cannot be
 * represented by the host type, both map to RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    host_rlim = swapped;
    /* Detect truncation: a value that doesn't round-trip is too big. */
    if (swapped != host_rlim) {
        return RLIM_INFINITY;
    }

    return host_rlim;
}
#endif
985 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to a guest rlimit value in guest byte order.
 * Host infinity, and anything that does not fit in the guest abi_long,
 * are reported as TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong guest_val;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        guest_val = TARGET_RLIM_INFINITY;
    } else {
        guest_val = rlim;
    }

    return tswapal(guest_val);
}
#endif
1001 
/*
 * Translate a guest RLIMIT_* resource code into the host's value.
 * Unknown codes are passed through unchanged, so the host syscall
 * can reject them with its own error.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1039 
1040 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1041                                               abi_ulong target_tv_addr)
1042 {
1043     struct target_timeval *target_tv;
1044 
1045     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1046         return -TARGET_EFAULT;
1047     }
1048 
1049     __get_user(tv->tv_sec, &target_tv->tv_sec);
1050     __get_user(tv->tv_usec, &target_tv->tv_usec);
1051 
1052     unlock_user_struct(target_tv, target_tv_addr, 0);
1053 
1054     return 0;
1055 }
1056 
1057 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1058                                             const struct timeval *tv)
1059 {
1060     struct target_timeval *target_tv;
1061 
1062     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1063         return -TARGET_EFAULT;
1064     }
1065 
1066     __put_user(tv->tv_sec, &target_tv->tv_sec);
1067     __put_user(tv->tv_usec, &target_tv->tv_usec);
1068 
1069     unlock_user_struct(target_tv, target_tv_addr, 1);
1070 
1071     return 0;
1072 }
1073 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Fetch a 64-bit sock_timeval from guest memory at @target_tv_addr
 * into the host struct timeval @tv.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);
    unlock_user_struct(target_tv, target_tv_addr, 0);
    return 0;
}
#endif
1092 
1093 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1094                                               const struct timeval *tv)
1095 {
1096     struct target__kernel_sock_timeval *target_tv;
1097 
1098     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1099         return -TARGET_EFAULT;
1100     }
1101 
1102     __put_user(tv->tv_sec, &target_tv->tv_sec);
1103     __put_user(tv->tv_usec, &target_tv->tv_usec);
1104 
1105     unlock_user_struct(target_tv, target_tv_addr, 1);
1106 
1107     return 0;
1108 }
1109 
/*
 * Guard fix: the condition previously listed TARGET_NR_pselect6 twice
 * and omitted the pselect6_time64 / ppoll variants, even though
 * do_pselect6() (compiled for either pselect6 syscall) and do_ppoll()
 * (compiled for poll/ppoll/ppoll_time64) both call this helper; a
 * target defining only those syscalls failed to build.
 */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Fetch a struct target_timespec from guest memory at @target_addr
 * into the host struct timespec @host_ts.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1133 
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Fetch a 64-bit struct __kernel_timespec from guest memory at
 * @target_addr into the host struct timespec @host_ts.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1161 
1162 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1163                                                struct timespec *host_ts)
1164 {
1165     struct target_timespec *target_ts;
1166 
1167     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1168         return -TARGET_EFAULT;
1169     }
1170     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1171     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1172     unlock_user_struct(target_ts, target_addr, 1);
1173     return 0;
1174 }
1175 
1176 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1177                                                  struct timespec *host_ts)
1178 {
1179     struct target__kernel_timespec *target_ts;
1180 
1181     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1182         return -TARGET_EFAULT;
1183     }
1184     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1185     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1186     unlock_user_struct(target_ts, target_addr, 1);
1187     return 0;
1188 }
1189 
#if defined(TARGET_NR_gettimeofday)
/*
 * Store the host struct timezone @tz into guest memory at
 * @target_tz_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /*
     * Write-only access: pass copy=0 so we do not pointlessly copy in
     * guest data that the two stores below immediately overwrite.
     * This matches every other copy_to_user_* helper in this file.
     */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1208 
#if defined(TARGET_NR_settimeofday)
/*
 * Fetch a struct timezone from guest memory at @target_tz_addr into
 * the host structure @tz.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
    unlock_user_struct(target_tz, target_tz_addr, 0);
    return 0;
}
#endif
1227 
1228 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1229 #include <mqueue.h>
1230 
1231 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1232                                               abi_ulong target_mq_attr_addr)
1233 {
1234     struct target_mq_attr *target_mq_attr;
1235 
1236     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1237                           target_mq_attr_addr, 1))
1238         return -TARGET_EFAULT;
1239 
1240     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1241     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1242     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1243     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1244 
1245     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1246 
1247     return 0;
1248 }
1249 
1250 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1251                                             const struct mq_attr *attr)
1252 {
1253     struct target_mq_attr *target_mq_attr;
1254 
1255     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1256                           target_mq_attr_addr, 0))
1257         return -TARGET_EFAULT;
1258 
1259     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1260     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1261     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1262     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1263 
1264     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1265 
1266     return 0;
1267 }
1268 #endif
1269 
1270 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): @rfd_addr/@wfd_addr/@efd_addr are guest addresses
 * of the three fd sets (0 means "not supplied"), @target_tv_addr is
 * the guest timeval address (0 means block indefinitely).
 * Implemented on top of the host pselect6 with a NULL sigmask.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three fd sets in; a zero guest address yields a NULL
     * pointer for the host syscall. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the guest timeval to the timespec pselect expects. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    /* On success, write the (possibly modified) sets and the remaining
     * timeout back to the guest. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1327 
1328 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Old-style select: the guest passes a single pointer to a structure
 * holding all five select() arguments.  Unpack it (byte-swapping each
 * field) and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1348 }
1349 #endif
1350 #endif
1351 
1352 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6(2) / pselect6_time64(2).
 * arg1..arg5 are nfds and the guest addresses of the three fd sets and
 * the timespec; arg6 points at a two-word structure packing the sigset
 * address and its size.  @time64 selects the 64-bit timespec layout.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* A zero guest address for any set yields a NULL host pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            /* NOTE(review): indentation is misleading here -- this
             * assignment runs for any nonzero ts_addr. */
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    /* On success, copy the sets and the remaining timeout back out. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1472 #endif
1473 
1474 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1475     defined(TARGET_NR_ppoll_time64)
1476 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1477                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1478 {
1479     struct target_pollfd *target_pfd;
1480     unsigned int nfds = arg2;
1481     struct pollfd *pfd;
1482     unsigned int i;
1483     abi_long ret;
1484 
1485     pfd = NULL;
1486     target_pfd = NULL;
1487     if (nfds) {
1488         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1489             return -TARGET_EINVAL;
1490         }
1491         target_pfd = lock_user(VERIFY_WRITE, arg1,
1492                                sizeof(struct target_pollfd) * nfds, 1);
1493         if (!target_pfd) {
1494             return -TARGET_EFAULT;
1495         }
1496 
1497         pfd = alloca(sizeof(struct pollfd) * nfds);
1498         for (i = 0; i < nfds; i++) {
1499             pfd[i].fd = tswap32(target_pfd[i].fd);
1500             pfd[i].events = tswap16(target_pfd[i].events);
1501         }
1502     }
1503     if (ppoll) {
1504         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1505         target_sigset_t *target_set;
1506         sigset_t _set, *set = &_set;
1507 
1508         if (arg3) {
1509             if (time64) {
1510                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1511                     unlock_user(target_pfd, arg1, 0);
1512                     return -TARGET_EFAULT;
1513                 }
1514             } else {
1515                 if (target_to_host_timespec(timeout_ts, arg3)) {
1516                     unlock_user(target_pfd, arg1, 0);
1517                     return -TARGET_EFAULT;
1518                 }
1519             }
1520         } else {
1521             timeout_ts = NULL;
1522         }
1523 
1524         if (arg4) {
1525             if (arg5 != sizeof(target_sigset_t)) {
1526                 unlock_user(target_pfd, arg1, 0);
1527                 return -TARGET_EINVAL;
1528             }
1529 
1530             target_set = lock_user(VERIFY_READ, arg4,
1531                                    sizeof(target_sigset_t), 1);
1532             if (!target_set) {
1533                 unlock_user(target_pfd, arg1, 0);
1534                 return -TARGET_EFAULT;
1535             }
1536             target_to_host_sigset(set, target_set);
1537         } else {
1538             set = NULL;
1539         }
1540 
1541         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1542                                    set, SIGSET_T_SIZE));
1543 
1544         if (!is_error(ret) && arg3) {
1545             if (time64) {
1546                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1547                     return -TARGET_EFAULT;
1548                 }
1549             } else {
1550                 if (host_to_target_timespec(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             }
1554         }
1555         if (arg4) {
1556             unlock_user(target_set, arg4, 0);
1557         }
1558     } else {
1559           struct timespec ts, *pts;
1560 
1561           if (arg3 >= 0) {
1562               /* Convert ms to secs, ns */
1563               ts.tv_sec = arg3 / 1000;
1564               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1565               pts = &ts;
1566           } else {
1567               /* -ve poll() timeout means "infinite" */
1568               pts = NULL;
1569           }
1570           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1571     }
1572 
1573     if (!is_error(ret)) {
1574         for (i = 0; i < nfds; i++) {
1575             target_pfd[i].revents = tswap16(pfd[i].revents);
1576         }
1577     }
1578     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1579     return ret;
1580 }
1581 #endif
1582 
1583 static abi_long do_pipe2(int host_pipe[], int flags)
1584 {
1585 #ifdef CONFIG_PIPE2
1586     return pipe2(host_pipe, flags);
1587 #else
1588     return -ENOSYS;
1589 #endif
1590 }
1591 
/*
 * Common implementation for the pipe and pipe2 syscalls.
 * @pipedes is the guest address that receives the two descriptors;
 * @is_pipe2 distinguishes the syscall entry points because several
 * targets return the second fd in a register for plain pipe().
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes back in a second return register. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both descriptors into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1625 
1626 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1627                                               abi_ulong target_addr,
1628                                               socklen_t len)
1629 {
1630     struct target_ip_mreqn *target_smreqn;
1631 
1632     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1633     if (!target_smreqn)
1634         return -TARGET_EFAULT;
1635     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1636     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1637     if (len == sizeof(struct target_ip_mreqn))
1638         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1639     unlock_user(target_smreqn, target_addr, 0);
1640 
1641     return 0;
1642 }
1643 
/*
 * Convert a guest struct sockaddr of @len bytes at @target_addr into
 * the host structure @addr, byte-swapping the family-specific fields
 * that need it.  An fd-specific translator registered for @fd takes
 * precedence over the generic conversion.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Grow len by one if the last byte is non-NUL but the
             * following byte is NUL, so the path stays terminated. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1700 
/*
 * Copy a host struct sockaddr of @len bytes out to the guest at
 * @target_addr, converting multi-byte fields to guest byte order for
 * the address families that need it.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family when the buffer is large enough to hold it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): unlike the AF_NETLINK and AF_INET6 branches,
         * this one does not check that len covers the fields being
         * swapped -- confirm callers always pass a full sockaddr_ll. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1740 
/*
 * Convert the ancillary data (control messages) of a guest sendmsg()
 * from target format into the host msghdr.
 *
 * On entry msgh->msg_control/msg_controllen must describe a host buffer
 * we allocated; on success msgh->msg_controllen is set to the number of
 * host control bytes actually produced.  Returns 0, or -TARGET_EFAULT
 * if the guest control buffer cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;  /* host control bytes produced so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length, excluding the target header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* SCM_RIGHTS payload is an array of file descriptors:
             * byteswap each one individually.
             */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* SCM_CREDENTIALS: convert field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload type: log it and copy the bytes through
             * without any byteswapping.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    /* Report the host control length actually used. */
    msgh->msg_controllen = space;
    return 0;
}
1826 
/*
 * Convert ancillary data received from the host (recvmsg()) in msgh
 * into the guest control buffer described by target_msgh.
 *
 * Truncation because the guest buffer is too small is reported to the
 * guest via MSG_CTRUNC in target_msgh->msg_flags (matching Linux
 * put_cmsg() behaviour: headers are never split, payloads may be).
 * On return target_msgh->msg_controllen is set to the target bytes
 * written.  Returns 0, or -TARGET_EFAULT if the guest buffer cannot
 * be locked.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;  /* target control bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length, excluding the host header. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough guest space for the full payload: flag CTRUNC and
         * clamp tgt_len to what remains after the header.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Array of file descriptors: swap each one. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* sock_extended_err followed by the offending address,
                 * as laid out by the kernel for IP_RECVERR.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* target_errh lives in the locked guest buffer, so its
                 * host address doubles as the "guest address" here.
                 */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: copy bytes through unswapped, zero-fill
             * any extra target space.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the space consumed in the guest buffer; the last
         * message may occupy less than a full TARGET_CMSG_SPACE.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2058 
2059 /* do_setsockopt() Must return target values and target errnos. */
2060 static abi_long do_setsockopt(int sockfd, int level, int optname,
2061                               abi_ulong optval_addr, socklen_t optlen)
2062 {
2063     abi_long ret;
2064     int val;
2065     struct ip_mreqn *ip_mreq;
2066     struct ip_mreq_source *ip_mreq_source;
2067 
2068     switch(level) {
2069     case SOL_TCP:
2070     case SOL_UDP:
2071         /* TCP and UDP options all take an 'int' value.  */
2072         if (optlen < sizeof(uint32_t))
2073             return -TARGET_EINVAL;
2074 
2075         if (get_user_u32(val, optval_addr))
2076             return -TARGET_EFAULT;
2077         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2078         break;
2079     case SOL_IP:
2080         switch(optname) {
2081         case IP_TOS:
2082         case IP_TTL:
2083         case IP_HDRINCL:
2084         case IP_ROUTER_ALERT:
2085         case IP_RECVOPTS:
2086         case IP_RETOPTS:
2087         case IP_PKTINFO:
2088         case IP_MTU_DISCOVER:
2089         case IP_RECVERR:
2090         case IP_RECVTTL:
2091         case IP_RECVTOS:
2092 #ifdef IP_FREEBIND
2093         case IP_FREEBIND:
2094 #endif
2095         case IP_MULTICAST_TTL:
2096         case IP_MULTICAST_LOOP:
2097             val = 0;
2098             if (optlen >= sizeof(uint32_t)) {
2099                 if (get_user_u32(val, optval_addr))
2100                     return -TARGET_EFAULT;
2101             } else if (optlen >= 1) {
2102                 if (get_user_u8(val, optval_addr))
2103                     return -TARGET_EFAULT;
2104             }
2105             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2106             break;
2107         case IP_ADD_MEMBERSHIP:
2108         case IP_DROP_MEMBERSHIP:
2109             if (optlen < sizeof (struct target_ip_mreq) ||
2110                 optlen > sizeof (struct target_ip_mreqn))
2111                 return -TARGET_EINVAL;
2112 
2113             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2114             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2115             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2116             break;
2117 
2118         case IP_BLOCK_SOURCE:
2119         case IP_UNBLOCK_SOURCE:
2120         case IP_ADD_SOURCE_MEMBERSHIP:
2121         case IP_DROP_SOURCE_MEMBERSHIP:
2122             if (optlen != sizeof (struct target_ip_mreq_source))
2123                 return -TARGET_EINVAL;
2124 
2125             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2126             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2127             unlock_user (ip_mreq_source, optval_addr, 0);
2128             break;
2129 
2130         default:
2131             goto unimplemented;
2132         }
2133         break;
2134     case SOL_IPV6:
2135         switch (optname) {
2136         case IPV6_MTU_DISCOVER:
2137         case IPV6_MTU:
2138         case IPV6_V6ONLY:
2139         case IPV6_RECVPKTINFO:
2140         case IPV6_UNICAST_HOPS:
2141         case IPV6_MULTICAST_HOPS:
2142         case IPV6_MULTICAST_LOOP:
2143         case IPV6_RECVERR:
2144         case IPV6_RECVHOPLIMIT:
2145         case IPV6_2292HOPLIMIT:
2146         case IPV6_CHECKSUM:
2147         case IPV6_ADDRFORM:
2148         case IPV6_2292PKTINFO:
2149         case IPV6_RECVTCLASS:
2150         case IPV6_RECVRTHDR:
2151         case IPV6_2292RTHDR:
2152         case IPV6_RECVHOPOPTS:
2153         case IPV6_2292HOPOPTS:
2154         case IPV6_RECVDSTOPTS:
2155         case IPV6_2292DSTOPTS:
2156         case IPV6_TCLASS:
2157         case IPV6_ADDR_PREFERENCES:
2158 #ifdef IPV6_RECVPATHMTU
2159         case IPV6_RECVPATHMTU:
2160 #endif
2161 #ifdef IPV6_TRANSPARENT
2162         case IPV6_TRANSPARENT:
2163 #endif
2164 #ifdef IPV6_FREEBIND
2165         case IPV6_FREEBIND:
2166 #endif
2167 #ifdef IPV6_RECVORIGDSTADDR
2168         case IPV6_RECVORIGDSTADDR:
2169 #endif
2170             val = 0;
2171             if (optlen < sizeof(uint32_t)) {
2172                 return -TARGET_EINVAL;
2173             }
2174             if (get_user_u32(val, optval_addr)) {
2175                 return -TARGET_EFAULT;
2176             }
2177             ret = get_errno(setsockopt(sockfd, level, optname,
2178                                        &val, sizeof(val)));
2179             break;
2180         case IPV6_PKTINFO:
2181         {
2182             struct in6_pktinfo pki;
2183 
2184             if (optlen < sizeof(pki)) {
2185                 return -TARGET_EINVAL;
2186             }
2187 
2188             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2189                 return -TARGET_EFAULT;
2190             }
2191 
2192             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2193 
2194             ret = get_errno(setsockopt(sockfd, level, optname,
2195                                        &pki, sizeof(pki)));
2196             break;
2197         }
2198         case IPV6_ADD_MEMBERSHIP:
2199         case IPV6_DROP_MEMBERSHIP:
2200         {
2201             struct ipv6_mreq ipv6mreq;
2202 
2203             if (optlen < sizeof(ipv6mreq)) {
2204                 return -TARGET_EINVAL;
2205             }
2206 
2207             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2208                 return -TARGET_EFAULT;
2209             }
2210 
2211             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2212 
2213             ret = get_errno(setsockopt(sockfd, level, optname,
2214                                        &ipv6mreq, sizeof(ipv6mreq)));
2215             break;
2216         }
2217         default:
2218             goto unimplemented;
2219         }
2220         break;
2221     case SOL_ICMPV6:
2222         switch (optname) {
2223         case ICMPV6_FILTER:
2224         {
2225             struct icmp6_filter icmp6f;
2226 
2227             if (optlen > sizeof(icmp6f)) {
2228                 optlen = sizeof(icmp6f);
2229             }
2230 
2231             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             for (val = 0; val < 8; val++) {
2236                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2237             }
2238 
2239             ret = get_errno(setsockopt(sockfd, level, optname,
2240                                        &icmp6f, optlen));
2241             break;
2242         }
2243         default:
2244             goto unimplemented;
2245         }
2246         break;
2247     case SOL_RAW:
2248         switch (optname) {
2249         case ICMP_FILTER:
2250         case IPV6_CHECKSUM:
2251             /* those take an u32 value */
2252             if (optlen < sizeof(uint32_t)) {
2253                 return -TARGET_EINVAL;
2254             }
2255 
2256             if (get_user_u32(val, optval_addr)) {
2257                 return -TARGET_EFAULT;
2258             }
2259             ret = get_errno(setsockopt(sockfd, level, optname,
2260                                        &val, sizeof(val)));
2261             break;
2262 
2263         default:
2264             goto unimplemented;
2265         }
2266         break;
2267 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2268     case SOL_ALG:
2269         switch (optname) {
2270         case ALG_SET_KEY:
2271         {
2272             char *alg_key = g_malloc(optlen);
2273 
2274             if (!alg_key) {
2275                 return -TARGET_ENOMEM;
2276             }
2277             if (copy_from_user(alg_key, optval_addr, optlen)) {
2278                 g_free(alg_key);
2279                 return -TARGET_EFAULT;
2280             }
2281             ret = get_errno(setsockopt(sockfd, level, optname,
2282                                        alg_key, optlen));
2283             g_free(alg_key);
2284             break;
2285         }
2286         case ALG_SET_AEAD_AUTHSIZE:
2287         {
2288             ret = get_errno(setsockopt(sockfd, level, optname,
2289                                        NULL, optlen));
2290             break;
2291         }
2292         default:
2293             goto unimplemented;
2294         }
2295         break;
2296 #endif
2297     case TARGET_SOL_SOCKET:
2298         switch (optname) {
2299         case TARGET_SO_RCVTIMEO:
2300         {
2301                 struct timeval tv;
2302 
2303                 optname = SO_RCVTIMEO;
2304 
2305 set_timeout:
2306                 if (optlen != sizeof(struct target_timeval)) {
2307                     return -TARGET_EINVAL;
2308                 }
2309 
2310                 if (copy_from_user_timeval(&tv, optval_addr)) {
2311                     return -TARGET_EFAULT;
2312                 }
2313 
2314                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2315                                 &tv, sizeof(tv)));
2316                 return ret;
2317         }
2318         case TARGET_SO_SNDTIMEO:
2319                 optname = SO_SNDTIMEO;
2320                 goto set_timeout;
2321         case TARGET_SO_ATTACH_FILTER:
2322         {
2323                 struct target_sock_fprog *tfprog;
2324                 struct target_sock_filter *tfilter;
2325                 struct sock_fprog fprog;
2326                 struct sock_filter *filter;
2327                 int i;
2328 
2329                 if (optlen != sizeof(*tfprog)) {
2330                     return -TARGET_EINVAL;
2331                 }
2332                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2333                     return -TARGET_EFAULT;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfilter,
2336                                       tswapal(tfprog->filter), 0)) {
2337                     unlock_user_struct(tfprog, optval_addr, 1);
2338                     return -TARGET_EFAULT;
2339                 }
2340 
2341                 fprog.len = tswap16(tfprog->len);
2342                 filter = g_try_new(struct sock_filter, fprog.len);
2343                 if (filter == NULL) {
2344                     unlock_user_struct(tfilter, tfprog->filter, 1);
2345                     unlock_user_struct(tfprog, optval_addr, 1);
2346                     return -TARGET_ENOMEM;
2347                 }
2348                 for (i = 0; i < fprog.len; i++) {
2349                     filter[i].code = tswap16(tfilter[i].code);
2350                     filter[i].jt = tfilter[i].jt;
2351                     filter[i].jf = tfilter[i].jf;
2352                     filter[i].k = tswap32(tfilter[i].k);
2353                 }
2354                 fprog.filter = filter;
2355 
2356                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2357                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2358                 g_free(filter);
2359 
2360                 unlock_user_struct(tfilter, tfprog->filter, 1);
2361                 unlock_user_struct(tfprog, optval_addr, 1);
2362                 return ret;
2363         }
2364 	case TARGET_SO_BINDTODEVICE:
2365 	{
2366 		char *dev_ifname, *addr_ifname;
2367 
2368 		if (optlen > IFNAMSIZ - 1) {
2369 		    optlen = IFNAMSIZ - 1;
2370 		}
2371 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2372 		if (!dev_ifname) {
2373 		    return -TARGET_EFAULT;
2374 		}
2375 		optname = SO_BINDTODEVICE;
2376 		addr_ifname = alloca(IFNAMSIZ);
2377 		memcpy(addr_ifname, dev_ifname, optlen);
2378 		addr_ifname[optlen] = 0;
2379 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2380                                            addr_ifname, optlen));
2381 		unlock_user (dev_ifname, optval_addr, 0);
2382 		return ret;
2383 	}
2384         case TARGET_SO_LINGER:
2385         {
2386                 struct linger lg;
2387                 struct target_linger *tlg;
2388 
2389                 if (optlen != sizeof(struct target_linger)) {
2390                     return -TARGET_EINVAL;
2391                 }
2392                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2393                     return -TARGET_EFAULT;
2394                 }
2395                 __get_user(lg.l_onoff, &tlg->l_onoff);
2396                 __get_user(lg.l_linger, &tlg->l_linger);
2397                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2398                                 &lg, sizeof(lg)));
2399                 unlock_user_struct(tlg, optval_addr, 0);
2400                 return ret;
2401         }
2402             /* Options with 'int' argument.  */
2403         case TARGET_SO_DEBUG:
2404 		optname = SO_DEBUG;
2405 		break;
2406         case TARGET_SO_REUSEADDR:
2407 		optname = SO_REUSEADDR;
2408 		break;
2409 #ifdef SO_REUSEPORT
2410         case TARGET_SO_REUSEPORT:
2411                 optname = SO_REUSEPORT;
2412                 break;
2413 #endif
2414         case TARGET_SO_TYPE:
2415 		optname = SO_TYPE;
2416 		break;
2417         case TARGET_SO_ERROR:
2418 		optname = SO_ERROR;
2419 		break;
2420         case TARGET_SO_DONTROUTE:
2421 		optname = SO_DONTROUTE;
2422 		break;
2423         case TARGET_SO_BROADCAST:
2424 		optname = SO_BROADCAST;
2425 		break;
2426         case TARGET_SO_SNDBUF:
2427 		optname = SO_SNDBUF;
2428 		break;
2429         case TARGET_SO_SNDBUFFORCE:
2430                 optname = SO_SNDBUFFORCE;
2431                 break;
2432         case TARGET_SO_RCVBUF:
2433 		optname = SO_RCVBUF;
2434 		break;
2435         case TARGET_SO_RCVBUFFORCE:
2436                 optname = SO_RCVBUFFORCE;
2437                 break;
2438         case TARGET_SO_KEEPALIVE:
2439 		optname = SO_KEEPALIVE;
2440 		break;
2441         case TARGET_SO_OOBINLINE:
2442 		optname = SO_OOBINLINE;
2443 		break;
2444         case TARGET_SO_NO_CHECK:
2445 		optname = SO_NO_CHECK;
2446 		break;
2447         case TARGET_SO_PRIORITY:
2448 		optname = SO_PRIORITY;
2449 		break;
2450 #ifdef SO_BSDCOMPAT
2451         case TARGET_SO_BSDCOMPAT:
2452 		optname = SO_BSDCOMPAT;
2453 		break;
2454 #endif
2455         case TARGET_SO_PASSCRED:
2456 		optname = SO_PASSCRED;
2457 		break;
2458         case TARGET_SO_PASSSEC:
2459                 optname = SO_PASSSEC;
2460                 break;
2461         case TARGET_SO_TIMESTAMP:
2462 		optname = SO_TIMESTAMP;
2463 		break;
2464         case TARGET_SO_RCVLOWAT:
2465 		optname = SO_RCVLOWAT;
2466 		break;
2467         default:
2468             goto unimplemented;
2469         }
2470 	if (optlen < sizeof(uint32_t))
2471             return -TARGET_EINVAL;
2472 
2473 	if (get_user_u32(val, optval_addr))
2474             return -TARGET_EFAULT;
2475 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2476         break;
2477 #ifdef SOL_NETLINK
2478     case SOL_NETLINK:
2479         switch (optname) {
2480         case NETLINK_PKTINFO:
2481         case NETLINK_ADD_MEMBERSHIP:
2482         case NETLINK_DROP_MEMBERSHIP:
2483         case NETLINK_BROADCAST_ERROR:
2484         case NETLINK_NO_ENOBUFS:
2485 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2486         case NETLINK_LISTEN_ALL_NSID:
2487         case NETLINK_CAP_ACK:
2488 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2490         case NETLINK_EXT_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2493         case NETLINK_GET_STRICT_CHK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495             break;
2496         default:
2497             goto unimplemented;
2498         }
2499         val = 0;
2500         if (optlen < sizeof(uint32_t)) {
2501             return -TARGET_EINVAL;
2502         }
2503         if (get_user_u32(val, optval_addr)) {
2504             return -TARGET_EFAULT;
2505         }
2506         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2507                                    sizeof(val)));
2508         break;
2509 #endif /* SOL_NETLINK */
2510     default:
2511     unimplemented:
2512         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2513                       level, optname);
2514         ret = -TARGET_ENOPROTOOPT;
2515     }
2516     return ret;
2517 }
2518 
2519 /* do_getsockopt() Must return target values and target errnos. */
2520 static abi_long do_getsockopt(int sockfd, int level, int optname,
2521                               abi_ulong optval_addr, abi_ulong optlen)
2522 {
2523     abi_long ret;
2524     int len, val;
2525     socklen_t lv;
2526 
2527     switch(level) {
2528     case TARGET_SOL_SOCKET:
2529         level = SOL_SOCKET;
2530         switch (optname) {
2531         /* These don't just return a single integer */
2532         case TARGET_SO_PEERNAME:
2533             goto unimplemented;
2534         case TARGET_SO_RCVTIMEO: {
2535             struct timeval tv;
2536             socklen_t tvlen;
2537 
2538             optname = SO_RCVTIMEO;
2539 
2540 get_timeout:
2541             if (get_user_u32(len, optlen)) {
2542                 return -TARGET_EFAULT;
2543             }
2544             if (len < 0) {
2545                 return -TARGET_EINVAL;
2546             }
2547 
2548             tvlen = sizeof(tv);
2549             ret = get_errno(getsockopt(sockfd, level, optname,
2550                                        &tv, &tvlen));
2551             if (ret < 0) {
2552                 return ret;
2553             }
2554             if (len > sizeof(struct target_timeval)) {
2555                 len = sizeof(struct target_timeval);
2556             }
2557             if (copy_to_user_timeval(optval_addr, &tv)) {
2558                 return -TARGET_EFAULT;
2559             }
2560             if (put_user_u32(len, optlen)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             break;
2564         }
2565         case TARGET_SO_SNDTIMEO:
2566             optname = SO_SNDTIMEO;
2567             goto get_timeout;
2568         case TARGET_SO_PEERCRED: {
2569             struct ucred cr;
2570             socklen_t crlen;
2571             struct target_ucred *tcr;
2572 
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             crlen = sizeof(cr);
2581             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2582                                        &cr, &crlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > crlen) {
2587                 len = crlen;
2588             }
2589             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             __put_user(cr.pid, &tcr->pid);
2593             __put_user(cr.uid, &tcr->uid);
2594             __put_user(cr.gid, &tcr->gid);
2595             unlock_user_struct(tcr, optval_addr, 1);
2596             if (put_user_u32(len, optlen)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600         }
2601         case TARGET_SO_PEERSEC: {
2602             char *name;
2603 
2604             if (get_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (len < 0) {
2608                 return -TARGET_EINVAL;
2609             }
2610             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2611             if (!name) {
2612                 return -TARGET_EFAULT;
2613             }
2614             lv = len;
2615             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2616                                        name, &lv));
2617             if (put_user_u32(lv, optlen)) {
2618                 ret = -TARGET_EFAULT;
2619             }
2620             unlock_user(name, optval_addr, lv);
2621             break;
2622         }
2623         case TARGET_SO_LINGER:
2624         {
2625             struct linger lg;
2626             socklen_t lglen;
2627             struct target_linger *tlg;
2628 
2629             if (get_user_u32(len, optlen)) {
2630                 return -TARGET_EFAULT;
2631             }
2632             if (len < 0) {
2633                 return -TARGET_EINVAL;
2634             }
2635 
2636             lglen = sizeof(lg);
2637             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2638                                        &lg, &lglen));
2639             if (ret < 0) {
2640                 return ret;
2641             }
2642             if (len > lglen) {
2643                 len = lglen;
2644             }
2645             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2646                 return -TARGET_EFAULT;
2647             }
2648             __put_user(lg.l_onoff, &tlg->l_onoff);
2649             __put_user(lg.l_linger, &tlg->l_linger);
2650             unlock_user_struct(tlg, optval_addr, 1);
2651             if (put_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             break;
2655         }
2656         /* Options with 'int' argument.  */
2657         case TARGET_SO_DEBUG:
2658             optname = SO_DEBUG;
2659             goto int_case;
2660         case TARGET_SO_REUSEADDR:
2661             optname = SO_REUSEADDR;
2662             goto int_case;
2663 #ifdef SO_REUSEPORT
2664         case TARGET_SO_REUSEPORT:
2665             optname = SO_REUSEPORT;
2666             goto int_case;
2667 #endif
2668         case TARGET_SO_TYPE:
2669             optname = SO_TYPE;
2670             goto int_case;
2671         case TARGET_SO_ERROR:
2672             optname = SO_ERROR;
2673             goto int_case;
2674         case TARGET_SO_DONTROUTE:
2675             optname = SO_DONTROUTE;
2676             goto int_case;
2677         case TARGET_SO_BROADCAST:
2678             optname = SO_BROADCAST;
2679             goto int_case;
2680         case TARGET_SO_SNDBUF:
2681             optname = SO_SNDBUF;
2682             goto int_case;
2683         case TARGET_SO_RCVBUF:
2684             optname = SO_RCVBUF;
2685             goto int_case;
2686         case TARGET_SO_KEEPALIVE:
2687             optname = SO_KEEPALIVE;
2688             goto int_case;
2689         case TARGET_SO_OOBINLINE:
2690             optname = SO_OOBINLINE;
2691             goto int_case;
2692         case TARGET_SO_NO_CHECK:
2693             optname = SO_NO_CHECK;
2694             goto int_case;
2695         case TARGET_SO_PRIORITY:
2696             optname = SO_PRIORITY;
2697             goto int_case;
2698 #ifdef SO_BSDCOMPAT
2699         case TARGET_SO_BSDCOMPAT:
2700             optname = SO_BSDCOMPAT;
2701             goto int_case;
2702 #endif
2703         case TARGET_SO_PASSCRED:
2704             optname = SO_PASSCRED;
2705             goto int_case;
2706         case TARGET_SO_TIMESTAMP:
2707             optname = SO_TIMESTAMP;
2708             goto int_case;
2709         case TARGET_SO_RCVLOWAT:
2710             optname = SO_RCVLOWAT;
2711             goto int_case;
2712         case TARGET_SO_ACCEPTCONN:
2713             optname = SO_ACCEPTCONN;
2714             goto int_case;
2715         case TARGET_SO_PROTOCOL:
2716             optname = SO_PROTOCOL;
2717             goto int_case;
2718         case TARGET_SO_DOMAIN:
2719             optname = SO_DOMAIN;
2720             goto int_case;
2721         default:
2722             goto int_case;
2723         }
2724         break;
2725     case SOL_TCP:
2726     case SOL_UDP:
2727         /* TCP and UDP options all take an 'int' value.  */
2728     int_case:
2729         if (get_user_u32(len, optlen))
2730             return -TARGET_EFAULT;
2731         if (len < 0)
2732             return -TARGET_EINVAL;
2733         lv = sizeof(lv);
2734         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2735         if (ret < 0)
2736             return ret;
2737         if (optname == SO_TYPE) {
2738             val = host_to_target_sock_type(val);
2739         }
2740         if (len > lv)
2741             len = lv;
2742         if (len == 4) {
2743             if (put_user_u32(val, optval_addr))
2744                 return -TARGET_EFAULT;
2745         } else {
2746             if (put_user_u8(val, optval_addr))
2747                 return -TARGET_EFAULT;
2748         }
2749         if (put_user_u32(len, optlen))
2750             return -TARGET_EFAULT;
2751         break;
2752     case SOL_IP:
2753         switch(optname) {
2754         case IP_TOS:
2755         case IP_TTL:
2756         case IP_HDRINCL:
2757         case IP_ROUTER_ALERT:
2758         case IP_RECVOPTS:
2759         case IP_RETOPTS:
2760         case IP_PKTINFO:
2761         case IP_MTU_DISCOVER:
2762         case IP_RECVERR:
2763         case IP_RECVTOS:
2764 #ifdef IP_FREEBIND
2765         case IP_FREEBIND:
2766 #endif
2767         case IP_MULTICAST_TTL:
2768         case IP_MULTICAST_LOOP:
2769             if (get_user_u32(len, optlen))
2770                 return -TARGET_EFAULT;
2771             if (len < 0)
2772                 return -TARGET_EINVAL;
2773             lv = sizeof(lv);
2774             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2775             if (ret < 0)
2776                 return ret;
2777             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2778                 len = 1;
2779                 if (put_user_u32(len, optlen)
2780                     || put_user_u8(val, optval_addr))
2781                     return -TARGET_EFAULT;
2782             } else {
2783                 if (len > sizeof(int))
2784                     len = sizeof(int);
2785                 if (put_user_u32(len, optlen)
2786                     || put_user_u32(val, optval_addr))
2787                     return -TARGET_EFAULT;
2788             }
2789             break;
2790         default:
2791             ret = -TARGET_ENOPROTOOPT;
2792             break;
2793         }
2794         break;
2795     case SOL_IPV6:
2796         switch (optname) {
2797         case IPV6_MTU_DISCOVER:
2798         case IPV6_MTU:
2799         case IPV6_V6ONLY:
2800         case IPV6_RECVPKTINFO:
2801         case IPV6_UNICAST_HOPS:
2802         case IPV6_MULTICAST_HOPS:
2803         case IPV6_MULTICAST_LOOP:
2804         case IPV6_RECVERR:
2805         case IPV6_RECVHOPLIMIT:
2806         case IPV6_2292HOPLIMIT:
2807         case IPV6_CHECKSUM:
2808         case IPV6_ADDRFORM:
2809         case IPV6_2292PKTINFO:
2810         case IPV6_RECVTCLASS:
2811         case IPV6_RECVRTHDR:
2812         case IPV6_2292RTHDR:
2813         case IPV6_RECVHOPOPTS:
2814         case IPV6_2292HOPOPTS:
2815         case IPV6_RECVDSTOPTS:
2816         case IPV6_2292DSTOPTS:
2817         case IPV6_TCLASS:
2818         case IPV6_ADDR_PREFERENCES:
2819 #ifdef IPV6_RECVPATHMTU
2820         case IPV6_RECVPATHMTU:
2821 #endif
2822 #ifdef IPV6_TRANSPARENT
2823         case IPV6_TRANSPARENT:
2824 #endif
2825 #ifdef IPV6_FREEBIND
2826         case IPV6_FREEBIND:
2827 #endif
2828 #ifdef IPV6_RECVORIGDSTADDR
2829         case IPV6_RECVORIGDSTADDR:
2830 #endif
2831             if (get_user_u32(len, optlen))
2832                 return -TARGET_EFAULT;
2833             if (len < 0)
2834                 return -TARGET_EINVAL;
2835             lv = sizeof(lv);
2836             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2837             if (ret < 0)
2838                 return ret;
2839             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2840                 len = 1;
2841                 if (put_user_u32(len, optlen)
2842                     || put_user_u8(val, optval_addr))
2843                     return -TARGET_EFAULT;
2844             } else {
2845                 if (len > sizeof(int))
2846                     len = sizeof(int);
2847                 if (put_user_u32(len, optlen)
2848                     || put_user_u32(val, optval_addr))
2849                     return -TARGET_EFAULT;
2850             }
2851             break;
2852         default:
2853             ret = -TARGET_ENOPROTOOPT;
2854             break;
2855         }
2856         break;
2857 #ifdef SOL_NETLINK
2858     case SOL_NETLINK:
2859         switch (optname) {
2860         case NETLINK_PKTINFO:
2861         case NETLINK_BROADCAST_ERROR:
2862         case NETLINK_NO_ENOBUFS:
2863 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2864         case NETLINK_LISTEN_ALL_NSID:
2865         case NETLINK_CAP_ACK:
2866 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2867 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2868         case NETLINK_EXT_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2871         case NETLINK_GET_STRICT_CHK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2873             if (get_user_u32(len, optlen)) {
2874                 return -TARGET_EFAULT;
2875             }
2876             if (len != sizeof(val)) {
2877                 return -TARGET_EINVAL;
2878             }
2879             lv = len;
2880             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2881             if (ret < 0) {
2882                 return ret;
2883             }
2884             if (put_user_u32(lv, optlen)
2885                 || put_user_u32(val, optval_addr)) {
2886                 return -TARGET_EFAULT;
2887             }
2888             break;
2889 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2890         case NETLINK_LIST_MEMBERSHIPS:
2891         {
2892             uint32_t *results;
2893             int i;
2894             if (get_user_u32(len, optlen)) {
2895                 return -TARGET_EFAULT;
2896             }
2897             if (len < 0) {
2898                 return -TARGET_EINVAL;
2899             }
2900             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2901             if (!results && len > 0) {
2902                 return -TARGET_EFAULT;
2903             }
2904             lv = len;
2905             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2906             if (ret < 0) {
2907                 unlock_user(results, optval_addr, 0);
2908                 return ret;
2909             }
2910             /* swap host endianess to target endianess. */
2911             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2912                 results[i] = tswap32(results[i]);
2913             }
2914             if (put_user_u32(lv, optlen)) {
2915                 return -TARGET_EFAULT;
2916             }
2917             unlock_user(results, optval_addr, 0);
2918             break;
2919         }
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2921         default:
2922             goto unimplemented;
2923         }
2924         break;
2925 #endif /* SOL_NETLINK */
2926     default:
2927     unimplemented:
2928         qemu_log_mask(LOG_UNIMP,
2929                       "getsockopt level=%d optname=%d not yet supported\n",
2930                       level, optname);
2931         ret = -TARGET_EOPNOTSUPP;
2932         break;
2933     }
2934     return ret;
2935 }
2936 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shift is done in two half-width steps: a single shift by
     * TARGET_LONG_BITS (or HOST_LONG_BITS below) would be undefined
     * behaviour in C when the count equals the operand's width, which
     * happens for a 64-bit target (resp. host). */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    /* On a 64-bit host the low word holds everything and hhigh is 0. */
    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2953 
/*
 * Build a host iovec array from a guest iovec array at @target_addr,
 * locking each guest buffer into host memory.
 *
 * @type: VERIFY_READ or VERIFY_WRITE, direction of the eventual access
 * @count: number of guest iovec entries
 * @copy: non-zero to copy guest data into the locked buffers now
 *
 * Returns the host vector (free with unlock_iovec()), or NULL with
 * errno set on failure.  NULL with errno == 0 means count was 0.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* errno 0 distinguishes "empty vector" from a real failure. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the summed length never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before entry i. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3041 
3042 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3043                          abi_ulong count, int copy)
3044 {
3045     struct target_iovec *target_vec;
3046     int i;
3047 
3048     target_vec = lock_user(VERIFY_READ, target_addr,
3049                            count * sizeof(struct target_iovec), 1);
3050     if (target_vec) {
3051         for (i = 0; i < count; i++) {
3052             abi_ulong base = tswapal(target_vec[i].iov_base);
3053             abi_long len = tswapal(target_vec[i].iov_len);
3054             if (len < 0) {
3055                 break;
3056             }
3057             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3058         }
3059         unlock_user(target_vec, target_addr, 0);
3060     }
3061 
3062     g_free(vec);
3063 }
3064 
3065 static inline int target_to_host_sock_type(int *type)
3066 {
3067     int host_type = 0;
3068     int target_type = *type;
3069 
3070     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3071     case TARGET_SOCK_DGRAM:
3072         host_type = SOCK_DGRAM;
3073         break;
3074     case TARGET_SOCK_STREAM:
3075         host_type = SOCK_STREAM;
3076         break;
3077     default:
3078         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3079         break;
3080     }
3081     if (target_type & TARGET_SOCK_CLOEXEC) {
3082 #if defined(SOCK_CLOEXEC)
3083         host_type |= SOCK_CLOEXEC;
3084 #else
3085         return -TARGET_EINVAL;
3086 #endif
3087     }
3088     if (target_type & TARGET_SOCK_NONBLOCK) {
3089 #if defined(SOCK_NONBLOCK)
3090         host_type |= SOCK_NONBLOCK;
3091 #elif !defined(O_NONBLOCK)
3092         return -TARGET_EINVAL;
3093 #endif
3094     }
3095     *type = host_type;
3096     return 0;
3097 }
3098 
3099 /* Try to emulate socket type flags after socket creation.  */
3100 static int sock_flags_fixup(int fd, int target_type)
3101 {
3102 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3103     if (target_type & TARGET_SOCK_NONBLOCK) {
3104         int flags = fcntl(fd, F_GETFL);
3105         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3106             close(fd);
3107             return -TARGET_EINVAL;
3108         }
3109     }
3110 #endif
3111     return fd;
3112 }
3113 
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the untranslated type: sock_flags_fixup() needs the guest's
     * flag bits, not the host's. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a registered fd translator (below)
     * are supported; refuse the rest up front. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets take the protocol (ethertype) in network byte
     * order, so byte-swap it for cross-endian guests. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Register a per-fd data translator so netlink messages are
             * converted between host and target layouts on send/recv. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3167 
3168 /* do_bind() Must return target values and target errnos. */
3169 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3170                         socklen_t addrlen)
3171 {
3172     void *addr;
3173     abi_long ret;
3174 
3175     if ((int)addrlen < 0) {
3176         return -TARGET_EINVAL;
3177     }
3178 
3179     addr = alloca(addrlen+1);
3180 
3181     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3182     if (ret)
3183         return ret;
3184 
3185     return get_errno(bind(sockfd, addr, addrlen));
3186 }
3187 
3188 /* do_connect() Must return target values and target errnos. */
3189 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3190                            socklen_t addrlen)
3191 {
3192     void *addr;
3193     abi_long ret;
3194 
3195     if ((int)addrlen < 0) {
3196         return -TARGET_EINVAL;
3197     }
3198 
3199     addr = alloca(addrlen+1);
3200 
3201     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3202     if (ret)
3203         return ret;
3204 
3205     return get_errno(safe_connect(sockfd, addr, addrlen));
3206 }
3207 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: converts the (already locked)
 * guest msghdr @msgp into a host msghdr, performs the host syscall,
 * and on receive converts name/control/flags back to the guest.
 * @send selects direction: non-zero = sendmsg, zero = recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Over-allocate control space: host cmsg layouts may need more
     * room than the target's (factor 2 is a heuristic headroom). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* This fd has a data translator: run it on a scratch copy
             * of the first iovec buffer, then send that instead. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; ret is reused for error checks. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3314 
3315 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3316                                int flags, int send)
3317 {
3318     abi_long ret;
3319     struct target_msghdr *msgp;
3320 
3321     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3322                           msgp,
3323                           target_msg,
3324                           send ? 1 : 0)) {
3325         return -TARGET_EFAULT;
3326     }
3327     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3328     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3329     return ret;
3330 }
3331 
3332 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3333  * so it might not have this *mmsg-specific flag either.
3334  */
3335 #ifndef MSG_WAITFORONE
3336 #define MSG_WAITFORONE 0x10000
3337 #endif
3338 
/*
 * Emulate sendmmsg/recvmmsg: process up to @vlen msghdrs in sequence
 * via do_sendrecvmsg_locked().  Returns the number of datagrams
 * processed if any succeeded, otherwise the first error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp, matching the kernel's UIO_MAXIOV cap. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3378 
/* do_accept4() Must return target values and target errnos.
 *
 * @target_addr may be 0, in which case the peer address is not reported
 * (accept with NULL addr).  On success the peer address and its length
 * are converted and copied back to the guest.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Map target SOCK_* accept4 flags to host values. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy out at most the guest-supplied buffer size; ret_addrlen
         * may be larger when the buffer was too small. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3418 
3419 /* do_getpeername() Must return target values and target errnos. */
3420 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3421                                abi_ulong target_addrlen_addr)
3422 {
3423     socklen_t addrlen, ret_addrlen;
3424     void *addr;
3425     abi_long ret;
3426 
3427     if (get_user_u32(addrlen, target_addrlen_addr))
3428         return -TARGET_EFAULT;
3429 
3430     if ((int)addrlen < 0) {
3431         return -TARGET_EINVAL;
3432     }
3433 
3434     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3435         return -TARGET_EFAULT;
3436     }
3437 
3438     addr = alloca(addrlen);
3439 
3440     ret_addrlen = addrlen;
3441     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3442     if (!is_error(ret)) {
3443         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3444         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3445             ret = -TARGET_EFAULT;
3446         }
3447     }
3448     return ret;
3449 }
3450 
3451 /* do_getsockname() Must return target values and target errnos. */
3452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3453                                abi_ulong target_addrlen_addr)
3454 {
3455     socklen_t addrlen, ret_addrlen;
3456     void *addr;
3457     abi_long ret;
3458 
3459     if (get_user_u32(addrlen, target_addrlen_addr))
3460         return -TARGET_EFAULT;
3461 
3462     if ((int)addrlen < 0) {
3463         return -TARGET_EINVAL;
3464     }
3465 
3466     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3467         return -TARGET_EFAULT;
3468     }
3469 
3470     addr = alloca(addrlen);
3471 
3472     ret_addrlen = addrlen;
3473     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3474     if (!is_error(ret)) {
3475         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3476         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3477             ret = -TARGET_EFAULT;
3478         }
3479     }
3480     return ret;
3481 }
3482 
3483 /* do_socketpair() Must return target values and target errnos. */
3484 static abi_long do_socketpair(int domain, int type, int protocol,
3485                               abi_ulong target_tab_addr)
3486 {
3487     int tab[2];
3488     abi_long ret;
3489 
3490     target_to_host_sock_type(&type);
3491 
3492     ret = get_errno(socketpair(domain, type, protocol, tab));
3493     if (!is_error(ret)) {
3494         if (put_user_s32(tab[0], target_tab_addr)
3495             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3496             ret = -TARGET_EFAULT;
3497     }
3498     return ret;
3499 }
3500 
/* do_sendto() Must return target values and target errnos.
 *
 * Implements both send(2) (target_addr == 0) and sendto(2).  The guest
 * payload at 'msg' is locked read-only; if the fd has a registered
 * data translator the payload is converted on a private copy first.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* Non-NULL only when a translator rewrote the payload; keeps the
     * original locked buffer so it can still be unlocked at the end. */
    void *copy_msg = NULL;
    abi_long ret;

    /* Match the kernel: a negative address length is EINVAL. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Convert on a heap copy so the guest's buffer is untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: extra byte for target_to_host_sockaddr() — presumably for
         * NUL-terminating AF_UNIX paths; confirm against that helper. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Drop the converted copy; unlock the original guest buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3544 
/* do_recvfrom() Must return target values and target errnos.
 *
 * Implements both recv(2) (target_addr == 0) and recvfrom(2).  On
 * success the received bytes, the converted source address, and the
 * updated address length are all copied back to the guest.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through to the host call as-is. */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        /* Match the kernel: a negative address length is EINVAL. */
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        /* The host call updates ret_addrlen; the original addrlen is kept
         * so only MIN(addrlen, ret_addrlen) bytes are copied back. */
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Convert received data in place for fds with a translator. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the whole locked region back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
        /* Error paths jump here: release the buffer, copy nothing back. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3605 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) super-syscall used by some
 * targets (e.g. 32-bit x86): 'num' selects the operation and 'vptr'
 * points at an array of abi_long arguments in guest memory.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3698 
/* Maximum number of SysV shared-memory attachments tracked at once. */
#define N_SHM_REGIONS	32

/* Per-process table of guest shm attachments recorded by the SysV shm
 * emulation, so an attachment's extent can be looked up again later. */
static struct shm_region {
    abi_ulong start;   /* guest virtual address of the attachment */
    abi_ulong size;    /* size of the attached region in bytes */
    bool in_use;       /* slot currently describes a live attachment */
} shm_regions[N_SHM_REGIONS];
3706 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; targets with their own layout
 * define TARGET_SEMID64_DS and provide it elsewhere. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;          /* last semop() time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;          /* pad: high half of 64-bit time on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;          /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;          /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3725 
/* Convert the guest struct ipc_perm embedded in a guest semid64_ds at
 * target_addr into *host_ip.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
    /* Likewise '__seq' is 32 bits only on PPC targets. */
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3753 
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the guest
 * semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
    /* Likewise '__seq' is 32 bits only on PPC targets. */
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3781 
3782 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3783                                                abi_ulong target_addr)
3784 {
3785     struct target_semid64_ds *target_sd;
3786 
3787     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3788         return -TARGET_EFAULT;
3789     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3790         return -TARGET_EFAULT;
3791     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3792     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3793     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3794     unlock_user_struct(target_sd, target_addr, 0);
3795     return 0;
3796 }
3797 
3798 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3799                                                struct semid_ds *host_sd)
3800 {
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804         return -TARGET_EFAULT;
3805     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3806         return -TARGET_EFAULT;
3807     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3808     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3809     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3810     unlock_user_struct(target_sd, target_addr, 1);
3811     return 0;
3812 }
3813 
/* Guest layout of struct seminfo as returned by semctl(IPC_INFO/SEM_INFO);
 * field-for-field mirror of the host struct, all plain ints. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3826 
3827 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3828                                               struct seminfo *host_seminfo)
3829 {
3830     struct target_seminfo *target_seminfo;
3831     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3832         return -TARGET_EFAULT;
3833     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3834     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3835     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3836     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3837     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3838     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3839     __put_user(host_seminfo->semume, &target_seminfo->semume);
3840     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3841     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3842     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3843     unlock_user_struct(target_seminfo, target_addr, 1);
3844     return 0;
3845 }
3846 
/* Host-side semctl() argument union (glibc does not define it). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};
3853 
/* Guest-side view of the semctl() argument: the pointer members are
 * guest addresses, so they are all abi_ulong here. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3860 
3861 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3862                                                abi_ulong target_addr)
3863 {
3864     int nsems;
3865     unsigned short *array;
3866     union semun semun;
3867     struct semid_ds semid_ds;
3868     int i, ret;
3869 
3870     semun.buf = &semid_ds;
3871 
3872     ret = semctl(semid, 0, IPC_STAT, semun);
3873     if (ret == -1)
3874         return get_errno(ret);
3875 
3876     nsems = semid_ds.sem_nsems;
3877 
3878     *host_array = g_try_new(unsigned short, nsems);
3879     if (!*host_array) {
3880         return -TARGET_ENOMEM;
3881     }
3882     array = lock_user(VERIFY_READ, target_addr,
3883                       nsems*sizeof(unsigned short), 1);
3884     if (!array) {
3885         g_free(*host_array);
3886         return -TARGET_EFAULT;
3887     }
3888 
3889     for(i=0; i<nsems; i++) {
3890         __get_user((*host_array)[i], &array[i]);
3891     }
3892     unlock_user(array, target_addr, 0);
3893 
3894     return 0;
3895 }
3896 
3897 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3898                                                unsigned short **host_array)
3899 {
3900     int nsems;
3901     unsigned short *array;
3902     union semun semun;
3903     struct semid_ds semid_ds;
3904     int i, ret;
3905 
3906     semun.buf = &semid_ds;
3907 
3908     ret = semctl(semid, 0, IPC_STAT, semun);
3909     if (ret == -1)
3910         return get_errno(ret);
3911 
3912     nsems = semid_ds.sem_nsems;
3913 
3914     array = lock_user(VERIFY_WRITE, target_addr,
3915                       nsems*sizeof(unsigned short), 0);
3916     if (!array)
3917         return -TARGET_EFAULT;
3918 
3919     for(i=0; i<nsems; i++) {
3920         __put_user((*host_array)[i], &array[i]);
3921     }
3922     g_free(*host_array);
3923     unlock_user(array, target_addr, 1);
3924 
3925     return 0;
3926 }
3927 
3928 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3929                                  abi_ulong target_arg)
3930 {
3931     union target_semun target_su = { .buf = target_arg };
3932     union semun arg;
3933     struct semid_ds dsarg;
3934     unsigned short *array = NULL;
3935     struct seminfo seminfo;
3936     abi_long ret = -TARGET_EINVAL;
3937     abi_long err;
3938     cmd &= 0xff;
3939 
3940     switch( cmd ) {
3941 	case GETVAL:
3942 	case SETVAL:
3943             /* In 64 bit cross-endian situations, we will erroneously pick up
3944              * the wrong half of the union for the "val" element.  To rectify
3945              * this, the entire 8-byte structure is byteswapped, followed by
3946 	     * a swap of the 4 byte val field. In other cases, the data is
3947 	     * already in proper host byte order. */
3948 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3949 		target_su.buf = tswapal(target_su.buf);
3950 		arg.val = tswap32(target_su.val);
3951 	    } else {
3952 		arg.val = target_su.val;
3953 	    }
3954             ret = get_errno(semctl(semid, semnum, cmd, arg));
3955             break;
3956 	case GETALL:
3957 	case SETALL:
3958             err = target_to_host_semarray(semid, &array, target_su.array);
3959             if (err)
3960                 return err;
3961             arg.array = array;
3962             ret = get_errno(semctl(semid, semnum, cmd, arg));
3963             err = host_to_target_semarray(semid, target_su.array, &array);
3964             if (err)
3965                 return err;
3966             break;
3967 	case IPC_STAT:
3968 	case IPC_SET:
3969 	case SEM_STAT:
3970             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3971             if (err)
3972                 return err;
3973             arg.buf = &dsarg;
3974             ret = get_errno(semctl(semid, semnum, cmd, arg));
3975             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3976             if (err)
3977                 return err;
3978             break;
3979 	case IPC_INFO:
3980 	case SEM_INFO:
3981             arg.__buf = &seminfo;
3982             ret = get_errno(semctl(semid, semnum, cmd, arg));
3983             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3984             if (err)
3985                 return err;
3986             break;
3987 	case IPC_RMID:
3988 	case GETPID:
3989 	case GETNCNT:
3990 	case GETZCNT:
3991             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3992             break;
3993     }
3994 
3995     return ret;
3996 }
3997 
/* Guest layout of struct sembuf (same field widths as the host's). */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation: add, subtract, or wait-for-zero */
    short sem_flg;            /* IPC_NOWAIT / SEM_UNDO flags */
};
4003 
4004 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4005                                              abi_ulong target_addr,
4006                                              unsigned nsops)
4007 {
4008     struct target_sembuf *target_sembuf;
4009     int i;
4010 
4011     target_sembuf = lock_user(VERIFY_READ, target_addr,
4012                               nsops*sizeof(struct target_sembuf), 1);
4013     if (!target_sembuf)
4014         return -TARGET_EFAULT;
4015 
4016     for(i=0; i<nsops; i++) {
4017         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4018         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4019         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4020     }
4021 
4022     unlock_user(target_sembuf, target_addr, 0);
4023 
4024     return 0;
4025 }
4026 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop()/semtimedop() for the guest.  'ptr' is the guest
 * sembuf array; 'timeout' is a guest timespec address or 0; 'time64'
 * selects the 64-bit guest timespec layout.  Must return target
 * errnos. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Cap at the guest's SEMOPM limit, matching kernel behaviour. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Hosts without a dedicated semtimedop syscall go through the
     * multiplexed ipc(2) entry point instead. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4089 
/* Guest layout of struct msqid64_ds (asm-generic style). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;          /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;          /* pad: high half of 64-bit time on 32-bit ABIs */
#endif
    abi_ulong msg_rtime;          /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;          /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;       /* current bytes on the queue */
    abi_ulong msg_qnum;           /* current messages on the queue */
    abi_ulong msg_qbytes;         /* max bytes allowed on the queue */
    abi_ulong msg_lspid;          /* pid of last msgsnd() */
    abi_ulong msg_lrpid;          /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4113 
4114 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4115                                                abi_ulong target_addr)
4116 {
4117     struct target_msqid_ds *target_md;
4118 
4119     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4120         return -TARGET_EFAULT;
4121     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4122         return -TARGET_EFAULT;
4123     host_md->msg_stime = tswapal(target_md->msg_stime);
4124     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4125     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4126     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4127     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4128     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4129     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4130     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4131     unlock_user_struct(target_md, target_addr, 0);
4132     return 0;
4133 }
4134 
4135 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4136                                                struct msqid_ds *host_md)
4137 {
4138     struct target_msqid_ds *target_md;
4139 
4140     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4141         return -TARGET_EFAULT;
4142     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4143         return -TARGET_EFAULT;
4144     target_md->msg_stime = tswapal(host_md->msg_stime);
4145     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4146     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4147     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4148     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4149     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4150     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4151     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4152     unlock_user_struct(target_md, target_addr, 1);
4153     return 0;
4154 }
4155 
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result);
 * field-for-field mirror of the host struct. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4166 
/* Copy a host struct msginfo into the guest structure at target_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4184 
/* Emulate msgctl(2): translate the guest buffer per command, issue the
 * host call, and convert the result back.  Must return target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and similar version bits from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* NOTE(review): the guest buffer is converted in for all three
         * commands, although IPC_STAT/MSG_STAT only produce output —
         * presumably harmless since the buffer is guest-writable. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a struct msginfo through the msqid_ds
         * pointer for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4216 
/* Guest layout of struct msgbuf: the type word followed by the
 * variable-length payload (declared as a 1-byte array, C89 style). */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4221 
/* Emulate msgsnd(2): copy the guest message into a host msgbuf and
 * send it, falling back to the multiplexed ipc(2) syscall on hosts
 * without a dedicated msgsnd syscall.  Must return target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Match the kernel: a negative message size is EINVAL. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host msgbuf: a long mtype followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 ipc(2) variant takes only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4262 
#ifdef __NR_ipc
/* Build the trailing ipc(2) arguments for IPCOP_msgrcv: most hosts pass
 * a two-element {msgp, msgtyp} kludge array (plus a dummy fifth arg);
 * SPARC and s390 differ as noted below. */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4276 
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the type
 * word and payload back to the guest.  Falls back to the multiplexed
 * ipc(2) syscall where needed.  Must return target errnos. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Match the kernel: a negative buffer size is EINVAL. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host msgbuf: a long mtype followed by up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    /* On success ret is the number of payload bytes received. */
    if (ret > 0) {
        /* NOTE(review): mtext is located sizeof(abi_ulong) into the guest
         * struct although mtype is declared abi_long — same size, but
         * worth confirming the types stay in sync. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        /* NOTE(review): the copy goes through target_mb (locked only for
         * sizeof(*target_mb)), not through the target_mtext lock taken
         * just above — verify this is safe under DEBUG_REMAP. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT returned earlier). */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4328 
4329 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4330                                                abi_ulong target_addr)
4331 {
4332     struct target_shmid_ds *target_sd;
4333 
4334     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4335         return -TARGET_EFAULT;
4336     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4337         return -TARGET_EFAULT;
4338     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4339     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4340     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4341     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4342     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4343     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4344     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4345     unlock_user_struct(target_sd, target_addr, 0);
4346     return 0;
4347 }
4348 
4349 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4350                                                struct shmid_ds *host_sd)
4351 {
4352     struct target_shmid_ds *target_sd;
4353 
4354     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4355         return -TARGET_EFAULT;
4356     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4357         return -TARGET_EFAULT;
4358     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4359     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4360     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4361     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4362     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4363     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4364     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4365     unlock_user_struct(target_sd, target_addr, 1);
4366     return 0;
4367 }
4368 
/* Guest-ABI layout of struct shminfo (returned by shmctl(IPC_INFO));
 * fields are abi_ulong so size/alignment follow the target, not the
 * host.  Same field order as the host struct shminfo copied by
 * host_to_target_shminfo() below. */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4376 
4377 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4378                                               struct shminfo *host_shminfo)
4379 {
4380     struct target_shminfo *target_shminfo;
4381     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4382         return -TARGET_EFAULT;
4383     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4384     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4385     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4386     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4387     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4388     unlock_user_struct(target_shminfo, target_addr, 1);
4389     return 0;
4390 }
4391 
/* Guest-ABI layout of struct shm_info (returned by shmctl(SHM_INFO));
 * mirrors the host struct shm_info field-for-field (see
 * host_to_target_shm_info() below). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4400 
4401 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4402                                                struct shm_info *host_shm_info)
4403 {
4404     struct target_shm_info *target_shm_info;
4405     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4406         return -TARGET_EFAULT;
4407     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4408     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4409     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4410     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4411     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4412     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4413     unlock_user_struct(target_shm_info, target_addr, 1);
4414     return 0;
4415 }
4416 
4417 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4418 {
4419     struct shmid_ds dsarg;
4420     struct shminfo shminfo;
4421     struct shm_info shm_info;
4422     abi_long ret = -TARGET_EINVAL;
4423 
4424     cmd &= 0xff;
4425 
4426     switch(cmd) {
4427     case IPC_STAT:
4428     case IPC_SET:
4429     case SHM_STAT:
4430         if (target_to_host_shmid_ds(&dsarg, buf))
4431             return -TARGET_EFAULT;
4432         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4433         if (host_to_target_shmid_ds(buf, &dsarg))
4434             return -TARGET_EFAULT;
4435         break;
4436     case IPC_INFO:
4437         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4438         if (host_to_target_shminfo(buf, &shminfo))
4439             return -TARGET_EFAULT;
4440         break;
4441     case SHM_INFO:
4442         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4443         if (host_to_target_shm_info(buf, &shm_info))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_RMID:
4447     case SHM_LOCK:
4448     case SHM_UNLOCK:
4449         ret = get_errno(shmctl(shmid, cmd, NULL));
4450         break;
4451     }
4452 
4453     return ret;
4454 }
4455 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Return the target's SHMLBA alignment requirement for shmat(). */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4475 
/*
 * Emulate shmat(2): attach SysV shared memory segment 'shmid' at guest
 * address 'shmaddr' (0 means "pick an address for me").
 * Returns the guest attach address, or a negative target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* enforce the target's SHMLBA alignment: round down when SHM_RND
     * is set, otherwise reject the unaligned address */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* the mapping must fit inside the guest address space */
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    /* guest gave an address: attach exactly there */
    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the reservation mmap_find_vma made */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* mark the guest pages valid with the appropriate protections */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* record the attach so do_shmdt() can clear the page flags later.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach is
     * silently not recorded -- confirm this is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4560 
/*
 * Emulate shmdt(2): detach the segment attached at guest address
 * 'shmaddr' and drop the page-flag bookkeeping added by do_shmat().
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    /* find the region do_shmat() recorded and clear its page flags.
     * NOTE(review): flags are cleared before the host shmdt() result is
     * known, so a failing shmdt leaves the bookkeeping already wiped --
     * confirm this ordering is intended. */
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
4583 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the SysV IPC operation, the high 16 bits carry an ABI
 * version.  The remaining arguments are interpreted per operation.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* Fix: the original ignored a get_user_ual() fault and passed an
         * uninitialized value on to do_semctl(). */
        if (get_user_ual(atptr, ptr)) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: ptr points at a kludge struct carrying both
                 * the message buffer pointer and msgtyp */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* old-style shmat returns the attach address via *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4704 
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum with one
 * STRUCT_<name> constant per known kernel struct type. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array
 * (struct_<name>_def) for each struct.  STRUCT_SPECIAL entries get no
 * table here -- presumably they are converted by hand-written code;
 * verify against syscall_types.h users. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* size of the fixed scratch buffer (buf_temp) used when converting
 * ioctl arguments between guest and host layouts */
#define MAX_STRUCT_SIZE 4096
4723 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handle FS_IOC_FIEMAP: convert the guest's struct fiemap to host
 * layout, run the ioctl, then convert the header and the extent array
 * the kernel filled in back to guest layout.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    /* convert the guest fiemap header into buf_temp (host layout) */
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* bound fm_extent_count so outbufsz below cannot overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    /* host buffer: header plus the extent array the kernel fills in */
    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4812 
4813 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4814                                 int fd, int cmd, abi_long arg)
4815 {
4816     const argtype *arg_type = ie->arg_type;
4817     int target_size;
4818     void *argptr;
4819     int ret;
4820     struct ifconf *host_ifconf;
4821     uint32_t outbufsz;
4822     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4823     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4824     int target_ifreq_size;
4825     int nb_ifreq;
4826     int free_buf = 0;
4827     int i;
4828     int target_ifc_len;
4829     abi_long target_ifc_buf;
4830     int host_ifc_len;
4831     char *host_ifc_buf;
4832 
4833     assert(arg_type[0] == TYPE_PTR);
4834     assert(ie->access == IOC_RW);
4835 
4836     arg_type++;
4837     target_size = thunk_type_size(arg_type, 0);
4838 
4839     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4840     if (!argptr)
4841         return -TARGET_EFAULT;
4842     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4843     unlock_user(argptr, arg, 0);
4844 
4845     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4846     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4847     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4848 
4849     if (target_ifc_buf != 0) {
4850         target_ifc_len = host_ifconf->ifc_len;
4851         nb_ifreq = target_ifc_len / target_ifreq_size;
4852         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4853 
4854         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4855         if (outbufsz > MAX_STRUCT_SIZE) {
4856             /*
4857              * We can't fit all the extents into the fixed size buffer.
4858              * Allocate one that is large enough and use it instead.
4859              */
4860             host_ifconf = malloc(outbufsz);
4861             if (!host_ifconf) {
4862                 return -TARGET_ENOMEM;
4863             }
4864             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4865             free_buf = 1;
4866         }
4867         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4868 
4869         host_ifconf->ifc_len = host_ifc_len;
4870     } else {
4871       host_ifc_buf = NULL;
4872     }
4873     host_ifconf->ifc_buf = host_ifc_buf;
4874 
4875     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4876     if (!is_error(ret)) {
4877 	/* convert host ifc_len to target ifc_len */
4878 
4879         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4880         target_ifc_len = nb_ifreq * target_ifreq_size;
4881         host_ifconf->ifc_len = target_ifc_len;
4882 
4883 	/* restore target ifc_buf */
4884 
4885         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4886 
4887 	/* copy struct ifconf to target user */
4888 
4889         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4890         if (!argptr)
4891             return -TARGET_EFAULT;
4892         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4893         unlock_user(argptr, arg, target_size);
4894 
4895         if (target_ifc_buf != 0) {
4896             /* copy ifreq[] to target user */
4897             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4898             for (i = 0; i < nb_ifreq ; i++) {
4899                 thunk_convert(argptr + i * target_ifreq_size,
4900                               host_ifc_buf + i * sizeof(struct ifreq),
4901                               ifreq_arg_type, THUNK_TARGET);
4902             }
4903             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4904         }
4905     }
4906 
4907     if (free_buf) {
4908         free(host_ifconf);
4909     }
4910 
4911     return ret;
4912 }
4913 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Per-URB tracking state for USBDEVFS_SUBMITURB emulation.
 * target_urb_adr must remain the first field: it is the 64-bit key the
 * g_int64_* hash callbacks read, and do_ioctl_usbdevfs_reapurb()
 * recovers the wrapper from &host_urb via offsetof().
 */
struct live_urb {
    uint64_t target_urb_adr;        /* guest address of the guest URB (hash key) */
    uint64_t target_buf_adr;        /* guest address of the data buffer */
    char *target_buf_ptr;           /* lock_user() pointer held while in flight */
    struct usbdevfs_urb host_urb;   /* the URB actually given to the host kernel */
};
4924 
4925 static GHashTable *usbdevfs_urb_hashtable(void)
4926 {
4927     static GHashTable *urb_hashtable;
4928 
4929     if (!urb_hashtable) {
4930         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4931     }
4932     return urb_hashtable;
4933 }
4934 
/* Track an in-flight URB.  The urb itself is both key and value: its
 * first field is the 64-bit guest address the g_int64_* callbacks hash. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4940 
4941 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4942 {
4943     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4944     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4945 }
4946 
/* Stop tracking an URB; the caller remains responsible for freeing it. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4952 
/*
 * USBDEVFS_REAPURB{,NDELAY}: the kernel hands back a pointer to the
 * completed host URB.  Map that back to our live_urb wrapper, release
 * the locked guest data buffer, copy the results into the guest URB,
 * and write the guest URB address through 'arg'.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* recover the live_urb wrapper from the returned host_urb pointer */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* release the guest data buffer locked at submit time */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5012 
5013 static abi_long
5014 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5015                              uint8_t *buf_temp __attribute__((unused)),
5016                              int fd, int cmd, abi_long arg)
5017 {
5018     struct live_urb *lurb;
5019 
5020     /* map target address back to host URB with metadata. */
5021     lurb = urb_hashtable_lookup(arg);
5022     if (!lurb) {
5023         return -TARGET_EFAULT;
5024     }
5025     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5026 }
5027 
/*
 * USBDEVFS_SUBMITURB: convert the guest URB to host layout, lock the
 * guest data buffer for the lifetime of the request, and submit the
 * host copy.  The live_urb wrapper, keyed on the guest URB address,
 * lets REAPURB/DISCARDURB find the request again later.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* the guest URB address doubles as the hash key (see struct live_urb) */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    /* NOTE(review): a zero-length buffer may make lock_user return NULL
     * and be misreported as EFAULT here -- confirm against lock_user's
     * contract. */
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* submission failed: release the buffer and forget the URB */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5088 #endif /* CONFIG_USBFS */
5089 
5090 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5091                             int cmd, abi_long arg)
5092 {
5093     void *argptr;
5094     struct dm_ioctl *host_dm;
5095     abi_long guest_data;
5096     uint32_t guest_data_size;
5097     int target_size;
5098     const argtype *arg_type = ie->arg_type;
5099     abi_long ret;
5100     void *big_buf = NULL;
5101     char *host_data;
5102 
5103     arg_type++;
5104     target_size = thunk_type_size(arg_type, 0);
5105     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5106     if (!argptr) {
5107         ret = -TARGET_EFAULT;
5108         goto out;
5109     }
5110     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5111     unlock_user(argptr, arg, 0);
5112 
5113     /* buf_temp is too small, so fetch things into a bigger buffer */
5114     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5115     memcpy(big_buf, buf_temp, target_size);
5116     buf_temp = big_buf;
5117     host_dm = big_buf;
5118 
5119     guest_data = arg + host_dm->data_start;
5120     if ((guest_data - arg) < 0) {
5121         ret = -TARGET_EINVAL;
5122         goto out;
5123     }
5124     guest_data_size = host_dm->data_size - host_dm->data_start;
5125     host_data = (char*)host_dm + host_dm->data_start;
5126 
5127     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5128     if (!argptr) {
5129         ret = -TARGET_EFAULT;
5130         goto out;
5131     }
5132 
5133     switch (ie->host_cmd) {
5134     case DM_REMOVE_ALL:
5135     case DM_LIST_DEVICES:
5136     case DM_DEV_CREATE:
5137     case DM_DEV_REMOVE:
5138     case DM_DEV_SUSPEND:
5139     case DM_DEV_STATUS:
5140     case DM_DEV_WAIT:
5141     case DM_TABLE_STATUS:
5142     case DM_TABLE_CLEAR:
5143     case DM_TABLE_DEPS:
5144     case DM_LIST_VERSIONS:
5145         /* no input data */
5146         break;
5147     case DM_DEV_RENAME:
5148     case DM_DEV_SET_GEOMETRY:
5149         /* data contains only strings */
5150         memcpy(host_data, argptr, guest_data_size);
5151         break;
5152     case DM_TARGET_MSG:
5153         memcpy(host_data, argptr, guest_data_size);
5154         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5155         break;
5156     case DM_TABLE_LOAD:
5157     {
5158         void *gspec = argptr;
5159         void *cur_data = host_data;
5160         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5161         int spec_size = thunk_type_size(arg_type, 0);
5162         int i;
5163 
5164         for (i = 0; i < host_dm->target_count; i++) {
5165             struct dm_target_spec *spec = cur_data;
5166             uint32_t next;
5167             int slen;
5168 
5169             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5170             slen = strlen((char*)gspec + spec_size) + 1;
5171             next = spec->next;
5172             spec->next = sizeof(*spec) + slen;
5173             strcpy((char*)&spec[1], gspec + spec_size);
5174             gspec += next;
5175             cur_data += spec->next;
5176         }
5177         break;
5178     }
5179     default:
5180         ret = -TARGET_EINVAL;
5181         unlock_user(argptr, guest_data, 0);
5182         goto out;
5183     }
5184     unlock_user(argptr, guest_data, 0);
5185 
5186     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5187     if (!is_error(ret)) {
5188         guest_data = arg + host_dm->data_start;
5189         guest_data_size = host_dm->data_size - host_dm->data_start;
5190         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5191         switch (ie->host_cmd) {
5192         case DM_REMOVE_ALL:
5193         case DM_DEV_CREATE:
5194         case DM_DEV_REMOVE:
5195         case DM_DEV_RENAME:
5196         case DM_DEV_SUSPEND:
5197         case DM_DEV_STATUS:
5198         case DM_TABLE_LOAD:
5199         case DM_TABLE_CLEAR:
5200         case DM_TARGET_MSG:
5201         case DM_DEV_SET_GEOMETRY:
5202             /* no return data */
5203             break;
5204         case DM_LIST_DEVICES:
5205         {
5206             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5207             uint32_t remaining_data = guest_data_size;
5208             void *cur_data = argptr;
5209             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5210             int nl_size = 12; /* can't use thunk_size due to alignment */
5211 
5212             while (1) {
5213                 uint32_t next = nl->next;
5214                 if (next) {
5215                     nl->next = nl_size + (strlen(nl->name) + 1);
5216                 }
5217                 if (remaining_data < nl->next) {
5218                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5219                     break;
5220                 }
5221                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5222                 strcpy(cur_data + nl_size, nl->name);
5223                 cur_data += nl->next;
5224                 remaining_data -= nl->next;
5225                 if (!next) {
5226                     break;
5227                 }
5228                 nl = (void*)nl + next;
5229             }
5230             break;
5231         }
5232         case DM_DEV_WAIT:
5233         case DM_TABLE_STATUS:
5234         {
5235             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5236             void *cur_data = argptr;
5237             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5238             int spec_size = thunk_type_size(arg_type, 0);
5239             int i;
5240 
5241             for (i = 0; i < host_dm->target_count; i++) {
5242                 uint32_t next = spec->next;
5243                 int slen = strlen((char*)&spec[1]) + 1;
5244                 spec->next = (cur_data - argptr) + spec_size + slen;
5245                 if (guest_data_size < spec->next) {
5246                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5247                     break;
5248                 }
5249                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5250                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5251                 cur_data = argptr + spec->next;
5252                 spec = (void*)host_dm + host_dm->data_start + next;
5253             }
5254             break;
5255         }
5256         case DM_TABLE_DEPS:
5257         {
5258             void *hdata = (void*)host_dm + host_dm->data_start;
5259             int count = *(uint32_t*)hdata;
5260             uint64_t *hdev = hdata + 8;
5261             uint64_t *gdev = argptr + 8;
5262             int i;
5263 
5264             *(uint32_t*)argptr = tswap32(count);
5265             for (i = 0; i < count; i++) {
5266                 *gdev = tswap64(*hdev);
5267                 gdev++;
5268                 hdev++;
5269             }
5270             break;
5271         }
5272         case DM_LIST_VERSIONS:
5273         {
5274             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5275             uint32_t remaining_data = guest_data_size;
5276             void *cur_data = argptr;
5277             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5278             int vers_size = thunk_type_size(arg_type, 0);
5279 
5280             while (1) {
5281                 uint32_t next = vers->next;
5282                 if (next) {
5283                     vers->next = vers_size + (strlen(vers->name) + 1);
5284                 }
5285                 if (remaining_data < vers->next) {
5286                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5287                     break;
5288                 }
5289                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5290                 strcpy(cur_data + vers_size, vers->name);
5291                 cur_data += vers->next;
5292                 remaining_data -= vers->next;
5293                 if (!next) {
5294                     break;
5295                 }
5296                 vers = (void*)vers + next;
5297             }
5298             break;
5299         }
5300         default:
5301             unlock_user(argptr, guest_data, 0);
5302             ret = -TARGET_EINVAL;
5303             goto out;
5304         }
5305         unlock_user(argptr, guest_data, guest_data_size);
5306 
5307         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5308         if (!argptr) {
5309             ret = -TARGET_EFAULT;
5310             goto out;
5311         }
5312         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5313         unlock_user(argptr, arg, target_size);
5314     }
5315 out:
5316     g_free(big_buf);
5317     return ret;
5318 }
5319 
5320 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5321                                int cmd, abi_long arg)
5322 {
5323     void *argptr;
5324     int target_size;
5325     const argtype *arg_type = ie->arg_type;
5326     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5327     abi_long ret;
5328 
5329     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5330     struct blkpg_partition host_part;
5331 
5332     /* Read and convert blkpg */
5333     arg_type++;
5334     target_size = thunk_type_size(arg_type, 0);
5335     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5336     if (!argptr) {
5337         ret = -TARGET_EFAULT;
5338         goto out;
5339     }
5340     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5341     unlock_user(argptr, arg, 0);
5342 
5343     switch (host_blkpg->op) {
5344     case BLKPG_ADD_PARTITION:
5345     case BLKPG_DEL_PARTITION:
5346         /* payload is struct blkpg_partition */
5347         break;
5348     default:
5349         /* Unknown opcode */
5350         ret = -TARGET_EINVAL;
5351         goto out;
5352     }
5353 
5354     /* Read and convert blkpg->data */
5355     arg = (abi_long)(uintptr_t)host_blkpg->data;
5356     target_size = thunk_type_size(part_arg_type, 0);
5357     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5358     if (!argptr) {
5359         ret = -TARGET_EFAULT;
5360         goto out;
5361     }
5362     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5363     unlock_user(argptr, arg, 0);
5364 
5365     /* Swizzle the data pointer to our local copy and call! */
5366     host_blkpg->data = &host_part;
5367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5368 
5369 out:
5370     return ret;
5371 }
5372 
/*
 * SIOCADDRT/SIOCDELRT: the guest struct rtentry contains a rt_dev
 * field which is a pointer to a device-name string, not plain data.
 * Convert the struct field by field so that rt_dev can be handled
 * specially: its guest string is locked into host memory and the
 * host pointer substituted before the ioctl is issued.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* Only write-direction rtentry ioctls are handled here. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    /* rtentry must not have a custom converter: we do it by hand below. */
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: translate the guest string pointer into a locked
               host string (or NULL if the guest passed no device). */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* All other fields are plain data: thunk-convert them. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above always visits rt_dev, so both pointers are set. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* Release the device-name string locked above. */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5438 
5439 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5440                                      int fd, int cmd, abi_long arg)
5441 {
5442     int sig = target_to_host_signal(arg);
5443     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5444 }
5445 
5446 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5447                                     int fd, int cmd, abi_long arg)
5448 {
5449     struct timeval tv;
5450     abi_long ret;
5451 
5452     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5453     if (is_error(ret)) {
5454         return ret;
5455     }
5456 
5457     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5458         if (copy_to_user_timeval(arg, &tv)) {
5459             return -TARGET_EFAULT;
5460         }
5461     } else {
5462         if (copy_to_user_timeval64(arg, &tv)) {
5463             return -TARGET_EFAULT;
5464         }
5465     }
5466 
5467     return ret;
5468 }
5469 
5470 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5471                                       int fd, int cmd, abi_long arg)
5472 {
5473     struct timespec ts;
5474     abi_long ret;
5475 
5476     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5477     if (is_error(ret)) {
5478         return ret;
5479     }
5480 
5481     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5482         if (host_to_target_timespec(arg, &ts)) {
5483             return -TARGET_EFAULT;
5484         }
5485     } else{
5486         if (host_to_target_timespec64(arg, &ts)) {
5487             return -TARGET_EFAULT;
5488         }
5489     }
5490 
5491     return ret;
5492 }
5493 
5494 #ifdef TIOCGPTPEER
5495 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5496                                      int fd, int cmd, abi_long arg)
5497 {
5498     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5499     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5500 }
5501 #endif
5502 
5503 #ifdef HAVE_DRM_H
5504 
5505 static void unlock_drm_version(struct drm_version *host_ver,
5506                                struct target_drm_version *target_ver,
5507                                bool copy)
5508 {
5509     unlock_user(host_ver->name, target_ver->name,
5510                                 copy ? host_ver->name_len : 0);
5511     unlock_user(host_ver->date, target_ver->date,
5512                                 copy ? host_ver->date_len : 0);
5513     unlock_user(host_ver->desc, target_ver->desc,
5514                                 copy ? host_ver->desc_len : 0);
5515 }
5516 
/*
 * Prepare a host struct drm_version from the guest's
 * struct target_drm_version: read the three length fields and lock
 * the corresponding guest buffers so the kernel can write into them.
 * Returns 0 on success, -EFAULT if any buffer cannot be locked (any
 * buffers already locked are released via unlock_drm_version()).
 *
 * NOTE(review): the lock_user() calls pass the raw
 * target_ver->{name,date,desc}_len and pointer fields straight from
 * guest memory, without the byte-swap that __get_user() applies to
 * the host_ver copies — presumably fine when guest and host share
 * endianness; verify for cross-endian configurations.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    /* Driver name buffer. */
    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    /* Build date buffer. */
    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    /* Description buffer. */
    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Drop whatever was locked so far; nothing is copied back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5554 
/*
 * Copy the scalar results of a successful DRM_IOCTL_VERSION back to
 * the guest struct, then release the string buffers with copy-back
 * enabled so the guest sees the name/date/desc text as well.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5567 
5568 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5569                              int fd, int cmd, abi_long arg)
5570 {
5571     struct drm_version *ver;
5572     struct target_drm_version *target_ver;
5573     abi_long ret;
5574 
5575     switch (ie->host_cmd) {
5576     case DRM_IOCTL_VERSION:
5577         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5578             return -TARGET_EFAULT;
5579         }
5580         ver = (struct drm_version *)buf_temp;
5581         ret = target_to_host_drmversion(ver, target_ver);
5582         if (!is_error(ret)) {
5583             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5584             if (is_error(ret)) {
5585                 unlock_drm_version(ver, target_ver, false);
5586             } else {
5587                 host_to_target_drmversion(target_ver, ver);
5588             }
5589         }
5590         unlock_user_struct(target_ver, arg, 0);
5591         return ret;
5592     }
5593     return -TARGET_ENOSYS;
5594 }
5595 
5596 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5597                                            struct drm_i915_getparam *gparam,
5598                                            int fd, abi_long arg)
5599 {
5600     abi_long ret;
5601     int value;
5602     struct target_drm_i915_getparam *target_gparam;
5603 
5604     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5605         return -TARGET_EFAULT;
5606     }
5607 
5608     __get_user(gparam->param, &target_gparam->param);
5609     gparam->value = &value;
5610     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5611     put_user_s32(value, target_gparam->value);
5612 
5613     unlock_user_struct(target_gparam, arg, 0);
5614     return ret;
5615 }
5616 
5617 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5618                                   int fd, int cmd, abi_long arg)
5619 {
5620     switch (ie->host_cmd) {
5621     case DRM_IOCTL_I915_GETPARAM:
5622         return do_ioctl_drm_i915_getparam(ie,
5623                                           (struct drm_i915_getparam *)buf_temp,
5624                                           fd, arg);
5625     default:
5626         return -TARGET_ENOSYS;
5627     }
5628 }
5629 
5630 #endif
5631 
5632 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5633                                         int fd, int cmd, abi_long arg)
5634 {
5635     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5636     struct tun_filter *target_filter;
5637     char *target_addr;
5638 
5639     assert(ie->access == IOC_W);
5640 
5641     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5642     if (!target_filter) {
5643         return -TARGET_EFAULT;
5644     }
5645     filter->flags = tswap16(target_filter->flags);
5646     filter->count = tswap16(target_filter->count);
5647     unlock_user(target_filter, arg, 0);
5648 
5649     if (filter->count) {
5650         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5651             MAX_STRUCT_SIZE) {
5652             return -TARGET_EFAULT;
5653         }
5654 
5655         target_addr = lock_user(VERIFY_READ,
5656                                 arg + offsetof(struct tun_filter, addr),
5657                                 filter->count * ETH_ALEN, 1);
5658         if (!target_addr) {
5659             return -TARGET_EFAULT;
5660         }
5661         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5662         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5663     }
5664 
5665     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5666 }
5667 
/*
 * Master table of translated ioctls, generated by expanding ioctls.h
 * with the macros below:
 *   IOCTL          - generic entry handled by the thunk machinery
 *   IOCTL_SPECIAL  - entry with a custom do_ioctl_* handler (dofn)
 *   IOCTL_IGNORE   - target cmd recognized but host_cmd left 0 (ENOSYS)
 * The list is terminated by an all-zero sentinel entry.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5678 
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan of the table for the guest command number. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Entry has a custom handler (IOCTL_SPECIAL); delegate fully. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    /* Generic path: argument handling driven by the thunk type. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Pass-through value (or opaque pointer) argument. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: convert via buf_temp in the
           direction(s) given by the entry's access mode. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: convert host -> target after. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only for the guest: convert target -> host first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5770 
5771 static const bitmask_transtbl iflag_tbl[] = {
5772         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5773         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5774         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5775         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5776         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5777         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5778         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5779         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5780         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5781         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5782         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5783         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5784         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5785         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5786         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5787         { 0, 0, 0, 0 }
5788 };
5789 
5790 static const bitmask_transtbl oflag_tbl[] = {
5791 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5792 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5793 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5794 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5795 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5796 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5797 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5798 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5799 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5800 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5801 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5802 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5803 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5804 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5805 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5806 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5807 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5808 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5809 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5810 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5811 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5812 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5813 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5814 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5815 	{ 0, 0, 0, 0 }
5816 };
5817 
5818 static const bitmask_transtbl cflag_tbl[] = {
5819 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5820 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5821 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5822 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5823 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5824 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5825 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5826 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5827 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5828 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5829 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5830 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5831 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5832 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5833 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5834 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5835 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5836 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5837 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5838 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5839 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5840 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5841 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5842 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5843 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5844 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5845 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5846 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5847 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5848 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5849 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5850 	{ 0, 0, 0, 0 }
5851 };
5852 
5853 static const bitmask_transtbl lflag_tbl[] = {
5854   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5855   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5856   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5857   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5858   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5859   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5860   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5861   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5862   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5863   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5864   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5865   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5866   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5867   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5868   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5869   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5870   { 0, 0, 0, 0 }
5871 };
5872 
5873 static void target_to_host_termios (void *dst, const void *src)
5874 {
5875     struct host_termios *host = dst;
5876     const struct target_termios *target = src;
5877 
5878     host->c_iflag =
5879         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5880     host->c_oflag =
5881         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5882     host->c_cflag =
5883         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5884     host->c_lflag =
5885         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5886     host->c_line = target->c_line;
5887 
5888     memset(host->c_cc, 0, sizeof(host->c_cc));
5889     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5890     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5891     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5892     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5893     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5894     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5895     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5896     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5897     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5898     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5899     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5900     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5901     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5902     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5903     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5904     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5905     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5906 }
5907 
5908 static void host_to_target_termios (void *dst, const void *src)
5909 {
5910     struct target_termios *target = dst;
5911     const struct host_termios *host = src;
5912 
5913     target->c_iflag =
5914         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5915     target->c_oflag =
5916         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5917     target->c_cflag =
5918         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5919     target->c_lflag =
5920         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5921     target->c_line = host->c_line;
5922 
5923     memset(target->c_cc, 0, sizeof(target->c_cc));
5924     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5925     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5926     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5927     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5928     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5929     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5930     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5931     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5932     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5933     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5934     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5935     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5936     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5937     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5938     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5939     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5940     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5941 }
5942 
/* Thunk descriptor for termios: uses the custom converter pair above
   instead of the generic field-by-field machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5949 
/* mmap(2) flag bits: target <-> host translation. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5972 
5973 /*
5974  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5975  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5976  */
5977 #if defined(TARGET_I386)
5978 
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;  /* NULL until first allocated by write_ldt() */
5981 
5982 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5983 {
5984     int size;
5985     void *p;
5986 
5987     if (!ldt_table)
5988         return 0;
5989     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5990     if (size > bytecount)
5991         size = bytecount;
5992     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5993     if (!p)
5994         return -TARGET_EFAULT;
5995     /* ??? Should this by byteswapped?  */
5996     memcpy(p, ldt_table, size);
5997     unlock_user(p, ptr, size);
5998     return size;
5999 }
6000 
/* XXX: add locking support */
/*
 * Install or clear one guest LDT descriptor (modify_ldt funcs 1/0x11).
 * The flag unpacking and the entry_1/entry_2 encoding mirror the Linux
 * kernel's write_ldt().  @oldmode selects the legacy func==1 semantics,
 * which reject TLS-style entries and ignore the "useable" bit.
 * Returns 0 on success or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* Only a whole descriptor-struct-sized write is accepted. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI guests cannot request 64-bit (long mode) segments. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words: entry_1 holds base[15:0] and limit[15:0],
       entry_2 holds the high base/limit bits plus the attribute flags. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes (two 32-bit words). */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6092 
6093 /* specific and weird i386 syscalls */
6094 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6095                               unsigned long bytecount)
6096 {
6097     abi_long ret;
6098 
6099     switch (func) {
6100     case 0:
6101         ret = read_ldt(ptr, bytecount);
6102         break;
6103     case 1:
6104         ret = write_ldt(env, ptr, bytecount, 1);
6105         break;
6106     case 0x11:
6107         ret = write_ldt(env, ptr, bytecount, 0);
6108         break;
6109     default:
6110         ret = -TARGET_ENOSYS;
6111         break;
6112     }
6113     return ret;
6114 }
6115 
6116 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the guest
 * GDT.  If the guest passes entry_number == -1 we pick the first free
 * TLS slot and write the chosen index back to the guest struct, as the
 * kernel does.  Returns 0 or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Auto-allocate: scan the TLS range for an unused (zero) slot. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Only the dedicated TLS slots of the GDT may be written. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flag bits (same bit layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI guests cannot request 64-bit (long mode) segments. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words: entry_1 holds base[15:0] and limit[15:0],
       entry_2 holds the high base/limit bits plus the attribute flags. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6201 
/*
 * Emulate get_thread_area(2): decode the guest GDT TLS descriptor at
 * the index given in the guest struct back into user_desc form and
 * write base/limit/flags to the guest.  Inverse of do_set_thread_area.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the dedicated TLS slots of the GDT may be read. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the attribute bits from the second descriptor word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI guests never see the long-mode bit. */
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc flag layout. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6248 
/* arch_prctl(2) is not available to 32-bit ABI guests; report ENOSYS. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6253 #else
6254 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6255 {
6256     abi_long ret = 0;
6257     abi_ulong val;
6258     int idx;
6259 
6260     switch(code) {
6261     case TARGET_ARCH_SET_GS:
6262     case TARGET_ARCH_SET_FS:
6263         if (code == TARGET_ARCH_SET_GS)
6264             idx = R_GS;
6265         else
6266             idx = R_FS;
6267         cpu_x86_load_seg(env, idx, 0);
6268         env->segs[idx].base = addr;
6269         break;
6270     case TARGET_ARCH_GET_GS:
6271     case TARGET_ARCH_GET_FS:
6272         if (code == TARGET_ARCH_GET_GS)
6273             idx = R_GS;
6274         else
6275             idx = R_FS;
6276         val = env->segs[idx].base;
6277         if (put_user(val, addr, abi_ulong))
6278             ret = -TARGET_EFAULT;
6279         break;
6280     default:
6281         ret = -TARGET_EINVAL;
6282         break;
6283     }
6284     return ret;
6285 }
6286 #endif /* defined(TARGET_ABI32 */
6287 
6288 #endif /* defined(TARGET_I386) */
6289 
6290 #define NEW_STACK_SIZE 0x40000
6291 
6292 
/* Serializes thread creation so parent-side setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the newly created thread. */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the ready handshake below */
    pthread_cond_t cond;      /* signalled once the child has set up */
    pthread_t thread;
    uint32_t tid;             /* host tid, filled in by the child */
    abi_ulong child_tidptr;   /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;  /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;         /* parent's signal mask, restored in child */
} new_thread_info;
6304 
/*
 * Entry point of a new guest thread created via clone(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its tid, signals the
 * parent that setup is done, then enters the CPU loop and never
 * returns.  @arg points at the parent's stack-allocated
 * new_thread_info, so it must not be touched after the handshake.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6338 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests are mapped to
 * a host pthread sharing this process; everything else becomes a host
 * fork().  On success returns the new (host) tid/pid, on failure a
 * negative target errno (or -1 from the pthread path).
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flags that we deliberately accept but do not implement. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create a new guest thread inside this process. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only the canonical pthread-style flag combination is supported. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Set up the parent/child handshake structure (on our stack;
           the child only uses it before signalling info.cond). */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore this (parent) thread's signal mask. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6483 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl(2) command number to the host's.  Guest
 * F_GETLK/F_SETLK/F_SETLKW are mapped to the host 64-bit variants
 * because we always marshal locks through struct flock64.  Returns
 * -TARGET_EINVAL for commands we do not know how to translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands have the same value on all targets and the host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* The *64 commands only exist as separate numbers on 32-bit ABIs. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6590 
/*
 * Shared lock-type translation table: expands to a switch over "type"
 * using whichever TRANSTBL_CONVERT definition is in effect, keeping
 * the guest->host and host->guest converters below in sync.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6597 
6598 static int target_to_host_flock(int type)
6599 {
6600 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6601     FLOCK_TRANSTBL
6602 #undef  TRANSTBL_CONVERT
6603     return -TARGET_EINVAL;
6604 }
6605 
6606 static int host_to_target_flock(int type)
6607 {
6608 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6609     FLOCK_TRANSTBL
6610 #undef  TRANSTBL_CONVERT
6611     /* if we don't know how to convert the value coming
6612      * from the host we copy to the target field as-is
6613      */
6614     return type;
6615 }
6616 
6617 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6618                                             abi_ulong target_flock_addr)
6619 {
6620     struct target_flock *target_fl;
6621     int l_type;
6622 
6623     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6624         return -TARGET_EFAULT;
6625     }
6626 
6627     __get_user(l_type, &target_fl->l_type);
6628     l_type = target_to_host_flock(l_type);
6629     if (l_type < 0) {
6630         return l_type;
6631     }
6632     fl->l_type = l_type;
6633     __get_user(fl->l_whence, &target_fl->l_whence);
6634     __get_user(fl->l_start, &target_fl->l_start);
6635     __get_user(fl->l_len, &target_fl->l_len);
6636     __get_user(fl->l_pid, &target_fl->l_pid);
6637     unlock_user_struct(target_fl, target_flock_addr, 0);
6638     return 0;
6639 }
6640 
6641 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6642                                           const struct flock64 *fl)
6643 {
6644     struct target_flock *target_fl;
6645     short l_type;
6646 
6647     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6648         return -TARGET_EFAULT;
6649     }
6650 
6651     l_type = host_to_target_flock(fl->l_type);
6652     __put_user(l_type, &target_fl->l_type);
6653     __put_user(fl->l_whence, &target_fl->l_whence);
6654     __put_user(fl->l_start, &target_fl->l_start);
6655     __put_user(fl->l_len, &target_fl->l_len);
6656     __put_user(fl->l_pid, &target_fl->l_pid);
6657     unlock_user_struct(target_fl, target_flock_addr, 1);
6658     return 0;
6659 }
6660 
6661 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6662 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6663 
6664 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6665 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6666                                                    abi_ulong target_flock_addr)
6667 {
6668     struct target_oabi_flock64 *target_fl;
6669     int l_type;
6670 
6671     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6672         return -TARGET_EFAULT;
6673     }
6674 
6675     __get_user(l_type, &target_fl->l_type);
6676     l_type = target_to_host_flock(l_type);
6677     if (l_type < 0) {
6678         return l_type;
6679     }
6680     fl->l_type = l_type;
6681     __get_user(fl->l_whence, &target_fl->l_whence);
6682     __get_user(fl->l_start, &target_fl->l_start);
6683     __get_user(fl->l_len, &target_fl->l_len);
6684     __get_user(fl->l_pid, &target_fl->l_pid);
6685     unlock_user_struct(target_fl, target_flock_addr, 0);
6686     return 0;
6687 }
6688 
6689 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6690                                                  const struct flock64 *fl)
6691 {
6692     struct target_oabi_flock64 *target_fl;
6693     short l_type;
6694 
6695     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6696         return -TARGET_EFAULT;
6697     }
6698 
6699     l_type = host_to_target_flock(fl->l_type);
6700     __put_user(l_type, &target_fl->l_type);
6701     __put_user(fl->l_whence, &target_fl->l_whence);
6702     __put_user(fl->l_start, &target_fl->l_start);
6703     __put_user(fl->l_len, &target_fl->l_len);
6704     __put_user(fl->l_pid, &target_fl->l_pid);
6705     unlock_user_struct(target_fl, target_flock_addr, 1);
6706     return 0;
6707 }
6708 #endif
6709 
6710 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6711                                               abi_ulong target_flock_addr)
6712 {
6713     struct target_flock64 *target_fl;
6714     int l_type;
6715 
6716     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6717         return -TARGET_EFAULT;
6718     }
6719 
6720     __get_user(l_type, &target_fl->l_type);
6721     l_type = target_to_host_flock(l_type);
6722     if (l_type < 0) {
6723         return l_type;
6724     }
6725     fl->l_type = l_type;
6726     __get_user(fl->l_whence, &target_fl->l_whence);
6727     __get_user(fl->l_start, &target_fl->l_start);
6728     __get_user(fl->l_len, &target_fl->l_len);
6729     __get_user(fl->l_pid, &target_fl->l_pid);
6730     unlock_user_struct(target_fl, target_flock_addr, 0);
6731     return 0;
6732 }
6733 
6734 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6735                                             const struct flock64 *fl)
6736 {
6737     struct target_flock64 *target_fl;
6738     short l_type;
6739 
6740     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6741         return -TARGET_EFAULT;
6742     }
6743 
6744     l_type = host_to_target_flock(fl->l_type);
6745     __put_user(l_type, &target_fl->l_type);
6746     __put_user(fl->l_whence, &target_fl->l_whence);
6747     __put_user(fl->l_start, &target_fl->l_start);
6748     __put_user(fl->l_len, &target_fl->l_len);
6749     __put_user(fl->l_pid, &target_fl->l_pid);
6750     unlock_user_struct(target_fl, target_flock_addr, 1);
6751     return 0;
6752 }
6753 
/*
 * Emulate fcntl(2): translate the command and its argument (lock
 * structs, owner structs, flag bitmasks, signal numbers) between guest
 * and host representations, perform the host fcntl, and translate the
 * result back.  Unknown commands are passed through untranslated.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* The kernel fills in the conflicting lock; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate host O_* flag bits to guest values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* The signal number crosses the ABI; translate it. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    /* Commands whose argument is a plain integer needing no translation. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6872 
6873 #ifdef USE_UID16
6874 
/* Clamp a full-width uid to the legacy 16-bit range: overflow -> 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
6882 
/* Clamp a full-width gid to the legacy 16-bit range: overflow -> 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
6890 
/* Widen a 16-bit uid; 0xffff is the "no change" sentinel and maps to -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
6898 
/* Widen a 16-bit gid; 0xffff is the "no change" sentinel and maps to -1. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* With USE_UID16, ids cross the guest ABI as 16-bit values. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6910 
6911 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6912 
6913 #else /* !USE_UID16 */
/*
 * Without USE_UID16 the target ABI carries full-width uids/gids, so
 * the high/low conversions are identity functions and ids are swapped
 * as 32-bit values.
 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6936 
6937 #endif /* USE_UID16 */
6938 
6939 /* We must do direct syscalls for setting UID/GID, because we want to
6940  * implement the Linux system call semantics of "change only for this thread",
6941  * not the libc/POSIX semantics of "change for all threads in process".
6942  * (See http://ewontfix.com/17/ for more details.)
6943  * We use the 32-bit version of the syscalls if present; if it is not
6944  * then either the host architecture supports 32-bit UIDs natively with
6945  * the standard syscall, or the 16-bit UID is the best we can do.
6946  */
6947 #ifdef __NR_setuid32
6948 #define __NR_sys_setuid __NR_setuid32
6949 #else
6950 #define __NR_sys_setuid __NR_setuid
6951 #endif
6952 #ifdef __NR_setgid32
6953 #define __NR_sys_setgid __NR_setgid32
6954 #else
6955 #define __NR_sys_setgid __NR_setgid
6956 #endif
6957 #ifdef __NR_setresuid32
6958 #define __NR_sys_setresuid __NR_setresuid32
6959 #else
6960 #define __NR_sys_setresuid __NR_setresuid
6961 #endif
6962 #ifdef __NR_setresgid32
6963 #define __NR_sys_setresgid __NR_setresgid32
6964 #else
6965 #define __NR_sys_setresgid __NR_setresgid
6966 #endif
6967 
/* Raw syscall wrappers so that set*id affects only the calling thread,
 * per the Linux syscall semantics described in the comment above
 * (libc wrappers would broadcast the change to all threads).
 */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6972 
/* One-time initialisation of the syscall emulation layer: register all
 * thunk struct descriptors (via syscall_types.h) and patch ioctl
 * numbers whose size field depends on the target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* All-ones size marker: derive the real size from the thunk
             * type description; the argument must be a pointer type.
             */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7017 
7018 #ifdef TARGET_NR_truncate64
7019 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7020                                          abi_long arg2,
7021                                          abi_long arg3,
7022                                          abi_long arg4)
7023 {
7024     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7025         arg2 = arg3;
7026         arg3 = arg4;
7027     }
7028     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7029 }
7030 #endif
7031 
7032 #ifdef TARGET_NR_ftruncate64
7033 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7034                                           abi_long arg2,
7035                                           abi_long arg3,
7036                                           abi_long arg4)
7037 {
7038     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7039         arg2 = arg3;
7040         arg3 = arg4;
7041     }
7042     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7043 }
7044 #endif
7045 
7046 #if defined(TARGET_NR_timer_settime) || \
7047     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7048 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7049                                                  abi_ulong target_addr)
7050 {
7051     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7052                                 offsetof(struct target_itimerspec,
7053                                          it_interval)) ||
7054         target_to_host_timespec(&host_its->it_value, target_addr +
7055                                 offsetof(struct target_itimerspec,
7056                                          it_value))) {
7057         return -TARGET_EFAULT;
7058     }
7059 
7060     return 0;
7061 }
7062 #endif
7063 
7064 #if defined(TARGET_NR_timer_settime64) || \
7065     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Read a guest struct target__kernel_itimerspec (64-bit time_t layout)
 * into *host_its.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7080 #endif
7081 
7082 #if ((defined(TARGET_NR_timerfd_gettime) || \
7083       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7084       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7085 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7086                                                  struct itimerspec *host_its)
7087 {
7088     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7089                                                        it_interval),
7090                                 &host_its->it_interval) ||
7091         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7092                                                        it_value),
7093                                 &host_its->it_value)) {
7094         return -TARGET_EFAULT;
7095     }
7096     return 0;
7097 }
7098 #endif
7099 
7100 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7101       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7102       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Write *host_its back to a guest struct target__kernel_itimerspec
 * (64-bit time_t layout).  Returns 0 on success, -TARGET_EFAULT on a
 * bad guest address.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7118 #endif
7119 
7120 #if defined(TARGET_NR_adjtimex) || \
7121     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/* Convert a guest struct target_timex at target_addr into *host_tx
 * for adjtimex()/clock_adjtime().  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be read.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy with guest-to-host byte order conversion. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7156 
/* Copy *host_tx back into a guest struct target_timex at target_addr
 * (result of adjtimex()/clock_adjtime()).  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be written.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy with host-to-guest byte order conversion. */
    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7191 #endif
7192 
7193 
7194 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* Convert a guest struct target__kernel_timex (64-bit time_t ABI) into
 * *host_tx.  The embedded time value is converted separately via
 * copy_from_user_timeval64 before the rest of the struct is locked and
 * copied.  Returns 0 on success or -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Remaining fields (time was handled above). */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7233 
7234 static inline abi_long host_to_target_timex64(abi_long target_addr,
7235                                               struct timex *host_tx)
7236 {
7237     struct target__kernel_timex *target_tx;
7238 
7239    if (copy_to_user_timeval64(target_addr +
7240                               offsetof(struct target__kernel_timex, time),
7241                               &host_tx->time)) {
7242         return -TARGET_EFAULT;
7243     }
7244 
7245     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7246         return -TARGET_EFAULT;
7247     }
7248 
7249     __put_user(host_tx->modes, &target_tx->modes);
7250     __put_user(host_tx->offset, &target_tx->offset);
7251     __put_user(host_tx->freq, &target_tx->freq);
7252     __put_user(host_tx->maxerror, &target_tx->maxerror);
7253     __put_user(host_tx->esterror, &target_tx->esterror);
7254     __put_user(host_tx->status, &target_tx->status);
7255     __put_user(host_tx->constant, &target_tx->constant);
7256     __put_user(host_tx->precision, &target_tx->precision);
7257     __put_user(host_tx->tolerance, &target_tx->tolerance);
7258     __put_user(host_tx->tick, &target_tx->tick);
7259     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7260     __put_user(host_tx->jitter, &target_tx->jitter);
7261     __put_user(host_tx->shift, &target_tx->shift);
7262     __put_user(host_tx->stabil, &target_tx->stabil);
7263     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7264     __put_user(host_tx->calcnt, &target_tx->calcnt);
7265     __put_user(host_tx->errcnt, &target_tx->errcnt);
7266     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7267     __put_user(host_tx->tai, &target_tx->tai);
7268 
7269     unlock_user_struct(target_tx, target_addr, 1);
7270     return 0;
7271 }
7272 #endif
7273 
7274 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7275 #define sigev_notify_thread_id _sigev_un._tid
7276 #endif
7277 
/* Convert a guest struct target_sigevent into *host_sevp (used by
 * timer_create and friends).  Returns 0 on success or -TARGET_EFAULT
 * if the guest struct cannot be read.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): copy-back flag is 1 despite the VERIFY_READ lock;
     * harmless (struct is unmodified) but 0 would be expected — confirm.
     */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7303 
7304 #if defined(TARGET_NR_mlockall)
7305 static inline int target_to_host_mlockall_arg(int arg)
7306 {
7307     int result = 0;
7308 
7309     if (arg & TARGET_MCL_CURRENT) {
7310         result |= MCL_CURRENT;
7311     }
7312     if (arg & TARGET_MCL_FUTURE) {
7313         result |= MCL_FUTURE;
7314     }
7315 #ifdef MCL_ONFAULT
7316     if (arg & TARGET_MCL_ONFAULT) {
7317         result |= MCL_ONFAULT;
7318     }
7319 #endif
7320 
7321     return result;
7322 }
7323 #endif
7324 
7325 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7326      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7327      defined(TARGET_NR_newfstatat))
/* Copy a host struct stat into the guest's stat64-family buffer at
 * target_addr.  On 32-bit ARM the layout differs between EABI and
 * OABI, so EABI guests get the dedicated target_eabi_stat64 path;
 * everything else uses target_stat64 (or target_stat when the target
 * has no distinct 64-bit layout).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode in a second field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps only when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7400 #endif
7401 
7402 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Copy a statx result into the guest's struct target_statx buffer.
 * Note host_stx here is already a struct target_statx filled in by the
 * statx caller; this routine byte-swaps each field into guest order.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so reserved/padding fields are deterministic. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7441 #endif
7442 
/* Invoke the appropriate host futex syscall.  64-bit hosts have only
 * __NR_futex (with a 64-bit time_t); 32-bit hosts prefer
 * __NR_futex_time64 when the host timespec carries a 64-bit tv_sec,
 * otherwise fall back to the classic __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only when no futex syscall variant exists on this host. */
    g_assert_not_reached();
}
7467 
/* Like do_sys_futex but via the safe_ wrappers (restartable around
 * guest signal delivery) and with the host errno mapped to a target
 * errno.  Returns -TARGET_ENOSYS when no variant is available.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7492 
7493 /* ??? Using host futex calls even when target atomic operations
7494    are not really atomic probably breaks things.  However implementing
7495    futexes locally would make futexes shared between multiple processes
7496    tricky.  However they're probably useless because guest atomic
7497    operations won't work either.  */
7498 #if defined(TARGET_NR_futex)
7499 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7500                     target_ulong timeout, target_ulong uaddr2, int val3)
7501 {
7502     struct timespec ts, *pts;
7503     int base_op;
7504 
7505     /* ??? We assume FUTEX_* constants are the same on both host
7506        and target.  */
7507 #ifdef FUTEX_CMD_MASK
7508     base_op = op & FUTEX_CMD_MASK;
7509 #else
7510     base_op = op;
7511 #endif
7512     switch (base_op) {
7513     case FUTEX_WAIT:
7514     case FUTEX_WAIT_BITSET:
7515         if (timeout) {
7516             pts = &ts;
7517             target_to_host_timespec(pts, timeout);
7518         } else {
7519             pts = NULL;
7520         }
7521         return do_safe_futex(g2h(cpu, uaddr),
7522                              op, tswap32(val), pts, NULL, val3);
7523     case FUTEX_WAKE:
7524         return do_safe_futex(g2h(cpu, uaddr),
7525                              op, val, NULL, NULL, 0);
7526     case FUTEX_FD:
7527         return do_safe_futex(g2h(cpu, uaddr),
7528                              op, val, NULL, NULL, 0);
7529     case FUTEX_REQUEUE:
7530     case FUTEX_CMP_REQUEUE:
7531     case FUTEX_WAKE_OP:
7532         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7533            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7534            But the prototype takes a `struct timespec *'; insert casts
7535            to satisfy the compiler.  We do not need to tswap TIMEOUT
7536            since it's not compared to guest memory.  */
7537         pts = (struct timespec *)(uintptr_t) timeout;
7538         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7539                              (base_op == FUTEX_CMP_REQUEUE
7540                               ? tswap32(val3) : val3));
7541     default:
7542         return -TARGET_ENOSYS;
7543     }
7544 }
7545 #endif
7546 
7547 #if defined(TARGET_NR_futex_time64)
7548 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7549                            int val, target_ulong timeout,
7550                            target_ulong uaddr2, int val3)
7551 {
7552     struct timespec ts, *pts;
7553     int base_op;
7554 
7555     /* ??? We assume FUTEX_* constants are the same on both host
7556        and target.  */
7557 #ifdef FUTEX_CMD_MASK
7558     base_op = op & FUTEX_CMD_MASK;
7559 #else
7560     base_op = op;
7561 #endif
7562     switch (base_op) {
7563     case FUTEX_WAIT:
7564     case FUTEX_WAIT_BITSET:
7565         if (timeout) {
7566             pts = &ts;
7567             if (target_to_host_timespec64(pts, timeout)) {
7568                 return -TARGET_EFAULT;
7569             }
7570         } else {
7571             pts = NULL;
7572         }
7573         return do_safe_futex(g2h(cpu, uaddr), op,
7574                              tswap32(val), pts, NULL, val3);
7575     case FUTEX_WAKE:
7576         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7577     case FUTEX_FD:
7578         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7579     case FUTEX_REQUEUE:
7580     case FUTEX_CMP_REQUEUE:
7581     case FUTEX_WAKE_OP:
7582         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7583            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7584            But the prototype takes a `struct timespec *'; insert casts
7585            to satisfy the compiler.  We do not need to tswap TIMEOUT
7586            since it's not compared to guest memory.  */
7587         pts = (struct timespec *)(uintptr_t) timeout;
7588         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7589                              (base_op == FUTEX_CMP_REQUEUE
7590                               ? tswap32(val3) : val3));
7591     default:
7592         return -TARGET_ENOSYS;
7593     }
7594 }
7595 #endif
7596 
7597 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): look up a file handle for pathname and
 * copy the (opaque) handle plus mount id back to the guest.  Returns
 * the syscall result or -TARGET_EFAULT on a bad guest pointer.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; sizeof + size could wrap
     * on overflow — presumably lock_user() rejects absurd sizes, but
     * confirm.
     */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7649 #endif
7650 
7651 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): reconstruct the host file_handle from
 * guest memory and open it.  Returns the new fd or a target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* The handle payload is opaque; only the header fields need
     * byte-swapping into host order.
     */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7683 #endif
7684 
7685 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7686 
/* Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register a fd translator so reads of the
 * signalfd_siginfo get converted back to guest layout.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK / SFD_CLOEXEC equivalents are permitted. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7714 #endif
7715 
7716 /* Map host to target signal numbers for the wait family of syscalls.
7717    Assume all other status bits are the same.  */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int termsig = host_to_target_signal(WTERMSIG(status));
        return termsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int stopsig = host_to_target_signal(WSTOPSIG(status));
        return (stopsig << 8) | (status & 0xff);
    }
    return status;
}
7729 
7730 static int open_self_cmdline(void *cpu_env, int fd)
7731 {
7732     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7733     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7734     int i;
7735 
7736     for (i = 0; i < bprm->argc; i++) {
7737         size_t len = strlen(bprm->argv[i]) + 1;
7738 
7739         if (write(fd, bprm->argv[i], len) != len) {
7740             return -1;
7741         }
7742     }
7743 
7744     return 0;
7745 }
7746 
/*
 * Emulate /proc/self/maps: walk the host process's own mappings and
 * report only the ones that fall inside the guest address space,
 * translated to guest addresses and guest page protections.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        /* Only report host ranges that begin inside guest space. */
        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address representable
             * as a guest address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip the range if the guest page flags are not uniform
             * across it (page_check_range reports a mismatch). */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad with spaces so the path starts at column 73. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7808 
/*
 * Emulate /proc/self/stat.  Only the fields qemu can sensibly supply
 * are filled in -- pid (field 1), comm (2), ppid (4) and the start of
 * the stack (28); every other field reads as 0.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    /* Emit 44 space-separated fields, one write() per field. */
    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of argv[0], truncated to 15 chars
             * in parentheses like the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
7843 
/*
 * Emulate /proc/self/auxv: copy the ELF auxiliary vector that was
 * placed on the guest stack at exec time out to the given fd, then
 * rewind the fd so the caller reads from the start.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* Write error or would-block: give up silently. */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced in the loop, so this
         * unlocks with adjusted values rather than the originals from
         * lock_user; harmless for the direct-mapped case, but worth
         * confirming for a copying lock_user implementation. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7873 
/*
 * Return 1 if filename names the given entry under this process's
 * /proc directory -- either "/proc/self/<entry>" or
 * "/proc/<our-pid>/<entry>" -- and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    /* Must begin with "/proc/". */
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric pid component only matches our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    /* What remains must be exactly the requested entry. */
    return strcmp(filename, entry) == 0;
}
7897 
7898 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7899     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Match an absolute /proc path exactly (no self/pid handling). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7904 #endif
7905 
7906 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Emulate /proc/net/route for targets whose endianness differs from
 * the host: copy the header line verbatim, then byte-swap the
 * destination, gateway and mask of each route entry.
 * Returns 0 on success, -1 if the host file cannot be opened or read.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Empty or unreadable file: line may still be NULL, and there
         * is nothing meaningful to emit. */
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        /* %15s bounds the interface name to iface[16] (incl. NUL). */
        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        /* Addresses are stored in host byte order; swap for target. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
7949 #endif
7950 
7951 #if defined(TARGET_SPARC)
/* Minimal /proc/cpuinfo for SPARC guests: report a sun4u machine. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
7957 #endif
7958 
7959 #if defined(TARGET_HPPA)
/*
 * Fixed /proc/cpuinfo for HPPA guests, describing the emulated
 * PA7300LC CPU in a 9000/778/B160L machine.
 */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd,
            "cpu family\t: PA-RISC 1.1e\n"
            "cpu\t\t: PA7300LC (PCX-L2)\n"
            "capabilities\t: os32\n"
            "model\t\t: 9000/778/B160L\n"
            "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
7969 #endif
7970 
7971 #if defined(TARGET_M68K)
/* Minimal /proc/hardware for m68k guests: just the model line. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model);
    return 0;
}
7977 #endif
7978 
/*
 * openat(2) with interception of a few /proc paths the guest must
 * see in target format.  "/proc/self/exe" is redirected to the real
 * guest executable; entries in fakes[] are synthesized into an
 * unlinked temporary file whose fd is handed back to the guest.
 * All other paths fall through to a plain host openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* path/entry to intercept */
        int (*fill)(void *cpu_env, int fd);         /* writes the fake contents */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader left us; else reopen exec_path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            /* mkstemp failed; errno is already set for get_errno(). */
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, and it
         * disappears automatically when the guest closes it. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill function's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the contents from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8044 
8045 #define TIMER_MAGIC 0x0caf0000
8046 #define TIMER_MAGIC_MASK 0xffff0000
8047 
8048 /* Convert QEMU provided timer ID back to internal 16bit index format */
8049 static target_timer_t get_timer_id(abi_long arg)
8050 {
8051     target_timer_t timerid = arg;
8052 
8053     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8054         return -TARGET_EINVAL;
8055     }
8056 
8057     timerid &= 0xffff;
8058 
8059     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8060         return -TARGET_EINVAL;
8061     }
8062 
8063     return timerid;
8064 }
8065 
/*
 * Read a guest CPU affinity mask (an array of abi_ulong bits) and
 * repack it bit-by-bit into a host array of unsigned long, handling
 * any difference in word size between guest and host ABI.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer is
 * unreadable.  host_size must be at least target_size.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Start from all-clear; only set bits are copied below. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;     /* global bit number */
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8099 
/*
 * Inverse of target_to_host_cpu_mask(): repack a host CPU affinity
 * mask bit-by-bit into the guest's abi_ulong array layout and write
 * it to guest memory.  Returns 0 on success or -TARGET_EFAULT if the
 * guest buffer is unwritable.  host_size must be at least target_size.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;     /* global bit number */
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8132 
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* No glibc wrapper exists for pivot_root(2); generate a direct
 * syscall stub when the host kernel provides the syscall number. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8136 
8137 /* This is an internal helper for do_syscall so that it is easier
8138  * to have a single return point, so that actions, such as logging
8139  * of syscall results, can be performed.
8140  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8141  */
8142 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8143                             abi_long arg2, abi_long arg3, abi_long arg4,
8144                             abi_long arg5, abi_long arg6, abi_long arg7,
8145                             abi_long arg8)
8146 {
8147     CPUState *cpu = env_cpu(cpu_env);
8148     abi_long ret;
8149 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8150     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8151     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8152     || defined(TARGET_NR_statx)
8153     struct stat st;
8154 #endif
8155 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8156     || defined(TARGET_NR_fstatfs)
8157     struct statfs stfs;
8158 #endif
8159     void *p;
8160 
8161     switch(num) {
8162     case TARGET_NR_exit:
8163         /* In old applications this may be used to implement _exit(2).
8164            However in threaded applications it is used for thread termination,
8165            and _exit_group is used for application termination.
8166            Do thread termination if we have more then one thread.  */
8167 
8168         if (block_signals()) {
8169             return -TARGET_ERESTARTSYS;
8170         }
8171 
8172         pthread_mutex_lock(&clone_lock);
8173 
8174         if (CPU_NEXT(first_cpu)) {
8175             TaskState *ts = cpu->opaque;
8176 
8177             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8178             object_unref(OBJECT(cpu));
8179             /*
8180              * At this point the CPU should be unrealized and removed
8181              * from cpu lists. We can clean-up the rest of the thread
8182              * data without the lock held.
8183              */
8184 
8185             pthread_mutex_unlock(&clone_lock);
8186 
8187             if (ts->child_tidptr) {
8188                 put_user_u32(0, ts->child_tidptr);
8189                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8190                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8191             }
8192             thread_cpu = NULL;
8193             g_free(ts);
8194             rcu_unregister_thread();
8195             pthread_exit(NULL);
8196         }
8197 
8198         pthread_mutex_unlock(&clone_lock);
8199         preexit_cleanup(cpu_env, arg1);
8200         _exit(arg1);
8201         return 0; /* avoid warning */
8202     case TARGET_NR_read:
8203         if (arg2 == 0 && arg3 == 0) {
8204             return get_errno(safe_read(arg1, 0, 0));
8205         } else {
8206             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8207                 return -TARGET_EFAULT;
8208             ret = get_errno(safe_read(arg1, p, arg3));
8209             if (ret >= 0 &&
8210                 fd_trans_host_to_target_data(arg1)) {
8211                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8212             }
8213             unlock_user(p, arg2, ret);
8214         }
8215         return ret;
8216     case TARGET_NR_write:
8217         if (arg2 == 0 && arg3 == 0) {
8218             return get_errno(safe_write(arg1, 0, 0));
8219         }
8220         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8221             return -TARGET_EFAULT;
8222         if (fd_trans_target_to_host_data(arg1)) {
8223             void *copy = g_malloc(arg3);
8224             memcpy(copy, p, arg3);
8225             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8226             if (ret >= 0) {
8227                 ret = get_errno(safe_write(arg1, copy, ret));
8228             }
8229             g_free(copy);
8230         } else {
8231             ret = get_errno(safe_write(arg1, p, arg3));
8232         }
8233         unlock_user(p, arg2, 0);
8234         return ret;
8235 
8236 #ifdef TARGET_NR_open
8237     case TARGET_NR_open:
8238         if (!(p = lock_user_string(arg1)))
8239             return -TARGET_EFAULT;
8240         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8241                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8242                                   arg3));
8243         fd_trans_unregister(ret);
8244         unlock_user(p, arg1, 0);
8245         return ret;
8246 #endif
8247     case TARGET_NR_openat:
8248         if (!(p = lock_user_string(arg2)))
8249             return -TARGET_EFAULT;
8250         ret = get_errno(do_openat(cpu_env, arg1, p,
8251                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8252                                   arg4));
8253         fd_trans_unregister(ret);
8254         unlock_user(p, arg2, 0);
8255         return ret;
8256 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8257     case TARGET_NR_name_to_handle_at:
8258         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8259         return ret;
8260 #endif
8261 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8262     case TARGET_NR_open_by_handle_at:
8263         ret = do_open_by_handle_at(arg1, arg2, arg3);
8264         fd_trans_unregister(ret);
8265         return ret;
8266 #endif
8267     case TARGET_NR_close:
8268         fd_trans_unregister(arg1);
8269         return get_errno(close(arg1));
8270 
8271     case TARGET_NR_brk:
8272         return do_brk(arg1);
8273 #ifdef TARGET_NR_fork
8274     case TARGET_NR_fork:
8275         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8276 #endif
8277 #ifdef TARGET_NR_waitpid
8278     case TARGET_NR_waitpid:
8279         {
8280             int status;
8281             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8282             if (!is_error(ret) && arg2 && ret
8283                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8284                 return -TARGET_EFAULT;
8285         }
8286         return ret;
8287 #endif
8288 #ifdef TARGET_NR_waitid
8289     case TARGET_NR_waitid:
8290         {
8291             siginfo_t info;
8292             info.si_pid = 0;
8293             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8294             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8295                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8296                     return -TARGET_EFAULT;
8297                 host_to_target_siginfo(p, &info);
8298                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8299             }
8300         }
8301         return ret;
8302 #endif
8303 #ifdef TARGET_NR_creat /* not on alpha */
8304     case TARGET_NR_creat:
8305         if (!(p = lock_user_string(arg1)))
8306             return -TARGET_EFAULT;
8307         ret = get_errno(creat(p, arg2));
8308         fd_trans_unregister(ret);
8309         unlock_user(p, arg1, 0);
8310         return ret;
8311 #endif
8312 #ifdef TARGET_NR_link
8313     case TARGET_NR_link:
8314         {
8315             void * p2;
8316             p = lock_user_string(arg1);
8317             p2 = lock_user_string(arg2);
8318             if (!p || !p2)
8319                 ret = -TARGET_EFAULT;
8320             else
8321                 ret = get_errno(link(p, p2));
8322             unlock_user(p2, arg2, 0);
8323             unlock_user(p, arg1, 0);
8324         }
8325         return ret;
8326 #endif
8327 #if defined(TARGET_NR_linkat)
8328     case TARGET_NR_linkat:
8329         {
8330             void * p2 = NULL;
8331             if (!arg2 || !arg4)
8332                 return -TARGET_EFAULT;
8333             p  = lock_user_string(arg2);
8334             p2 = lock_user_string(arg4);
8335             if (!p || !p2)
8336                 ret = -TARGET_EFAULT;
8337             else
8338                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8339             unlock_user(p, arg2, 0);
8340             unlock_user(p2, arg4, 0);
8341         }
8342         return ret;
8343 #endif
8344 #ifdef TARGET_NR_unlink
8345     case TARGET_NR_unlink:
8346         if (!(p = lock_user_string(arg1)))
8347             return -TARGET_EFAULT;
8348         ret = get_errno(unlink(p));
8349         unlock_user(p, arg1, 0);
8350         return ret;
8351 #endif
8352 #if defined(TARGET_NR_unlinkat)
8353     case TARGET_NR_unlinkat:
8354         if (!(p = lock_user_string(arg2)))
8355             return -TARGET_EFAULT;
8356         ret = get_errno(unlinkat(arg1, p, arg3));
8357         unlock_user(p, arg2, 0);
8358         return ret;
8359 #endif
8360     case TARGET_NR_execve:
8361         {
8362             char **argp, **envp;
8363             int argc, envc;
8364             abi_ulong gp;
8365             abi_ulong guest_argp;
8366             abi_ulong guest_envp;
8367             abi_ulong addr;
8368             char **q;
8369 
8370             argc = 0;
8371             guest_argp = arg2;
8372             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8373                 if (get_user_ual(addr, gp))
8374                     return -TARGET_EFAULT;
8375                 if (!addr)
8376                     break;
8377                 argc++;
8378             }
8379             envc = 0;
8380             guest_envp = arg3;
8381             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8382                 if (get_user_ual(addr, gp))
8383                     return -TARGET_EFAULT;
8384                 if (!addr)
8385                     break;
8386                 envc++;
8387             }
8388 
8389             argp = g_new0(char *, argc + 1);
8390             envp = g_new0(char *, envc + 1);
8391 
8392             for (gp = guest_argp, q = argp; gp;
8393                   gp += sizeof(abi_ulong), q++) {
8394                 if (get_user_ual(addr, gp))
8395                     goto execve_efault;
8396                 if (!addr)
8397                     break;
8398                 if (!(*q = lock_user_string(addr)))
8399                     goto execve_efault;
8400             }
8401             *q = NULL;
8402 
8403             for (gp = guest_envp, q = envp; gp;
8404                   gp += sizeof(abi_ulong), q++) {
8405                 if (get_user_ual(addr, gp))
8406                     goto execve_efault;
8407                 if (!addr)
8408                     break;
8409                 if (!(*q = lock_user_string(addr)))
8410                     goto execve_efault;
8411             }
8412             *q = NULL;
8413 
8414             if (!(p = lock_user_string(arg1)))
8415                 goto execve_efault;
8416             /* Although execve() is not an interruptible syscall it is
8417              * a special case where we must use the safe_syscall wrapper:
8418              * if we allow a signal to happen before we make the host
8419              * syscall then we will 'lose' it, because at the point of
8420              * execve the process leaves QEMU's control. So we use the
8421              * safe syscall wrapper to ensure that we either take the
8422              * signal as a guest signal, or else it does not happen
8423              * before the execve completes and makes it the other
8424              * program's problem.
8425              */
8426             ret = get_errno(safe_execve(p, argp, envp));
8427             unlock_user(p, arg1, 0);
8428 
8429             goto execve_end;
8430 
8431         execve_efault:
8432             ret = -TARGET_EFAULT;
8433 
8434         execve_end:
8435             for (gp = guest_argp, q = argp; *q;
8436                   gp += sizeof(abi_ulong), q++) {
8437                 if (get_user_ual(addr, gp)
8438                     || !addr)
8439                     break;
8440                 unlock_user(*q, addr, 0);
8441             }
8442             for (gp = guest_envp, q = envp; *q;
8443                   gp += sizeof(abi_ulong), q++) {
8444                 if (get_user_ual(addr, gp)
8445                     || !addr)
8446                     break;
8447                 unlock_user(*q, addr, 0);
8448             }
8449 
8450             g_free(argp);
8451             g_free(envp);
8452         }
8453         return ret;
8454     case TARGET_NR_chdir:
8455         if (!(p = lock_user_string(arg1)))
8456             return -TARGET_EFAULT;
8457         ret = get_errno(chdir(p));
8458         unlock_user(p, arg1, 0);
8459         return ret;
8460 #ifdef TARGET_NR_time
8461     case TARGET_NR_time:
8462         {
8463             time_t host_time;
8464             ret = get_errno(time(&host_time));
8465             if (!is_error(ret)
8466                 && arg1
8467                 && put_user_sal(host_time, arg1))
8468                 return -TARGET_EFAULT;
8469         }
8470         return ret;
8471 #endif
8472 #ifdef TARGET_NR_mknod
8473     case TARGET_NR_mknod:
8474         if (!(p = lock_user_string(arg1)))
8475             return -TARGET_EFAULT;
8476         ret = get_errno(mknod(p, arg2, arg3));
8477         unlock_user(p, arg1, 0);
8478         return ret;
8479 #endif
8480 #if defined(TARGET_NR_mknodat)
8481     case TARGET_NR_mknodat:
8482         if (!(p = lock_user_string(arg2)))
8483             return -TARGET_EFAULT;
8484         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8485         unlock_user(p, arg2, 0);
8486         return ret;
8487 #endif
8488 #ifdef TARGET_NR_chmod
8489     case TARGET_NR_chmod:
8490         if (!(p = lock_user_string(arg1)))
8491             return -TARGET_EFAULT;
8492         ret = get_errno(chmod(p, arg2));
8493         unlock_user(p, arg1, 0);
8494         return ret;
8495 #endif
8496 #ifdef TARGET_NR_lseek
8497     case TARGET_NR_lseek:
8498         return get_errno(lseek(arg1, arg2, arg3));
8499 #endif
8500 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8501     /* Alpha specific */
8502     case TARGET_NR_getxpid:
8503         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8504         return get_errno(getpid());
8505 #endif
8506 #ifdef TARGET_NR_getpid
8507     case TARGET_NR_getpid:
8508         return get_errno(getpid());
8509 #endif
8510     case TARGET_NR_mount:
8511         {
8512             /* need to look at the data field */
8513             void *p2, *p3;
8514 
8515             if (arg1) {
8516                 p = lock_user_string(arg1);
8517                 if (!p) {
8518                     return -TARGET_EFAULT;
8519                 }
8520             } else {
8521                 p = NULL;
8522             }
8523 
8524             p2 = lock_user_string(arg2);
8525             if (!p2) {
8526                 if (arg1) {
8527                     unlock_user(p, arg1, 0);
8528                 }
8529                 return -TARGET_EFAULT;
8530             }
8531 
8532             if (arg3) {
8533                 p3 = lock_user_string(arg3);
8534                 if (!p3) {
8535                     if (arg1) {
8536                         unlock_user(p, arg1, 0);
8537                     }
8538                     unlock_user(p2, arg2, 0);
8539                     return -TARGET_EFAULT;
8540                 }
8541             } else {
8542                 p3 = NULL;
8543             }
8544 
8545             /* FIXME - arg5 should be locked, but it isn't clear how to
8546              * do that since it's not guaranteed to be a NULL-terminated
8547              * string.
8548              */
8549             if (!arg5) {
8550                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8551             } else {
8552                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8553             }
8554             ret = get_errno(ret);
8555 
8556             if (arg1) {
8557                 unlock_user(p, arg1, 0);
8558             }
8559             unlock_user(p2, arg2, 0);
8560             if (arg3) {
8561                 unlock_user(p3, arg3, 0);
8562             }
8563         }
8564         return ret;
8565 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8566 #if defined(TARGET_NR_umount)
8567     case TARGET_NR_umount:
8568 #endif
8569 #if defined(TARGET_NR_oldumount)
8570     case TARGET_NR_oldumount:
8571 #endif
8572         if (!(p = lock_user_string(arg1)))
8573             return -TARGET_EFAULT;
8574         ret = get_errno(umount(p));
8575         unlock_user(p, arg1, 0);
8576         return ret;
8577 #endif
8578 #ifdef TARGET_NR_stime /* not on alpha */
8579     case TARGET_NR_stime:
8580         {
8581             struct timespec ts;
8582             ts.tv_nsec = 0;
8583             if (get_user_sal(ts.tv_sec, arg1)) {
8584                 return -TARGET_EFAULT;
8585             }
8586             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8587         }
8588 #endif
8589 #ifdef TARGET_NR_alarm /* not on alpha */
8590     case TARGET_NR_alarm:
8591         return alarm(arg1);
8592 #endif
8593 #ifdef TARGET_NR_pause /* not on alpha */
8594     case TARGET_NR_pause:
8595         if (!block_signals()) {
8596             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8597         }
8598         return -TARGET_EINTR;
8599 #endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            /* utime(2): arg1 = pathname, arg2 = struct utimbuf * or NULL
             * (NULL means "set both times to now"). */
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /* utimes(2): arg2 is an array of two target timevals
             * (access, modification), or NULL for "now". */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /* futimesat(2): like utimes but the pathname (arg2) is
             * resolved relative to dirfd arg1. */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* access(2): path() applies QEMU's sysroot path redirection. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /* faccessat(2): flags argument is passed as 0; the guest's flags
         * (arg4) are not forwarded by this syscall number (the kernel's
         * faccessat has no flags; faccessat2 carries them). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) has no failure mode; always report success. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Translate the guest signal number before delivering. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /* rename(2): both paths must be locked; unlock_user() is safe
             * on a NULL pointer, so the single cleanup path suffices. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* renameat(2): arg1/arg3 are the old/new dirfds. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* renameat2(2): arg5 carries the RENAME_* flags; forwarded
             * verbatim via the sys_renameat2 wrapper. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        /* On success, propagate any fd translator state (e.g. for
         * signalfd/eventfd emulation) from the old fd to the new one. */
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2(2): translate target O_* flags to host values. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /* times(2): fill the guest's struct tms and convert the
             * clock_t return value to target representation. */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                /* NOTE(review): tmsp is never unlock_user()'d here; with the
                 * direct-mapped lock_user this is a no-op, but it looks like
                 * a missed copy-back under DEBUG_REMAP — confirm upstream. */
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* acct(2): a NULL pathname disables accounting, so it must be
         * passed through rather than treated as a bad address. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl argument marshalling is table-driven in do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        /* Propagate fd translator state to the duplicated descriptor. */
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        /* dup3(2): only O_CLOEXEC is a valid flag; reject anything else
         * before translating to host flag values. */
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            /* Use the target errno for consistency with the rest of this
             * file (EINVAL happens to be 22 on all Linux targets, so the
             * guest-visible value is unchanged). */
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Propagate fd translator state to the duplicated descriptor. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction(2): marshal between the target's legacy
             * sigaction layout and QEMU's internal target_sigaction, then
             * delegate to do_sigaction(). */
#if defined(TARGET_MIPS)
            /* MIPS uses the full target_sigaction layout, but only word 0
             * of the 4-word mask is meaningful for the old syscall; the
             * remaining words are zeroed on the way back out. */
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            /* Generic path: target_old_sigaction has a single-word mask
             * and (on some targets) an sa_restorer field. */
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                /* act (if locked) is still released below. */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* sgetmask(2): return the current blocked-signal mask in the
             * old single-word representation. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* ssetmask(2): install arg1 as the new mask and return the
             * previous one, both in the old single-word representation. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old-style sigprocmask(2). */
#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg2 and returns the old
             * mask in the result register, with v0 cleared on success. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic path: arg2/arg3 are pointers to old-style sigsets;
             * a NULL new-set pointer means "query only" and 'how' is
             * ignored by the kernel in that case. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt_sigprocmask(2): arg4 is the sigsetsize and must match
             * the target's sigset size exactly. */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new set: query-only; 'how' is irrelevant. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending(2): report pending signals in the old
             * single-word sigset format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /* Old-style sigsuspend(2): stash the requested mask in the
             * TaskState so signal delivery code can restore the original
             * mask, then wait. */
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* Unless the syscall is being restarted, mark that we are in
             * sigsuspend so the signal path knows to restore the mask. */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            /* arg2 is the sigsetsize; must match exactly. */
            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* rt_sigtimedwait(2): arg1 = set, arg2 = siginfo out (may be
             * NULL), arg3 = timeout (may be NULL), arg4 = sigsetsize. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* The return value is the host signal number; translate. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but the timeout uses the 64-bit
             * time_t timespec layout. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* rt_sigqueueinfo(2): convert the guest siginfo and queue the
             * signal for pid arg1, signal arg2. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* rt_tgsigqueueinfo(2): thread-directed variant (tgid arg1,
             * tid arg2, signal arg3, siginfo arg4). */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Signal delivery must be quiesced before rewriting the CPU state
         * from the signal frame; retry the syscall if that fails. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* setrlimit(2) with target->host resource and limit-value
             * translation. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* getrlimit(2): fetch host limits and convert the values into
             * the target's rlimit encoding. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* getrusage(2): host_to_target_rusage() handles the guest-side
             * copy-out and any fault reporting. */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* gettimeofday(2): either output pointer may be NULL. */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* settimeofday(2): either input pointer may be NULL. */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Single-argument form: arg1 points to a block of parameters. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* time64 variant: the timeout uses the 64-bit timespec layout. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            /* symlink(2): arg1 = target text, arg2 = link path.
             * unlock_user() tolerates NULL, so one cleanup path suffices. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            /* symlinkat(2): arg2 is the dirfd for the new link path arg3. */
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            /* readlink(2), with special handling for /proc/self/exe so the
             * guest sees the emulated executable's path rather than QEMU's. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /*
         * readlinkat(dirfd, pathname, buf, bufsiz).  Mirrors the
         * TARGET_NR_readlink case above, including the magic
         * /proc/self/exe intercept.  The previous code called
         * snprintf() on 'real' even when realpath() failed (reading
         * an uninitialized buffer), NUL-terminated the result (which
         * readlink(2) must not do), and could report a length larger
         * than the guest buffer.
         */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Report the emulated binary (exec_path), not the emulator. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    /* 'real' is indeterminate on failure; do not use it. */
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* swapon(path, swapflags) */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries the extra command-string argument. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /*
             * On these targets mmap passes its six arguments in a
             * guest-memory block pointed to by arg1 (old_mmap style),
             * so fetch and byte-swap them first.
             */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of (1 << MMAP_SHIFT) bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* mmap-family pointers are untagged before use. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Widen the request to cover the whole grows-down region. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* These pass the translated host address (g2h) straight through. */
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared conversion path; also entered from fstatfs via goto. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* 64-bit variant: note the target buffer is arg3, not arg2. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    /* Socket syscalls: thin dispatchers to do_* helpers in this file. */
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Last argument selects direction: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(buf, buflen, flags) */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* On success ret is the byte count to copy back to the guest. */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(type, bufp, len): arg1 = action, arg2 = guest buffer,
             * arg3 = length.  The length checks below must look at arg3;
             * the previous code initialised len from arg2 (the buffer
             * pointer), so the EINVAL/zero-length validation tested the
             * wrong argument.  Both lock_user() and sys_syslog() already
             * use arg3 as the length.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions take no buffer. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        /* setitimer(which, new_value, old_value) */
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                /* Convert guest {it_interval, it_value} timeval pair. */
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer value back to the guest. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        /* getitimer(which, curr_value) */
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        /* Shared host-to-target stat conversion for stat/lstat/fstat. */
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and target have them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter with shifted arguments. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        /* wait4(pid, wstatus, options, rusage) */
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was actually reaped. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        /* sysinfo(info): field-by-field conversion to the target layout. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    /* SysV IPC: dispatchers to do_* helpers defined earlier in this file. */
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Final flag selects 64-bit time_t timespec handling. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style emulation cleanup before terminating everything. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* vm86 mode only exists for 32-bit x86 guests. */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        /* adjtimex(buf): convert in, call host, convert result back out. */
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        /* clock_adjtime(clk_id, buf) */
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        /* As above, but with the 64-bit time_t timex layout. */
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        /* _llseek(fd, offset_high, offset_low, result, whence) */
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* No host _llseek: emulate with a plain 64-bit lseek. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the 64-bit result at guest address arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /*
         * 32-bit guest on a 64-bit host: host linux_dirent records do not
         * match the target layout, so read into a host bounce buffer and
         * repack each record into the guest buffer, converting field
         * widths and byte order.
         */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
                if (!target_dirp) {
                    /* Don't leak the bounce buffer on the fault path. */
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    /* Target records are no larger than host ones, so they fit. */
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                /* Report the number of bytes actually written to the guest. */
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /*
         * Host and target dirent layouts match: byte-swap the fixed-width
         * fields of each record in place in the guest buffer.
         */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    /* +2: trailing NUL plus the relocated d_type byte. */
                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /*
         * linux_dirent64 has the same layout on host and target, so only
         * the fixed-width fields need byte-swapping, done in place in the
         * guest buffer.
         */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop rather than read past the bytes the kernel filled. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* do_ppoll flags: (ppoll variant?, 64-bit time?) */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Translate the guest iovec array; last arg 0 = don't copy in. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                /* Copy the data read back out to the guest buffers. */
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure cause via errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Last arg 1 = copy the guest buffers in for writing. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Rejoin the guest's split 64-bit offset per target ABI rules. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        /* Plain passthrough; pid needs no translation. */
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Convert the host mask into the guest's layout/endianness. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Translate the guest mask into host layout before the call. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Only query the fields the guest asked for (NULL = not wanted). */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            /* Copy each requested value back to guest memory. */
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* A NULL param pointer is rejected before touching guest memory. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            /* Only sched_priority is defined in struct sched_param. */
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* A NULL param pointer is rejected before making the call. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                /* Swap the single field back into guest byte order. */
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            /* A NULL param pointer is rejected before touching guest memory. */
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    /* The remaining scheduler calls take and return plain integers. */
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* 32-bit target timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same call, but a 64-bit time target timespec layout. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Fault if the guest's request timespec is unreadable. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                /* On interruption, report the remaining time to the guest. */
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
10625     case TARGET_NR_prctl:
10626         switch (arg1) {
10627         case PR_GET_PDEATHSIG:
10628         {
10629             int deathsig;
10630             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10631             if (!is_error(ret) && arg2
10632                 && put_user_s32(deathsig, arg2)) {
10633                 return -TARGET_EFAULT;
10634             }
10635             return ret;
10636         }
10637 #ifdef PR_GET_NAME
10638         case PR_GET_NAME:
10639         {
10640             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10641             if (!name) {
10642                 return -TARGET_EFAULT;
10643             }
10644             ret = get_errno(prctl(arg1, (unsigned long)name,
10645                                   arg3, arg4, arg5));
10646             unlock_user(name, arg2, 16);
10647             return ret;
10648         }
10649         case PR_SET_NAME:
10650         {
10651             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10652             if (!name) {
10653                 return -TARGET_EFAULT;
10654             }
10655             ret = get_errno(prctl(arg1, (unsigned long)name,
10656                                   arg3, arg4, arg5));
10657             unlock_user(name, arg2, 0);
10658             return ret;
10659         }
10660 #endif
10661 #ifdef TARGET_MIPS
10662         case TARGET_PR_GET_FP_MODE:
10663         {
10664             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10665             ret = 0;
10666             if (env->CP0_Status & (1 << CP0St_FR)) {
10667                 ret |= TARGET_PR_FP_MODE_FR;
10668             }
10669             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10670                 ret |= TARGET_PR_FP_MODE_FRE;
10671             }
10672             return ret;
10673         }
10674         case TARGET_PR_SET_FP_MODE:
10675         {
10676             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10677             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10678             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10679             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10680             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10681 
10682             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10683                                             TARGET_PR_FP_MODE_FRE;
10684 
10685             /* If nothing to change, return right away, successfully.  */
10686             if (old_fr == new_fr && old_fre == new_fre) {
10687                 return 0;
10688             }
10689             /* Check the value is valid */
10690             if (arg2 & ~known_bits) {
10691                 return -TARGET_EOPNOTSUPP;
10692             }
10693             /* Setting FRE without FR is not supported.  */
10694             if (new_fre && !new_fr) {
10695                 return -TARGET_EOPNOTSUPP;
10696             }
10697             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10698                 /* FR1 is not supported */
10699                 return -TARGET_EOPNOTSUPP;
10700             }
10701             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10702                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10703                 /* cannot set FR=0 */
10704                 return -TARGET_EOPNOTSUPP;
10705             }
10706             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10707                 /* Cannot set FRE=1 */
10708                 return -TARGET_EOPNOTSUPP;
10709             }
10710 
10711             int i;
10712             fpr_t *fpr = env->active_fpu.fpr;
10713             for (i = 0; i < 32 ; i += 2) {
10714                 if (!old_fr && new_fr) {
10715                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10716                 } else if (old_fr && !new_fr) {
10717                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10718                 }
10719             }
10720 
10721             if (new_fr) {
10722                 env->CP0_Status |= (1 << CP0St_FR);
10723                 env->hflags |= MIPS_HFLAG_F64;
10724             } else {
10725                 env->CP0_Status &= ~(1 << CP0St_FR);
10726                 env->hflags &= ~MIPS_HFLAG_F64;
10727             }
10728             if (new_fre) {
10729                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10730                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10731                     env->hflags |= MIPS_HFLAG_FRE;
10732                 }
10733             } else {
10734                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10735                 env->hflags &= ~MIPS_HFLAG_FRE;
10736             }
10737 
10738             return 0;
10739         }
10740 #endif /* MIPS */
10741 #ifdef TARGET_AARCH64
10742         case TARGET_PR_SVE_SET_VL:
10743             /*
10744              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10745              * PR_SVE_VL_INHERIT.  Note the kernel definition
10746              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10747              * even though the current architectural maximum is VQ=16.
10748              */
10749             ret = -TARGET_EINVAL;
10750             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10751                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10752                 CPUARMState *env = cpu_env;
10753                 ARMCPU *cpu = env_archcpu(env);
10754                 uint32_t vq, old_vq;
10755 
10756                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10757                 vq = MAX(arg2 / 16, 1);
10758                 vq = MIN(vq, cpu->sve_max_vq);
10759 
10760                 if (vq < old_vq) {
10761                     aarch64_sve_narrow_vq(env, vq);
10762                 }
10763                 env->vfp.zcr_el[1] = vq - 1;
10764                 arm_rebuild_hflags(env);
10765                 ret = vq * 16;
10766             }
10767             return ret;
10768         case TARGET_PR_SVE_GET_VL:
10769             ret = -TARGET_EINVAL;
10770             {
10771                 ARMCPU *cpu = env_archcpu(cpu_env);
10772                 if (cpu_isar_feature(aa64_sve, cpu)) {
10773                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10774                 }
10775             }
10776             return ret;
10777         case TARGET_PR_PAC_RESET_KEYS:
10778             {
10779                 CPUARMState *env = cpu_env;
10780                 ARMCPU *cpu = env_archcpu(env);
10781 
10782                 if (arg3 || arg4 || arg5) {
10783                     return -TARGET_EINVAL;
10784                 }
10785                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10786                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10787                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10788                                TARGET_PR_PAC_APGAKEY);
10789                     int ret = 0;
10790                     Error *err = NULL;
10791 
10792                     if (arg2 == 0) {
10793                         arg2 = all;
10794                     } else if (arg2 & ~all) {
10795                         return -TARGET_EINVAL;
10796                     }
10797                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10798                         ret |= qemu_guest_getrandom(&env->keys.apia,
10799                                                     sizeof(ARMPACKey), &err);
10800                     }
10801                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10802                         ret |= qemu_guest_getrandom(&env->keys.apib,
10803                                                     sizeof(ARMPACKey), &err);
10804                     }
10805                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10806                         ret |= qemu_guest_getrandom(&env->keys.apda,
10807                                                     sizeof(ARMPACKey), &err);
10808                     }
10809                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10810                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10811                                                     sizeof(ARMPACKey), &err);
10812                     }
10813                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10814                         ret |= qemu_guest_getrandom(&env->keys.apga,
10815                                                     sizeof(ARMPACKey), &err);
10816                     }
10817                     if (ret != 0) {
10818                         /*
10819                          * Some unknown failure in the crypto.  The best
10820                          * we can do is log it and fail the syscall.
10821                          * The real syscall cannot fail this way.
10822                          */
10823                         qemu_log_mask(LOG_UNIMP,
10824                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10825                                       error_get_pretty(err));
10826                         error_free(err);
10827                         return -TARGET_EIO;
10828                     }
10829                     return 0;
10830                 }
10831             }
10832             return -TARGET_EINVAL;
10833         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10834             {
10835                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10836                 CPUARMState *env = cpu_env;
10837                 ARMCPU *cpu = env_archcpu(env);
10838 
10839                 if (cpu_isar_feature(aa64_mte, cpu)) {
10840                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10841                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10842                 }
10843 
10844                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10845                     return -TARGET_EINVAL;
10846                 }
10847                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10848 
10849                 if (cpu_isar_feature(aa64_mte, cpu)) {
10850                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10851                     case TARGET_PR_MTE_TCF_NONE:
10852                     case TARGET_PR_MTE_TCF_SYNC:
10853                     case TARGET_PR_MTE_TCF_ASYNC:
10854                         break;
10855                     default:
10856                         return -EINVAL;
10857                     }
10858 
10859                     /*
10860                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10861                      * Note that the syscall values are consistent with hw.
10862                      */
10863                     env->cp15.sctlr_el[1] =
10864                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10865                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10866 
10867                     /*
10868                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10869                      * Note that the syscall uses an include mask,
10870                      * and hardware uses an exclude mask -- invert.
10871                      */
10872                     env->cp15.gcr_el1 =
10873                         deposit64(env->cp15.gcr_el1, 0, 16,
10874                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10875                     arm_rebuild_hflags(env);
10876                 }
10877                 return 0;
10878             }
10879         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10880             {
10881                 abi_long ret = 0;
10882                 CPUARMState *env = cpu_env;
10883                 ARMCPU *cpu = env_archcpu(env);
10884 
10885                 if (arg2 || arg3 || arg4 || arg5) {
10886                     return -TARGET_EINVAL;
10887                 }
10888                 if (env->tagged_addr_enable) {
10889                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10890                 }
10891                 if (cpu_isar_feature(aa64_mte, cpu)) {
10892                     /* See above. */
10893                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10894                             << TARGET_PR_MTE_TCF_SHIFT);
10895                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10896                                     ~env->cp15.gcr_el1);
10897                 }
10898                 return ret;
10899             }
10900 #endif /* AARCH64 */
10901         case PR_GET_SECCOMP:
10902         case PR_SET_SECCOMP:
10903             /* Disable seccomp to prevent the target disabling syscalls we
10904              * need. */
10905             return -TARGET_EINVAL;
10906         default:
10907             /* Most prctl options have no pointer arguments */
10908             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10909         }
10910         break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass the 64-bit offset in an aligned register pair. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Nothing to copy back for a write. */
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        /* capget/capset: arg1 = user_cap_header pointer, arg2 = pointer to
         * one or two user_cap_data structs (may be 0, e.g. for a version
         * probe).  All fields are byte-swapped between target and host. */
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* Header is locked for write because the kernel updates the
         * version field even on failure (see below). */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            /* capget writes the data area; capset only reads it. */
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Copy the host results back, swapped to target order. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        /* Fully delegated: arg1 = new ss pointer, arg2 = old ss pointer. */
        return do_sigaltstack(arg1, arg2, cpu_env);
11029 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /* sendfile(out_fd, in_fd, *offset, count).  The offset pointer is
         * optional; when present it is read before the call and written
         * back afterwards (the host call updates the local copy). */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            /* Offset is a target 'long' here (32-bit ABI => 32 bits). */
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write-back failure overrides an otherwise successful ret. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* Same as sendfile above, but the guest offset is always 64-bit. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork() with the vfork clone flags; no stack,
         * tls or tid pointers are passed. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* getrlimit with "unsigned" (full-width) limits: translate the
         * resource number, query the host, then convert the two limit
         * values to the target representation. */
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* The 64-bit length may be split across register pairs depending
         * on the target ABI; target_truncate64() handles that. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* Same register-pair handling as truncate64, on an fd. */
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64(path, statbuf): host stat then convert the host struct
         * stat into the target's 64-bit stat layout. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* As stat64, but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        /* As stat64, on an already-open fd. */
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat(dirfd, path, statbuf, flags); same target conversion. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            /* statx(dirfd, path, flags, mask, statxbuf).  Two strategies:
             * use the host statx syscall when available, otherwise fall
             * back to fstatat() and synthesize a partial struct statx. */
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only ENOSYS (host kernel too old) falls through to the
                 * fstatat() emulation below; any other result is final. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields fstatat cannot provide (stx_mask,
                 * btime, nanoseconds) are left as 0. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
/* Legacy (possibly 16-bit) uid/gid syscalls: low2high*/high2low* convert
 * between the target's narrow ID type and the host's full-width IDs. */
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11245     case TARGET_NR_getgroups:
11246         {
11247             int gidsetsize = arg1;
11248             target_id *target_grouplist;
11249             gid_t *grouplist;
11250             int i;
11251 
11252             grouplist = alloca(gidsetsize * sizeof(gid_t));
11253             ret = get_errno(getgroups(gidsetsize, grouplist));
11254             if (gidsetsize == 0)
11255                 return ret;
11256             if (!is_error(ret)) {
11257                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11258                 if (!target_grouplist)
11259                     return -TARGET_EFAULT;
11260                 for(i = 0;i < ret; i++)
11261                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11262                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11263             }
11264         }
11265         return ret;
11266     case TARGET_NR_setgroups:
11267         {
11268             int gidsetsize = arg1;
11269             target_id *target_grouplist;
11270             gid_t *grouplist = NULL;
11271             int i;
11272             if (gidsetsize) {
11273                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11274                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11275                 if (!target_grouplist) {
11276                     return -TARGET_EFAULT;
11277                 }
11278                 for (i = 0; i < gidsetsize; i++) {
11279                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11280                 }
11281                 unlock_user(target_grouplist, arg2, 0);
11282             }
11283             return get_errno(setgroups(gidsetsize, grouplist));
11284         }
    case TARGET_NR_fchown:
        /* 16-bit-ID variant: convert target IDs to host width. */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* fchownat(dirfd, path, uid, gid, flags) */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Set real/effective/saved uids from (possibly 16-bit) targets. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            /* Return real/effective/saved uids via three out-pointers. */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Set real/effective/saved gids from (possibly 16-bit) targets.
         * The guard previously tested TARGET_NR_getresgid, which would
         * drop this case on a target defining setresgid only. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            /* Return real/effective/saved gids via three out-pointers. */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* 16-bit-ID chown: convert target IDs to host width. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the return is the previous ID. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11352 
/* "32" variants take full-width IDs, so no low/high conversion. */
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
11365 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            /* The effective uid is delivered in register a4 (IR_A4),
             * while the real uid is the ordinary return value. */
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            /* Likewise: effective gid in a4, real gid as return value. */
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Only GSI_IEEE_FP_CONTROL is implemented; every other selector
         * returns EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* The status bits are kept live in the FPCR (see the
                 * osf_setsysinfo handler); merge them into the stored
                 * swcr before returning it to the guest. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Implements SSI_IEEE_FP_CONTROL and SSI_IEEE_RAISE_EXCEPTION;
         * other selectors return EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild the FPCR: keep only the dynamic rounding mode,
                 * then fold in the bits the guest supplied via swcr. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Sequential tests, last match wins: later checks
                     * (e.g. INV) take precedence for the si_code. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* Old-style sigprocmask: the mask is passed by value in arg2
             * and the previous mask is the syscall's return value. */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Success: return the old mask in target format. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11542 
/* Full-width (32-bit) ID variants: passed straight through to the host. */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* As getgroups, but the target entries are plain 32-bit IDs. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Reject sizes the kernel would reject (EINVAL) and bound
             * the guest-controlled alloca() below. */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;     /* size 0 only reports the group count */
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* As setgroups, with plain 32-bit target IDs. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Match kernel behaviour (EINVAL above NGROUPS_MAX) and
             * bound the guest-controlled alloca() below. */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
/* More full-width (32-bit) ID variants: no low/high conversion needed. */
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            /* Return real/effective/saved uids via three out-pointers. */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            /* Return real/effective/saved gids via three out-pointers. */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(addr, length, vec): query residency of the guest
             * range [arg1, arg1+arg2) and fill the per-page vector. */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            /* Per mincore(2) the vector holds one byte per page. */
            abi_ulong veclen = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* arg3 is an *output* buffer, not a string: the old code
             * locked it with lock_user_string() and, on success (ret==0),
             * unlock_user(p, arg3, ret) copied nothing back to the guest. */
            p = lock_user(VERIFY_WRITE, arg3, veclen, 0);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                /* Zero-fill so any tail bytes the host mincore() does not
                 * write (host/target page size mismatch is not otherwise
                 * handled here) are deterministic. */
                memset(p, 0, veclen);
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret == 0 ? veclen : 0);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise() returns the error number directly rather
         * than via errno, hence no get_errno() here. */
        return -host_to_target_errno(ret);
#endif
11700 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the registers into the common fd/offset/len/advice order. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise() returns the error directly, not via errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice numbering; remap to host values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11763 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* fcntl64: the *LK64 lock commands need a struct flock64
         * conversion; everything else is delegated to do_fcntl(). */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently from EABI. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            /* GETLK: copy lock in, query, copy the (possibly updated)
             * lock description back to the guest on success. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            /* SETLK/SETLKW: input only, nothing copied back. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
11830 #ifdef TARGET_NR_readahead
11831     case TARGET_NR_readahead:
11832 #if TARGET_ABI_BITS == 32
11833         if (regpairs_aligned(cpu_env, num)) {
11834             arg2 = arg3;
11835             arg3 = arg4;
11836             arg4 = arg5;
11837         }
11838         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11839 #else
11840         ret = get_errno(readahead(arg1, arg2, arg3));
11841 #endif
11842         return ret;
11843 #endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /* Extended-attribute syscalls: names and values are opaque byte
       strings, so the host calls can operate directly on locked guest
       buffers with no layout conversion. */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            /* arg2 == 0 is a valid probe for the required buffer size. */
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            /* arg3 == 0 with arg4 == 0 sets an empty attribute value. */
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user tolerates NULL, so the partial-lock case is safe. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Write the value buffer back to the guest. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* TLS pointer setup is inherently per-architecture: each target
           stores the thread pointer in a different CPU-state slot. */
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* time64 variant: the guest struct is target__kernel_timespec. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): a failed conversion is silently ignored here,
               unlike the clock_gettime path which returns the error —
               arguably this should propagate -TARGET_EFAULT; confirm. */
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): conversion failure ignored, as above. */
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        /* On EINTR the kernel wrote the remaining time; copy it back. */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* The kernel clears this word and futex-wakes it on thread exit;
           g2h() hands it the host view of the guest address. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12188 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* ts[0] = atime, ts[1] = mtime; arg3 == 0 means "set both
               to the current time" and is passed through as NULL. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL pathname means "operate on the fd in arg1 itself". */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
               are converted to the guest layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* arg2 is the guest pathname to watch.  Fail with EFAULT if it
           cannot be read from guest memory, rather than handing a NULL
           pointer to path() and crashing QEMU. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both arguments are plain integers (fd, watch descriptor),
           so a straight passthrough is sufficient. */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
12284 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            /* arg4, when non-NULL, supplies the queue attributes used at
               creation time (O_CREAT). */
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the lock starts at arg1 - 1, one byte before
               the guest name pointer — presumably deliberate, but the
               rationale isn't visible here; confirm before touching. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* Lock the guest message buffer for reading; previously the
               result was not checked and a bad pointer reached
               safe_mq_timedsend as NULL. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            /* time64 variant of mq_timedsend; same missing NULL check on
               the locked message buffer, now handled. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12360 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* Lock the guest receive buffer; previously unchecked, so a
               bad pointer reached safe_mq_timedreceive as NULL. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* Report the received message priority; a faulting prio
               pointer now yields EFAULT as the kernel does, instead of
               being silently ignored. */
            if (arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            /* time64 variant; same NULL-check and prio-writeback fixes
               as the plain mq_timedreceive arm. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12414 
12415     /* Not implemented for now... */
12416 /*     case TARGET_NR_mq_notify: */
12417 /*         break; */
12418 
12419     case TARGET_NR_mq_getsetattr:
12420         {
12421             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12422             ret = 0;
12423             if (arg2 != 0) {
12424                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12425                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12426                                            &posix_mq_attr_out));
12427             } else if (arg3 != 0) {
12428                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12429             }
12430             if (ret == 0 && arg3 != 0) {
12431                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12432             }
12433         }
12434         return ret;
12435 #endif
12436 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* All four arguments are scalars (two fds, a length, flags),
               so no guest-memory translation is needed. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* NULL offset pointers mean "use the fd's file position";
               otherwise copy the 64-bit offsets in and back out, since
               the kernel updates them. */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* NOTE(review): assumes lock_iovec left a meaningful host
                   errno on failure — confirm; other arms return EFAULT
                   directly. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Translate 64-bit counter reads/writes on this fd. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Convert only the two flags eventfd2 accepts; everything else
           is passed through untouched. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 64-bit offset and length each arrive as a register pair. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a pad register, shifting the pairs along. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound the event array both for sanity and so the host-side
           allocation below stays reasonable. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Events are gathered into a host-layout array and converted to
           the guest layout after the wait. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            /* ret is the number of ready events; byte-swap each one into
               the guest array and write back only that many. */
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* NOTE(review): new limits for AS/DATA/STACK are deliberately not
         * forwarded to the host (rnewp stays NULL for them) — presumably
         * because qemu itself depends on those host resources; confirm.
         * The old limits are still queried and reported below. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            /* rlimit64 fields are fixed 64-bit quantities in both guest and
             * host ABIs; only byte order needs fixing up. */
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest if requested. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Fill the guest buffer at arg1 (length arg2) with the host name. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!name) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(name, arg2));
        unlock_user(name, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /*
         * Guest compare-and-swap on the 32-bit word at guest address arg6:
         * if the current value equals arg2, store arg1 there.  The value
         * that was read is returned so the guest can detect a mismatch.
         * should use start_exclusive from main.c
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unmapped address: deliver SIGSEGV and bail out immediately.
             * (Previously the code fell through here and compared/returned
             * the uninitialized mem_value, possibly even writing through
             * the faulting address.) */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the qemu arm barrier,
         * treat this as a no-op and simply report success. */
        return 0;
#endif
12766 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* No free slot left in g_posix_timers. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* NOTE(review): returns without releasing timer_index,
                     * so the slot reserved by next_free_host_timer() may
                     * leak on this path — confirm against its semantics. */
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an encoded id (magic | slot index) rather
                 * than the raw host timer_t value. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12802 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() yields a negative error for a bad guest id. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* A new_value pointer is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Report the previous setting back to the guest if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime above, but using the 64-bit time_t
         * itimerspec layout for the guest pointers. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12856 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out even when timer_gettime()
             * failed, i.e. while uninitialized — confirm intended. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* 64-bit time_t variant of the copy-out above. */
            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12902 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* Bad guest timer id decodes to a negative error. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Clear the slot so it can be handed out again. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
12935 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Guest open-style flags are translated via fcntl_flags_tbl. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out only if the guest supplied a
             * destination pointer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        /* 64-bit time_t variant of timerfd_gettime. */
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12969 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* A NULL new_value pointer is passed through to the host call
             * (which will fail with EFAULT/EINVAL as appropriate). */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Report the previous setting back to the guest if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        /* Same as timerfd_settime, using the 64-bit time_t layout. */
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13015 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Direct pass-through of the host ioprio_get syscall. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* Direct pass-through of the host ioprio_set syscall. */
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* Join the namespace referred to by fd arg1 (nstype in arg2). */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* Disassociate parts of the execution context (flags in arg1). */
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* Compare kernel resources of two processes; pass-through. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* args: const char *name, unsigned int flags */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale fd translator registered under this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* Direct pass-through of the host membarrier syscall. */
        return get_errno(membarrier(arg1, arg2));
#endif
13058 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* Read optional in/out offsets from guest memory; a NULL
             * guest pointer means "use and advance the fd's own offset". */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* On success, write the advanced offsets back to the guest. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13095 
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* Both pointers are unlocked unconditionally — presumably
             * unlock_user() tolerates a NULL from a failed lock; confirm. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
13112 
13113     default:
13114         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13115         return -TARGET_ENOSYS;
13116     }
13117     return ret;
13118 }
13119 
13120 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13121                     abi_long arg2, abi_long arg3, abi_long arg4,
13122                     abi_long arg5, abi_long arg6, abi_long arg7,
13123                     abi_long arg8)
13124 {
13125     CPUState *cpu = env_cpu(cpu_env);
13126     abi_long ret;
13127 
13128 #ifdef DEBUG_ERESTARTSYS
13129     /* Debug-only code for exercising the syscall-restart code paths
13130      * in the per-architecture cpu main loops: restart every syscall
13131      * the guest makes once before letting it through.
13132      */
13133     {
13134         static bool flag;
13135         flag = !flag;
13136         if (flag) {
13137             return -TARGET_ERESTARTSYS;
13138         }
13139     }
13140 #endif
13141 
13142     record_syscall_start(cpu, num, arg1,
13143                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13144 
13145     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13146         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13147     }
13148 
13149     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13150                       arg5, arg6, arg7, arg8);
13151 
13152     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13153         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13154                           arg3, arg4, arg5, arg6);
13155     }
13156 
13157     record_syscall_return(cpu, num, ret);
13158     return ret;
13159 }
13160